# Dataset-dump header (not Python source). Original column fields:
# code (stringlengths 114..1.05M) | path (stringlengths 3..312) |
# quality_prob (float64 0.5..0.99) | learning_prob (float64 0.2..1) |
# filename (stringlengths 3..168) | kind (1 class)
import rlutils.tf as rlu
import tensorflow as tf
from rlutils.infra.runner import TFOffPolicyRunner, run_func_as_main
class SACAgent(tf.keras.Model):
    """Soft Actor-Critic (SAC) agent.

    Components:
      * a squashed-Gaussian MLP policy (optionally with a Polyak-averaged
        target copy used only for the critic backup),
      * an ensemble of Q networks whose minimum acts as the critic,
      * a Lagrange multiplier ``log_alpha`` for the entropy temperature.

    Only 1-D observation specs are supported (see ``__init__``).
    """

    def __init__(self,
                 obs_spec,
                 act_spec,
                 num_ensembles=2,
                 policy_mlp_hidden=256,
                 policy_lr=3e-4,
                 q_mlp_hidden=256,
                 q_lr=3e-4,
                 alpha=1.0,
                 alpha_lr=1e-3,
                 tau=5e-3,
                 gamma=0.99,
                 target_entropy=None,
                 auto_alpha=True,
                 exploration_bonus=True,
                 target_policy=False,
                 ):
        """Build the policy/Q networks, their targets, and optimizers.

        Args:
            obs_spec: observation spec; must have a 1-D shape.
            act_spec: action spec; ``shape[0]`` is the action dimension.
            num_ensembles: number of Q networks in the ensemble.
            policy_mlp_hidden: hidden width of the policy MLP.
            policy_lr: Adam learning rate for the policy.
            q_mlp_hidden: hidden width of each Q MLP.
            q_lr: Adam learning rate for the Q networks.
            alpha: initial entropy temperature.
            alpha_lr: Adam learning rate for the temperature.
            tau: Polyak averaging coefficient for target-network updates.
            gamma: discount factor.
            target_entropy: entropy target for auto alpha tuning;
                defaults to ``-act_dim`` (the standard SAC heuristic).
            auto_alpha: if True, tune alpha towards ``target_entropy``.
            exploration_bonus: if True, subtract ``alpha * log_pi`` from the
                next-state Q value (the standard soft backup).
            target_policy: if True, sample backup actions from a target
                policy network instead of the online policy.
        """
        super(SACAgent, self).__init__()
        self.obs_spec = obs_spec
        self.act_spec = act_spec
        self.act_dim = self.act_spec.shape[0]
        if len(self.obs_spec.shape) == 1:  # 1D observation
            self.obs_dim = self.obs_spec.shape[0]
            self.policy_net = rlu.nn.SquashedGaussianMLPActor(self.obs_dim, self.act_dim, policy_mlp_hidden)
            if target_policy:
                print('Use target policy for SAC')
                self.target_policy_net = rlu.nn.SquashedGaussianMLPActor(self.obs_dim, self.act_dim, policy_mlp_hidden)
            else:
                print('No target policy for SAC')
                self.target_policy_net = None
            self.q_network = rlu.nn.EnsembleMinQNet(self.obs_dim, self.act_dim, q_mlp_hidden,
                                                    num_ensembles=num_ensembles)
            self.target_q_network = rlu.nn.EnsembleMinQNet(self.obs_dim, self.act_dim, q_mlp_hidden,
                                                           num_ensembles=num_ensembles)
        else:
            raise NotImplementedError
        # Start every target network identical to its online counterpart.
        rlu.functional.hard_update(self.target_q_network, self.q_network)
        if self.target_policy_net is not None:
            rlu.functional.hard_update(self.target_policy_net, self.policy_net)
        # NOTE(review): `lr` is a deprecated alias of `learning_rate` in TF2 Keras.
        self.policy_optimizer = tf.keras.optimizers.Adam(lr=policy_lr)
        self.q_optimizer = tf.keras.optimizers.Adam(lr=q_lr)
        self.log_alpha = rlu.nn.LagrangeLayer(initial_value=alpha)
        self.alpha_optimizer = tf.keras.optimizers.Adam(lr=alpha_lr)
        self.target_entropy = -self.act_dim if target_entropy is None else target_entropy
        self.auto_alpha = auto_alpha
        self.exploration_bonus = exploration_bonus
        self.tau = tau
        self.gamma = gamma

    def set_logger(self, logger):
        """Attach the experiment logger used by ``train_on_batch``."""
        self.logger = logger

    def log_tabular(self):
        """Register the per-epoch diagnostics this agent stores in the logger."""
        for i in range(self.q_network.num_ensembles):
            self.logger.log_tabular(f'Q{i + 1}Vals', with_min_and_max=True)
        self.logger.log_tabular('LogPi', average_only=True)
        self.logger.log_tabular('LossPi', average_only=True)
        self.logger.log_tabular('LossQ', average_only=True)
        self.logger.log_tabular('TDError', average_only=True)
        self.logger.log_tabular('Alpha', average_only=True)
        self.logger.log_tabular('LossAlpha', average_only=True)

    @tf.function
    def update_target_policy(self):
        """Polyak-average the online policy into the target policy."""
        rlu.functional.soft_update(self.target_policy_net, self.policy_net, self.tau)

    @tf.function
    def update_target_q(self):
        """Polyak-average the online Q ensemble into the target ensemble."""
        rlu.functional.soft_update(self.target_q_network, self.q_network, self.tau)

    def _compute_next_obs_q(self, next_obs):
        """Soft value of ``next_obs``: min-ensemble target Q of a sampled
        next action, minus ``alpha * log_pi`` when ``exploration_bonus`` is on.
        """
        alpha = self.log_alpha()
        if self.target_policy_net is not None:
            next_action, next_action_log_prob, _, _ = self.target_policy_net((next_obs, tf.constant(False)))
        else:
            next_action, next_action_log_prob, _, _ = self.policy_net((next_obs, tf.constant(False)))
        # Second tuple element True -> take the min over the ensemble.
        next_q_values = self.target_q_network((next_obs, next_action, tf.constant(True)))
        if self.exploration_bonus:
            print('Tracing exploration bonus')
            next_q_values = next_q_values - alpha * next_action_log_prob
        return next_q_values

    @tf.function
    def _update_q_nets(self, obs, act, next_obs, done, rew, weights=None):
        """One critic gradient step (plus a target soft-update).

        ``weights`` are optional per-sample importance weights (e.g. from a
        prioritized replay buffer). Returns a dict of logging tensors,
        including per-sample ``TDError`` for priority updates.
        """
        # compute target Q values
        next_q_values = self._compute_next_obs_q(next_obs)
        q_target = rlu.functional.compute_target_value(rew, self.gamma, done, next_q_values)
        # Broadcast the shared target across the ensemble dimension.
        q_target_ensemble = rlu.functional.expand_ensemble_dim(q_target, num_ensembles=self.q_network.num_ensembles)
        # q loss
        with tf.GradientTape() as q_tape:
            q_values = self.q_network((obs, act, tf.constant(False)))  # (num_ensembles, None)
            q_values_loss = 0.5 * tf.square(q_target_ensemble - q_values)
            # apply importance weights if needed
            if weights is not None:
                weights = rlu.functional.expand_ensemble_dim(weights, num_ensembles=self.q_network.num_ensembles)
                q_values_loss = q_values_loss * weights
            # Mean over the batch, then sum over ensemble members so each
            # member receives its own full-strength gradient.
            q_values_loss = tf.reduce_mean(q_values_loss, axis=-1)
            # (num_ensembles, None)
            q_values_loss = tf.reduce_sum(q_values_loss, axis=0)
        q_gradients = q_tape.gradient(q_values_loss, self.q_network.trainable_variables)
        self.q_optimizer.apply_gradients(zip(q_gradients, self.q_network.trainable_variables))
        self.update_target_q()
        # TD error of the ensemble-min prediction against the shared target.
        td_error = tf.abs(tf.reduce_min(q_values, axis=0) - q_target)
        info = dict(
            LossQ=q_values_loss,
            TDError=td_error,
        )
        for i in range(self.q_network.num_ensembles):
            info[f'Q{i + 1}Vals'] = q_values[i]
        return info

    @tf.function
    def _update_actor(self, obs, weights=None):
        """One actor gradient step, plus the alpha (temperature) update when
        ``auto_alpha`` is enabled and a target-policy soft-update if present.
        """
        alpha = self.log_alpha()
        # policy loss: maximize min-ensemble Q minus entropy penalty.
        with tf.GradientTape() as policy_tape:
            action, log_prob, _, _ = self.policy_net((obs, tf.constant(False)))
            q_values_pi_min = self.q_network((obs, action, tf.constant(True)))
            policy_loss = log_prob * alpha - q_values_pi_min
            if weights is not None:
                policy_loss = policy_loss * weights
            policy_loss = tf.reduce_mean(policy_loss, axis=0)
        policy_gradients = policy_tape.gradient(policy_loss, self.policy_net.trainable_variables)
        self.policy_optimizer.apply_gradients(zip(policy_gradients, self.policy_net.trainable_variables))
        # log alpha: push the policy entropy towards target_entropy; the
        # stop_gradient keeps this update from touching the policy.
        if self.auto_alpha:
            with tf.GradientTape() as alpha_tape:
                alpha = self.log_alpha()
                alpha_loss = alpha * (tf.stop_gradient(log_prob) + self.target_entropy)
                if weights is not None:
                    alpha_loss = alpha_loss * weights
                alpha_loss = -tf.reduce_mean(alpha_loss, axis=0)
            alpha_gradient = alpha_tape.gradient(alpha_loss, self.log_alpha.trainable_variables)
            self.alpha_optimizer.apply_gradients(zip(alpha_gradient, self.log_alpha.trainable_variables))
        else:
            alpha_loss = 0.
        if self.target_policy_net is not None:
            self.update_target_policy()
        info = dict(
            LogPi=log_prob,
            Alpha=alpha,
            LossAlpha=alpha_loss,
            LossPi=policy_loss,
        )
        return info

    def train_on_batch(self, data, **kwargs):
        """Run one training step on a replay batch.

        ``data['update_target']`` gates the actor/alpha update (policy delay);
        the critics are updated every call.
        """
        update_target = data.pop('update_target')
        obs = data['obs']
        info = self._update_q_nets(**data)
        if update_target:
            actor_info = self._update_actor(obs)
            info.update(actor_info)
        self.logger.store(**rlu.functional.to_numpy_or_python_type(info))
        return info

    @tf.function
    def act_batch_explore_tf(self, obs):
        """Sample stochastic actions for exploration (graph-mode)."""
        print(f'Tracing sac act_batch with obs {obs}')
        pi_final = self.policy_net((obs, tf.constant(False)))[0]
        return pi_final

    @tf.function
    def act_batch_test_tf(self, obs):
        """Deterministic (mean) actions for evaluation (graph-mode)."""
        pi_final = self.policy_net((obs, tf.constant(True)))[0]
        return pi_final

    @tf.function
    def act_batch_test_tf_v2(self, obs):
        """Evaluation variant: sample n candidate actions per observation and
        pick the one with the highest ensemble-min Q value.
        """
        n = 20
        batch_size = tf.shape(obs)[0]
        obs = tf.tile(obs, (n, 1))
        action = self.policy_net((obs, tf.constant(False)))[0]
        # NOTE(review): this call signature `(obs, action), training=True` and
        # the `[0, :]` indexing differ from the `(obs, act, flag)` tuple style
        # used everywhere else — confirm against EnsembleMinQNet's call API.
        q_values_pi_min = self.q_network((obs, action), training=True)[0, :]
        action = tf.reshape(action, shape=(n, batch_size, self.act_dim))
        idx = tf.argmax(tf.reshape(q_values_pi_min, shape=(n, batch_size)), axis=0,
                        output_type=tf.int32)  # (batch_size)
        idx = tf.stack([idx, tf.range(batch_size)], axis=-1)
        pi_final = tf.gather_nd(action, idx)
        return pi_final

    def act_batch_test(self, obs):
        """Numpy-in/numpy-out wrapper around ``act_batch_test_tf``."""
        return self.act_batch_test_tf(tf.convert_to_tensor(obs)).numpy()

    def act_batch_explore(self, obs):
        """Numpy-in/numpy-out wrapper around ``act_batch_explore_tf``."""
        return self.act_batch_explore_tf(tf.convert_to_tensor(obs)).numpy()
class Runner(TFOffPolicyRunner):
    """Off-policy training entry point wired to ``SACAgent``."""

    @classmethod
    def main(cls,
             env_name,
             epochs=100,
             # sac args
             policy_mlp_hidden=256,
             policy_lr=3e-4,
             q_mlp_hidden=256,
             q_lr=3e-4,
             policy_delay=1,
             alpha=0.2,
             tau=5e-3,
             gamma=0.99,
             seed=1,
             target_policy=False,
             logger_path: str = None,
             **kwargs
             ):
        """Bundle SAC hyper-parameters and delegate to the base runner.

        Note that ``alpha_lr`` deliberately reuses ``q_lr`` and the entropy
        target is left at its default (``None``).
        """
        agent_kwargs = {
            'policy_mlp_hidden': policy_mlp_hidden,
            'policy_lr': policy_lr,
            'q_mlp_hidden': q_mlp_hidden,
            'q_lr': q_lr,
            'alpha': alpha,
            'alpha_lr': q_lr,
            'tau': tau,
            'gamma': gamma,
            'target_entropy': None,
            'target_policy': target_policy,
        }
        super(Runner, cls).main(
            env_name=env_name,
            epochs=epochs,
            agent_cls=SACAgent,
            agent_kwargs=agent_kwargs,
            policy_delay=policy_delay,
            seed=seed,
            logger_path=logger_path,
            **kwargs
        )
if __name__ == '__main__':
    # Parse CLI flags into Runner.main's keyword arguments and launch training.
    run_func_as_main(Runner.main)

# Source: rlutils-python-0.0.3 — rlutils/algos/tf/mf/sac.py
# (dataset-dump metadata removed from the line above; it broke Python syntax)
import rlutils.tf as rlu
import tensorflow as tf
from rlutils.infra.runner import TFOffPolicyRunner, run_func_as_main
def gather_q_values(q_values, actions):
    """Select one Q-value per row: returns ``q_values[i, actions[i]]``."""
    n = tf.shape(actions)[0]
    row_indices = tf.range(n, dtype=actions.dtype)
    gather_idx = tf.stack([row_indices, actions], axis=-1)  # (None, 2)
    return tf.gather_nd(q_values, indices=gather_idx)
class DQN(tf.keras.Model):
    """(Double) DQN for discrete-action environments with 1-D observations.

    Uses an MLP Q network, a Polyak-averaged target network, and epsilon-greedy
    exploration with a runtime-adjustable epsilon.
    """

    def __init__(self,
                 obs_spec,
                 act_spec,
                 mlp_hidden=128,
                 double_q=True,
                 epsilon=0.1,
                 q_lr=1e-4,
                 gamma=0.99,
                 tau=5e-3,
                 huber_delta=None):
        """Build the Q networks, optimizer, and loss.

        Args:
            obs_spec: observation spec; ``shape[0]`` is the input dimension.
            act_spec: discrete action spec; ``n`` is the number of actions.
            mlp_hidden: hidden width of the 3-layer Q MLP.
            double_q: if True, use Double-DQN action selection for the target.
            epsilon: initial exploration rate (stored in a non-trainable
                variable so it can be rescheduled without retracing).
            q_lr: Adam learning rate.
            gamma: discount factor.
            tau: Polyak averaging coefficient for target updates.
            huber_delta: if None use MSE loss, otherwise a Huber loss with
                this delta.
        """
        super(DQN, self).__init__()
        self.obs_spec = obs_spec
        self.act_spec = act_spec
        obs_dim = obs_spec.shape[0]
        act_dim = act_spec.n
        self.q_network = rlu.nn.build_mlp(obs_dim, act_dim, mlp_hidden=mlp_hidden, num_layers=3)
        self.target_q_network = rlu.nn.build_mlp(obs_dim, act_dim, mlp_hidden=mlp_hidden, num_layers=3)
        # NOTE(review): `lr` is a deprecated alias of `learning_rate` in TF2 Keras.
        self.q_optimizer = tf.keras.optimizers.Adam(lr=q_lr)
        self.epsilon = tf.Variable(initial_value=epsilon, dtype=tf.float32, trainable=False)
        self.act_dim = act_dim
        self.double_q = double_q
        self.huber_delta = huber_delta
        self.gamma = gamma
        self.tau = tau
        reduction = tf.keras.losses.Reduction.NONE  # Note: tensorflow uses reduce_mean at axis=-1 by default
        if huber_delta is None:
            self.loss_fn = tf.keras.losses.MeanSquaredError(reduction=reduction)
        else:
            self.loss_fn = tf.keras.losses.Huber(delta=huber_delta, reduction=reduction)
        # Start the target network identical to the online network.
        rlu.functional.hard_update(self.target_q_network, self.q_network)

    def set_logger(self, logger):
        """Attach the experiment logger used by ``train_on_batch``."""
        self.logger = logger

    def log_tabular(self):
        """Register the per-epoch diagnostics this agent stores in the logger."""
        self.logger.log_tabular('QVals', with_min_and_max=True)
        self.logger.log_tabular('LossQ', average_only=True)

    def set_epsilon(self, epsilon):
        """Update the exploration rate in place (no graph retrace)."""
        assert epsilon >= 0. and epsilon <= 1.
        self.epsilon.assign(epsilon)

    @tf.function
    def update_target(self):
        """Polyak-average the online Q network into the target network."""
        rlu.functional.soft_update(self.target_q_network, self.q_network, tau=self.tau)

    @tf.function
    def _update_nets(self, obs, act, next_obs, done, rew):
        """One Q-learning gradient step; returns logging tensors."""
        print('Tracing _update_nets')
        # compute target Q values
        target_q_values = self.target_q_network(next_obs)
        if self.double_q:
            # select action using Q network instead of target Q network
            target_actions = tf.argmax(self.q_network(next_obs), axis=-1, output_type=self.act_spec.dtype)
            target_q_values = gather_q_values(target_q_values, target_actions)
        else:
            target_q_values = tf.reduce_max(target_q_values, axis=-1)
        target_q_values = rew + self.gamma * (1. - done) * target_q_values
        with tf.GradientTape() as tape:
            q_values = gather_q_values(self.q_network(obs), act)  # (None,)
            # NOTE(review): arguments are (prediction, target), reversed from
            # the Keras (y_true, y_pred) convention — harmless here because
            # both MSE and Huber depend only on the difference.
            loss = self.loss_fn(q_values, target_q_values)  # (None,)
        grad = tape.gradient(loss, self.q_network.trainable_variables)
        self.q_optimizer.apply_gradients(zip(grad, self.q_network.trainable_variables))
        info = dict(
            QVals=q_values,
            LossQ=loss
        )
        return info

    @tf.function
    def train_step(self, data):
        """Graph-mode training step; soft-updates the target when flagged."""
        obs = data['obs']
        act = data['act']
        next_obs = data['next_obs']
        done = data['done']
        rew = data['rew']
        update_target = data['update_target']
        info = self._update_nets(obs, act, next_obs, done, rew)
        if update_target:
            self.update_target()
        return info

    def train_on_batch(self, data, **kwargs):
        """Run one training step and store the diagnostics in the logger."""
        info = self.train_step(data=data)
        self.logger.store(**rlu.functional.to_numpy_or_python_type(info))

    @tf.function
    def act_batch_explore_tf(self, obs):
        """Epsilon-greedy actions for exploration (graph-mode)."""
        return self.act_batch(obs, deterministic=tf.convert_to_tensor(False))

    @tf.function
    def act_batch_test_tf(self, obs):
        """Greedy actions for evaluation (graph-mode)."""
        return self.act_batch(obs, deterministic=tf.convert_to_tensor(True))

    def act_batch_test(self, obs):
        """Numpy-in/numpy-out wrapper around ``act_batch_test_tf``."""
        return self.act_batch_test_tf(tf.convert_to_tensor(obs)).numpy()

    def act_batch_explore(self, obs):
        """Numpy-in/numpy-out wrapper around ``act_batch_explore_tf``."""
        return self.act_batch_explore_tf(tf.convert_to_tensor(obs)).numpy()

    @tf.function
    def act_batch(self, obs, deterministic):
        """ Implement epsilon-greedy here """
        batch_size = tf.shape(obs)[0]
        epsilon = tf.random.uniform(shape=(batch_size,), minval=0., maxval=1., dtype=tf.float32)
        # Indicator is 1 (pick greedy) when the sample exceeds epsilon,
        # 0 (pick random) otherwise — i.e. random with probability epsilon.
        epsilon_indicator = tf.cast(epsilon > self.epsilon, dtype=tf.int32)  # (None,)
        random_actions = tf.random.uniform(shape=(batch_size,), minval=0, maxval=self.act_dim,
                                           dtype=self.act_spec.dtype)
        deterministic_actions = tf.argmax(self.q_network(obs), axis=-1, output_type=self.act_spec.dtype)
        # Column 0 holds random actions, column 1 greedy; the indicator picks
        # the column per row via gather_q_values.
        epsilon_greedy_actions = tf.stack([random_actions, deterministic_actions], axis=-1)  # (None, 2)
        epsilon_greedy_actions = gather_q_values(epsilon_greedy_actions, epsilon_indicator)
        final_actions = tf.cond(deterministic, true_fn=lambda: deterministic_actions,
                                false_fn=lambda: epsilon_greedy_actions)
        return final_actions
class Runner(TFOffPolicyRunner):
    """Off-policy training entry point wired to the ``DQN`` agent."""

    @classmethod
    def main(cls,
             env_name,
             mlp_hidden=256,
             double_q=True,
             q_lr=1e-4,
             gamma=0.99,
             huber_delta: float = None,
             tau=5e-3,
             epsilon=0.1,
             **kwargs
             ):
        """Bundle DQN hyper-parameters and delegate to the base runner."""
        agent_kwargs = {
            'mlp_hidden': mlp_hidden,
            'double_q': double_q,
            'q_lr': q_lr,
            'gamma': gamma,
            'huber_delta': huber_delta,
            'tau': tau,
            'epsilon': epsilon,
        }
        super(Runner, cls).main(env_name=env_name,
                                agent_cls=DQN,
                                agent_kwargs=agent_kwargs,
                                **kwargs)
if __name__ == '__main__':
    # Parse CLI flags into Runner.main's keyword arguments and launch training.
    run_func_as_main(Runner.main)

# Source: rlutils-python-0.0.3 — rlutils/algos/tf/mf/dqn.py
# (dataset-dump metadata removed from the line above; it broke Python syntax)
import time
import numpy as np
import tensorflow as tf
from rlutils.tf.utils import set_tf_allow_growth
# NOTE(review): presumably enables TF GPU memory growth; it is deliberately
# called immediately after its import and BEFORE the remaining rlutils/TF
# imports so it runs before any GPU memory is allocated — confirm.
set_tf_allow_growth()
from rlutils.infra.runner import TFRunner
from rlutils.tf.nn import AtariQNetworkDeepMind, hard_update
from rlutils.replay_buffers import PyUniformParallelEnvReplayBufferFrame
from rlutils.infra.runner import run_func_as_main
from rlutils.np.schedulers import PiecewiseSchedule
from gym.wrappers import AtariPreprocessing
def gather_q_values(q_values, actions):
    """Select one Q-value per row: returns ``q_values[i, actions[i]]``.

    Args:
        q_values: (batch, act_dim) tensor of per-action values.
        actions: (batch,) integer tensor of action indices.
    """
    batch_size = tf.shape(actions)[0]
    # Match the row-index dtype to `actions` so tf.stack does not fail when
    # callers pass int64 actions (consistent with the mlp dqn.py version).
    idx = tf.stack([tf.range(batch_size, dtype=actions.dtype), actions], axis=-1)  # (None, 2)
    q_values = tf.gather_nd(q_values, indices=idx)
    return q_values
class DQN(tf.keras.Model):
    """(Double/Dueling) DQN for Atari with stacked-frame image observations.

    Unlike the MLP variant, the target network here is synchronized with a
    periodic HARD copy (see ``update_target``), DeepMind-style.
    """

    def __init__(self,
                 act_dim,
                 frame_stack=4,
                 dueling=True,
                 double_q=True,
                 q_lr=1e-4,
                 gamma=0.99,
                 huber_delta=1.0):
        """Build the convolutional Q networks, optimizer, and loss.

        Args:
            act_dim: number of discrete actions.
            frame_stack: number of stacked input frames (channels-first).
            dueling: if True, use the dueling network head.
            double_q: if True, use Double-DQN target action selection.
            q_lr: Adam learning rate.
            gamma: discount factor.
            huber_delta: if None use MSE loss, otherwise a Huber loss with
                this delta.
        """
        super(DQN, self).__init__()
        data_format = 'channels_first'
        self.q_network = AtariQNetworkDeepMind(act_dim=act_dim, frame_stack=frame_stack, dueling=dueling,
                                               data_format=data_format)
        self.target_q_network = AtariQNetworkDeepMind(act_dim=act_dim, frame_stack=frame_stack, dueling=dueling,
                                                      data_format=data_format)
        # NOTE(review): `lr` is a deprecated alias of `learning_rate` in TF2 Keras.
        self.q_optimizer = tf.keras.optimizers.Adam(lr=q_lr)
        # Exploration rate; rescheduled each epoch via set_epsilon.
        # NOTE(review): created trainable (the default) unlike the MLP variant's
        # trainable=False — harmless since it is never in a gradient path.
        self.epsilon = tf.Variable(initial_value=1.0, dtype=tf.float32)
        self.act_dim = act_dim
        self.double_q = double_q
        self.huber_delta = huber_delta
        self.gamma = gamma
        reduction = tf.keras.losses.Reduction.NONE  # Note: tensorflow uses reduce_mean at axis=-1 by default
        if huber_delta is None:
            self.loss_fn = tf.keras.losses.MeanSquaredError(reduction=reduction)
        else:
            self.loss_fn = tf.keras.losses.Huber(delta=huber_delta, reduction=reduction)
        # Start the target network identical to the online network.
        hard_update(self.target_q_network, self.q_network)

    def set_logger(self, logger):
        """Attach the experiment logger used by ``update``."""
        self.logger = logger

    def log_tabular(self):
        """Register the per-epoch diagnostics this agent stores in the logger."""
        self.logger.log_tabular('QVals', with_min_and_max=True)
        self.logger.log_tabular('LossQ', average_only=True)

    def set_epsilon(self, epsilon):
        """Update the exploration rate in place (no graph retrace)."""
        assert epsilon >= 0. and epsilon <= 1.
        self.epsilon.assign(epsilon)

    def update_target(self):
        """Hard-copy the online Q weights into the target network."""
        hard_update(self.target_q_network, self.q_network)

    @tf.function
    def _update_nets(self, obs, act, next_obs, done, rew):
        """One Q-learning gradient step; returns logging tensors."""
        print('Tracing _update_nets')
        # compute target Q values
        target_q_values = self.target_q_network(next_obs)
        if self.double_q:
            # select action using Q network instead of target Q network
            target_actions = tf.argmax(self.q_network(next_obs), axis=-1, output_type=tf.int32)
            target_q_values = gather_q_values(target_q_values, target_actions)
        else:
            target_q_values = tf.reduce_max(target_q_values, axis=-1)
        target_q_values = rew + self.gamma * (1. - done) * target_q_values
        with tf.GradientTape() as tape:
            q_values = gather_q_values(self.q_network(obs), act)  # (None,)
            # NOTE(review): arguments are (prediction, target), reversed from
            # the Keras (y_true, y_pred) convention — harmless here because
            # both MSE and Huber depend only on the difference.
            loss = self.loss_fn(q_values, target_q_values)  # (None,)
        grad = tape.gradient(loss, self.q_network.trainable_variables)
        self.q_optimizer.apply_gradients(zip(grad, self.q_network.trainable_variables))
        info = dict(
            QVals=q_values,
            LossQ=loss
        )
        return info

    def update(self, obs, act, next_obs, rew, done):
        """Eager wrapper: run one gradient step and log numpy diagnostics."""
        info = self._update_nets(obs, act, next_obs, done, rew)
        for key, item in info.items():
            info[key] = item.numpy()
        self.logger.store(**info)

    @tf.function
    def act_batch(self, obs, deterministic):
        """ Implement epsilon-greedy here """
        batch_size = tf.shape(obs)[0]
        epsilon = tf.random.uniform(shape=(batch_size,), minval=0., maxval=1., dtype=tf.float32)
        # Indicator is 1 (pick greedy) when the sample exceeds epsilon,
        # 0 (pick random) otherwise — i.e. random with probability epsilon.
        epsilon_indicator = tf.cast(epsilon > self.epsilon, dtype=tf.int32)  # (None,)
        random_actions = tf.random.uniform(shape=(batch_size,), minval=0, maxval=self.act_dim, dtype=tf.int32)
        deterministic_actions = tf.argmax(self.q_network(obs), axis=-1, output_type=tf.int32)
        # Column 0 holds random actions, column 1 greedy; the indicator picks
        # the column per row via gather_q_values.
        epsilon_greedy_actions = tf.stack([random_actions, deterministic_actions], axis=-1)  # (None, 2)
        epsilon_greedy_actions = gather_q_values(epsilon_greedy_actions, epsilon_indicator)
        final_actions = tf.cond(deterministic, true_fn=lambda: deterministic_actions,
                                false_fn=lambda: epsilon_greedy_actions)
        return final_actions
class DQNRunner(TFRunner):
    """Training-loop driver for the Atari DQN agent.

    Wires together the vectorized environment, frame-based replay buffer,
    agent, epsilon schedule, and the per-step collect/update logic.
    """

    def setup_replay_buffer(self,
                            num_parallel_env,
                            replay_capacity,
                            batch_size,
                            gamma,
                            update_horizon,
                            frame_stack
                            ):
        """Create a uniform replay buffer that stores single 84x84 frames and
        reassembles frame stacks at sample time."""
        self.replay_buffer = PyUniformParallelEnvReplayBufferFrame(
            num_parallel_env=num_parallel_env,
            obs_spec=tf.TensorSpec(shape=[84, 84], dtype=tf.uint8),
            act_spec=tf.TensorSpec(shape=(), dtype=tf.int32),
            replay_capacity=replay_capacity,
            batch_size=batch_size,
            gamma=gamma,
            update_horizon=update_horizon,
            frame_stack=frame_stack
        )

    def setup_agent(self,
                    frame_stack=4,
                    dueling=True,
                    double_q=True,
                    q_lr=1e-4,
                    gamma=0.99,
                    huber_delta=1.0):
        """Instantiate the DQN agent and attach the experiment logger."""
        self.agent = DQN(act_dim=self.env.single_action_space.n,
                         frame_stack=frame_stack,
                         dueling=dueling,
                         double_q=double_q,
                         q_lr=q_lr,
                         gamma=gamma,
                         huber_delta=huber_delta)
        self.agent.set_logger(self.logger)

    def setup_extra(self,
                    start_steps,
                    update_after,
                    update_every,
                    update_per_step,
                    target_update):
        """Store collection/update cadence and build the epsilon schedule."""
        self.start_steps = start_steps
        self.update_after = update_after
        self.update_every = update_every
        self.update_per_step = update_per_step
        self.target_update = target_update
        # epsilon scheduler: linear anneal 1.0 -> 0.1 over the first 1M steps,
        # then -> 0.01 by the midpoint of training, 0.01 thereafter.
        self.epsilon_scheduler = PiecewiseSchedule(
            [
                (0, 1.0),
                (1e6, 0.1),
                (self.epochs * self.steps_per_epoch / 2, 0.01),
            ], outside_value=0.01
        )

    def get_action_batch(self, o, deterministic=False):
        """Epsilon-greedy (or greedy) actions for a batch of observations."""
        return self.agent.act_batch(tf.convert_to_tensor(o, dtype=tf.float32),
                                    tf.convert_to_tensor(deterministic, dtype=tf.bool)).numpy()

    def run_one_step(self, t):
        """Collect one environment step per parallel env and run any due updates."""
        global_env_steps = self.global_step * self.num_parallel_env
        # Uniform-random warmup before the agent starts acting.
        if global_env_steps >= self.start_steps:
            a = self.get_action_batch(self.o, deterministic=False)
        else:
            a = self.env.action_space.sample()
            a = np.asarray(a, dtype=np.int32)
        # Step the env
        o2, r, d, info = self.env.step(a)
        self.ep_ret += r
        self.ep_len += 1
        # np.bool was removed in NumPy 1.24; the builtin bool dtype is the
        # correct replacement (it was only ever an alias).
        timeouts = np.array([i.get('TimeLimit.truncated', False) for i in info], dtype=bool)
        # Ignore the "done" signal if it comes from hitting the time
        # horizon (that is, when it's an artificial terminal signal
        # that isn't based on the agent's state)
        true_d = np.logical_and(d, np.logical_not(timeouts))
        # Store experience to replay buffer
        self.replay_buffer.add(data={
            'obs': self.o[:, -1, :, :],  # only add the last frame to the replay buffer
            'act': a,
            'rew': r,
            'done': true_d
        })
        # Super critical, easy to overlook step: make sure to update
        # most recent observation!
        self.o = o2
        # End of trajectory handling
        if np.any(d):
            self.logger.store(EpRet=self.ep_ret[d], EpLen=self.ep_len[d])
            self.ep_ret[d] = 0
            self.ep_len[d] = 0
        # Update handling: after warmup, run update_every * update_per_step
        # gradient steps every update_every env steps.
        if global_env_steps >= self.update_after and global_env_steps % self.update_every == 0:
            for j in range(int(self.update_every * self.update_per_step)):
                batch = self.replay_buffer.sample()
                self.agent.update(**batch)
        # Periodic hard sync of the target network.
        if global_env_steps % self.target_update == 0:
            self.agent.update_target()

    def on_train_begin(self):
        """Reset env, per-episode accumulators, and the wall-clock timer."""
        self.start_time = time.time()
        self.o = self.env.reset()
        self.ep_ret = np.zeros(shape=self.num_parallel_env)
        self.ep_len = np.zeros(shape=self.num_parallel_env, dtype=np.int64)

    def on_epoch_end(self, epoch):
        """Reschedule epsilon and dump the epoch's tabular diagnostics."""
        # schedule the learning rate and epsilon
        epsilon = self.epsilon_scheduler.value(self.global_step)
        self.logger.log(f'Setting epsilon to {epsilon:.4f}')
        self.agent.set_epsilon(epsilon=epsilon)
        # Log info about epoch
        self.logger.log_tabular('Epoch', epoch)
        self.logger.log_tabular('EpRet', with_min_and_max=True)
        self.logger.log_tabular('EpLen', average_only=True)
        self.logger.log_tabular('TotalEnvInteracts', self.global_step * self.num_parallel_env)
        self.agent.log_tabular()
        self.logger.log_tabular('Time', time.time() - self.start_time)
        self.logger.dump_tabular()
def dqn(env_name,
        steps_per_epoch=10000,
        epochs=500,
        start_steps=10000,
        update_after=1000,
        update_every=4,
        update_per_step=0.25,
        batch_size=32,
        num_parallel_env=1,
        seed=1,
        # sac args
        frame_stack=4,
        dueling=True,
        double_q=True,
        q_lr=1e-4,
        gamma=0.99,
        huber_delta=1.0,
        target_update=1000,
        # replay
        update_horizon=1,
        replay_size=int(1e6)
        ):
    """Train Atari DQN end to end: build the runner, wire every component,
    and run the training loop.

    Note: the setup calls below are order-dependent (env before agent,
    logger before agent, etc.) — keep the sequence intact.
    """
    # NoFrameskip ROMs delegate frame skipping to the preprocessing wrapper.
    frame_skip = 4 if 'NoFrameskip' in env_name else 1
    # Snapshot of all hyper-parameters for the experiment logger; taken after
    # frame_skip is computed so it is included in the config dump.
    config = locals()
    runner = DQNRunner(seed=seed, steps_per_epoch=steps_per_epoch, epochs=epochs,
                       exp_name=f'{env_name}_dqn_test', logger_path='data')
    runner.setup_env(env_name=env_name, num_parallel_env=num_parallel_env, frame_stack=frame_stack,
                     wrappers=lambda env: AtariPreprocessing(env, frame_skip=frame_skip, terminal_on_life_loss=True),
                     asynchronous=False, num_test_episodes=None)
    runner.setup_seed(seed)
    runner.setup_logger(config=config)
    runner.setup_agent(frame_stack=frame_stack,
                       dueling=dueling,
                       double_q=double_q,
                       q_lr=q_lr,
                       gamma=gamma,
                       huber_delta=huber_delta)
    runner.setup_extra(start_steps=start_steps,
                       update_after=update_after,
                       update_every=update_every,
                       update_per_step=update_per_step,
                       target_update=target_update)
    runner.setup_replay_buffer(num_parallel_env=num_parallel_env,
                               replay_capacity=replay_size,
                               batch_size=batch_size,
                               gamma=gamma,
                               update_horizon=update_horizon,
                               frame_stack=frame_stack,
                               )
    runner.run()
if __name__ == '__main__':
    # Parse CLI flags into dqn()'s keyword arguments and launch training.
    run_func_as_main(dqn)

# Source: rlutils-python-0.0.3 — rlutils/algos/tf/mf/images/dqn.py
# (dataset-dump metadata removed from the line above; it broke Python syntax)
import os
import time
import gym
import numpy as np
import rlutils.tf as rlu
import tensorflow as tf
import tensorflow_probability as tfp
from rlutils.infra.runner import TFRunner
from rlutils.logx import EpochLogger
from rlutils.replay_buffers import PyUniformReplayBuffer
from tqdm.auto import tqdm, trange
tfd = tfp.distributions
class BRACPAgent(tf.keras.Model):
def __init__(self,
ob_dim,
ac_dim,
num_ensembles=3,
behavior_mlp_hidden=256,
behavior_lr=1e-3,
policy_lr=5e-6,
policy_behavior_lr=1e-3,
policy_mlp_hidden=256,
q_mlp_hidden=256,
q_lr=3e-4,
alpha_lr=1e-3,
alpha=1.0,
tau=1e-3,
gamma=0.99,
target_entropy=None,
use_gp=True,
gp_type='softplus',
reg_type='kl',
sigma=10,
n=5,
gp_weight=0.1,
entropy_reg=True,
kl_backup=False,
max_ood_grad_norm=0.01,
):
super(BRACPAgent, self).__init__()
self.reg_type = reg_type
self.gp_type = gp_type
assert self.reg_type in ['kl', 'mmd', 'cross_entropy']
self.ob_dim = ob_dim
self.ac_dim = ac_dim
self.q_mlp_hidden = q_mlp_hidden
self.policy_lr = policy_lr
self.policy_behavior_lr = policy_behavior_lr
self.behavior_policy = rlu.nn.EnsembleBehaviorPolicy(num_ensembles=num_ensembles, out_dist='normal',
obs_dim=self.ob_dim, act_dim=self.ac_dim,
mlp_hidden=behavior_mlp_hidden)
self.behavior_lr = behavior_lr
self.policy_net = rlu.nn.SquashedGaussianMLPActor(ob_dim, ac_dim, policy_mlp_hidden)
self.target_policy_net = rlu.nn.SquashedGaussianMLPActor(ob_dim, ac_dim, policy_mlp_hidden)
self.policy_net.optimizer = rlu.future.get_adam_optimizer(lr=self.policy_lr)
rlu.functional.hard_update(self.target_policy_net, self.policy_net)
self.q_network = rlu.nn.EnsembleMinQNet(ob_dim, ac_dim, q_mlp_hidden)
self.q_network.compile(optimizer=rlu.future.get_adam_optimizer(q_lr))
self.target_q_network = rlu.nn.EnsembleMinQNet(ob_dim, ac_dim, q_mlp_hidden)
rlu.functional.hard_update(self.target_q_network, self.q_network)
self.log_beta = rlu.nn.LagrangeLayer(initial_value=alpha)
self.log_beta.compile(optimizer=rlu.future.get_adam_optimizer(alpha_lr))
self.log_alpha = rlu.nn.LagrangeLayer(initial_value=alpha)
self.log_alpha.compile(optimizer=rlu.future.get_adam_optimizer(alpha_lr))
self.log_gp = rlu.nn.LagrangeLayer(initial_value=gp_weight, min_value=gp_weight)
self.log_gp.compile(optimizer=rlu.future.get_adam_optimizer(alpha_lr))
self.target_entropy = target_entropy
self.tau = tau
self.gamma = gamma
self.kl_n = 5
self.n = n
self.max_q_backup = True
self.entropy_reg = entropy_reg
self.kl_backup = kl_backup
self.gradient_clipping = False
self.sensitivity = 1.0
self.max_ood_grad_norm = max_ood_grad_norm
self.use_gp = use_gp
self.sigma = sigma
# delta should set according to the KL between initial policy and behavior policy
self.delta_behavior = tf.Variable(initial_value=0.0, trainable=False, dtype=tf.float32)
self.delta_gp = tf.Variable(initial_value=0.0, trainable=False, dtype=tf.float32)
self.reward_scale_factor = 1.0
def get_alpha(self, obs):
return self.log_alpha(obs)
def call(self, inputs, training=None, mask=None):
obs, deterministic = inputs
pi_final = self.policy_net((obs, deterministic))[0]
return pi_final
def set_delta_behavior(self, delta_behavior):
EpochLogger.log(f'Setting behavior hard KL to {delta_behavior:.4f}')
self.delta_behavior.assign(delta_behavior)
def set_delta_gp(self, delta_gp):
EpochLogger.log(f'Setting delta GP to {delta_gp:.4f}')
self.delta_gp.assign(delta_gp)
def set_logger(self, logger):
self.logger = logger
def log_tabular(self):
self.logger.log_tabular('Q1Vals', with_min_and_max=True)
self.logger.log_tabular('Q2Vals', with_min_and_max=True)
self.logger.log_tabular('LogPi', average_only=True)
self.logger.log_tabular('LossPi', average_only=True)
self.logger.log_tabular('LossQ', average_only=True)
self.logger.log_tabular('Alpha', average_only=True)
self.logger.log_tabular('LossAlpha', average_only=True)
self.logger.log_tabular('KL', with_min_and_max=True)
self.logger.log_tabular('ViolationRatio', average_only=True)
self.logger.log_tabular('Beta', average_only=True)
self.logger.log_tabular('BetaLoss', average_only=True)
self.logger.log_tabular('BehaviorLoss', average_only=True)
self.logger.log_tabular('GP', average_only=True)
self.logger.log_tabular('GPWeight', average_only=True)
@tf.function
def update_target(self):
rlu.functional.soft_update(self.target_q_network, self.q_network, self.tau)
rlu.functional.soft_update(self.target_policy_net, self.policy_net, self.tau)
@tf.function
def hard_update_policy_target(self):
rlu.functional.hard_update(self.target_policy_net, self.policy_net)
@tf.function(experimental_relax_shapes=True)
def compute_pi_pib_distance(self, obs):
if self.reg_type in ['kl', 'cross_entropy']:
_, log_prob, raw_action, pi_distribution = self.policy_net((obs, False))
loss = self._compute_kl_behavior_v2(obs, raw_action, pi_distribution)
elif self.reg_type == 'mmd':
batch_size = tf.shape(obs)[0]
obs = tf.tile(obs, (self.n, 1))
_, log_prob, raw_action, pi_distribution = self.policy_net((obs, False))
loss = self._compute_mmd(obs, raw_action, pi_distribution)
log_prob = tf.reduce_mean(tf.reshape(log_prob, shape=(self.n, batch_size)), axis=0)
else:
raise NotImplementedError
return loss, log_prob
def mmd_loss_laplacian(self, samples1, samples2, sigma=0.2):
"""MMD constraint with Laplacian kernel for support matching"""
# sigma is set to 10.0 for hopper, cheetah and 20 for walker/ant
# (n, None, ac_dim)
diff_x_x = tf.expand_dims(samples1, axis=0) - tf.expand_dims(samples1, axis=1) # (n, n, None, ac_dim)
diff_x_x = tf.reduce_mean(tf.exp(-tf.reduce_sum(tf.abs(diff_x_x), axis=-1) / (2.0 * sigma)), axis=(0, 1))
diff_x_y = tf.expand_dims(samples1, axis=0) - tf.expand_dims(samples2, axis=1)
diff_x_y = tf.reduce_mean(tf.exp(-tf.reduce_sum(tf.abs(diff_x_y), axis=-1) / (2.0 * sigma)), axis=(0, 1))
diff_y_y = tf.expand_dims(samples2, axis=0) - tf.expand_dims(samples2, axis=1) # (n, n, None, ac_dim)
diff_y_y = tf.reduce_mean(tf.exp(-tf.reduce_sum(tf.abs(diff_y_y), axis=-1) / (2.0 * sigma)), axis=(0, 1))
overall_loss = tf.sqrt(diff_x_x + diff_y_y - 2.0 * diff_x_y + 1e-6) # (None,)
return overall_loss
def _compute_mmd(self, obs, raw_action, pi_distribution):
# obs: (n * None, obs_dim), raw_actions: (n * None, ac_dim)
batch_size = tf.shape(obs)[0] // self.n
samples_pi = raw_action
samples_pi = tf.tile(samples_pi, (self.behavior_policy.num_ensembles, 1))
samples_pi = tf.reshape(samples_pi, shape=(self.behavior_policy.num_ensembles, self.n,
batch_size, self.ac_dim))
samples_pi = tf.transpose(samples_pi, perm=[1, 0, 2, 3])
samples_pi = tf.reshape(samples_pi, shape=(self.n * self.behavior_policy.num_ensembles, batch_size,
self.ac_dim))
obs_expand = rlu.functional.expand_ensemble_dim(obs, self.behavior_policy.num_ensembles)
samples_pi_b = self.behavior_policy.sample(
obs_expand, full_path=tf.convert_to_tensor(False)) # (num_ensembles, n * batch_size, d)
samples_pi_b = tf.reshape(samples_pi_b, shape=(self.behavior_policy.num_ensembles, self.n,
batch_size, self.ac_dim))
samples_pi_b = tf.transpose(samples_pi_b, perm=[1, 0, 2, 3])
samples_pi_b = tf.reshape(samples_pi_b, shape=(self.n * self.behavior_policy.num_ensembles, batch_size,
self.ac_dim))
samples_pi = tf.clip_by_value(samples_pi, -3., 3.)
samples_pi_b = tf.clip_by_value(samples_pi_b, -3., 3.)
samples_pi = tf.tanh(samples_pi)
samples_pi_b = tf.tanh(samples_pi_b)
# samples_pi = self.policy_net.transform_raw_action(samples_pi)
# samples_pi_b = self.behavior_policy.transform_raw_action(samples_pi_b)
mmd_loss = self.mmd_loss_laplacian(samples_pi, samples_pi_b, sigma=self.sigma)
# mmd_loss = tf.reshape(mmd_loss, shape=(self.behavior_policy.num_ensembles, batch_size))
# mmd_loss = tf.reduce_mean(mmd_loss, axis=0)
return mmd_loss
def _compute_kl_behavior_v2(self, obs, raw_action, pi_distribution):
n = self.kl_n
batch_size = tf.shape(obs)[0]
pi_distribution = tfd.Independent(distribution=tfd.Normal(
loc=tf.tile(pi_distribution.distribution.loc, (n, 1)),
scale=tf.tile(pi_distribution.distribution.scale, (n, 1))
), reinterpreted_batch_ndims=1) # (n * batch_size)
# compute KLD upper bound
x, cond = raw_action, obs
print(f'Tracing call_n with x={x}, cond={cond}')
x = rlu.functional.expand_ensemble_dim(x, self.behavior_policy.num_ensembles) # (num_ensembles, None, act_dim)
cond = rlu.functional.expand_ensemble_dim(
cond, self.behavior_policy.num_ensembles) # (num_ensembles, None, obs_dim)
posterior = self.behavior_policy.encode_distribution(inputs=(x, cond))
encode_sample = posterior.sample(n) # (n, num_ensembles, None, z_dim)
encode_sample = tf.transpose(encode_sample, perm=[1, 0, 2, 3]) # (num_ensembles, n, None, z_dim)
encode_sample = tf.reshape(encode_sample, shape=(self.behavior_policy.num_ensembles,
n * batch_size,
self.behavior_policy.latent_dim))
cond = tf.tile(cond, multiples=(1, n, 1)) # (num_ensembles, n * None, obs_dim)
beta_distribution = self.behavior_policy.decode_distribution(z=(encode_sample, cond)) # (ensemble, n * None)
posterior_kld = tfd.kl_divergence(posterior, self.behavior_policy.prior) # (num_ensembles, None,)
posterior_kld = tf.tile(posterior_kld, multiples=(1, n,))
if self.reg_type == 'kl':
kl_loss = tfd.kl_divergence(pi_distribution, beta_distribution) # (ensembles, n * None)
elif self.reg_type == 'cross_entropy':
# Cross entropy
x = tf.tile(x, multiples=(1, n, 1)) # (num_ensembles, n * None, act_dim)
kl_loss = beta_distribution.log_prob(x) # (ensembles, None * n)
kl_loss = -rlu.distributions.apply_squash_log_prob(kl_loss, x)
else:
raise NotImplementedError
final_kl_loss = kl_loss + posterior_kld # (ensembles, None * n)
final_kl_loss = tf.reshape(final_kl_loss, shape=(self.behavior_policy.num_ensembles, n, batch_size))
final_kl_loss = tf.reduce_mean(final_kl_loss, axis=[0, 1]) # average both latent and ensemble dimension
return final_kl_loss
    @tf.function
    def update_actor_first_order(self, obs):
        """One gradient step on the policy and its two Lagrange multipliers.

        The policy maximizes the ensemble-min Q value subject to a divergence
        budget towards the behavior policy (KL, cross-entropy or MMD, chosen by
        self.reg_type) enforced by the multiplier alpha; beta is an optional
        entropy multiplier driven towards self.target_entropy.

        Args:
            obs: (batch_size, obs_dim) float tensor of observations.

        Returns:
            dict of diagnostics (losses, divergence, multipliers, log pi).
        """
        # TODO: maybe we just follow behavior policy and keep a minimum entropy instead of the optimal one.
        # policy loss
        with tf.GradientTape() as policy_tape:
            """ Compute the loss function of the policy that maximizes the Q function """
            print(f'Tracing _compute_surrogate_loss_pi with obs={obs}')
            policy_tape.watch(self.policy_net.trainable_variables)
            batch_size = tf.shape(obs)[0]
            alpha = self.get_alpha(obs)  # (None, act_dim)
            beta = self.log_beta(obs)
            # draw self.n action samples per state to reduce gradient variance
            obs_tile = tf.tile(obs, (self.n, 1))
            # policy loss
            action, log_prob, raw_action, pi_distribution = self.policy_net((obs_tile, False))
            log_prob = tf.reduce_mean(tf.reshape(log_prob, shape=(self.n, batch_size)), axis=0)
            q_values_pi_min = self.q_network((obs_tile, action), training=False)
            q_values_pi_min = tf.reduce_mean(tf.reshape(q_values_pi_min, shape=(self.n, batch_size)), axis=0)
            # add KL divergence penalty, high variance?
            if self.reg_type in ['kl', 'cross_entropy']:
                kl_loss = self._compute_kl_behavior_v2(obs_tile, raw_action, pi_distribution)  # (None, act_dim)
                kl_loss = tf.reduce_mean(tf.reshape(kl_loss, shape=(self.n, batch_size)), axis=0)
            elif self.reg_type == 'mmd':
                kl_loss = self._compute_mmd(obs_tile, raw_action, pi_distribution)
            else:
                raise NotImplementedError
            # Lagrangian penalty: positive where the divergence budget is exceeded
            delta = kl_loss - self.delta_behavior
            penalty = delta * alpha  # (None, act_dim)
            # NOTE(review): the sign of the entropy term flips between 'kl' and
            # the other regularizers — presumably intentional; confirm vs. paper.
            if self.reg_type == 'kl':
                if self.entropy_reg:
                    policy_loss = tf.reduce_mean(- q_values_pi_min + penalty - beta * log_prob, axis=0)
                else:
                    policy_loss = tf.reduce_mean(- q_values_pi_min + penalty, axis=0)
            elif self.reg_type in ['mmd', 'cross_entropy']:
                if self.entropy_reg:
                    policy_loss = tf.reduce_mean(- q_values_pi_min + penalty + beta * log_prob, axis=0)
                else:
                    policy_loss = tf.reduce_mean(- q_values_pi_min + penalty, axis=0)
            else:
                raise NotImplementedError
        rlu.future.minimize(policy_loss, policy_tape, self.policy_net)
        if self.entropy_reg:
            # dual update of the entropy multiplier beta
            with tf.GradientTape() as beta_tape:
                beta_tape.watch(self.log_beta.trainable_variables)
                beta = self.log_beta(obs)
                # beta loss
                if self.reg_type == 'kl':
                    beta_loss = tf.reduce_mean(beta * (log_prob + self.target_entropy))
                elif self.reg_type in ['mmd', 'cross_entropy']:
                    beta_loss = -tf.reduce_mean(beta * (log_prob + self.target_entropy))
                else:
                    raise NotImplementedError
            rlu.future.minimize(beta_loss, beta_tape, self.log_beta)
        else:
            beta_loss = 0.
        # dual ascent on alpha: grow alpha while the constraint is violated
        with tf.GradientTape() as alpha_tape:
            # alpha loss
            alpha = self.get_alpha(obs)
            penalty = delta * alpha
            alpha_loss = -tf.reduce_mean(penalty, axis=0)
        rlu.future.minimize(alpha_loss, alpha_tape, self.log_alpha)
        info = dict(
            LossPi=policy_loss,
            KL=kl_loss,
            ViolationRatio=tf.reduce_mean(tf.cast(delta > 0., dtype=tf.float32), axis=-1),
            Alpha=alpha,
            LossAlpha=alpha_loss,
            Beta=beta,
            BetaLoss=beta_loss,
            LogPi=log_prob,
        )
        return info
    @tf.function
    def update_actor_cloning(self, obs):
        """ Minimize KL(pi, pi_b) """
        # Pretraining step: pull the policy towards the behavior policy while
        # (optionally) steering its entropy towards target_entropy via beta.
        with tf.GradientTape() as policy_tape:
            policy_tape.watch(self.policy_net.trainable_variables)
            beta = self.log_beta(obs)
            loss, log_prob = self.compute_pi_pib_distance(obs)
            if self.entropy_reg:
                # NOTE(review): entropy-term sign flips with reg_type, mirroring
                # update_actor_first_order — confirm intended.
                if self.reg_type in ['kl']:
                    policy_loss = tf.reduce_mean(loss - beta * log_prob, axis=0)
                elif self.reg_type in ['mmd', 'cross_entropy']:
                    policy_loss = tf.reduce_mean(loss + beta * log_prob, axis=0)
                else:
                    raise NotImplementedError
            else:
                policy_loss = tf.reduce_mean(loss, axis=0)
        rlu.future.minimize(policy_loss, policy_tape, self.policy_net)
        if self.entropy_reg:
            # dual update of beta towards the target entropy
            with tf.GradientTape() as beta_tape:
                beta_tape.watch(self.log_beta.trainable_variables)
                beta = self.log_beta(obs)
                if self.reg_type in ['kl']:
                    beta_loss = tf.reduce_mean(beta * (log_prob + self.target_entropy), axis=0)
                elif self.reg_type in ['mmd', 'cross_entropy']:
                    beta_loss = -tf.reduce_mean(beta * (log_prob + self.target_entropy), axis=0)
                else:
                    raise NotImplementedError
            rlu.future.minimize(beta_loss, beta_tape, self.log_beta)
        info = dict(
            KL=loss,
            LogPi=log_prob,
        )
        return info
    def _compute_target_q(self, next_obs, reward, done):
        """Compute the TD backup target for the critic.

        Samples self.n next actions per state from the *target* policy and takes
        the max of the ensemble-min Q over those samples; optionally subtracts
        the (reward-scaled) divergence penalty from the backup when kl_backup.
        """
        batch_size = tf.shape(next_obs)[0]
        alpha = self.get_alpha(next_obs)
        next_obs = tf.tile(next_obs, multiples=(self.n, 1))
        next_action, next_action_log_prob, next_raw_action, pi_distribution = self.target_policy_net((next_obs, False))
        target_q_values = self.target_q_network((next_obs, next_action), training=False)
        # max over the n sampled actions (optimistic over policy samples)
        target_q_values = tf.reduce_max(tf.reshape(target_q_values, shape=(self.n, batch_size)), axis=0)
        if self.kl_backup is True:
            if self.reg_type in ['kl', 'cross_entropy']:
                kl_loss = self._compute_kl_behavior_v2(next_obs, next_raw_action, pi_distribution)  # (None, act_dim)
                kl_loss = tf.reduce_mean(tf.reshape(kl_loss, shape=(self.n, batch_size)), axis=0)
            elif self.reg_type == 'mmd':
                kl_loss = self._compute_mmd(next_obs, next_raw_action, pi_distribution)
            else:
                raise NotImplementedError
            # keep the penalty on the same scale as the (rescaled) rewards
            kl_loss = kl_loss / self.reward_scale_factor
            target_q_values = target_q_values - alpha * kl_loss
        q_target = reward + self.gamma * (1.0 - done) * target_q_values
        return q_target
    def _compute_q_net_gp(self, obs, actions):
        """Gradient penalty on the Q network at policy-proposed actions.

        Penalizes ||dQ/da|| at actions sampled from the current policy, gated
        by how far those actions diverge from the behavior policy (softplus or
        hard threshold on the divergence, selected by self.gp_type).
        """
        batch_size = tf.shape(obs)[0]
        if self.reg_type in ['kl', 'cross_entropy']:
            pi_action, log_prob, raw_action, pi_distribution = self.policy_net((obs, False))
            kl = self._compute_kl_behavior_v2(obs, raw_action, pi_distribution)  # (None,)
        elif self.reg_type == 'mmd':
            obs = tf.tile(obs, (self.n, 1))
            pi_action, log_prob, raw_action, pi_distribution = self.policy_net((obs, False))
            kl = self._compute_mmd(obs, raw_action, pi_distribution)
        else:
            raise NotImplementedError
        # gradient of Q w.r.t. the action input
        with tf.GradientTape() as inner_tape:
            inner_tape.watch(pi_action)
            q_values = self.q_network((obs, pi_action), training=False)  # (num_ensembles, None)
        input_gradient = inner_tape.gradient(q_values, pi_action)  # (None, act_dim)
        penalty = tf.norm(input_gradient, axis=-1)  # (None,)
        if self.reg_type == 'mmd':
            penalty = tf.reshape(penalty, shape=(self.n, batch_size))
            penalty = tf.reduce_mean(penalty, axis=0)
        # TODO: consider using soft constraints instead of hard clip
        # weights = tf.nn.sigmoid((kl - self.delta_behavior * 2.) * self.sensitivity)
        if self.gp_type == 'softplus':
            weights = tf.nn.softplus((kl - self.delta_gp) * self.sensitivity)
            weights = weights / tf.reduce_max(weights)
        elif self.gp_type == 'hard':
            weights = tf.cast((kl - self.delta_gp) > 0, dtype=tf.float32)
        else:
            raise NotImplementedError
        # stop_gradient: weights gate the penalty but receive no gradient
        penalty = penalty * tf.stop_gradient(weights)
        return penalty
    def _update_q_nets(self, obs, actions, q_target):
        """Fit the Q ensemble to q_target, optionally adding the OOD gradient
        penalty, and adapt the learnable penalty weight (log_gp) if enabled.
        """
        # q loss
        with tf.GradientTape() as q_tape:
            q_tape.watch(self.q_network.trainable_variables)
            q_values = self.q_network((obs, actions), training=True)  # (num_ensembles, None)
            q_values_loss = 0.5 * tf.square(rlu.functional.expand_ensemble_dim(
                q_target, self.q_network.num_ensembles) - q_values)
            # (num_ensembles, None)
            q_values_loss = tf.reduce_sum(q_values_loss, axis=0)  # (None,)
            if self.use_gp:
                gp_weight = self.log_gp(obs, training=False)
                gp = self._compute_q_net_gp(obs, actions)
                loss = q_values_loss + gp * gp_weight
            else:
                loss = q_values_loss
                gp = 0.
                gp_weight = 0.
            loss = tf.reduce_mean(loss, axis=0)
        rlu.future.minimize(loss, q_tape, self.q_network)
        if self.use_gp and (self.max_ood_grad_norm is not None):
            # dual update: raise the penalty weight while the OOD gradient norm
            # exceeds max_ood_grad_norm, lower it otherwise
            with tf.GradientTape() as gp_weight_tape:
                gp_weight_tape.watch(self.log_gp.trainable_variables)
                raw_gp_weight = self.log_gp(obs, training=True)
                delta_gp = (gp - self.max_ood_grad_norm) * raw_gp_weight
                loss_gp_weight = -tf.reduce_mean(delta_gp, axis=0)
            rlu.future.minimize(loss_gp_weight, gp_weight_tape, self.log_gp)
        info = dict(
            Q1Vals=q_values[0],
            Q2Vals=q_values[1],
            LossQ=q_values_loss,
            GP=gp,
            GPWeight=gp_weight,
        )
        return info
@tf.function
def update_q_nets(self, obs, actions, next_obs, done, reward):
"""Normal SAC update"""
q_target = self._compute_target_q(next_obs, reward, done)
return self._update_q_nets(obs, actions, q_target)
    @tf.function
    def _update(self, obs, act, next_obs, done, rew):
        """One full training step: behavior-policy MLE update, critic update,
        actor/multiplier update, then target-network soft update."""
        # the behavior policy is trained on pre-tanh (raw) actions
        raw_act = self.behavior_policy.inverse_transform_action(act)
        behavior_loss = self.behavior_policy.train_on_batch(x=(raw_act, obs))['loss']
        info = self.update_q_nets(obs, act, next_obs, done, rew)
        actor_info = self.update_actor_first_order(obs)
        self.update_target()
        # we only update alpha when policy is updated
        info.update(actor_info)
        info['BehaviorLoss'] = behavior_loss
        return info
def update(self, replay_buffer: PyUniformReplayBuffer):
# TODO: use different batches to update q and actor to break correlation
data = replay_buffer.sample()
info = self._update(**data)
self.logger.store(**rlu.functional.to_numpy_or_python_type(info))
    @tf.function
    def act_batch(self, obs, type=5):
        """Select actions for a batch of observations.

        `type` selects the strategy (note: the name shadows the builtin `type`;
        kept for caller compatibility):
          1 - deterministic policy output
          2 - a single stochastic sample
          3 - best of n=20 samples under the Q ensemble
          4 - like 3, raw samples clipped to mean +/- 2 std before tanh
          5 - like 3, raw samples clipped to mean +/- 1 std before tanh (default)
        """
        if type == 1:
            pi_final = self.policy_net((obs, tf.convert_to_tensor(True)))[0]
            return pi_final
        elif type == 2:
            pi_final = self.policy_net((obs, tf.convert_to_tensor(False)))[0]
            return pi_final
        elif type == 3 or type == 4 or type == 5:
            n = 20
            batch_size = tf.shape(obs)[0]
            pi_distribution = self.policy_net((obs, tf.convert_to_tensor(False)))[-1]
            samples = pi_distribution.sample(n)  # (n, None, act_dim)
            if type == 4:
                mean = tf.tile(tf.expand_dims(pi_distribution.mean(), axis=0), (n, 1, 1))
                std = tf.tile(tf.expand_dims(pi_distribution.stddev(), axis=0), (n, 1, 1))
                samples = tf.clip_by_value(samples, mean - 2 * std, mean + 2 * std)
            elif type == 5:
                mean = tf.tile(tf.expand_dims(pi_distribution.mean(), axis=0), (n, 1, 1))
                std = tf.tile(tf.expand_dims(pi_distribution.stddev(), axis=0), (n, 1, 1))
                samples = tf.clip_by_value(samples, mean - std, mean + std)
            # squash the raw samples into the valid action range
            samples = tf.tanh(samples)
            action = tf.reshape(samples, shape=(n * batch_size, self.ac_dim))
            obs_tile = tf.tile(obs, (n, 1))
            # NOTE(review): training=True here differs from the training=False
            # used elsewhere for Q evaluation — confirm intended.
            q_values_pi_min = self.q_network((obs_tile, action), training=True)
            q_values_pi_min = tf.reduce_mean(q_values_pi_min, axis=0)
            # pick, per state, the sampled action with the highest Q estimate
            idx = tf.argmax(tf.reshape(q_values_pi_min, shape=(n, batch_size)), axis=0,
                            output_type=tf.int32)  # (batch_size)
            idx = tf.stack([idx, tf.range(batch_size)], axis=-1)
            pi_final = tf.gather_nd(samples, idx)
            return pi_final
        else:
            raise NotImplementedError
def get_decayed_lr_schedule(self, lr, interval):
lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=[interval, interval * 2, interval * 3, interval * 4],
values=[lr, 0.5 * lr, 0.1 * lr, 0.05 * lr, 0.01 * lr])
return lr_schedule
def set_pretrain_policy_net_optimizer(self, epochs, steps_per_epoch):
# set learning rate decay
interval = epochs * steps_per_epoch // 5
self.policy_net.optimizer = rlu.future.get_adam_optimizer(
lr=self.get_decayed_lr_schedule(lr=self.policy_behavior_lr,
interval=interval))
def set_policy_net_optimizer(self):
# reset learning rate
self.hard_update_policy_target()
# reset policy net learning rate
self.policy_net.optimizer = rlu.future.get_adam_optimizer(lr=self.policy_lr)
self.log_beta.optimizer = rlu.future.get_adam_optimizer(lr=1e-3)
def pretrain_cloning(self, epochs, steps_per_epoch, replay_buffer):
EpochLogger.log(f'Training cloning policy')
t = trange(epochs)
for epoch in t:
kl, log_pi = [], []
for _ in trange(steps_per_epoch, desc=f'Epoch {epoch + 1}/{epochs}', leave=False):
# update q_b, pi_0, pi_b
data = replay_buffer.sample()
obs = data['obs']
actor_info = self.update_actor_cloning(obs)
kl.append(actor_info['KL'])
log_pi.append(actor_info['LogPi'])
kl = tf.reduce_mean(kl).numpy()
log_pi = tf.reduce_mean(log_pi).numpy()
t.set_description(desc=f'KL: {kl:.2f}, LogPi: {log_pi:.2f}')
def set_behavior_policy_optimizer(self, epochs, steps_per_epoch):
interval = epochs * steps_per_epoch // 5
self.behavior_policy.optimizer = rlu.future.get_adam_optimizer(
lr=self.get_decayed_lr_schedule(lr=self.behavior_lr,
interval=interval))
def pretrain_behavior_policy(self, epochs, steps_per_epoch, replay_buffer):
EpochLogger.log(f'Training behavior policy')
t = trange(epochs)
for epoch in t:
loss = []
for _ in trange(steps_per_epoch, desc=f'Epoch {epoch + 1}/{epochs}', leave=False):
# update q_b, pi_0, pi_b
data = replay_buffer.sample()
obs = data['obs']
raw_act = self.behavior_policy.inverse_transform_action(data['act'])
behavior_loss = self.behavior_policy.train_on_batch(x=(raw_act, obs))['loss']
loss.append(behavior_loss)
loss = tf.reduce_mean(loss).numpy()
t.set_description(desc=f'Loss: {loss:.2f}')
class BRACPRunner(TFRunner):
    """Experiment runner for BRAC+ on d4rl offline datasets.

    Orchestrates behavior-policy pretraining, policy cloning, and the main
    constrained actor-critic training loop, with periodic evaluation.
    """

    def get_action_batch(self, o, deterministic=False):
        # NOTE(review): act_batch's second positional argument is the integer
        # sampling strategy `type` (1-5); passing a bool here relies on
        # True == 1 / False == 0 coercion — confirm the caller intends that.
        return self.agent.act_batch(tf.convert_to_tensor(o, dtype=tf.float32),
                                    deterministic).numpy()

    def test_agent(self, agent, name, deterministic=False, logger=None):
        """Roll out `agent` in the vectorized test envs until every episode is
        done; store (normalized) returns and episode lengths in `logger`."""
        # np.bool was deprecated in NumPy 1.20 and removed in 1.24; use builtin bool
        o, d, ep_ret, ep_len = self.env.reset(), np.zeros(shape=self.num_test_episodes, dtype=bool), \
                               np.zeros(shape=self.num_test_episodes), np.zeros(shape=self.num_test_episodes,
                                                                                dtype=np.int64)
        t = tqdm(total=1, desc=f'Testing {name}')
        while not np.all(d):
            a = agent.act_batch(tf.convert_to_tensor(o, dtype=tf.float32), 5).numpy()
            assert not np.any(np.isnan(a)), f'nan action: {a}'
            o, r, d_, _ = self.env.step(a)
            # only accumulate return/length for episodes that are still running
            ep_ret = r * (1 - d) + ep_ret
            ep_len = np.ones(shape=self.num_test_episodes, dtype=np.int64) * (1 - d) + ep_len
            d = np.logical_or(d, d_)
            t.update(1)
        t.close()
        normalized_ep_ret = self.dummy_env.get_normalized_score(ep_ret) * 100
        if logger is not None:
            logger.store(TestEpRet=ep_ret, NormalizedTestEpRet=normalized_ep_ret, TestEpLen=ep_len)
        else:
            print(f'EpRet: {np.mean(ep_ret):.2f}, TestEpLen: {np.mean(ep_len):.2f}')

    def setup_replay_buffer(self,
                            batch_size,
                            reward_scale=True):
        """Load the d4rl qlearning dataset, optionally rescale rewards to
        [0, 1], and build a uniform replay buffer over it."""
        import d4rl

        def rescale(x):
            return (x - np.min(x)) / (np.max(x) - np.min(x))

        self.dummy_env = gym.make(self.env_name)
        dataset = d4rl.qlearning_dataset(env=self.dummy_env)
        if reward_scale:
            EpochLogger.log('Using reward scale', color='red')
            # max(r - min(r)) == max(r) - min(r): the dataset's reward range
            self.agent.reward_scale_factor = np.max(dataset['rewards'] - np.min(dataset['rewards']))
            EpochLogger.log(f'The scale factor is {self.agent.reward_scale_factor:.2f}')
            dataset['rewards'] = rescale(dataset['rewards'])
        # modify keys
        dataset['obs'] = dataset.pop('observations').astype(np.float32)
        dataset['act'] = dataset.pop('actions').astype(np.float32)
        dataset['next_obs'] = dataset.pop('next_observations').astype(np.float32)
        dataset['rew'] = dataset.pop('rewards').astype(np.float32)
        dataset['done'] = dataset.pop('terminals').astype(np.float32)
        replay_size = dataset['obs'].shape[0]
        self.logger.log(f'Dataset size: {replay_size}')
        self.replay_buffer = PyUniformReplayBuffer.from_data_dict(
            data=dataset,
            batch_size=batch_size
        )

    def setup_agent(self,
                    num_ensembles,
                    behavior_mlp_hidden,
                    behavior_lr,
                    policy_mlp_hidden,
                    q_mlp_hidden,
                    policy_lr,
                    q_lr,
                    alpha_lr,
                    alpha,
                    tau,
                    gamma,
                    target_entropy,
                    use_gp,
                    policy_behavior_lr,
                    reg_type,
                    sigma,
                    n,
                    gp_weight,
                    gp_type,
                    entropy_reg,
                    kl_backup,
                    max_ood_grad_norm,
                    ):
        """Instantiate the BRACP agent from env shapes and prepare checkpoint paths."""
        obs_dim = self.env.single_observation_space.shape[-1]
        act_dim = self.env.single_action_space.shape[-1]
        self.agent = BRACPAgent(ob_dim=obs_dim, ac_dim=act_dim,
                                num_ensembles=num_ensembles,
                                behavior_mlp_hidden=behavior_mlp_hidden,
                                policy_lr=policy_lr,
                                policy_behavior_lr=policy_behavior_lr,
                                behavior_lr=behavior_lr,
                                policy_mlp_hidden=policy_mlp_hidden, q_mlp_hidden=q_mlp_hidden,
                                q_lr=q_lr, alpha_lr=alpha_lr, alpha=alpha, tau=tau, gamma=gamma,
                                target_entropy=target_entropy, use_gp=use_gp,
                                reg_type=reg_type, sigma=sigma, n=n, gp_weight=gp_weight,
                                entropy_reg=entropy_reg, kl_backup=kl_backup, max_ood_grad_norm=max_ood_grad_norm,
                                gp_type=gp_type)
        self.agent.set_logger(self.logger)
        # the cloning checkpoints encode the settings they were produced with so
        # incompatible runs don't silently reuse them
        self.behavior_filepath = os.path.join(self.logger.output_dir, 'behavior.ckpt')
        self.policy_behavior_filepath = os.path.join(self.logger.output_dir,
                                                     f'policy_behavior_{target_entropy}_{reg_type}.ckpt')
        self.log_beta_behavior_filepath = os.path.join(self.logger.output_dir,
                                                       f'policy_behavior_log_beta_{target_entropy}_{reg_type}.ckpt')
        self.final_filepath = os.path.join(self.logger.output_dir, 'agent_final.ckpt')

    def setup_extra(self,
                    pretrain_epochs,
                    save_freq,
                    max_kl,
                    force_pretrain_behavior,
                    force_pretrain_cloning,
                    generalization_threshold,
                    std_scale
                    ):
        """Store run-level hyperparameters that are not owned by the agent."""
        self.pretrain_epochs = pretrain_epochs
        self.save_freq = save_freq
        self.max_kl = max_kl
        self.force_pretrain_behavior = force_pretrain_behavior
        self.force_pretrain_cloning = force_pretrain_cloning
        self.generalization_threshold = generalization_threshold
        self.std_scale = std_scale

    def run_one_step(self, t):
        # one gradient step per training tick (offline: no env interaction)
        self.agent.update(self.replay_buffer)

    def on_epoch_end(self, epoch):
        """Evaluate, adapt the gradient-penalty threshold, and dump the log table."""
        self.test_agent(agent=self.agent, name='policy', logger=self.logger)
        # set delta_gp
        kl_stats = self.logger.get_stats('KL')
        self.agent.set_delta_gp(kl_stats[0] + kl_stats[1])  # mean + std
        # Log info about epoch
        self.logger.log_tabular('Epoch', epoch)
        self.logger.log_tabular('TestEpRet', with_min_and_max=True)
        self.logger.log_tabular('NormalizedTestEpRet', average_only=True)
        self.logger.log_tabular('TestEpLen', average_only=True)
        self.agent.log_tabular()
        self.logger.log_tabular('GradientSteps', epoch * self.steps_per_epoch)
        self.logger.log_tabular('Time', time.time() - self.start_time)
        self.logger.dump_tabular()
        if self.save_freq is not None and (epoch + 1) % self.save_freq == 0:
            self.agent.save_weights(filepath=os.path.join(self.logger.output_dir, f'agent_final_{epoch + 1}.ckpt'))

    def on_train_begin(self):
        """Pretrain (or load) the behavior policy and the cloned policy, then
        derive target_entropy / max_kl heuristics from the dataset."""
        self.agent.set_behavior_policy_optimizer(self.pretrain_epochs, self.steps_per_epoch)
        try:
            if self.force_pretrain_behavior:
                # force retraining by pretending the checkpoint is absent
                raise tf.errors.NotFoundError(None, None, None)
            self.agent.behavior_policy.load_weights(filepath=self.behavior_filepath).assert_consumed()
            EpochLogger.log(f'Successfully load behavior policy from {self.behavior_filepath}')
        except tf.errors.NotFoundError:
            self.agent.pretrain_behavior_policy(self.pretrain_epochs, self.steps_per_epoch, self.replay_buffer)
            self.agent.behavior_policy.save_weights(filepath=self.behavior_filepath)
        except AssertionError as e:
            print(e)
            EpochLogger.log('The structure of model is altered. Add --pretrain_behavior flag.', color='red')
            raise
        obs_act_dataset = tf.data.Dataset.from_tensor_slices((self.replay_buffer.get()['obs'],
                                                              self.replay_buffer.get()['act'])).batch(1000)
        # evaluate dataset log probability
        behavior_nll = []
        for obs, act in obs_act_dataset:
            raw_act = self.agent.behavior_policy.inverse_transform_action(act)
            behavior_nll.append(self.agent.behavior_policy.test_on_batch(x=(raw_act, obs))['loss'])
        behavior_nll = tf.reduce_mean(tf.concat(behavior_nll, axis=0)).numpy()
        self.logger.log(f'Behavior policy data log probability is {-behavior_nll:.4f}')
        # set target_entropy heuristically as -behavior_log_prob - act_dim
        if self.agent.target_entropy is None:
            # std reduced by a factor of x
            self.agent.target_entropy = behavior_nll - self.agent.ac_dim * np.log(self.std_scale)
        self.logger.log(f'The target entropy of the behavior policy is {self.agent.target_entropy:.4f}')
        self.agent.set_pretrain_policy_net_optimizer(self.pretrain_epochs, self.steps_per_epoch)
        try:
            if self.force_pretrain_cloning:
                raise tf.errors.NotFoundError(None, None, None)
            self.agent.policy_net.load_weights(filepath=self.policy_behavior_filepath).assert_consumed()
            self.agent.log_beta.load_weights(filepath=self.log_beta_behavior_filepath).assert_consumed()
            self.agent.hard_update_policy_target()
            EpochLogger.log(f'Successfully load initial policy from {self.policy_behavior_filepath}')
        except tf.errors.NotFoundError:
            self.agent.pretrain_cloning(self.pretrain_epochs, self.steps_per_epoch, self.replay_buffer)
            self.agent.policy_net.save_weights(filepath=self.policy_behavior_filepath)
            self.agent.log_beta.save_weights(filepath=self.log_beta_behavior_filepath)
        except AssertionError as e:
            print(e)
            EpochLogger.log('The structure of model is altered. Add --pretrain_cloning flag', color='red')
            raise
        distance = []
        for obs, act in obs_act_dataset:
            distance.append(self.agent.compute_pi_pib_distance(obs)[0])
        distance = tf.reduce_mean(tf.concat(distance, axis=0)).numpy()
        self.logger.log(f'The average distance ({self.agent.reg_type}) between pi and pi_b is {distance:.4f}')
        # set max_kl heuristically if it is None.
        if self.max_kl is None:
            self.max_kl = distance + self.generalization_threshold  # allow space to explore generalization
        self.agent.set_delta_behavior(self.max_kl)
        self.agent.set_delta_gp(self.max_kl + self.generalization_threshold)
        self.agent.set_policy_net_optimizer()
        self.start_time = time.time()

    def on_train_end(self):
        # persist the final agent for later evaluation
        self.agent.save_weights(filepath=self.final_filepath)

    @classmethod
    def main(cls,
             env_name,
             steps_per_epoch=2500,
             pretrain_epochs=200,
             pretrain_behavior=False,
             pretrain_cloning=False,
             epochs=400,
             batch_size=100,
             num_test_episodes=20,
             seed=1,
             # agent args
             policy_mlp_hidden=256,
             q_mlp_hidden=256,
             policy_lr=5e-6,
             policy_behavior_lr=1e-3,
             q_lr=3e-4,
             alpha_lr=1e-3,
             alpha=10.0,
             tau=1e-3,
             gamma=0.99,
             target_entropy: float = None,
             max_kl: float = None,
             use_gp=True,
             reg_type='kl',
             gp_type='hard',
             sigma=20,
             n=5,
             gp_weight=0.1,
             entropy_reg=True,
             kl_backup=False,
             generalization_threshold=0.1,
             std_scale=4.,
             max_ood_grad_norm=0.01,
             # behavior policy
             num_ensembles=3,
             behavior_mlp_hidden=256,
             behavior_lr=1e-3,
             # others
             reward_scale=True,
             save_freq: int = None,
             tensorboard=False,
             logger_path='data',
             ):
        """Main function to run Improved Behavior Regularized Actor Critic (BRAC+)

        Args:
            env_name (str): name of the environment
            steps_per_epoch (int): number of steps per epoch
            pretrain_epochs (int): number of epochs to pretrain
            pretrain_behavior (bool): whether to pretrain the behavior policy or load from checkpoint.
                If load fails, the flag is ignored.
            pretrain_cloning (bool):whether to pretrain the initial policy or load from checkpoint.
                If load fails, the flag is ignored.
            epochs (int): number of epochs to run
            batch_size (int): batch size of the data sampled from the dataset
            num_test_episodes (int): number of test episodes to evaluate the policy after each epoch
            seed (int): random seed
            policy_mlp_hidden (int): MLP hidden size of the policy network
            q_mlp_hidden (int): MLP hidden size of the Q network
            policy_lr (float): learning rate of the policy network
            policy_behavior_lr (float): learning rate used to train the policy that minimize the distance between the policy
                and the behavior policy. This is usually larger than policy_lr.
            q_lr (float): learning rate of the q network
            alpha_lr (float): learning rate of the alpha
            alpha (int): initial Lagrange multiplier used to control the maximum distance between the \pi and \pi_b
            tau (float): polyak average coefficient of the target update
            gamma (float): discount factor
            target_entropy (float or None): target entropy of the policy
            max_kl (float or None): maximum of the distance between \pi and \pi_b
            use_gp (bool): whether use gradient penalty or not
            reg_type (str): regularization type
            gp_type (str): gradient penalty gating type ('softplus' or 'hard')
            sigma (float): sigma of the Laplacian kernel for MMD
            n (int): number of samples when estimate the expectation for policy evaluation and update
            gp_weight (float): initial GP weight
            entropy_reg (bool): whether use entropy regularization or not
            kl_backup (bool): whether add the KL loss to the backup value of the target Q network
            generalization_threshold (float): generalization threshold used to compute max_kl when max_kl is None
            std_scale (float): standard deviation scale when computing target_entropy when it is None.
            max_ood_grad_norm (float): target OOD action-gradient norm used to adapt the GP weight
            num_ensembles (int): number of ensembles to train the behavior policy
            behavior_mlp_hidden (int): MLP hidden size of the behavior policy
            behavior_lr (float): the learning rate of the behavior policy
            reward_scale (bool): whether to use reward scale or not. By default, it will scale to [0, 1]
            save_freq (int or None): the frequency to save the model
            tensorboard (bool): whether to turn on tensorboard logging
            logger_path (str): directory to write experiment logs to
        """
        config = locals()
        runner = cls(seed=seed, steps_per_epoch=steps_per_epoch, epochs=epochs,
                     exp_name=None, logger_path=logger_path)
        runner.setup_env(env_name=env_name, num_parallel_env=num_test_episodes, asynchronous=False,
                         num_test_episodes=None)
        runner.setup_logger(config=config, tensorboard=tensorboard)
        runner.setup_agent(num_ensembles=num_ensembles,
                           behavior_mlp_hidden=behavior_mlp_hidden,
                           behavior_lr=behavior_lr,
                           policy_mlp_hidden=policy_mlp_hidden, q_mlp_hidden=q_mlp_hidden,
                           policy_lr=policy_lr, q_lr=q_lr, alpha_lr=alpha_lr, alpha=alpha, tau=tau, gamma=gamma,
                           target_entropy=target_entropy, use_gp=use_gp,
                           policy_behavior_lr=policy_behavior_lr,
                           reg_type=reg_type, sigma=sigma, n=n, gp_weight=gp_weight, gp_type=gp_type,
                           entropy_reg=entropy_reg, kl_backup=kl_backup, max_ood_grad_norm=max_ood_grad_norm)
        runner.setup_extra(pretrain_epochs=pretrain_epochs,
                           save_freq=save_freq,
                           max_kl=max_kl,
                           force_pretrain_behavior=pretrain_behavior,
                           force_pretrain_cloning=pretrain_cloning,
                           generalization_threshold=generalization_threshold,
                           std_scale=std_scale)
        runner.setup_replay_buffer(batch_size=batch_size,
                                   reward_scale=reward_scale)
        runner.run()
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--env_name', type=str, required=True)
    parser.add_argument('--pretrain_behavior', action='store_true')
    parser.add_argument('--pretrain_cloning', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    args = vars(parser.parse_args())
    # all parsed keys (env_name included) are forwarded directly to main()
    BRACPRunner.main(**args)
import os
import time
import gym
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from rlutils.tf.future import get_adam_optimizer, minimize
from rlutils.logx import EpochLogger
from rlutils.replay_buffers import PyUniformReplayBuffer
from rlutils.infra.runner import TFRunner
from rlutils.tf.functional import soft_update, hard_update, to_numpy_or_python_type
from rlutils.tf.nn import EnsembleMinQNet, BehaviorPolicy
from rlutils.tf.nn.functional import build_mlp
from tqdm.auto import tqdm, trange
tfd = tfp.distributions
tfl = tfp.layers
class PLASAgent(tf.keras.Model):
    """PLAS-style offline RL agent.

    A deterministic policy outputs a *latent* code of a pretrained (CVAE)
    behavior policy; the CVAE decoder maps the latent back into an action,
    keeping selected actions close to the offline data distribution.
    """

    def __init__(self,
                 ob_dim,
                 ac_dim,
                 behavior_mlp_hidden=256,
                 behavior_lr=1e-3,
                 policy_lr=5e-6,
                 policy_mlp_hidden=256,
                 q_mlp_hidden=256,
                 q_lr=1e-4,
                 tau=1e-3,
                 gamma=0.99,
                 beta=1.,
                 latent_threshold=2.0
                 ):
        """Build networks and optimizers.

        Args:
            ob_dim (int): observation dimension.
            ac_dim (int): action dimension.
            behavior_mlp_hidden (int): hidden size of the behavior CVAE.
            behavior_lr (float): learning rate of the behavior CVAE.
            policy_lr (float): learning rate of the latent policy.
            policy_mlp_hidden (int): hidden size of the latent policy MLP.
            q_mlp_hidden (int): hidden size of the Q networks.
            q_lr (float): learning rate of the Q networks.
            tau (float): polyak averaging coefficient for target updates.
            gamma (float): discount factor.
            beta (float): KL weight of the behavior CVAE.
            latent_threshold (float): latent codes are squashed into
                [-latent_threshold, latent_threshold].
        """
        super(PLASAgent, self).__init__()
        self.ob_dim = ob_dim
        self.ac_dim = ac_dim
        self.q_mlp_hidden = q_mlp_hidden
        # CVAE behavior policy fitted to the dataset by maximum likelihood
        self.behavior_policy = BehaviorPolicy(out_dist='normal', obs_dim=self.ob_dim, act_dim=self.ac_dim,
                                              mlp_hidden=behavior_mlp_hidden, beta=beta)
        self.behavior_policy.optimizer = get_adam_optimizer(lr=behavior_lr)
        # deterministic latent policy: obs -> latent code of the behavior CVAE
        self.policy_net = build_mlp(self.ob_dim, self.behavior_policy.latent_dim,
                                    mlp_hidden=policy_mlp_hidden, num_layers=3)
        self.target_policy_net = build_mlp(self.ob_dim, self.behavior_policy.latent_dim,
                                           mlp_hidden=policy_mlp_hidden, num_layers=3)
        # reset policy net learning rate
        self.policy_net.optimizer = get_adam_optimizer(lr=policy_lr)
        hard_update(self.target_policy_net, self.policy_net)
        self.q_network = EnsembleMinQNet(ob_dim, ac_dim, q_mlp_hidden)
        self.q_network.compile(optimizer=get_adam_optimizer(q_lr))
        self.target_q_network = EnsembleMinQNet(ob_dim, ac_dim, q_mlp_hidden)
        hard_update(self.target_q_network, self.q_network)
        self.tau = tau  # polyak averaging coefficient
        self.gamma = gamma  # discount factor
        self.latent_threshold = latent_threshold  # tanh-scaled latent clip range

    def get_action(self, policy_net, obs):
        """Map obs -> clipped latent -> decoded raw action -> squashed action."""
        z = policy_net(obs)
        # keep the latent inside [-latent_threshold, latent_threshold]
        z = tf.tanh(z) * self.latent_threshold
        raw_action = self.behavior_policy.decode_sample(z=(z, obs))
        action = tf.tanh(raw_action)
        return action, z

    def call(self, inputs, training=None, mask=None):
        """Keras forward pass; returns the squashed action for `obs`.

        The latent policy is deterministic, so `deterministic` is accepted for
        interface compatibility but has no effect. (The previous implementation
        fed the (obs, deterministic) tuple straight into the plain MLP and
        indexed its output — a leftover from a stochastic-policy agent.)
        """
        obs, deterministic = inputs
        pi_final, _ = self.get_action(self.policy_net, obs)
        return pi_final

    def set_logger(self, logger):
        # attach an EpochLogger used by update() to store diagnostics
        self.logger = logger

    def log_tabular(self):
        """Register this agent's diagnostics with the logger for tabular output."""
        self.logger.log_tabular('Q1Vals', with_min_and_max=True)
        self.logger.log_tabular('Q2Vals', with_min_and_max=True)
        self.logger.log_tabular('LossPi', average_only=True)
        self.logger.log_tabular('LossQ', average_only=True)
        self.logger.log_tabular('BehaviorLoss', average_only=True)
        self.logger.log_tabular('Z', with_min_and_max=True)

    @tf.function
    def update_target(self):
        # polyak-average both target networks towards the online networks
        soft_update(self.target_q_network, self.q_network, self.tau)
        soft_update(self.target_policy_net, self.policy_net, self.tau)

    @tf.function
    def update_actor(self, obs):
        """One gradient step on the latent policy to maximize the ensemble-min Q."""
        # TODO: maybe we just follow behavior policy and keep a minimum entropy instead of the optimal one.
        # policy loss
        n = 10
        batch_size = tf.shape(obs)[0]
        obs = tf.tile(obs, (n, 1))
        with tf.GradientTape() as policy_tape:
            """ Compute the loss function of the policy that maximizes the Q function """
            print(f'Tracing _compute_surrogate_loss_pi with obs={obs}')
            policy_tape.watch(self.policy_net.trainable_variables)
            action, z = self.get_action(self.policy_net, obs)
            q_values_pi_min = self.q_network((obs, action), training=False)
            q_values_pi_min = tf.reshape(q_values_pi_min, (n, batch_size))
            q_values_pi_min = tf.reduce_mean(q_values_pi_min, axis=0)
            policy_loss = -tf.reduce_mean(q_values_pi_min, axis=0)
        minimize(policy_loss, policy_tape, self.policy_net)
        info = dict(
            LossPi=policy_loss,
            Z=tf.reshape(z, shape=(-1,))
        )
        return info

    def _compute_target_q(self, next_obs, reward, done):
        """TD target: max over n target-policy action samples of the target Q."""
        n = 10
        batch_size = tf.shape(next_obs)[0]
        next_obs = tf.tile(next_obs, (n, 1))
        next_action, _ = self.get_action(self.target_policy_net, next_obs)
        # maybe add noise?
        target_q_values = self.target_q_network((next_obs, next_action), training=False)
        target_q_values = tf.reshape(target_q_values, (n, batch_size))
        target_q_values = tf.reduce_max(target_q_values, axis=0)
        q_target = reward + self.gamma * (1.0 - done) * target_q_values
        return q_target

    def _update_q_nets(self, obs, actions, q_target):
        """Fit the Q ensemble to q_target with an MSE loss."""
        # q loss
        with tf.GradientTape() as q_tape:
            q_tape.watch(self.q_network.trainable_variables)
            q_values = self.q_network((obs, actions), training=True)  # (num_ensembles, None)
            q_values_loss = 0.5 * tf.square(tf.expand_dims(q_target, axis=0) - q_values)
            # (num_ensembles, None)
            q_values_loss = tf.reduce_sum(q_values_loss, axis=0)  # (None,)
        # NOTE(review): the per-sample (None,) loss is passed to minimize
        # without a final reduce_mean (unlike the BRACP agent) — confirm
        # `minimize` handles vector losses as intended.
        minimize(q_values_loss, q_tape, self.q_network)
        info = dict(
            Q1Vals=q_values[0],
            Q2Vals=q_values[1],
            LossQ=q_values_loss,
        )
        return info

    @tf.function
    def update_q_nets(self, obs, actions, next_obs, done, reward):
        """Normal SAC update"""
        q_target = self._compute_target_q(next_obs, reward, done)
        return self._update_q_nets(obs, actions, q_target)

    @tf.function
    def _update(self, obs, act, next_obs, done, rew):
        """Behavior-policy MLE step followed by one critic update."""
        # the behavior policy is trained on pre-tanh (raw) actions
        raw_act = self.behavior_policy.inverse_transform_action(act)
        behavior_loss = self.behavior_policy.train_on_batch(x=(raw_act, obs))['loss']
        info = self.update_q_nets(obs, act, next_obs, done, rew)
        info['BehaviorLoss'] = behavior_loss
        return info

    def update(self, obs, act, next_obs, done, rew, update_target=True):
        """One training step; the actor and targets update only when
        update_target is True (delayed policy update)."""
        # TODO: use different batches to update q and actor to break correlation
        info = self._update(obs, act, next_obs, done, rew)
        if update_target:
            actor_info = self.update_actor(obs)
            # we only update alpha when policy is updated
            info.update(actor_info)
            self.update_target()
        self.logger.store(**to_numpy_or_python_type(info))

    @tf.function
    def act_batch(self, obs):
        """Best-of-n action selection: decode n latents per state and pick the
        action with the highest mean ensemble Q value."""
        n = 20
        batch_size = tf.shape(obs)[0]
        obs_tile = tf.tile(obs, (n, 1))
        action, _ = self.get_action(self.policy_net, obs_tile)
        q_values_pi_min = self.q_network((obs_tile, action), training=True)
        q_values_pi_min = tf.reduce_mean(q_values_pi_min, axis=0)
        idx = tf.argmax(tf.reshape(q_values_pi_min, shape=(n, batch_size)), axis=0,
                        output_type=tf.int32)  # (batch_size)
        idx = tf.stack([idx, tf.range(batch_size)], axis=-1)
        samples = tf.reshape(action, shape=(n, batch_size, self.ac_dim))
        pi_final = tf.gather_nd(samples, idx)
        return pi_final

    def pretrain_behavior_policy(self, epochs, steps_per_epoch, replay_buffer):
        """Fit the (CVAE) behavior policy to the offline dataset by maximum likelihood."""
        # plain string: the message has no placeholders, so no f-string needed
        EpochLogger.log('Training behavior policy')
        t = trange(epochs)
        for epoch in t:
            loss = []
            for _ in trange(steps_per_epoch, desc=f'Epoch {epoch + 1}/{epochs}', leave=False):
                # update q_b, pi_0, pi_b
                data = replay_buffer.sample()
                obs = data['obs']
                raw_act = self.behavior_policy.inverse_transform_action(data['act'])
                behavior_loss = self.behavior_policy.train_on_batch(x=(raw_act, obs))['loss']
                loss.append(behavior_loss)
            loss = tf.reduce_mean(loss).numpy()
            t.set_description(desc=f'Loss: {loss:.2f}')
class Runner(TFRunner):
    """Offline training runner for PLAS on D4RL datasets."""

    def test_agent(self, agent, name, logger=None):
        """Evaluate ``agent`` in the vectorized test env until every episode finishes."""
        # np.bool was removed in NumPy >= 1.24; the builtin bool is the correct dtype.
        o, d, ep_ret, ep_len = self.env.reset(), np.zeros(shape=self.num_test_episodes, dtype=bool), \
                               np.zeros(shape=self.num_test_episodes), np.zeros(shape=self.num_test_episodes,
                                                                                dtype=np.int64)
        t = tqdm(total=1, desc=f'Testing {name}')
        while not np.all(d):
            a = agent.act_batch(tf.convert_to_tensor(o, dtype=tf.float32)).numpy()
            assert not np.any(np.isnan(a)), f'nan action: {a}'
            o, r, d_, _ = self.env.step(a)
            # accumulate return/length only for episodes that are still running
            ep_ret = r * (1 - d) + ep_ret
            ep_len = np.ones(shape=self.num_test_episodes, dtype=np.int64) * (1 - d) + ep_len
            d = np.logical_or(d, d_)
            t.update(1)
        t.close()
        normalized_ep_ret = self.dummy_env.get_normalized_score(ep_ret) * 100
        if logger is not None:
            logger.store(TestEpRet=ep_ret, NormalizedTestEpRet=normalized_ep_ret, TestEpLen=ep_len)
        else:
            print(f'EpRet: {np.mean(ep_ret):.2f}, TestEpLen: {np.mean(ep_len):.2f}')

    def setup_replay_buffer(self,
                            batch_size,
                            reward_scale=True):
        """Load the D4RL dataset into a uniform replay buffer, optionally rescaling rewards to [0, 1]."""
        import d4rl

        def rescale(x):
            # min-max normalize to [0, 1]
            return (x - np.min(x)) / (np.max(x) - np.min(x))

        self.dummy_env = gym.make(self.env_name)
        dataset = d4rl.qlearning_dataset(env=self.dummy_env)
        if reward_scale:
            EpochLogger.log('Using reward scale', color='red')
            # note: max(r - min(r)) equals max(r) - min(r), i.e. the span of the reward range
            self.agent.reward_scale_factor = np.max(dataset['rewards'] - np.min(dataset['rewards']))
            EpochLogger.log(f'The scale factor is {self.agent.reward_scale_factor:.2f}')
            dataset['rewards'] = rescale(dataset['rewards'])
        # modify keys to the names expected by the replay buffer
        dataset['obs'] = dataset.pop('observations').astype(np.float32)
        dataset['act'] = dataset.pop('actions').astype(np.float32)
        dataset['obs2'] = dataset.pop('next_observations').astype(np.float32)
        dataset['rew'] = dataset.pop('rewards').astype(np.float32)
        dataset['done'] = dataset.pop('terminals').astype(np.float32)
        replay_size = dataset['obs'].shape[0]
        self.logger.log(f'Dataset size: {replay_size}')
        self.replay_buffer = PyUniformReplayBuffer.from_data_dict(
            data=dataset,
            batch_size=batch_size
        )

    def setup_agent(self,
                    behavior_mlp_hidden,
                    behavior_lr,
                    policy_mlp_hidden,
                    q_mlp_hidden,
                    policy_lr,
                    q_lr,
                    tau,
                    gamma,
                    ):
        """Instantiate the PLAS agent and the checkpoint paths derived from the logger dir."""
        obs_dim = self.env.single_observation_space.shape[-1]
        act_dim = self.env.single_action_space.shape[-1]
        self.agent = PLASAgent(ob_dim=obs_dim, ac_dim=act_dim,
                               behavior_mlp_hidden=behavior_mlp_hidden,
                               behavior_lr=behavior_lr,
                               policy_mlp_hidden=policy_mlp_hidden, q_mlp_hidden=q_mlp_hidden,
                               q_lr=q_lr, tau=tau, gamma=gamma)
        self.agent.set_logger(self.logger)
        self.behavior_filepath = os.path.join(self.logger.output_dir, 'behavior.ckpt')
        self.final_filepath = os.path.join(self.logger.output_dir, 'agent_final.ckpt')

    def setup_extra(self,
                    pretrain_epochs,
                    save_freq,
                    force_pretrain_behavior,
                    generalization_threshold,
                    ):
        """Store run configuration that does not belong to the agent or replay buffer."""
        self.pretrain_epochs = pretrain_epochs
        self.save_freq = save_freq
        self.force_pretrain_behavior = force_pretrain_behavior
        self.generalization_threshold = generalization_threshold

    def run_one_step(self, t):
        """Perform one offline gradient step."""
        # NOTE(review): the PLASAgent.update visible above takes (obs, act, ...) rather than a
        # replay buffer -- confirm the agent also exposes an update(replay_buffer) overload.
        self.agent.update(self.replay_buffer)

    def on_epoch_end(self, epoch):
        """Evaluate, dump tabular diagnostics, and periodically checkpoint the agent."""
        self.test_agent(agent=self.agent, name='policy', logger=self.logger)
        # Log info about epoch
        self.logger.log_tabular('Epoch', epoch)
        self.logger.log_tabular('TestEpRet', with_min_and_max=True)
        self.logger.log_tabular('NormalizedTestEpRet', average_only=True)
        self.logger.log_tabular('TestEpLen', average_only=True)
        self.agent.log_tabular()
        self.logger.log_tabular('GradientSteps', epoch * self.steps_per_epoch)
        self.logger.log_tabular('Time', time.time() - self.start_time)
        self.logger.dump_tabular()
        if self.save_freq is not None and (epoch + 1) % self.save_freq == 0:
            self.agent.save_weights(filepath=os.path.join(self.logger.output_dir, f'agent_final_{epoch + 1}.ckpt'))

    def get_decayed_lr_schedule(self, lr, interval):
        """Piecewise-constant decay: lr -> 0.5lr -> 0.1lr -> 0.05lr -> 0.01lr every ``interval`` steps."""
        lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
            boundaries=[interval, interval * 2, interval * 3, interval * 4],
            values=[lr, 0.5 * lr, 0.1 * lr, 0.05 * lr, 0.01 * lr])
        return lr_schedule

    def on_train_begin(self):
        """Load (or pretrain) the behavior policy before offline RL training starts."""
        interval = self.pretrain_epochs * self.steps_per_epoch // 5
        behavior_lr = self.agent.behavior_lr
        self.agent.behavior_policy.optimizer = get_adam_optimizer(lr=self.get_decayed_lr_schedule(lr=behavior_lr,
                                                                                                  interval=interval))
        try:
            if self.force_pretrain_behavior:
                # skip loading and jump straight to the pretraining branch below
                raise tf.errors.NotFoundError(None, None, None)
            self.agent.behavior_policy.load_weights(filepath=self.behavior_filepath).assert_consumed()
            EpochLogger.log(f'Successfully load behavior policy from {self.behavior_filepath}')
        except tf.errors.NotFoundError:
            self.agent.pretrain_behavior_policy(self.pretrain_epochs, self.steps_per_epoch, self.replay_buffer)
            self.agent.behavior_policy.save_weights(filepath=self.behavior_filepath)
        except AssertionError as e:
            print(e)
            EpochLogger.log('The structure of model is altered. Add --pretrain_behavior flag.', color='red')
            raise
        self.start_time = time.time()

    def on_train_end(self):
        """Persist the final agent weights."""
        self.agent.save_weights(filepath=self.final_filepath)

    @classmethod
    def main(cls,
             env_name,
             steps_per_epoch=2500,
             pretrain_epochs=200,
             pretrain_behavior=False,
             epochs=400,
             batch_size=100,
             num_test_episodes=20,
             seed=1,
             # agent args
             policy_mlp_hidden=256,
             q_mlp_hidden=256,
             policy_lr=1e-6,
             q_lr=3e-4,
             tau=5e-3,
             gamma=0.99,
             # behavior policy
             behavior_mlp_hidden=256,
             behavior_lr=1e-3,
             # others
             generalization_threshold=0.1,
             reward_scale=False,
             save_freq: int = None,
             tensorboard=False,
             ):
        """Main function to run Improved Behavior Regularized Actor Critic (BRAC+)

        Args:
            env_name (str): name of the environment
            steps_per_epoch (int): number of steps per epoch
            pretrain_epochs (int): number of epochs to pretrain
            pretrain_behavior (bool): whether to pretrain the behavior policy or load from checkpoint.
                If load fails, the flag is ignored.
            epochs (int): number of epochs to run
            batch_size (int): batch size of the data sampled from the dataset
            num_test_episodes (int): number of test episodes to evaluate the policy after each epoch
            seed (int): random seed
            policy_mlp_hidden (int): MLP hidden size of the policy network
            q_mlp_hidden (int): MLP hidden size of the Q network
            policy_lr (float): learning rate of the policy network
            q_lr (float): learning rate of the q network
            tau (float): polyak average coefficient of the target update
            gamma (float): discount factor
            behavior_mlp_hidden (int): MLP hidden size of the behavior policy
            behavior_lr (float): the learning rate of the behavior policy
            generalization_threshold (float): generalization threshold used to compute max_kl when max_kl is None
            reward_scale (bool): whether to use reward scale or not. By default, it will scale to [0, 1]
            save_freq (int or None): the frequency to save the model
            tensorboard (bool): whether to turn on tensorboard logging
        """
        config = locals()
        runner = cls(seed=seed, steps_per_epoch=steps_per_epoch, epochs=epochs,
                     exp_name=None, logger_path='data')
        runner.setup_env(env_name=env_name, num_parallel_env=num_test_episodes, frame_stack=None, wrappers=None,
                         asynchronous=False, num_test_episodes=None)
        runner.setup_logger(config=config, tensorboard=tensorboard)
        runner.setup_agent(
            behavior_mlp_hidden=behavior_mlp_hidden,
            behavior_lr=behavior_lr,
            policy_mlp_hidden=policy_mlp_hidden, q_mlp_hidden=q_mlp_hidden,
            policy_lr=policy_lr, q_lr=q_lr, tau=tau, gamma=gamma,
        )
        runner.setup_extra(pretrain_epochs=pretrain_epochs,
                           save_freq=save_freq,
                           force_pretrain_behavior=pretrain_behavior,
                           generalization_threshold=generalization_threshold
                           )
        runner.setup_replay_buffer(batch_size=batch_size,
                                   reward_scale=reward_scale)
        runner.run()
if __name__ == '__main__':
    import argparse

    # Every parsed flag maps 1:1 onto a parameter of Runner.main. The previous
    # --pretrain_cloning flag was removed: Runner.main has no such parameter, so
    # forwarding it via **args raised a TypeError.
    parser = argparse.ArgumentParser()
    parser.add_argument('--env_name', type=str, required=True)
    parser.add_argument('--pretrain_behavior', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    args = vars(parser.parse_args())
    Runner.main(**args)
import time
import gym
import numpy as np
import tensorflow as tf
from rlutils.replay_buffers import PyUniformReplayBuffer
from rlutils.infra.runner import TFRunner, run_func_as_main
from rlutils.tf.distributions import apply_squash_log_prob
from rlutils.tf.functional import soft_update, hard_update, compute_target_value, to_numpy_or_python_type
from rlutils.tf.nn import LagrangeLayer, SquashedGaussianMLPActor, EnsembleMinQNet
from tqdm.auto import tqdm
EPS = 1e-6  # numerical epsilon used to keep atanh arguments strictly inside (-1, 1)
class CQLAgent(tf.keras.Model):
    """Conservative Q-Learning (CQL) agent for continuous-action offline RL.

    Combines a SAC-style actor-critic with a conservative penalty that pushes
    down out-of-distribution Q-values via an importance-sampled logsumexp.
    """

    def __init__(self,
                 obs_spec,
                 act_spec,
                 policy_mlp_hidden=128,
                 policy_lr=3e-4,
                 q_mlp_hidden=256,
                 q_lr=3e-4,
                 alpha=1.0,
                 alpha_lr=1e-3,
                 alpha_cql=1.,
                 alpha_cql_lr=1e-3,
                 tau=5e-3,
                 gamma=0.99,
                 num_samples=10,
                 cql_threshold=-1.,
                 target_entropy=None,
                 ):
        super(CQLAgent, self).__init__()
        self.obs_spec = obs_spec
        self.act_spec = act_spec
        self.num_samples = num_samples
        self.act_dim = self.act_spec.shape[0]
        if len(self.obs_spec.shape) == 1:  # 1D observation
            self.obs_dim = self.obs_spec.shape[0]
            self.policy_net = SquashedGaussianMLPActor(self.obs_dim, self.act_dim, policy_mlp_hidden)
            self.q_network = EnsembleMinQNet(self.obs_dim, self.act_dim, q_mlp_hidden)
            self.target_q_network = EnsembleMinQNet(self.obs_dim, self.act_dim, q_mlp_hidden)
        else:
            raise NotImplementedError
        hard_update(self.target_q_network, self.q_network)
        self.policy_optimizer = tf.keras.optimizers.Adam(lr=policy_lr)
        self.q_optimizer = tf.keras.optimizers.Adam(lr=q_lr)
        # Two Lagrange multipliers: alpha for entropy, alpha_cql for the CQL penalty.
        self.log_alpha = LagrangeLayer(initial_value=alpha)
        self.log_cql = LagrangeLayer(initial_value=alpha_cql)
        self.alpha_optimizer = tf.keras.optimizers.Adam(lr=alpha_lr)
        self.cql_alpha_optimizer = tf.keras.optimizers.Adam(lr=alpha_cql_lr)
        self.target_entropy = -self.act_dim if target_entropy is None else target_entropy
        self.cql_threshold = cql_threshold
        self.min_q_weight = 5.
        self.tau = tau
        self.gamma = gamma

    def set_logger(self, logger):
        """Attach an EpochLogger used by update() and log_tabular()."""
        self.logger = logger

    def log_tabular(self):
        """Dump the per-epoch diagnostics collected in update()."""
        self.logger.log_tabular('Q1Vals', with_min_and_max=True)
        self.logger.log_tabular('Q2Vals', with_min_and_max=True)
        self.logger.log_tabular('LogPi', average_only=True)
        self.logger.log_tabular('LossPi', average_only=True)
        self.logger.log_tabular('LossQ', average_only=True)
        self.logger.log_tabular('Alpha', average_only=True)
        self.logger.log_tabular('LossAlpha', average_only=True)
        self.logger.log_tabular('AlphaCQL', average_only=True)
        self.logger.log_tabular('AlphaCQLLoss', average_only=True)
        self.logger.log_tabular('DeltaQ', with_min_and_max=True)

    @tf.function
    def update_target(self):
        """Polyak-average the online Q network into the target Q network."""
        soft_update(self.target_q_network, self.q_network, self.tau)

    def _compute_next_obs_q(self, next_obs):
        """Max-backup target: best of num_samples policy actions under the target Q."""
        batch_size = tf.shape(next_obs)[0]
        next_obs = tf.tile(next_obs, multiples=(self.num_samples, 1))  # (None * n, obs_dim)
        next_action, next_action_log_prob, _, _ = self.policy_net((next_obs, False))
        next_q_values = self.target_q_network((next_obs, next_action), training=False)
        next_q_values = tf.reshape(next_q_values, shape=(self.num_samples, batch_size))
        next_q_values = tf.reduce_max(next_q_values, axis=0)  # max backup
        return next_q_values

    def _compute_raw_action(self, actions):
        """Invert the tanh squashing, clipping away from +/-1 for numerical safety."""
        raw_action = tf.atanh(tf.clip_by_value(actions, -1. + EPS, 1. - EPS))
        return raw_action

    @tf.function
    def _update_nets(self, obs, actions, next_obs, done, reward, behavior_cloning=False):
        """ Sample a mini-batch from replay buffer and update the network
        Args:
            obs: (batch_size, ob_dim)
            actions: (batch_size, action_dim)
            next_obs: (batch_size, ob_dim)
            done: (batch_size,)
            reward: (batch_size,)
        Returns: dict of diagnostics
        """
        batch_size = tf.shape(obs)[0]
        alpha = self.log_alpha()
        # compute target Q values
        next_q_values = self._compute_next_obs_q(next_obs)
        q_target = compute_target_value(reward, self.gamma, done, next_q_values)
        # generate additional actions for CQL
        random_actions = tf.random.uniform(shape=[batch_size * self.num_samples, self.act_dim],
                                           minval=-1., maxval=1., dtype=tf.float32)
        # The uniform density on [-1, 1]^act_dim is (1/2)^act_dim, so the (constant)
        # log-prob must be summed over action dimensions to be comparable with the
        # joint pi_log_prob below. The previous -log(2) was only correct for act_dim == 1.
        log_prob_random = -np.log(2.) * self.act_dim
        raw_pi_distribution = self.policy_net((obs, False))[-1]
        raw_pi_actions = raw_pi_distribution.sample(self.num_samples)  # (n, None, act_dim)
        pi_actions = tf.tanh(raw_pi_actions)
        pi_log_prob = apply_squash_log_prob(raw_log_prob=raw_pi_distribution.log_prob(raw_pi_actions),
                                            x=raw_pi_actions)  # (n, None)
        # reshape; the leading 1 lets the log-probs broadcast against (num_ensembles, n * None)
        pi_actions = tf.reshape(pi_actions, shape=(self.num_samples * batch_size, self.act_dim))
        pi_log_prob = tf.reshape(pi_log_prob, shape=(1, self.num_samples * batch_size,))
        raw_next_pi_distribution = self.policy_net((next_obs, False))[-1]
        raw_next_pi_actions = raw_next_pi_distribution.sample(self.num_samples)  # (n, None, act_dim)
        next_pi_actions = tf.tanh(raw_next_pi_actions)
        # bug fix: the squash correction must use the same samples whose log-prob is taken
        # (raw_next_pi_actions), not the current-state samples.
        next_pi_log_prob = apply_squash_log_prob(raw_log_prob=raw_next_pi_distribution.log_prob(raw_next_pi_actions),
                                                 x=raw_next_pi_actions)
        # reshape
        next_pi_actions = tf.reshape(next_pi_actions, shape=(self.num_samples * batch_size, self.act_dim))
        next_pi_log_prob = tf.reshape(next_pi_log_prob, shape=(1, self.num_samples * batch_size))
        alpha_cql = self.log_cql()
        # q loss
        with tf.GradientTape() as q_tape:
            q_values = self.q_network((obs, actions), training=True)  # (num_ensembles, None)
            mse_q_values_loss = 0.5 * tf.square(tf.expand_dims(q_target, axis=0) - q_values)  # (num_ensembles, None)
            mse_q_values_loss = tf.reduce_mean(tf.reduce_sum(mse_q_values_loss, axis=0), axis=0)  # scalar
            # CQL loss logsumexp(Q(s_i, a)) - Q(s_i, a_i). Importance sampling
            obs_tile = tf.tile(obs, multiples=(self.num_samples, 1))  # (n * None, obs_dim)
            q_random = self.q_network((obs_tile, random_actions),
                                      training=True) - log_prob_random  # (num_ensembles, n * None)
            q_pi = self.q_network((obs_tile, pi_actions), training=True) - pi_log_prob  # (num_ensembles, n * None)
            q_next_pi = self.q_network((obs_tile, next_pi_actions), training=True) - next_pi_log_prob
            q_random = tf.reshape(q_random, shape=(self.q_network.num_ensembles, self.num_samples, batch_size))
            q_pi = tf.reshape(q_pi, shape=(self.q_network.num_ensembles, self.num_samples, batch_size))
            q_next_pi = tf.reshape(q_next_pi, shape=(self.q_network.num_ensembles, self.num_samples, batch_size))
            q = tf.concat((q_random, q_pi, q_next_pi), axis=1)  # (num_ensembles, 3n, None)
            q = tf.math.reduce_logsumexp(q, axis=1)  # (num_ensembles, None)
            # the out-of-distribution Q should not be greater than in-distribution Q by threshold
            delta_q = (tf.reduce_mean(tf.reduce_sum(q, axis=0), axis=0) -
                       tf.reduce_mean(tf.reduce_sum(q_values, axis=0))) * self.min_q_weight \
                      - self.cql_threshold * self.q_network.num_ensembles  # scalar
            q_values_loss = mse_q_values_loss + alpha_cql * delta_q
        q_gradients = q_tape.gradient(q_values_loss, self.q_network.trainable_variables)
        self.q_optimizer.apply_gradients(zip(q_gradients, self.q_network.trainable_variables))
        # dual ascent on the CQL multiplier: grows when delta_q > 0, shrinks otherwise
        with tf.GradientTape() as cql_tape:
            alpha_cql = self.log_cql()
            alpha_cql_loss = -alpha_cql * delta_q
        alpha_cql_gradients = cql_tape.gradient(alpha_cql_loss, self.log_cql.trainable_variables)
        self.cql_alpha_optimizer.apply_gradients(zip(alpha_cql_gradients, self.log_cql.trainable_variables))
        # policy loss
        with tf.GradientTape() as policy_tape:
            if behavior_cloning:
                # warm-up phase: maximize the log-likelihood of dataset actions
                _, log_prob, _, pi_distribution = self.policy_net((obs, False))
                raw_action = self._compute_raw_action(actions)
                policy_loss = tf.reduce_mean(log_prob * alpha - pi_distribution.log_prob(raw_action), axis=0)
            else:
                action, log_prob, _, _ = self.policy_net((obs, False))
                q_values_pi_min = self.q_network((obs, action), training=False)
                policy_loss = tf.reduce_mean(log_prob * alpha - q_values_pi_min, axis=0)
        policy_gradients = policy_tape.gradient(policy_loss, self.policy_net.trainable_variables)
        self.policy_optimizer.apply_gradients(zip(policy_gradients, self.policy_net.trainable_variables))
        # entropy temperature update towards target_entropy
        with tf.GradientTape() as alpha_tape:
            alpha = self.log_alpha()
            alpha_loss = -tf.reduce_mean(alpha * (log_prob + self.target_entropy))
        alpha_gradient = alpha_tape.gradient(alpha_loss, self.log_alpha.trainable_variables)
        self.alpha_optimizer.apply_gradients(zip(alpha_gradient, self.log_alpha.trainable_variables))
        info = dict(
            Q1Vals=q_values[0],
            Q2Vals=q_values[1],
            LogPi=log_prob,
            Alpha=alpha,
            LossQ=mse_q_values_loss,
            LossAlpha=alpha_loss,
            LossPi=policy_loss,
            AlphaCQL=alpha_cql,
            AlphaCQLLoss=alpha_cql_loss,
            DeltaQ=delta_q,
        )
        return info

    def update(self, obs, act, next_obs, done, rew, update_target=True, behavior_cloning=False):
        """Convert a numpy batch to tensors, take one training step, and log diagnostics."""
        obs = tf.convert_to_tensor(obs, dtype=tf.float32)
        act = tf.convert_to_tensor(act, dtype=tf.float32)
        next_obs = tf.convert_to_tensor(next_obs, dtype=tf.float32)
        done = tf.convert_to_tensor(done, dtype=tf.float32)
        rew = tf.convert_to_tensor(rew, dtype=tf.float32)
        info = self._update_nets(obs, act, next_obs, done, rew, behavior_cloning)
        self.logger.store(**to_numpy_or_python_type(info))
        if update_target:
            self.update_target()

    @tf.function
    def act_batch(self, obs, deterministic=False):
        """Select actions; stochastic mode picks the best of num_samples candidates."""
        print(f'Tracing sac act_batch with obs {obs}')
        if deterministic:
            pi_final = self.policy_net((obs, deterministic))[0]
        else:
            batch_size = tf.shape(obs)[0]
            obs = tf.tile(obs, (self.num_samples, 1))
            action = self.policy_net((obs, False))[0]
            # NOTE(review): [0, :] selects ensemble member 0 rather than the ensemble min -- confirm intended
            q_values_pi_min = self.q_network((obs, action), training=True)[0, :]
            action = tf.reshape(action, shape=(self.num_samples, batch_size, self.act_dim))
            idx = tf.argmax(tf.reshape(q_values_pi_min, shape=(self.num_samples, batch_size)), axis=0,
                            output_type=tf.int32)  # (batch_size)
            idx = tf.stack([idx, tf.range(batch_size)], axis=-1)
            pi_final = tf.gather_nd(action, idx)
        return pi_final
class Runner(TFRunner):
    """Offline training runner for CQL on D4RL datasets."""

    def get_action_batch(self, o, deterministic=False):
        """Convert observations to tensors and query the agent for a numpy action batch."""
        return self.agent.act_batch(tf.convert_to_tensor(o, dtype=tf.float32),
                                    deterministic).numpy()

    def test_agent(self):
        """Evaluate the agent in the vectorized test env until all episodes finish."""
        # np.bool was removed in NumPy >= 1.24; the builtin bool is the correct dtype.
        o, d, ep_ret, ep_len = self.env.reset(), np.zeros(shape=self.num_test_episodes, dtype=bool), \
                               np.zeros(shape=self.num_test_episodes), \
                               np.zeros(shape=self.num_test_episodes, dtype=np.int64)
        t = tqdm(total=1, desc='Testing')
        while not np.all(d):
            a = self.get_action_batch(o, deterministic=False)
            o, r, d_, _ = self.env.step(a)
            # accumulate return/length only for episodes that are still running
            ep_ret = r * (1 - d) + ep_ret
            ep_len = np.ones(shape=self.num_test_episodes, dtype=np.int64) * (1 - d) + ep_len
            d = np.logical_or(d, d_)
            t.update(1)
        t.close()
        normalized_ep_ret = self.dummy_env.get_normalized_score(ep_ret) * 100
        self.logger.store(TestEpRet=ep_ret, NormalizedTestEpRet=normalized_ep_ret, TestEpLen=ep_len)

    def setup_replay_buffer(self, batch_size):
        """Load the D4RL q-learning dataset into a uniform replay buffer."""
        import d4rl
        self.dummy_env = gym.make(self.env_name)
        dataset = d4rl.qlearning_dataset(env=self.dummy_env)
        # modify keys to the names expected by CQLAgent.update
        dataset['obs'] = dataset.pop('observations')
        dataset['act'] = dataset.pop('actions')
        dataset['next_obs'] = dataset.pop('next_observations')
        dataset['rew'] = dataset.pop('rewards')
        dataset['done'] = dataset.pop('terminals')
        replay_size = dataset['obs'].shape[0]
        print(f'Dataset size: {replay_size}')
        self.replay_buffer = PyUniformReplayBuffer.from_data_dict(
            data=dataset,
            batch_size=batch_size
        )

    def setup_agent(self,
                    policy_mlp_hidden=128,
                    policy_lr=3e-4,
                    q_mlp_hidden=256,
                    q_lr=3e-4,
                    alpha=1.0,
                    alpha_lr=1e-3,
                    alpha_cql=1.,
                    alpha_cql_lr=1e-3,
                    tau=5e-3,
                    gamma=0.99,
                    num_samples=10,
                    cql_threshold=-1.,
                    target_entropy=None,
                    ):
        """Instantiate the CQL agent from the env's observation/action specs."""
        obs_spec = tf.TensorSpec(shape=self.env.single_observation_space.shape,
                                 dtype=tf.float32)
        act_spec = tf.TensorSpec(shape=self.env.single_action_space.shape,
                                 dtype=tf.float32)
        self.agent = CQLAgent(obs_spec=obs_spec, act_spec=act_spec,
                              policy_mlp_hidden=policy_mlp_hidden,
                              policy_lr=policy_lr,
                              q_mlp_hidden=q_mlp_hidden,
                              q_lr=q_lr,
                              alpha=alpha,
                              alpha_lr=alpha_lr,
                              alpha_cql=alpha_cql,
                              alpha_cql_lr=alpha_cql_lr,
                              tau=tau,
                              gamma=gamma,
                              num_samples=num_samples,
                              cql_threshold=cql_threshold,
                              target_entropy=target_entropy,
                              )
        self.agent.set_logger(self.logger)

    def setup_extra(self,
                    start_steps
                    ):
        """Store the number of warm-up steps during which behavior cloning is used."""
        self.start_steps = start_steps

    def run_one_step(self, t):
        """One offline gradient step; behavior cloning during the first start_steps steps."""
        batch = self.replay_buffer.sample()
        self.agent.update(**batch, update_target=True, behavior_cloning=self.global_step <= self.start_steps)

    def on_epoch_end(self, epoch):
        """Evaluate the agent and dump tabular diagnostics."""
        self.test_agent()
        # Log info about epoch
        self.logger.log_tabular('Epoch', epoch)
        self.logger.log_tabular('TestEpRet', with_min_and_max=True)
        self.logger.log_tabular('TestEpLen', average_only=True)
        self.logger.log_tabular('NormalizedTestEpRet', average_only=True)
        self.logger.log_tabular('GradientSteps', self.global_step)
        self.agent.log_tabular()
        self.logger.log_tabular('Time', time.time() - self.start_time)
        self.logger.dump_tabular()

    def on_train_begin(self):
        """Record the wall-clock start time."""
        self.start_time = time.time()

    @classmethod
    def main(cls,
             env_name,
             max_ep_len=1000,
             steps_per_epoch=2000,
             epochs=500,
             start_steps=1000 * 10,
             batch_size=256,
             num_test_episodes=20,
             seed=1,
             # sac args
             nn_size=256,
             learning_rate=3e-4,
             alpha=0.2,
             tau=5e-3,
             gamma=0.99,
             # replay
             replay_size=int(1e6),
             logger_path: str = None
             ):
        """Entry point wiring env, logger, agent and replay buffer together.

        Note: max_ep_len and replay_size are currently unused (the offline buffer
        is sized by the dataset); they are kept for CLI compatibility.
        """
        config = locals()
        runner = cls(seed=seed, steps_per_epoch=steps_per_epoch, epochs=epochs,
                     exp_name=None, logger_path=logger_path)
        runner.setup_env(env_name=env_name, num_parallel_env=num_test_episodes, frame_stack=None, wrappers=None,
                         asynchronous=False, num_test_episodes=None)
        runner.setup_logger(config=config)
        runner.setup_agent(policy_mlp_hidden=nn_size,
                           policy_lr=learning_rate,
                           q_mlp_hidden=nn_size,
                           q_lr=learning_rate,
                           alpha=alpha,
                           alpha_lr=1e-3,
                           alpha_cql=alpha,
                           alpha_cql_lr=1e-3,
                           tau=tau,
                           gamma=gamma,
                           num_samples=10,
                           cql_threshold=-1.,
                           target_entropy=None)
        runner.setup_extra(start_steps=start_steps)
        runner.setup_replay_buffer(batch_size=batch_size)
        runner.run()
if __name__ == '__main__':
    # Parse CLI arguments for Runner.main and launch training.
    # (Removed dataset-metadata residue that was fused onto this line.)
    run_func_as_main(Runner.main)
import time
import gym.spaces
import numpy as np
import tensorflow as tf
from rlutils.replay_buffers import PyUniformReplayBuffer
from rlutils.infra.runner import TFRunner, run_func_as_main
from rlutils.tf.nn.models import EnsembleDynamicsModel
from rlutils.tf.nn.planners import RandomShooter
class PETSAgent(tf.keras.Model):
    """PETS: an ensemble dynamics model combined with a random-shooting planner."""

    def __init__(self, obs_dim, act_dim, mlp_hidden=128, num_ensembles=5, lr=1e-3,
                 horizon=10, num_particles=5, num_actions=1024):
        super(PETSAgent, self).__init__()
        # Learned dynamics ensemble; reward and termination are learned as well
        # (both reward_fn and terminate_fn are None).
        self.dynamics_model = EnsembleDynamicsModel(
            obs_dim=obs_dim, act_dim=act_dim, mlp_hidden=mlp_hidden,
            num_ensembles=num_ensembles, lr=lr, reward_fn=None, terminate_fn=None)
        # Trajectory-sampling inference model consumed by the planner.
        self.inference_model = self.dynamics_model.build_ts_model(horizon=horizon,
                                                                  num_particles=num_particles)
        self.planner = RandomShooter(inference_model=self.inference_model,
                                     horizon=horizon, num_actions=num_actions)

    def set_logger(self, logger):
        """Attach an EpochLogger to the agent and its dynamics model."""
        self.logger = logger
        self.dynamics_model.set_logger(logger=logger)

    def log_tabular(self):
        """Forward tabular logging to the dynamics model."""
        self.dynamics_model.log_tabular()

    def update_model(self, data, batch_size=64, num_epochs=60, patience=None,
                     validation_split=0.1, shuffle=True):
        """Fit the dynamics ensemble on replay-buffer data."""
        self.dynamics_model.update(inputs=data, batch_size=batch_size,
                                   num_epochs=num_epochs, patience=patience,
                                   validation_split=validation_split, shuffle=shuffle)

    def act_batch(self, obs):
        """Plan an action for each observation via random shooting."""
        return self.planner.act_batch(obs)
class Runner(TFRunner):
    """Online model-based RL runner for PETS: collect transitions, refit the model each epoch."""

    def setup_replay_buffer(self,
                            replay_size):
        """Create a uniform replay buffer whose spec mirrors the env's spaces."""
        data_spec = {
            'obs': gym.spaces.Space(shape=self.env.single_observation_space.shape,
                                    dtype=np.float32),
            'act': gym.spaces.Space(shape=self.env.single_action_space.shape,
                                    dtype=np.float32),
            'next_obs': gym.spaces.Space(shape=self.env.single_observation_space.shape,
                                         dtype=np.float32),
            'rew': gym.spaces.Space(shape=None, dtype=np.float32),
            'done': gym.spaces.Space(shape=None, dtype=np.float32)
        }
        self.replay_buffer = PyUniformReplayBuffer(data_spec=data_spec,
                                                   capacity=replay_size,
                                                   batch_size=None,
                                                   num_parallel_env=self.num_parallel_env)

    def setup_agent(self, mlp_hidden=128, num_ensembles=5, lr=1e-3, horizon=10, num_particles=5, num_actions=1024):
        """Instantiate the PETS agent from the env's observation/action dimensions."""
        obs_dim = self.env.single_observation_space.shape[0]
        act_dim = self.env.single_action_space.shape[0]
        self.agent = PETSAgent(obs_dim=obs_dim, act_dim=act_dim,
                               mlp_hidden=mlp_hidden,
                               num_ensembles=num_ensembles,
                               lr=lr, horizon=horizon,
                               num_particles=num_particles,
                               num_actions=num_actions)
        self.agent.set_logger(self.logger)

    def setup_extra(self,
                    start_steps,
                    batch_size,
                    num_model_epochs,
                    patience,
                    validation_split):
        """Store data-collection and model-fitting hyperparameters."""
        self.start_steps = start_steps
        self.batch_size = batch_size
        self.num_model_epochs = num_model_epochs
        self.patience = patience
        self.validation_split = validation_split

    def run_one_step(self, t):
        """Collect one transition per parallel env (random actions during warm-up)."""
        global_env_steps = self.global_step * self.num_parallel_env
        if global_env_steps >= self.start_steps:
            a = self.agent.act_batch(self.o).numpy()
            assert not np.any(np.isnan(a)), f'NAN action: {a}'
        else:
            # warm-up: uniform random exploration before the model is useful
            a = self.env.action_space.sample()
        # Step the env
        o2, r, d, _ = self.env.step(a)
        self.ep_ret += r
        self.ep_len += 1
        # Ignore the "done" signal if it comes from hitting the time
        # horizon (that is, when it's an artificial terminal signal
        # that isn't based on the agent's state)
        # assumes self.max_ep_len is provided by the base runner -- TODO confirm
        true_d = np.logical_and(d, self.ep_len != self.max_ep_len)
        # Store experience to replay buffer
        self.replay_buffer.add(data={
            'obs': self.o,
            'act': a,
            'rew': r,
            'next_obs': o2,
            'done': true_d
        })
        # Super critical, easy to overlook step: make sure to update
        # most recent observation!
        self.o = o2
        # End of trajectory handling
        if np.any(d):
            self.logger.store(EpRet=self.ep_ret[d], EpLen=self.ep_len[d])
            self.ep_ret[d] = 0
            self.ep_len[d] = 0
            self.o = self.env.reset_done()

    def on_epoch_end(self, epoch):
        """Refit the dynamics model on all collected data and dump diagnostics."""
        # update the model
        data = self.replay_buffer.get()
        self.agent.update_model(data=data, batch_size=self.batch_size, num_epochs=self.num_model_epochs,
                                patience=self.patience, validation_split=self.validation_split, shuffle=True)
        # Log info about epoch
        self.logger.log_tabular('Epoch', epoch)
        self.logger.log_tabular('EpRet', with_min_and_max=True)
        self.logger.log_tabular('EpLen', average_only=True)
        self.logger.log_tabular('TotalEnvInteracts', self.global_step * self.num_parallel_env)
        self.agent.log_tabular()
        self.logger.log_tabular('Time', time.time() - self.start_time)
        self.logger.dump_tabular()

    def on_train_begin(self):
        """Reset env state and per-episode accumulators."""
        self.start_time = time.time()
        self.o = self.env.reset()
        self.ep_ret = np.zeros(shape=self.num_parallel_env)
        self.ep_len = np.zeros(shape=self.num_parallel_env, dtype=np.int64)

    @classmethod
    def main(cls,
             env_name,
             steps_per_epoch=400,
             epochs=200,
             start_steps=2000,
             num_parallel_env=2,
             seed=1,
             # sac args
             mlp_hidden=256,
             num_ensembles=3,
             learning_rate=1e-3,
             horizon=10,
             num_particles=5,
             num_actions=1024,
             batch_size=256,
             num_model_epochs=60,
             patience=10,
             validation_split=0.1,
             # replay
             replay_size=int(1e6),
             logger_path: str = None
             ):
        """Entry point wiring env, logger, agent and replay buffer together for PETS."""
        config = locals()
        runner = cls(seed=seed, steps_per_epoch=steps_per_epoch // num_parallel_env, epochs=epochs,
                     exp_name=None, logger_path=logger_path)
        runner.setup_env(env_name=env_name, num_parallel_env=num_parallel_env, frame_stack=None, wrappers=None,
                         asynchronous=False, num_test_episodes=None)
        runner.setup_logger(config=config)
        runner.setup_agent(mlp_hidden=mlp_hidden, num_ensembles=num_ensembles, lr=learning_rate,
                           horizon=horizon, num_particles=num_particles, num_actions=num_actions)
        runner.setup_extra(start_steps=start_steps,
                           batch_size=batch_size,
                           num_model_epochs=num_model_epochs,
                           patience=patience,
                           validation_split=validation_split)
        runner.setup_replay_buffer(replay_size=replay_size)
        runner.run()
if __name__ == '__main__':
    # Parse CLI arguments for Runner.main and launch training.
    # (Removed dataset-metadata residue that was fused onto this line.)
    run_func_as_main(Runner.main)
import numpy as np
import tensorflow as tf
EPS = 1e-6  # numerical epsilon to keep atanh arguments strictly inside (-1, 1)
def compute_accuracy(logits, labels):
    """Fraction of rows whose argmax over ``logits`` equals the integer label."""
    predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
    correct = tf.cast(predictions == labels, dtype=tf.float32)
    return tf.reduce_mean(correct)
def expand_ensemble_dim(x, num_ensembles):
    """Tile ``x`` with a new leading ensemble axis of size ``num_ensembles``.

    Used by outer code to expand inputs before passing them to an ensemble model.
    """
    tiled = tf.expand_dims(x, axis=0)
    # repeat only along the new leading axis; keep all original dims unchanged
    multiples = tf.concat(([num_ensembles], tf.ones_like(tf.shape(x))), axis=0)
    return tf.tile(tiled, multiples=multiples)
def clip_by_value(t, clip_value_min=None, clip_value_max=None):
    """Elementwise clip where either bound may be None to leave that side open."""
    result = t
    if clip_value_min is not None:
        result = tf.maximum(result, clip_value_min)
    if clip_value_max is not None:
        result = tf.minimum(result, clip_value_max)
    return result
def clip_by_value_preserve_gradient(t, clip_value_min=None, clip_value_max=None, name=None):
    """Clip in the forward pass while letting gradients flow as if unclipped."""
    with tf.name_scope(name or 'clip_by_value_preserve_gradient'):
        t = tf.convert_to_tensor(t, name='t')
        clipped = clip_by_value(t, clip_value_min, clip_value_max)
        # forward value equals `clipped`; gradient equals d/dt of the identity
        return t + tf.stop_gradient(clipped - t)
def flatten_leading_dims(tensor, n_dims):
    """Collapse the first ``n_dims`` axes of ``tensor`` into a single axis."""
    if n_dims <= 1:
        return tensor
    # NOTE(review): building a TensorShape from a dynamic tf.shape slice looks fragile --
    # confirm this works for tensors whose shape is not statically known.
    newshape = [tf.math.reduce_prod(tf.shape(tensor)[:n_dims])] + tf.TensorShape(tf.shape(tensor)[n_dims:])
    return tf.reshape(tensor, shape=newshape)
def clip_atanh(x, name=None):
    """atanh with inputs clipped into (-1 + EPS, 1 - EPS) to avoid infinities."""
    clipped = tf.clip_by_value(x, clip_value_min=-1. + EPS, clip_value_max=1. - EPS)
    return tf.atanh(clipped, name=name)
def compute_target_value(reward, gamma, done, next_q):
    """Bellman backup target: r + gamma * (1 - done) * Q(s', a')."""
    # the bootstrap term is zeroed out on terminal transitions
    bootstrap = (1.0 - done) * next_q
    return reward + gamma * bootstrap
def flat_vars(vars):
    """Concatenate a list of tensors into one flat 1-D tensor."""
    print('Tracing flat_vars')
    flattened = [tf.reshape(v, shape=(-1,)) for v in vars]
    return tf.concat(flattened, axis=0)
def set_flat_trainable_variables(model: tf.keras.layers.Layer, trainable_variables):
    """Assign a flat parameter vector back into ``model``'s trainable variables, in order."""
    print(f'Tracing set_flat_params_to model={model.name}, flat_params={len(trainable_variables)}')
    prev_ind = 0
    for param in model.trainable_variables:
        # number of scalar entries this variable consumes from the flat vector
        flat_size = tf.reduce_prod(param.shape)
        param.assign(tf.reshape(trainable_variables[prev_ind:prev_ind + flat_size], shape=param.shape))
        prev_ind += flat_size
def soft_update(target: tf.keras.layers.Layer, source: tf.keras.layers.Layer, tau):
    """Polyak average: target <- (1 - tau) * target + tau * source, variable by variable."""
    print('Tracing soft_update_tf')
    for t_var, s_var in zip(target.trainable_variables, source.trainable_variables):
        blended = t_var * (1. - tau) + s_var * tau
        t_var.assign(blended)
def hard_update(target: tf.keras.layers.Layer, source: tf.keras.layers.Layer):
    """Copy source parameters into target verbatim."""
    print('Tracing hard_update_tf')
    for dst, src in zip(target.trainable_variables, source.trainable_variables):
        dst.assign(src)
def to_numpy_or_python_type(tensors):
    """Converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.

    For each tensor, it calls `tensor.numpy()`. If the result is a scalar value,
    it converts it to a Python type, such as a float or int, by calling
    `result.item()`.

    Numpy scalars are converted, as Python types are often more convenient to deal
    with. This is especially useful for bfloat16 Numpy scalars, which don't
    support as many operations as other Numpy values.

    Args:
        tensors: A structure of tensors.
    Returns:
        `tensors`, but scalar tensors are converted to Python types and non-scalar
        tensors are converted to Numpy arrays.
    """
    def _to_single_numpy_or_python_type(t):
        if isinstance(t, tf.Tensor):
            x = t.numpy()
            # 0-d arrays become plain Python scalars for convenient logging
            return x.item() if np.ndim(x) == 0 else x
        return t  # Don't turn ragged or sparse tensors to NumPy.

    # (Removed dataset-metadata residue that was fused onto this return line.)
    return tf.nest.map_structure(_to_single_numpy_or_python_type, tensors)
import numpy as np
import rlutils.tf as rlu
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions  # shorthand aliases for TensorFlow Probability namespaces
tfb = tfp.bijectors
tfl = tfp.layers
EPS = 1e-4  # numerical epsilon for clipping / squashing stability
class CenteredBeta(tfd.TransformedDistribution):
    """Beta distribution affinely transformed from support [0, 1] onto [-1, 1]."""

    def __init__(self,
                 concentration1,
                 concentration0,
                 validate_args=False,
                 allow_nan_stats=True,
                 name='CenteredBeta'):
        parameters = dict(locals())
        with tf.name_scope(name) as name:
            super(CenteredBeta, self).__init__(
                distribution=tfd.Beta(concentration1=concentration1, concentration0=concentration0),
                # tfb.Chain applies right-to-left: first Scale (x -> 2x), then
                # Shift (x -> x - 1), i.e. y = 2x - 1 maps [0, 1] onto [-1, 1].
                bijector=tfb.Chain(bijectors=[
                    tfb.Shift(shift=-1.),
                    tfb.Scale(scale=2.)
                ]),
                validate_args=validate_args,
                parameters=parameters,
                name=name)
def apply_squash_log_prob(raw_log_prob, x):
    """ Compute the log probability after applying tanh on raw actions
    Args:
        raw_log_prob: log prob of the pre-tanh distribution, shape (None,)
        x: the raw (pre-tanh) actions, shape (None, act_dim)
    Returns:
        The log probability of tanh(x), same shape as raw_log_prob.
    """
    # log|d tanh(x)/dx| = log(1 - tanh^2(x)), written in the numerically
    # stable form 2 * (log 2 - x - softplus(-2x)).
    log_det_jacobian = 2. * (np.log(2.) - x - tf.math.softplus(-2. * x))
    # Sum the per-dimension corrections over the trailing event dims so the
    # result matches the rank of raw_log_prob.
    num_reduce_dim = tf.rank(x) - tf.rank(raw_log_prob)
    log_det_jacobian = tf.reduce_sum(log_det_jacobian, axis=tf.range(-num_reduce_dim, 0))
    log_prob = raw_log_prob - log_det_jacobian
    return log_prob
def make_independent_normal(loc, scale, ndims=1):
    """Diagonal Normal whose last `ndims` batch dims are treated as event dims."""
    base = tfd.Normal(loc=loc, scale=scale)
    return tfd.Independent(distribution=base, reinterpreted_batch_ndims=ndims)
def make_independent_normal_from_params(params, ndims=1, min_log_scale=None, max_log_scale=None):
    """Build an Independent Normal from packed (loc, raw scale) parameters.

    `params` is split in half along the last axis; the raw scale half is
    clipped to [min_log_scale, max_log_scale] and made positive via softplus.
    """
    loc_params, scale_params = tf.split(params, 2, axis=-1)
    # presumably the rlu helper ignores None bounds (the defaults) — verify
    # against rlu.functional.clip_by_value.
    scale_params = rlu.functional.clip_by_value(scale_params, min_log_scale, max_log_scale)
    scale_params = tf.math.softplus(scale_params)
    distribution = make_independent_normal(loc_params, scale_params, ndims=ndims)
    return distribution
def make_independent_truncated_normal(loc, scale, low, high, ndims=1):
    """Independent TruncatedNormal over the last `ndims` dims, bounded to [low, high]."""
    base = tfd.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)
    return tfd.Independent(distribution=base, reinterpreted_batch_ndims=ndims)
def make_independent_truncated_normal_from_params(params, low, high, ndims=1, min_log_scale=None, max_log_scale=None):
    """Build an Independent TruncatedNormal from packed (loc, raw scale) params.

    `params` is split in half along the last axis; the raw scale half is
    optionally clipped to [min_log_scale, max_log_scale] and made positive
    via softplus.
    """
    loc_params, scale_params = tf.split(params, 2, axis=-1)
    # Consistency fix: delegate the optional clipping to the same helper used
    # by make_independent_normal_from_params (which presumably ignores None
    # bounds — verify against rlu.functional.clip_by_value), instead of the
    # hand-rolled tf.maximum/tf.minimum pair.
    scale_params = rlu.functional.clip_by_value(scale_params, min_log_scale, max_log_scale)
    scale_params = tf.math.softplus(scale_params)
    distribution = make_independent_truncated_normal(loc_params, scale_params, low, high, ndims=ndims)
    return distribution
def make_independent_centered_beta(c1, c2, ndims=1):
    """Independent CenteredBeta (support (-1, 1)) over the last `ndims` dims."""
    base = CenteredBeta(concentration1=c1, concentration0=c2,
                        validate_args=False, allow_nan_stats=False)
    return tfd.Independent(base, reinterpreted_batch_ndims=ndims)
def make_independent_centered_beta_from_params(params, ndims=1):
    """Build an independent CenteredBeta from packed raw concentration params."""
    # softplus(.) + 1 keeps both concentrations strictly above 1.
    concentrations = tf.math.softplus(params) + 1.0
    c1, c2 = tf.split(concentrations, 2, axis=-1)
    return make_independent_centered_beta(c1, c2, ndims=ndims)
def make_independent_beta(c1, c2, ndims=1):
    """Independent Beta (support (0, 1)) over the last `ndims` dims."""
    base = tfd.Beta(concentration1=c1, concentration0=c2,
                    validate_args=False, allow_nan_stats=False)
    return tfd.Independent(base, reinterpreted_batch_ndims=ndims)
def make_independent_beta_from_params(params, ndims=1):
    """Build an independent Beta from packed raw concentration params."""
    # softplus(.) + 1 keeps both concentrations strictly above 1.
    concentrations = tf.math.softplus(params) + 1.0
    c1, c2 = tf.split(concentrations, 2, axis=-1)
    return make_independent_beta(c1, c2, ndims=ndims)
def make_independent_categorical_from_params(params, ndims=1):
    """Independent Categorical parameterized by logits."""
    base = tfd.Categorical(logits=params)
    return tfd.Independent(base, reinterpreted_batch_ndims=ndims)
class IndependentNormal(tfl.DistributionLambda):
    """Keras layer turning packed (loc, raw scale) params into an Independent Normal."""

    def __init__(self, min_log_scale=None, max_log_scale=None, ndims=1):
        super(IndependentNormal, self).__init__(make_distribution_fn=lambda t: make_independent_normal_from_params(
            t, ndims=ndims, min_log_scale=min_log_scale, max_log_scale=max_log_scale))
class IndependentBeta(tfl.DistributionLambda):
    """Keras layer turning packed raw concentrations into an Independent Beta."""

    def __init__(self, ndims=1):
        super(IndependentBeta, self).__init__(make_distribution_fn=lambda t: make_independent_beta_from_params(
            t, ndims=ndims))
class IndependentTruncatedNormal(tfl.DistributionLambda):
    """Keras layer producing an Independent TruncatedNormal bounded to [low, high]."""

    def __init__(self, low, high, min_log_scale=None, max_log_scale=None, ndims=1):
        super(IndependentTruncatedNormal, self).__init__(
            make_distribution_fn=lambda t: make_independent_truncated_normal_from_params(
                t, low=low, high=high, ndims=ndims, min_log_scale=min_log_scale, max_log_scale=max_log_scale))
class IndependentCenteredBeta(tfl.DistributionLambda):
    """Keras layer producing an Independent CenteredBeta (support (-1, 1))."""

    def __init__(self, ndims=1):
        super(IndependentCenteredBeta, self).__init__(lambda t: make_independent_centered_beta_from_params(
            t, ndims=ndims))
class IndependentCategorical(tfl.DistributionLambda):
    """Keras layer producing an Independent Categorical from logits."""

    def __init__(self, ndims=1):
        super(IndependentCategorical, self).__init__(lambda t: make_independent_categorical_from_params(
            t, ndims=ndims))
import tensorflow as tf
from rlutils.tf.nn.functional import build_mlp
# Small-magnitude uniform initializer used for the final (output) layers.
OUT_KERNEL_INIT = tf.keras.initializers.RandomUniform(minval=-1e-3, maxval=1e-3)
class EnsembleMinQNet(tf.keras.Model):
    """Ensemble of Q networks sharing one batched MLP.

    call() takes (obs, act, reduce); when `reduce` is true the per-ensemble
    values are reduced with a minimum (clipped double-Q style), otherwise the
    full (num_ensembles, None) tensor is returned.
    """

    def __init__(self, ob_dim, ac_dim, mlp_hidden, num_ensembles=2, num_layers=3):
        super(EnsembleMinQNet, self).__init__()
        self.ob_dim = ob_dim
        self.ac_dim = ac_dim
        self.mlp_hidden = mlp_hidden
        self.num_ensembles = num_ensembles
        self.num_layers = num_layers
        # One MLP with a leading ensemble axis; squeeze=True removes the
        # trailing output dim of size 1.
        self.q_net = build_mlp(input_dim=self.ob_dim + self.ac_dim,
                               output_dim=1,
                               mlp_hidden=self.mlp_hidden,
                               num_ensembles=self.num_ensembles,
                               num_layers=num_layers,
                               squeeze=True,
                               out_kernel_initializer=OUT_KERNEL_INIT)
        self.build(input_shape=[(None, ob_dim), (None, ac_dim), ()])

    def get_config(self):
        # Serialize the constructor arguments for Keras model saving.
        config = super(EnsembleMinQNet, self).get_config()
        config.update({
            'ob_dim': self.ob_dim,
            'ac_dim': self.ac_dim,
            'mlp_hidden': self.mlp_hidden,
            'num_ensembles': self.num_ensembles,
            'num_layers': self.num_layers
        })
        return config

    def call(self, inputs, training=None, mask=None):
        obs, act, reduce = inputs
        inputs = tf.concat((obs, act), axis=-1)
        # Broadcast the same (obs, act) batch to every ensemble member.
        inputs = tf.tile(tf.expand_dims(inputs, axis=0), (self.num_ensembles, 1, 1))
        q = self.q_net(inputs)  # (num_ensembles, None)
        return tf.cond(pred=reduce, true_fn=lambda: tf.reduce_min(q, axis=0), false_fn=lambda: q)
class AtariQNetworkDeepMind(tf.keras.Model):
    """DeepMind-style DQN network for 84x84 Atari frames with optional dueling head."""

    def __init__(self, act_dim, frame_stack=4, dueling=False, data_format='channels_first', scale_input=True):
        super(AtariQNetworkDeepMind, self).__init__()
        if data_format == 'channels_first':
            self.batch_input_shape = (None, frame_stack, 84, 84)
        else:
            self.batch_input_shape = (None, 84, 84, frame_stack)
        # 32-64-64 conv stack followed by flatten.
        self.features = tf.keras.Sequential([
            tf.keras.layers.InputLayer(batch_input_shape=self.batch_input_shape),
            tf.keras.layers.Conv2D(filters=32, kernel_size=8, strides=4, padding='same', activation='relu',
                                   data_format=data_format),
            tf.keras.layers.Conv2D(filters=64, kernel_size=4, strides=2, padding='same', activation='relu',
                                   data_format=data_format),
            tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same', activation='relu',
                                   data_format=data_format),
            tf.keras.layers.Flatten()
        ])
        self.dueling = dueling
        self.scale_input = scale_input
        self.q_feature = tf.keras.layers.Dense(units=512, activation='relu')
        self.adv_fc = tf.keras.layers.Dense(units=act_dim)
        if self.dueling:
            self.value_fc = tf.keras.layers.Dense(units=1)
        else:
            self.value_fc = None
        self.build(input_shape=self.batch_input_shape)

    def call(self, inputs, training=None):
        if self.scale_input:
            # this assumes the inputs is in image format (None, frame_stack, 84, 84)
            # with uint8 pixel values in [0, 255].
            inputs = tf.cast(inputs, dtype=tf.float32) / 255.
        features = self.features(inputs, training=training)
        q_value = self.q_feature(features, training=training)
        adv = self.adv_fc(q_value)  # (None, act_dim)
        if self.dueling:
            # Dueling head: Q = V + (A - mean(A)); mean-subtraction makes the
            # V/A decomposition identifiable.
            adv = adv - tf.reduce_mean(adv, axis=-1, keepdims=True)
            value = self.value_fc(q_value)
            q_value = value + adv
        else:
            q_value = adv
        return q_value
import tensorflow as tf
from tensorflow.keras.regularizers import l2
from .layers import EnsembleDense, SqueezeLayer
def build_mlp(input_dim, output_dim, mlp_hidden, num_ensembles=None, num_layers=3,
              activation='relu', out_activation=None, squeeze=False, dropout=None,
              batch_norm=False, layer_norm=False, regularization=None, out_regularization=None,
              kernel_initializer='glorot_uniform', bias_initializer='zeros',
              out_kernel_initializer='glorot_uniform', out_bias_initializer='zeros'):
    """
    Build a (optionally ensembled) Keras Sequential MLP.
    Args:
        input_dim: input dimension
        output_dim: output dimension
        mlp_hidden: hidden size. int or a list of integers
        num_ensembles: number of ensembles; None builds a plain MLP
        num_layers: number of layers. Must be compatible with mlp_hidden
        activation: activation after each hidden layer
        out_activation: activation after the output layer
        squeeze: whether squeeze the output (requires output_dim == 1)
        dropout: apply dropout
        batch_norm: apply batch normalization
        layer_norm: apply layer normalization
        regularization: hidden kernel regularization
        out_regularization: output kernel regularization
        kernel_initializer: hidden kernel initializer
        bias_initializer: bias initializer
        out_kernel_initializer: The range of the output kernel is set to small number.
        out_bias_initializer: output bias initializer
    Returns:
        A tf.keras.Sequential model; ensembled models expect inputs of shape
        (num_ensembles, batch, input_dim).
    """
    assert not (batch_norm and layer_norm), "batch_norm and layer_norm can't be True simultaneously"
    if squeeze:
        assert output_dim == 1, "if squeeze, output_dim must have size 1"
    # Normalize mlp_hidden to a list with one entry per hidden layer.
    if isinstance(mlp_hidden, int):
        mlp_hidden = [mlp_hidden] * (num_layers - 1)
    elif isinstance(mlp_hidden, list) or isinstance(mlp_hidden, tuple):
        assert len(mlp_hidden) == num_layers - 1, 'len(mlp_hidden) must equal to num_layers - 1.'
    else:
        raise ValueError(f'Unknown type mlp_hidden. Got {type(mlp_hidden)}')
    model = tf.keras.Sequential()
    regularizer = l2(regularization) if regularization is not None else None
    out_regularizer = l2(out_regularization) if out_regularization is not None else None
    # input layer
    if num_ensembles is None:
        model.add(tf.keras.layers.InputLayer(batch_input_shape=(None, input_dim)))
    else:
        model.add(tf.keras.layers.InputLayer(batch_input_shape=(num_ensembles, None, input_dim)))
    # intermediate layers: Dense + normalization layer (optional) + activation + dropout (optional)
    for i in range(num_layers - 1):
        if num_ensembles is None:
            model.add(tf.keras.layers.Dense(mlp_hidden[i], kernel_regularizer=regularizer,
                                            kernel_initializer=kernel_initializer,
                                            bias_initializer=bias_initializer))
            if batch_norm:
                model.add(tf.keras.layers.BatchNormalization(axis=-1))
            if layer_norm:
                model.add(tf.keras.layers.LayerNormalization(axis=-1))
        else:
            model.add(EnsembleDense(num_ensembles, mlp_hidden[i], kernel_regularizer=regularizer,
                                    kernel_initializer=kernel_initializer,
                                    bias_initializer=bias_initializer))
            # Normalize over the ensemble and feature axes together.
            if batch_norm:
                model.add(tf.keras.layers.BatchNormalization(axis=[0, -1]))
            if layer_norm:
                model.add(tf.keras.layers.LayerNormalization(axis=[0, -1]))
        model.add(tf.keras.layers.Activation(activation=activation))
        if dropout is not None:
            model.add(tf.keras.layers.Dropout(rate=dropout))
    # final layer
    if num_ensembles is None:
        model.add(tf.keras.layers.Dense(output_dim, activation=out_activation, kernel_regularizer=out_regularizer,
                                        kernel_initializer=out_kernel_initializer,
                                        bias_initializer=out_bias_initializer))
    else:
        model.add(EnsembleDense(num_ensembles, output_dim, activation=out_activation,
                                kernel_regularizer=out_regularizer,
                                kernel_initializer=out_kernel_initializer,
                                bias_initializer=out_bias_initializer))
    if output_dim == 1 and squeeze is True:
        model.add(SqueezeLayer(axis=-1))
    return model
from abc import ABC, abstractmethod
import numpy as np
import rlutils.tf as rlu
import sklearn
import tensorflow as tf
import tensorflow_probability as tfp
from rlutils.tf.callbacks import EpochLoggerCallback
from rlutils.tf.generative_models.vae import ConditionalBetaVAE
tfd = tfp.distributions
tfl = tfp.layers
# Clipping range for the raw (pre-softplus) scale parameters of the heads.
MIN_LOG_SCALE = -10.
MAX_LOG_SCALE = 5.
# Numerical guard used when inverse-transforming actions near a boundary.
EPS = 1e-3
class AbstractBehaviorPolicy(ABC):
    """Interface for a model of the dataset behavior policy pi_b(a | s)."""

    def __init__(self, obs_dim, act_dim, mlp_hidden=256):
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.mlp_hidden = mlp_hidden

    @abstractmethod
    def sample(self, obs, n=None):
        """Sample actions for `obs`; `n` optionally requests multiple samples."""
        pass

    @abstractmethod
    def log_prob(self, obs, act):
        """Log probability of `act` under the modeled behavior policy."""
        pass

    @abstractmethod
    def act_batch(self, obs, **kwargs):
        """Produce one action per observation in the batch."""
        pass
class BehaviorPolicy(ConditionalBetaVAE, AbstractBehaviorPolicy):
    """Conditional beta-VAE model of the dataset behavior policy pi_b(a | s).

    The decoder emits *raw* actions: tanh-space values for out_dist='normal'
    or (0, 1) values for out_dist='beta'; `transform_raw_action` maps raw
    actions into the environment action range (-1, 1).
    """

    def __init__(self, obs_dim, act_dim, mlp_hidden=256, beta=1., out_dist='normal'):
        self.out_dist = out_dist
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.mlp_hidden = mlp_hidden
        super(BehaviorPolicy, self).__init__(latent_dim=self.act_dim * 2, beta=beta)

    def log_prob(self, obs, act):
        # Exact log-likelihood of a VAE is intractable.
        raise NotImplementedError

    def call(self, inputs, training=None, mask=None):
        """Return per-sample (negative log-likelihood, KL divergence)."""
        x, cond = inputs
        print(f'Tracing _forward with x={x}, cond={cond}')
        posterior = self.encoder(inputs=(x, cond), training=training)
        encode_sample = posterior.sample()
        out = self.decoder((encode_sample, cond), training=training)
        log_likelihood = out.log_prob(x)  # (None,)
        # Account for the raw-action -> env-action change of variables.
        log_likelihood = self.transform_raw_log_prob(log_likelihood, x)
        kl_divergence = tfd.kl_divergence(posterior, self.prior)
        return -log_likelihood, kl_divergence

    def transform_raw_action(self, action):
        """Map raw decoder outputs to environment actions in (-1, 1)."""
        if self.out_dist == 'normal':
            return tf.tanh(action)
        elif self.out_dist == 'beta':
            # (0, 1) -> (-1, 1)
            return (action - 0.5) * 2
        else:
            raise NotImplementedError

    def inverse_transform_action(self, action):
        """Map environment actions back into the raw decoder space."""
        if self.out_dist == 'normal':
            return rlu.functional.clip_atanh(action)
        elif self.out_dist == 'beta':
            raw_action = (action + 1) / 2
            raw_action = tf.clip_by_value(raw_action, EPS, 1. - EPS)
            return raw_action
        else:
            raise NotImplementedError

    def transform_raw_log_prob(self, raw_log_prob, raw_action):
        """Change-of-variables correction matching transform_raw_action."""
        if self.out_dist == 'beta':
            # BUG FIX: y = 2x - 1 is applied to each of the act_dim action
            # dimensions, so log|det J| = act_dim * log(2). The previous code
            # subtracted a single log(2); the difference is a constant offset
            # per sample, so training gradients are unaffected.
            return raw_log_prob - np.log(2.) * self.act_dim
        elif self.out_dist == 'normal':
            return rlu.distributions.apply_squash_log_prob(raw_log_prob=raw_log_prob, x=raw_action)
        else:
            raise NotImplementedError

    def _make_encoder(self) -> tf.keras.Model:
        # Encoder q(z | a, s): concatenates raw action and observation.
        obs_input = tf.keras.Input(shape=(self.obs_dim,), dtype=tf.float32)
        act_input = tf.keras.Input(shape=(self.act_dim,), dtype=tf.float32)
        input = tf.concat((act_input, obs_input), axis=-1)
        encoder = rlu.nn.build_mlp(input_dim=self.obs_dim + self.act_dim,
                                   output_dim=self.latent_dim * 2,
                                   mlp_hidden=self.mlp_hidden,
                                   num_layers=3)
        encoder.add(rlu.distributions.IndependentNormal(min_log_scale=MIN_LOG_SCALE, max_log_scale=MAX_LOG_SCALE))
        output = encoder(input)
        model = tf.keras.Model(inputs=[act_input, obs_input], outputs=output)
        return model

    def _make_decoder(self) -> tf.keras.Model:
        # Decoder p(a_raw | z, s): distribution head depends on out_dist.
        obs_input = tf.keras.Input(shape=(self.obs_dim,), dtype=tf.float32)
        latent_input = tf.keras.Input(shape=(self.latent_dim,), dtype=tf.float32)
        input = tf.concat((latent_input, obs_input), axis=-1)
        decoder = rlu.nn.build_mlp(input_dim=self.obs_dim + self.latent_dim,
                                   output_dim=self.act_dim * 2,
                                   mlp_hidden=self.mlp_hidden,
                                   num_layers=3)
        if self.out_dist == 'beta':
            out_layer = rlu.distributions.IndependentBeta()
        elif self.out_dist == 'normal':
            out_layer = rlu.distributions.IndependentNormal(min_log_scale=MIN_LOG_SCALE, max_log_scale=MAX_LOG_SCALE)
        else:
            raise NotImplementedError
        decoder.add(out_layer)
        output = decoder(input)
        model = tf.keras.Model(inputs=[latent_input, obs_input], outputs=output)
        return model

    def set_logger(self, logger):
        self.logger = logger

    def log_tabular(self):
        self.logger.log_tabular('TrainBehaviorLoss', average_only=True)
        self.logger.log_tabular('ValBehaviorLoss', average_only=True)

    @tf.function
    def act_batch(self, obs):
        print(f'Tracing vae act_batch with obs {obs}')
        pi_final = self.sample(cond=obs, full_path=False)
        # BUG FIX: the decoder emits *raw* actions ('normal' -> tanh-space,
        # 'beta' -> (0, 1)); map them with the distribution-aware transform
        # instead of unconditionally applying tanh, which is wrong for the
        # beta head ((0, 1) samples would land in (0, tanh(1))).
        pi_final = self.transform_raw_action(pi_final)
        return pi_final

    def update(self, inputs, sample_weights=None, batch_size=64, num_epochs=60, patience=None,
               validation_split=0.1, shuffle=True):
        """Fit the CVAE on (obs, act) pairs by maximizing the ELBO."""
        obs = inputs['obs']
        actions = inputs['act']
        obs, actions = sklearn.utils.shuffle(obs, actions)
        raw_actions = self.inverse_transform_action(actions)
        callbacks = [EpochLoggerCallback(keys=[('TrainBehaviorLoss', 'loss'), ('ValBehaviorLoss', 'val_loss')],
                                         epochs=num_epochs, logger=self.logger,
                                         decs='Training Behavior Policy')]
        if patience is not None:
            callbacks.append(tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience,
                                                              restore_best_weights=True))
        self.fit(x=(raw_actions, obs), sample_weight=sample_weights, epochs=num_epochs,
                 batch_size=batch_size, verbose=False, validation_split=validation_split,
                 callbacks=callbacks, shuffle=shuffle)
class EnsembleBehaviorPolicy(BehaviorPolicy):
    """Ensemble of CVAE behavior policies sharing one batched network.

    All tensors carry a leading (num_ensembles,) axis; act_batch broadcasts
    the observation to every member and then picks one member's action at
    random for each batch element.
    """

    def __init__(self, num_ensembles, out_dist, obs_dim, act_dim, mlp_hidden=256):
        self.num_ensembles = num_ensembles
        super(EnsembleBehaviorPolicy, self).__init__(out_dist=out_dist, obs_dim=obs_dim,
                                                     act_dim=act_dim, mlp_hidden=mlp_hidden)
        self.build(input_shape=[(self.num_ensembles, None, act_dim),
                                (self.num_ensembles, None, obs_dim)])

    def select_random_ensemble(self, x):
        """ x: (num_ensembles, None) — gather one random member per batch element. """
        batch_size = tf.shape(x)[1]
        indices = tf.stack([tf.random.uniform(shape=[batch_size], maxval=self.num_ensembles, dtype=tf.int32),
                            tf.range(batch_size)], axis=-1)
        x = tf.gather_nd(x, indices=indices)
        return x

    def _make_encoder(self) -> tf.keras.Model:
        # Same as BehaviorPolicy's encoder but with a leading ensemble axis.
        obs_input = tf.keras.Input(batch_input_shape=(self.num_ensembles, None, self.obs_dim,), dtype=tf.float32)
        act_input = tf.keras.Input(batch_input_shape=(self.num_ensembles, None, self.act_dim,), dtype=tf.float32)
        input = tf.concat((act_input, obs_input), axis=-1)
        encoder = rlu.nn.build_mlp(input_dim=self.obs_dim + self.act_dim,
                                   output_dim=self.latent_dim * 2,
                                   mlp_hidden=self.mlp_hidden,
                                   num_layers=3,
                                   num_ensembles=self.num_ensembles)
        encoder.add(rlu.distributions.IndependentNormal(min_log_scale=MIN_LOG_SCALE, max_log_scale=MAX_LOG_SCALE))
        output = encoder(input)
        model = tf.keras.Model(inputs=[act_input, obs_input], outputs=output)
        return model

    def _make_decoder(self) -> tf.keras.Model:
        # Same as BehaviorPolicy's decoder but with a leading ensemble axis.
        obs_input = tf.keras.Input(batch_input_shape=(self.num_ensembles, None, self.obs_dim,), dtype=tf.float32)
        latent_input = tf.keras.Input(batch_input_shape=(self.num_ensembles, None, self.latent_dim,),
                                      dtype=tf.float32)
        input = tf.concat((latent_input, obs_input), axis=-1)
        decoder = rlu.nn.build_mlp(input_dim=self.obs_dim + self.latent_dim,
                                   output_dim=self.act_dim * 2,
                                   mlp_hidden=self.mlp_hidden,
                                   num_layers=3,
                                   num_ensembles=self.num_ensembles)
        if self.out_dist == 'beta':
            out_layer = rlu.distributions.IndependentBeta()
        elif self.out_dist == 'normal':
            out_layer = rlu.distributions.IndependentNormal(min_log_scale=MIN_LOG_SCALE, max_log_scale=MAX_LOG_SCALE)
        else:
            raise NotImplementedError
        decoder.add(out_layer)
        output = decoder(input)
        model = tf.keras.Model(inputs=[latent_input, obs_input], outputs=output)
        return model

    @tf.function
    def act_batch(self, obs):
        print(f'Tracing vae act_batch with obs {obs}')
        obs = rlu.functional.expand_ensemble_dim(obs, num_ensembles=self.num_ensembles)
        pi_final = self.sample(cond=obs, full_path=False)
        # random select one ensemble
        pi_final = self.select_random_ensemble(pi_final)
        # BUG FIX: map raw decoder outputs with the distribution-aware
        # transform instead of unconditionally applying tanh (wrong for the
        # beta output head, whose raw samples live in (0, 1)).
        pi_final = self.transform_raw_action(pi_final)
        return pi_final

    def call(self, inputs, training=None, mask=None):
        """Per-ensemble ELBO terms, summed over the ensemble axis."""
        x, cond = inputs
        print(f'Tracing call with x={x}, cond={cond}')
        posterior = self.encoder(inputs=(x, cond), training=training)
        encode_sample = posterior.sample()
        out = self.decoder((encode_sample, cond), training=training)
        log_likelihood = out.log_prob(x)  # (num_ensembles, None)
        log_likelihood = self.transform_raw_log_prob(log_likelihood, x)
        kl_divergence = tfd.kl_divergence(posterior, self.prior)
        # Sum losses across ensemble members; reported metrics are divided by
        # num_ensembles in train_step/test_step.
        nll = -tf.reduce_sum(log_likelihood, axis=0)
        kld = tf.reduce_sum(kl_divergence, axis=0)
        return nll, kld

    def train_step(self, data):
        # Tile each tensor along a new leading ensemble axis before training.
        data = tf.nest.map_structure(lambda x: rlu.functional.expand_ensemble_dim(x, num_ensembles=self.num_ensembles),
                                     data)
        result = super(EnsembleBehaviorPolicy, self).train_step(data=data)
        result = tf.nest.map_structure(lambda x: x / self.num_ensembles, result)
        return result

    def test_step(self, data):
        data = tf.nest.map_structure(lambda x: rlu.functional.expand_ensemble_dim(x, num_ensembles=self.num_ensembles),
                                     data)
        result = super(EnsembleBehaviorPolicy, self).test_step(data=data)
        result = tf.nest.map_structure(lambda x: x / self.num_ensembles, result)
        return result

    def sample(self, cond, full_path=True):
        print(f'Tracing sample with cond={cond}')
        z = self.prior.sample(sample_shape=tf.shape(cond)[0:2])  # (num_ensembles, None, z_dim)
        # Truncate latents to [-0.5, 0.5] — presumably to keep decoded actions
        # close to the dataset distribution (cf. BCQ-style perturbation limits).
        z = tf.clip_by_value(z, clip_value_min=-0.5, clip_value_max=0.5)
        out_dist = self.decode_distribution(z=(z, cond))
        return tf.cond(full_path, true_fn=lambda: out_dist.sample(), false_fn=lambda: out_dist.mean())
import sklearn
import tensorflow as tf
import tensorflow_probability as tfp
from rlutils.tf.callbacks import EpochLoggerCallback
from rlutils.tf.distributions import make_independent_normal_from_params, apply_squash_log_prob, \
make_independent_centered_beta_from_params, make_independent_truncated_normal, make_independent_normal
from rlutils.tf.functional import clip_atanh
from rlutils.tf.future import minimize
from .functional import build_mlp
tfd = tfp.distributions
# Clipping range for raw (pre-softplus) scale outputs of the Gaussian actors.
LOG_STD_RANGE = (-10., 5.)
# This may affect the performance a lot! Setting the min_log_scale=-20 makes HalfCheetah-v2 achieves 16000, but
# makes Hopper-v2 worse.
EPS = 1e-3  # numerical guard for actions near a support boundary
# Small-magnitude uniform initializer used for the final (output) layers.
OUT_KERNEL_INIT = tf.keras.initializers.RandomUniform(minval=-1e-3, maxval=1e-3)
@tf.function
def get_pi_action(deterministic, pi_distribution):
    # print(f'Tracing get_pi_action with deterministic={deterministic}')
    # Deterministic (evaluation): distribution mean; otherwise a sample.
    return tf.cond(pred=deterministic, true_fn=lambda: pi_distribution.mean(),
                   false_fn=lambda: pi_distribution.sample())
@tf.function
def get_pi_action_categorical(deterministic, pi_distribution):
    # print(f'Tracing get_pi_action with deterministic={deterministic}')
    # Deterministic: greedy argmax over class probabilities; otherwise sample.
    return tf.cond(pred=deterministic,
                   true_fn=lambda: tf.argmax(pi_distribution.probs_parameter(), axis=-1,
                                             output_type=pi_distribution.dtype),
                   false_fn=lambda: pi_distribution.sample())
class StochasticActor(tf.keras.Model):
    """Base class for actors that output a distribution over actions.

    Subclasses define `pi_dist_layer` (raw params -> distribution) and may
    override the transform_* hooks to map between raw distribution samples
    and environment actions.
    """

    def __init__(self):
        super(StochasticActor, self).__init__()
        self.logger = None

    def set_logger(self, logger):
        self.logger = logger

    def log_tabular(self):
        self.logger.log_tabular('TrainPolicyLoss', average_only=True)
        self.logger.log_tabular('ValPolicyLoss', average_only=True)

    @property
    def pi_dist_layer(self):
        raise NotImplementedError

    def transform_raw_action(self, raw_actions):
        # Identity by default; e.g. SquashedGaussianMLPActor overrides with tanh.
        return raw_actions

    def inverse_transform_action(self, action):
        return action

    def transform_raw_log_prob(self, raw_log_prob, raw_action):
        # Log-prob correction matching transform_raw_action (identity here).
        return raw_log_prob

    def train_step(self, data):
        """One maximum-likelihood step on (obs, action) pairs."""
        x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
        # Fit in raw space: map env actions back through the inverse transform.
        raw_y = self.inverse_transform_action(y)
        with tf.GradientTape() as tape:
            params = self.net(x)
            pi_distribution = self.pi_dist_layer(params)
            log_prob = pi_distribution.log_prob(raw_y)
            loss = -tf.reduce_mean(log_prob)
        minimize(loss, tape, self.net, self.optimizer)
        return {
            'loss': loss
        }

    def test_step(self, data):
        # Same negative log-likelihood as train_step, without the update.
        x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
        raw_y = self.inverse_transform_action(y)
        params = self.net(x)
        pi_distribution = self.pi_dist_layer(params)
        log_prob = pi_distribution.log_prob(raw_y)
        loss = -tf.reduce_mean(log_prob)
        return {
            'loss': loss
        }

    def update(self, inputs, sample_weights=None, batch_size=64, num_epochs=60, patience=None,
               validation_split=0.1, shuffle=True):
        """ Update the policy via maximum likelihood estimation """
        obs = inputs['obs']
        actions = inputs['act']
        callbacks = [EpochLoggerCallback(keys=[('TrainPolicyLoss', 'loss'), ('ValPolicyLoss', 'val_loss')],
                                         epochs=num_epochs, logger=self.logger, decs='Training Model')]
        if patience is not None:
            callbacks.append(tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience,
                                                              restore_best_weights=True))
        obs, actions = sklearn.utils.shuffle(obs, actions)
        self.fit(x=obs, y=actions, sample_weight=sample_weights, epochs=num_epochs,
                 batch_size=batch_size, verbose=False, validation_split=validation_split,
                 callbacks=callbacks, shuffle=shuffle)
class CategoricalActor(StochasticActor):
    """Actor with a Categorical distribution over discrete actions (logits head)."""

    def __init__(self, obs_dim, act_dim, mlp_hidden):
        super(CategoricalActor, self).__init__()
        self.net = build_mlp(input_dim=obs_dim, output_dim=act_dim, mlp_hidden=mlp_hidden,
                             out_kernel_initializer=OUT_KERNEL_INIT)

    @property
    def pi_dist_layer(self):
        return tfp.layers.DistributionLambda(
            make_distribution_fn=lambda t: tfd.Categorical(logits=t)
        )

    def call(self, inputs, **kwargs):
        """Return (action, log_prob, raw_action, distribution) for a batch of obs."""
        inputs, deterministic = inputs
        params = self.net(inputs)
        pi_distribution = self.pi_dist_layer(params)
        pi_action = get_pi_action_categorical(deterministic, pi_distribution)
        logp_pi = pi_distribution.log_prob(pi_action)
        # Discrete actions need no squashing: final action == raw action.
        pi_action_final = pi_action
        return pi_action_final, logp_pi, pi_action, pi_distribution
class NormalActor(StochasticActor):
    """Actor with an (unbounded) diagonal Gaussian policy.

    With `global_std`, a single state-independent scale vector is learned;
    otherwise the network outputs per-state (mean, raw scale) pairs.
    """

    def __init__(self, obs_dim, act_dim, mlp_hidden, global_std=True):
        super(NormalActor, self).__init__()
        self.global_std = global_std
        if self.global_std:
            self.net = build_mlp(input_dim=obs_dim, output_dim=act_dim, mlp_hidden=mlp_hidden,
                                 out_kernel_initializer=OUT_KERNEL_INIT)
            # NOTE(review): despite the name, this variable is mapped through
            # softplus (not exp) in call() — confirm -0.5 is intended as a
            # softplus-space initial value.
            self.log_std = tf.Variable(initial_value=-0.5 * tf.ones(act_dim))
        else:
            self.net = build_mlp(input_dim=obs_dim, output_dim=act_dim * 2, mlp_hidden=mlp_hidden,
                                 out_kernel_initializer=OUT_KERNEL_INIT)
            self.log_std = None

    @property
    def pi_dist_layer(self):
        return tfp.layers.DistributionLambda(
            make_distribution_fn=lambda t: make_independent_normal(t[0], t[1]))

    def call(self, inputs, **kwargs):
        """Return (action, log_prob, raw_action, distribution)."""
        inputs, deterministic = inputs
        params = self.net(inputs)
        if self.global_std:
            pi_distribution = self.pi_dist_layer((params, tf.math.softplus(self.log_std)))
        else:
            # tanh bounds the mean to (-1, 1); softplus makes the scale positive.
            mean, log_std = tf.split(params, 2, axis=-1)
            pi_distribution = self.pi_dist_layer((tf.tanh(mean), tf.math.softplus(log_std)))
        pi_action = get_pi_action(deterministic, pi_distribution)
        logp_pi = pi_distribution.log_prob(pi_action)
        pi_action_final = pi_action
        return pi_action_final, logp_pi, pi_action, pi_distribution
class TruncatedNormalActor(NormalActor):
    """NormalActor variant whose distribution is truncated to [-1, 1]."""

    @property
    def pi_dist_layer(self):
        return tfp.layers.DistributionLambda(
            make_distribution_fn=lambda t: make_independent_truncated_normal(t[0], t[1], low=-1., high=1.))
class CenteredBetaMLPActor(StochasticActor):
    """Stochastic actor with a centered-Beta action distribution on (-1, 1).

    Note that Beta distribution is 2x slower than SquashedGaussian.
    """

    def __init__(self, ob_dim, ac_dim, mlp_hidden):
        super(CenteredBetaMLPActor, self).__init__()
        self.net = build_mlp(ob_dim, ac_dim * 2, mlp_hidden, out_kernel_initializer=OUT_KERNEL_INIT)
        self.ac_dim = ac_dim
        self.build(input_shape=[(None, ob_dim), (None,)])

    @property
    def pi_dist_layer(self):
        return tfp.layers.DistributionLambda(
            make_distribution_fn=lambda t: make_independent_centered_beta_from_params(t))

    def call(self, inputs, **kwargs):
        """Return (action, log_prob, raw_action, distribution)."""
        inputs, deterministic = inputs
        # print(f'Tracing call with inputs={inputs}, deterministic={deterministic}')
        params = self.net(inputs)
        pi_distribution = self.pi_dist_layer(params)
        pi_action = get_pi_action(deterministic, pi_distribution)
        # BUG FIX: the centered Beta has support (-1, 1), so clip to
        # (-1 + EPS, 1 - EPS) to keep log_prob finite at the boundary. The
        # previous (EPS, 1 - EPS) clip collapsed every negative action to
        # ~0 — apparently a leftover from an uncentered (0, 1) Beta.
        pi_action = tf.clip_by_value(pi_action, -1. + EPS, 1. - EPS)
        logp_pi = pi_distribution.log_prob(pi_action)
        return pi_action, logp_pi, pi_action, pi_distribution
class SquashedGaussianMLPActor(StochasticActor):
    """SAC-style actor: diagonal Gaussian in raw space, tanh-squashed to (-1, 1)."""

    def __init__(self, ob_dim, ac_dim, mlp_hidden):
        super(SquashedGaussianMLPActor, self).__init__()
        # Network outputs packed (mean, raw scale) pairs: 2 * ac_dim values.
        self.net = build_mlp(ob_dim, ac_dim * 2, mlp_hidden, out_kernel_initializer=OUT_KERNEL_INIT)
        self.ac_dim = ac_dim
        self.build(input_shape=[(None, ob_dim), (None,)])

    @property
    def pi_dist_layer(self):
        return tfp.layers.DistributionLambda(
            make_distribution_fn=lambda t: make_independent_normal_from_params(t, min_log_scale=LOG_STD_RANGE[0],
                                                                               max_log_scale=LOG_STD_RANGE[1]))

    def transform_raw_action(self, action):
        return tf.tanh(action)

    def inverse_transform_action(self, action):
        # atanh with clipping to avoid +/-inf at the boundaries.
        return clip_atanh(action)

    def transform_raw_log_prob(self, raw_log_prob, raw_action):
        # tanh change-of-variables correction.
        return apply_squash_log_prob(raw_log_prob=raw_log_prob, x=raw_action)

    def call(self, inputs, **kwargs):
        """Return (squashed action, corrected log_prob, raw action, distribution)."""
        inputs, deterministic = inputs
        params = self.net(inputs)
        pi_distribution = self.pi_dist_layer(params)
        pi_action = get_pi_action(deterministic, pi_distribution)
        logp_pi = pi_distribution.log_prob(pi_action)
        logp_pi = self.transform_raw_log_prob(logp_pi, pi_action)
        pi_action_final = self.transform_raw_action(pi_action)
        return pi_action_final, logp_pi, pi_action, pi_distribution
class DeterministicMLPActor(tf.keras.Model):
    """Deterministic policy network; the tanh output bounds actions to (-1, 1)."""

    def __init__(self, ob_dim, ac_dim, mlp_hidden, out_activation='tanh'):
        super(DeterministicMLPActor, self).__init__()
        self.policy_net = build_mlp(ob_dim, ac_dim, mlp_hidden=mlp_hidden,
                                    num_layers=3, out_activation=out_activation,
                                    out_kernel_initializer=OUT_KERNEL_INIT)

    def call(self, inputs, **kwargs):
        return self.policy_net(inputs)
import tensorflow as tf
from rlutils.np.functional import inverse_softplus
from rlutils.tf.functional import clip_by_value_preserve_gradient
from .initializer import _decode_initializer
class SqueezeLayer(tf.keras.layers.Layer):
    """Keras layer that removes a single size-1 dimension at `axis`."""

    def __init__(self, axis=-1):
        super(SqueezeLayer, self).__init__()
        self.axis = axis

    def call(self, inputs, **kwargs):
        squeezed = tf.squeeze(inputs, axis=self.axis)
        return squeezed
class EnsembleDense(tf.keras.layers.Dense):
    """Dense layer applied independently by each of `num_ensembles` members.

    Kernels have shape (num_ensembles, in, units); inputs are expected as
    (num_ensembles, batch, in) and each member matmuls its own slice.
    """

    def __init__(self, num_ensembles, units, kernel_initializer, **kwargs):
        # Presumably maps initializer names/specs to ensemble-aware
        # initializers — see _decode_initializer in this package.
        kernel_initializer = _decode_initializer(kernel_initializer)
        super(EnsembleDense, self).__init__(units=units, kernel_initializer=kernel_initializer, **kwargs)
        self.num_ensembles = num_ensembles

    def build(self, input_shape):
        last_dim = int(input_shape[-1])
        # Rank-3 kernel: one (in, units) matrix per ensemble member.
        self.kernel = self.add_weight(
            'kernel',
            shape=[self.num_ensembles, last_dim, self.units],
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            dtype=self.dtype,
            trainable=True)
        if self.use_bias:
            # The middle axis of size 1 broadcasts over the batch dimension.
            self.bias = self.add_weight(
                'bias',
                shape=[self.num_ensembles, 1, self.units],
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                dtype=self.dtype,
                trainable=True)
        else:
            self.bias = None
        self.built = True

    def call(self, inputs):
        # Batched matmul over the leading ensemble axis.
        outputs = tf.linalg.matmul(inputs, self.kernel)  # (num_ensembles, None, units)
        if self.use_bias:
            outputs = outputs + self.bias
        if self.activation is not None:
            return self.activation(outputs)  # pylint: disable=not-callable
        return outputs
class LagrangeLayer(tf.keras.layers.Layer):
    """A learnable positive scalar (e.g. SAC temperature / Lagrange multiplier).

    The scalar is stored as a raw kernel and exposed through softplus, so the
    returned value is always positive; optional bounds clip the raw kernel
    while preserving gradients.
    """

    def __init__(self, initial_value=1.0, min_value=None, max_value=10000.):
        super(LagrangeLayer, self).__init__()
        # Bounds and the initial value are stored in raw (pre-softplus) space.
        self.log_value = inverse_softplus(initial_value)
        if min_value is not None:
            self.min_log_value = inverse_softplus(min_value)
        else:
            self.min_log_value = None
        if max_value is not None:
            self.max_log_value = inverse_softplus(max_value)
        else:
            self.max_log_value = None
        self.build(input_shape=None)

    def build(self, input_shape):
        # A single scalar weight, initialized to the raw initial value.
        self.kernel = self.add_weight(
            name='kernel',
            shape=(),
            dtype=tf.float32,
            initializer=tf.keras.initializers.Constant(self.log_value)
        )
        self.built = True

    def __call__(self, inputs=None, training=None):
        # Allow calling the layer with no inputs: value = layer()
        return super(LagrangeLayer, self).__call__(inputs, training=training)

    def call(self, inputs, training=None, mask=None):
        return tf.math.softplus(clip_by_value_preserve_gradient(self.kernel, self.min_log_value, self.max_log_value))

    def assign(self, value):
        # `value` is given in the positive (post-softplus) space.
        self.kernel.assign(inverse_softplus(value))
import math
import tensorflow as tf
class _RandomGenerator(object):
    """Random generator that selects appropriate random ops."""

    # Class-level alias; note it also provides the evaluation context for the
    # `dtype=dtypes.float32` default of random_normal below (defaults in a
    # class body are evaluated in class scope at class-creation time).
    dtypes = tf.dtypes

    def __init__(self, seed=None):
        super(_RandomGenerator, self).__init__()
        if seed is not None:
            # Stateless random ops requires 2-int seed.
            self.seed = [seed, 0]
        else:
            self.seed = None

    def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):
        """A deterministic random normal if seed is passed."""
        if self.seed:
            op = tf.random.stateless_normal
        else:
            op = tf.random.normal
        return op(
            shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)

    def random_uniform(self, shape, minval, maxval, dtype):
        """A deterministic random uniform if seed is passed."""
        if self.seed:
            op = tf.random.stateless_uniform
        else:
            op = tf.random.uniform
        return op(
            shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)

    def truncated_normal(self, shape, mean, stddev, dtype):
        """A deterministic truncated normal if seed is passed."""
        if self.seed:
            op = tf.random.stateless_truncated_normal
        else:
            op = tf.random.truncated_normal
        return op(
            shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
class EnsembleVarianceScaling(tf.keras.initializers.Initializer):
    """Variance-scaling initializer for rank-3 ensemble weight tensors.

    Mirrors ``tf.keras.initializers.VarianceScaling`` but derives fan-in /
    fan-out from ensemble kernels shaped ``(num_ensembles, fan_in, fan_out)``
    via the module-level ``_compute_fans`` helper.
    """

    def __init__(self,
                 scale=1.0,
                 mode="fan_in",
                 distribution="truncated_normal",
                 seed=None):
        if scale <= 0.:
            raise ValueError("`scale` must be positive float.")
        if mode not in {"fan_in", "fan_out", "fan_avg"}:
            raise ValueError("Invalid `mode` argument:", mode)
        distribution = distribution.lower()
        # keras-team/keras compatibility alias.
        if distribution == "normal":
            distribution = "truncated_normal"
        if distribution not in {"uniform", "truncated_normal",
                                "untruncated_normal"}:
            raise ValueError("Invalid `distribution` argument:", distribution)
        self.scale = scale
        self.mode = mode
        self.distribution = distribution
        self.seed = seed
        self._random_generator = _RandomGenerator(seed)

    def __call__(self, shape, dtype=tf.dtypes.float32):
        """Return a tensor of ``shape`` drawn per the configured scheme."""
        fan_in, fan_out = _compute_fans(shape)
        if self.mode == "fan_in":
            denom = fan_in
        elif self.mode == "fan_out":
            denom = fan_out
        else:
            denom = (fan_in + fan_out) / 2.
        scale = self.scale / max(1., denom)
        gen = self._random_generator
        if self.distribution == "truncated_normal":
            # constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
            stddev = math.sqrt(scale) / .87962566103423978
            return gen.truncated_normal(shape, 0.0, stddev, dtype)
        if self.distribution == "untruncated_normal":
            return gen.random_normal(shape, 0.0, math.sqrt(scale), dtype)
        limit = math.sqrt(3.0 * scale)
        return gen.random_uniform(shape, -limit, limit, dtype)

    def get_config(self):
        return dict(scale=self.scale,
                    mode=self.mode,
                    distribution=self.distribution,
                    seed=self.seed)
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of integer scalars (fan_in, fan_out).
"""
assert len(shape) == 3 # only used for ensemble dense layer
fan_in = shape[1]
fan_out = shape[2]
return int(fan_in), int(fan_out)
class EnsembleHeNormal(EnsembleVarianceScaling):
    """He-normal initializer for ensemble kernels."""

    def __init__(self, seed=None):
        super().__init__(scale=2., mode='fan_in',
                         distribution='truncated_normal', seed=seed)

    def get_config(self):
        return dict(seed=self.seed)
class EnsembleHeUniform(EnsembleVarianceScaling):
    """He-uniform initializer for ensemble kernels."""

    def __init__(self, seed=None):
        super().__init__(scale=2., mode='fan_in',
                         distribution='uniform', seed=seed)

    def get_config(self):
        return dict(seed=self.seed)
class EnsembleGlorotNormal(EnsembleVarianceScaling):
    """Glorot (Xavier) normal initializer for ensemble kernels."""

    def __init__(self, seed=None):
        super().__init__(scale=1.0, mode='fan_avg',
                         distribution='truncated_normal', seed=seed)

    def get_config(self):
        return dict(seed=self.seed)
class EnsembleGlorotUniform(EnsembleVarianceScaling):
    """Glorot (Xavier) uniform initializer for ensemble kernels."""

    def __init__(self, seed=None):
        super().__init__(scale=1.0, mode='fan_avg',
                         distribution='uniform', seed=seed)

    def get_config(self):
        return dict(seed=self.seed)
# Registry mapping initializer names to their ensemble-aware classes;
# consumed by `_decode_initializer` to resolve string specs.
ensemble_init = {
    'he_normal': EnsembleHeNormal,
    'he_uniform': EnsembleHeUniform,
    'glorot_normal': EnsembleGlorotNormal,
    'glorot_uniform': EnsembleGlorotUniform
}
def _decode_initializer(name):
    """Resolve an initializer spec to an ensemble-aware initializer class.

    ``None`` defaults to ``'glorot_uniform'``. Known string names are mapped
    through ``ensemble_init``; anything else (unknown strings, or objects
    such as initializer instances) is returned unchanged.
    """
    key = 'glorot_uniform' if name is None else name
    if isinstance(key, str) and key in ensemble_init:
        return ensemble_init[key]
    return key
import tensorflow as tf
import tensorflow_probability as tfp
from rlutils.tf.functional import compute_accuracy
tfd = tfp.distributions
class GAN(tf.keras.Model):
    """Vanilla GAN trained with the non-saturating binary cross-entropy losses.

    Subclasses provide the generator/discriminator architectures via
    ``_make_generator`` / ``_make_discriminator``; the latent prior is a
    standard normal of dimension ``noise_dim``.
    """

    def __init__(self, n_critics=5, noise_dim=100):
        super(GAN, self).__init__()
        # n_critics: discriminator updates per generator update (used by
        # subclasses; the base train_step updates each player once).
        self.n_critics = n_critics
        self.noise_dim = noise_dim
        self.generator = self._make_generator()
        self.discriminator = self._make_discriminator()
        self.prior = self._make_prior()
        # from_logits=True: the discriminator outputs raw logits, not sigmoids.
        self.cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
        self.logger = None

    def compile(self, generator_optimizer, discriminator_optimizer):
        """Attach separate optimizers for the generator and discriminator."""
        self.generator_optimizer = generator_optimizer
        self.discriminator_optimizer = discriminator_optimizer
        super(GAN, self).compile()

    @tf.function
    def generate(self, z):
        """Decode latent codes ``z`` into samples (inference mode)."""
        return self.generator(z, training=False)

    def _make_prior(self):
        # Standard diagonal normal over the latent space.
        return tfd.Independent(tfd.Normal(loc=tf.zeros(self.noise_dim), scale=tf.ones(self.noise_dim)),
                               reinterpreted_batch_ndims=1)

    def _make_generator(self) -> tf.keras.Model:
        raise NotImplementedError

    def _make_discriminator(self) -> tf.keras.Model:
        raise NotImplementedError

    def set_logger(self, logger):
        self.logger = logger

    def log_tabular(self):
        self.logger.log_tabular('GenLoss', average_only=True)
        self.logger.log_tabular('DiscLoss', average_only=True)

    @tf.function
    def sample(self, n):
        """Draw ``n`` samples by decoding latent draws from the prior."""
        print(f'Tracing sample with n={n}')  # printed once per retrace
        noise = self.prior.sample(n)
        outputs = self.generator(noise, training=False)
        return outputs

    def predict_real_fake(self, x):
        """Probability that ``x`` is real, according to the discriminator."""
        print(f'Tracing predict_real_fake with x={x}')
        return tf.sigmoid(self.discriminator(x, training=False))

    def _discriminator_loss(self, outputs):
        # Real samples labelled 1, generated samples labelled 0.
        real_output, fake_output = outputs
        real_loss = self.cross_entropy(tf.ones_like(real_output), real_output)
        fake_loss = self.cross_entropy(tf.zeros_like(fake_output), fake_output)
        total_loss = real_loss + fake_loss
        return total_loss

    def _generator_loss(self, outputs):
        # Non-saturating loss: push the discriminator to call fakes real.
        return self.cross_entropy(tf.ones_like(outputs), outputs)

    @tf.function
    def _train_generator(self, real_images):
        """One generator gradient step; returns the generator loss tensor."""
        batch_size = tf.shape(real_images)[0]
        noise = self.prior.sample(batch_size)
        with tf.GradientTape() as tape:
            generated_images = self.generator(noise, training=True)
            fake_output = self.discriminator(generated_images, training=True)
            gen_loss = self._generator_loss(fake_output)
        grads = tape.gradient(gen_loss, self.generator.trainable_variables)
        self.generator_optimizer.apply_gradients(zip(grads, self.generator.trainable_variables))
        return gen_loss

    @tf.function
    def _train_discriminator(self, real_images):
        """One discriminator gradient step; returns the discriminator loss tensor."""
        batch_size = tf.shape(real_images)[0]
        noise = self.prior.sample(batch_size)
        # Generation happens outside the tape: only discriminator weights update.
        generated_images = self.generator(noise, training=True)
        with tf.GradientTape() as tape:
            real_output = self.discriminator(real_images, training=True)
            fake_output = self.discriminator(generated_images, training=True)
            disc_loss = self._discriminator_loss(outputs=(real_output, fake_output))
        grads = tape.gradient(disc_loss, self.discriminator.trainable_variables)
        self.discriminator_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_variables))
        return disc_loss

    def train_step(self, data):
        """Keras hook: one generator and one discriminator update."""
        x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
        gen_loss = self._train_generator(x)
        disc_loss = self._train_discriminator(x)
        return {
            'gen_loss': gen_loss,
            'disc_loss': disc_loss
        }
class ACGAN(GAN):
    """Auxiliary-classifier GAN: the discriminator also predicts class labels.

    The discriminator returns ``(validity_logits, class_logits)`` and the
    generator is conditioned on labels; a sparse classification loss is added
    to both players' objectives with weight ``class_loss_weight``.
    """

    def __init__(self, num_classes, class_loss_weight=1., *args, **kwargs):
        self.num_classes = num_classes
        self.class_loss_weight = class_loss_weight
        super(ACGAN, self).__init__(*args, **kwargs)

    @tf.function
    def sample_with_labels(self, labels):
        """Generate one sample per entry of ``labels``."""
        noise = self.prior.sample(labels.shape[0])
        return self.generate(z=(noise, labels))

    @tf.function
    def sample(self, n):
        """Generate ``n`` samples with uniformly random class labels."""
        labels = tf.random.uniform(shape=(n,), minval=0, maxval=self.num_classes, dtype=tf.int32)
        return self.sample_with_labels(labels=labels)

    def _compute_classification_loss(self, logits, labels):
        # Mean sparse cross-entropy over the batch.
        loss = tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
        return tf.reduce_mean(loss, axis=0)

    def _generator_loss(self, outputs):
        # Base validity loss plus auxiliary classification loss on fakes.
        fake_output, fake_logits, fake_labels = outputs
        validity_loss = super(ACGAN, self)._generator_loss(fake_output)
        classification_loss = self._compute_classification_loss(fake_logits, fake_labels)
        loss = validity_loss + classification_loss * self.class_loss_weight
        return loss

    def _discriminator_loss(self, outputs):
        # Classification loss is computed on real samples only.
        real_output, fake_output, real_logits, real_labels = outputs
        validity_loss = super(ACGAN, self)._discriminator_loss(outputs=(real_output, fake_output))
        classification_loss = self._compute_classification_loss(real_logits, real_labels)
        loss = validity_loss + classification_loss * self.class_loss_weight
        return loss

    @tf.function
    def _train_generator(self, data):
        """One generator step conditioned on real labels; returns (loss, accuracy)."""
        real_images, real_labels = data
        batch_size = tf.shape(real_images)[0]
        noise = self.prior.sample(batch_size)
        with tf.GradientTape() as tape:
            generated_images = self.generator(inputs=(noise, real_labels), training=True)
            fake_output, fake_logits = self.discriminator(generated_images, training=True)
            gen_loss = self._generator_loss(outputs=(fake_output, fake_logits, real_labels))
        grads = tape.gradient(gen_loss, self.generator.trainable_variables)
        self.generator_optimizer.apply_gradients(zip(grads, self.generator.trainable_variables))
        accuracy = compute_accuracy(fake_logits, real_labels)
        return gen_loss, accuracy

    @tf.function
    def _train_discriminator(self, data):
        """One discriminator step; returns (loss, classification accuracy on reals)."""
        real_images, real_labels = data
        batch_size = tf.shape(real_images)[0]
        noise = self.prior.sample(batch_size)
        # Generation outside the tape: only discriminator weights update here.
        generated_images = self.generator(inputs=(noise, real_labels), training=True)
        with tf.GradientTape() as tape:
            real_output, real_logits = self.discriminator(real_images, training=True)
            fake_output, _ = self.discriminator(generated_images, training=True)
            disc_loss = self._discriminator_loss(outputs=(real_output, fake_output,
                                                          real_logits,
                                                          real_labels))
        grads = tape.gradient(disc_loss, self.discriminator.trainable_variables)
        self.discriminator_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_variables))
        # compute accuracy
        accuracy = compute_accuracy(real_logits, real_labels)
        return disc_loss, accuracy

    def train_step(self, data):
        """n_critics - 1 extra discriminator updates, then one of each player."""
        x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
        for _ in range(self.n_critics - 1):
            self._train_discriminator(data=(x, y))
        disc_loss, disc_accuracy = self._train_discriminator(data=(x, y))
        gen_loss, gen_accuracy = self._train_generator(data=(x, y))
        return {
            'gen_loss': gen_loss,
            'gen_acc': gen_accuracy,
            'disc_loss': disc_loss,
            'disc_acc': disc_accuracy
        }

    def test_step(self, data):
        """Evaluation: discriminator classification accuracy on real data."""
        x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
        _, real_logits = self.discriminator(x, training=False)
        disc_accuracy = compute_accuracy(real_logits, y)
        return {
            'disc_acc': disc_accuracy
        }
import tensorflow as tf
from rlutils.tf.functional import compute_accuracy
from tqdm.auto import tqdm
from .base import GAN, ACGAN
class WassersteinGANGradientPenalty(GAN):
    """WGAN with gradient penalty.

    The critic maximizes the Wasserstein-1 surrogate
    ``E[D(real)] - E[D(fake)]`` with a penalty pushing the critic's gradient
    norm on interpolated samples towards 1, weighted by ``gp_weight``.
    """

    def __init__(self, gp_weight=10, *args, **kwargs):
        # gp_weight: coefficient of the gradient-penalty term in the critic loss.
        self.gp_weight = gp_weight
        super(WassersteinGANGradientPenalty, self).__init__(*args, **kwargs)

    def _discriminator_loss(self, outputs):
        """Negative Wasserstein estimate: E[D(fake)] - E[D(real)]."""
        real_output, fake_output = outputs
        loss = tf.reduce_mean(fake_output, axis=0) - tf.reduce_mean(real_output, axis=0)
        return loss

    def _generator_loss(self, outputs):
        """Generator maximizes the critic score on fakes."""
        return -tf.reduce_mean(outputs, axis=0)

    def _compute_gp(self, real_images, fake_images, training):
        """Gradient penalty E[(||grad D(x_hat)|| - 1)^2] on random interpolations."""
        batch_size = tf.shape(real_images)[0]
        alpha = tf.random.uniform(shape=[batch_size], minval=0., maxval=1.)
        # Broadcast alpha over all non-batch dims (works for images or vectors).
        for _ in range(len(real_images.shape) - 1):
            alpha = tf.expand_dims(alpha, axis=-1)
        interpolate = real_images * alpha + fake_images * (1 - alpha)
        with tf.GradientTape() as tape:
            tape.watch(interpolate)
            prediction = self.discriminator(interpolate, training=training)
        grads = tape.gradient(prediction, interpolate)
        grads = tf.reshape(grads, shape=(batch_size, -1))
        grads = tf.square(tf.norm(grads, axis=-1) - 1)
        return tf.reduce_mean(grads, axis=0)

    @tf.function
    def _train_discriminator(self, real_images):
        """One critic update; returns the scalar critic loss tensor.

        BUGFIX: this used to return ``{'disc_loss': loss}``, which broke both
        the inherited ``GAN.train_step`` (which wraps the return value in a
        dict again) and the ``{disc_loss:.4f}`` formatting in ``train`` below
        (``:.4f`` on a dict raises TypeError). Returning the tensor matches
        the base-class convention.
        """
        batch_size = tf.shape(real_images)[0]
        noise = self.prior.sample(batch_size)
        generated_images = self.generator(noise, training=True)
        with tf.GradientTape() as tape:
            real_output = self.discriminator(real_images, training=True)
            fake_output = self.discriminator(generated_images, training=True)
            disc_loss = self._discriminator_loss(outputs=(real_output, fake_output))
            gp_loss = self._compute_gp(real_images, generated_images, training=True)
            disc_loss = disc_loss + gp_loss * self.gp_weight
        grads = tape.gradient(disc_loss, self.discriminator.trainable_variables)
        self.discriminator_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_variables))
        return disc_loss

    def train(self,
              x=None,
              batch_size=None,
              epochs=1,
              callbacks=None):
        """Custom loop: n_critics critic updates per generator update."""
        callbacks = callbacks or []  # previously crashed when callbacks=None
        for callback in callbacks:
            callback.set_model(self)
        t = 0
        dataset = tf.data.Dataset.from_tensor_slices(x).shuffle(buffer_size=x.shape[0]).batch(batch_size)
        for i in range(1, epochs + 1):
            bar = tqdm(total=-(-x.shape[0] // batch_size))  # ceil division
            gen_loss = 0
            for images in dataset:
                disc_loss = self._train_discriminator(images)
                if (t == self.n_critics - 1):
                    gen_loss = self._train_generator(images)
                t = (t + 1) % self.n_critics
                bar.update(1)
                bar.set_description(f'Epoch {i}/{epochs}, disc_loss: {disc_loss:.4f}, gen_loss: {gen_loss:.4f}')
            bar.close()
class ACWassersteinGANGradientPenalty(ACGAN, WassersteinGANGradientPenalty):
    """Auxiliary-classifier WGAN-GP.

    Combines label conditioning / classification losses from ``ACGAN`` with
    Wasserstein validity losses and the gradient penalty from
    ``WassersteinGANGradientPenalty``.
    """

    def _discriminator_loss(self, outputs):
        real_output, fake_output, real_logits, real_labels = outputs
        # Call the Wasserstein validity loss explicitly (not ACGAN's BCE one,
        # which would otherwise win under the MRO).
        validity_loss = WassersteinGANGradientPenalty._discriminator_loss(self, outputs=(real_output, fake_output))
        real_class_loss = self._compute_classification_loss(real_logits, real_labels)
        loss = validity_loss + self.class_loss_weight * real_class_loss
        return loss

    def _compute_gp(self, real_images, fake_images, training):
        """Gradient penalty computed on the validity head only."""
        batch_size = tf.shape(real_images)[0]
        alpha = tf.random.uniform(shape=[batch_size], minval=0., maxval=1.)
        # Broadcast alpha over all non-batch dimensions.
        for _ in range(len(real_images.shape) - 1):
            alpha = tf.expand_dims(alpha, axis=-1)
        interpolate = real_images * alpha + fake_images * (1 - alpha)
        with tf.GradientTape() as tape:
            tape.watch(interpolate)
            validity, _ = self.discriminator(interpolate, training=training)  # the GP should only be in validity path
        grads = tape.gradient(validity, interpolate)
        grads = tf.reshape(grads, shape=(batch_size, -1))
        grads = tf.square(tf.norm(grads, axis=-1) - 1)
        return tf.reduce_mean(grads, axis=0)

    @tf.function
    def _train_discriminator(self, data):
        """One critic step with gradient penalty; returns (loss, accuracy)."""
        print('Tracing discriminator')
        real_images, real_labels = data
        batch_size = tf.shape(real_images)[0]
        noise = self.prior.sample(batch_size)
        # Generation outside the tape: only critic weights update here.
        generated_images = self.generator(inputs=(noise, real_labels), training=True)
        with tf.GradientTape() as tape:
            real_output, real_logits = self.discriminator(real_images, training=True)
            fake_output, _ = self.discriminator(generated_images, training=True)
            disc_loss = self._discriminator_loss(outputs=(real_output,
                                                          fake_output,
                                                          real_logits,
                                                          real_labels))
            gp_loss = self._compute_gp(real_images, generated_images, training=True)
            disc_loss = disc_loss + gp_loss * self.gp_weight
        grads = tape.gradient(disc_loss, self.discriminator.trainable_variables)
        self.discriminator_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_variables))
        # compute accuracy
        accuracy = compute_accuracy(real_logits, real_labels)
        return disc_loss, accuracy

    def train(self,
              x=None,
              y=None,
              batch_size=None,
              epochs=1,
              callbacks=None):
        """Custom loop: n_critics critic steps per generator step, with
        per-epoch running metrics shown on the progress bar."""
        for callback in callbacks:
            callback.set_model(self)
        t = 0
        dataset = tf.data.Dataset.from_tensor_slices((x, y)).shuffle(buffer_size=x.shape[0]).batch(batch_size)
        for i in range(1, epochs + 1):
            disc_acc_metric = tf.keras.metrics.Mean()
            gen_acc_metric = tf.keras.metrics.Mean()
            disc_loss_metric = tf.keras.metrics.Mean()
            gen_loss_metric = tf.keras.metrics.Mean()
            bar = tqdm(total=-(-x.shape[0] // batch_size))  # ceil division
            for images, labels in dataset:
                disc_loss, disc_accuracy = self._train_discriminator(data=(images, labels))
                disc_loss_metric.update_state(disc_loss)
                disc_acc_metric.update_state(disc_accuracy)
                if (t == self.n_critics - 1):
                    gen_loss, gen_accuracy = self._train_generator(data=(images, labels))
                    gen_loss_metric.update_state(gen_loss)
                    gen_acc_metric.update_state(gen_accuracy)
                t = (t + 1) % self.n_critics
                bar.update(1)
                bar.set_description(f'Epoch {i}/{epochs}, disc_loss: {disc_loss_metric.result():.4f}, '
                                    f'gen_loss: {gen_loss_metric.result():.4f}, '
                                    f'disc_acc: {disc_acc_metric.result():.4f}, '
                                    f'gen_acc: {gen_acc_metric.result():.4f}')
            bar.close()
            for callback in callbacks:
                callback.on_epoch_end(i)
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
class BetaVAE(tf.keras.Model):
    """beta-VAE: a VAE whose KL term is weighted by ``beta`` (beta=1 is a
    plain VAE).

    Subclasses supply ``_make_encoder`` / ``_make_decoder``; both networks
    must output tfp distribution objects (the code calls ``.sample()``,
    ``.mean()`` and ``.log_prob()`` on them).
    """

    def __init__(self, latent_dim, beta=1.):
        super(BetaVAE, self).__init__()
        self.latent_dim = latent_dim
        self.beta = beta  # weight on the KL-divergence term
        self.encoder = self._make_encoder()
        self.decoder = self._make_decoder()
        self.prior = self._make_prior()
        self.logger = None

    def _make_encoder(self) -> tf.keras.Model:
        raise NotImplementedError

    def _make_decoder(self) -> tf.keras.Model:
        raise NotImplementedError

    def _make_prior(self):
        # Standard normal prior over the latent space.
        return tfd.Independent(tfd.Normal(loc=tf.zeros(shape=[self.latent_dim], dtype=tf.float32),
                                          scale=tf.ones(shape=[self.latent_dim], dtype=tf.float32)),
                               reinterpreted_batch_ndims=1)

    def encode_distribution(self, inputs):
        """Posterior q(z|x) as a distribution object (inference mode)."""
        return self.encoder(inputs, training=False)

    def encode_sample(self, inputs):
        """One posterior sample z ~ q(z|x)."""
        encode_distribution = self.encode_distribution(inputs)
        encode_sample = encode_distribution.sample()
        return encode_sample

    def encode_mean(self, inputs):
        """Posterior mean E[z|x]."""
        encode_distribution = self.encode_distribution(inputs)
        encode_sample = encode_distribution.mean()
        return encode_sample

    def decode_distribution(self, z):
        """Likelihood p(x|z) as a distribution object (inference mode)."""
        return self.decoder(z, training=False)

    def decode_sample(self, z):
        decode_distribution = self.decode_distribution(z)
        decode_sample = decode_distribution.sample()
        return decode_sample

    def decode_mean(self, z):
        # Consistency fix: route through decode_distribution like the sibling
        # methods (previously called self.decoder directly; behavior identical).
        decode_distribution = self.decode_distribution(z)
        decode_sample = decode_distribution.mean()
        return decode_sample

    @tf.function
    def elbo(self, inputs):
        """Per-sample evidence lower bound; only defined for beta == 1."""
        assert self.beta == 1., 'Only Beta=1.0 has ELBO'
        nll, kld = self(inputs, training=False)
        elbo = -nll - kld
        return elbo

    def sample(self, full_path=True):
        """Draw from the model; full_path samples the decoder, else uses its mean."""
        z = self.prior.sample()
        mean = self.decode_mean(z)
        sample = self.decode_sample(z)
        return tf.cond(full_path, true_fn=lambda: sample, false_fn=lambda: mean)

    def call(self, inputs, training=None, mask=None):
        """Return per-sample (negative log-likelihood, KL(q(z|x) || p(z)))."""
        print(f'Tracing _forward with input {inputs}')
        posterior = self.encoder(inputs, training=training)
        # NOTE(review): gradient flow assumes the posterior sample is
        # reparameterized — confirm the encoder's distribution type.
        encode_sample = posterior.sample()
        out = self.decoder(encode_sample, training=training)
        log_likelihood = out.log_prob(inputs)  # (None,)
        kl_divergence = tfd.kl_divergence(posterior, self.prior)
        return -log_likelihood, kl_divergence

    def train_step(self, data):
        """One gradient step on mean(nll + beta * kld) over the batch."""
        x, y, sample_weights = tf.keras.utils.unpack_x_y_sample_weight(data)
        with tf.GradientTape() as tape:
            nll, kld = self(x, training=True)
            loss = nll + kld * self.beta
            loss = tf.reduce_mean(loss, axis=0)
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        return {
            'loss': loss,
            'nll': nll,
            'kld': kld
        }

    def test_step(self, data):
        x, y, sample_weights = tf.keras.utils.unpack_x_y_sample_weight(data)
        nll, kld = self(x, training=False)
        loss = nll + kld * self.beta
        loss = tf.reduce_mean(loss, axis=0)
        return {
            'loss': loss,
            'nll': nll,
            'kld': kld
        }

    @tf.function
    def train_on_batch(self,
                       x,
                       y=None,
                       sample_weight=None,
                       class_weight=None,
                       reset_metrics=True,
                       return_dict=False):
        """Keras-compatible wrapper forwarding to train_step."""
        return self.train_step(data=(x,))

    @tf.function
    def test_on_batch(self,
                      x,
                      y=None,
                      sample_weight=None,
                      reset_metrics=True,
                      return_dict=False):
        """Keras-compatible wrapper forwarding to test_step."""
        return self.test_step(data=(x,))
class ConditionalBetaVAE(BetaVAE):
    """
    x + cond -> z + cond -> x
    Encoder should take in (x, cond); the decoder takes (z, cond).
    """

    def call(self, inputs, training=None, mask=None):
        """Per-sample (nll, kld) for the conditional model."""
        x, cond = inputs
        print(f'Tracing _forward with x={x}, cond={cond}')
        posterior = self.encoder(inputs=(x, cond), training=training)
        encode_sample = posterior.sample()
        out = self.decoder((encode_sample, cond), training=training)
        log_likelihood = out.log_prob(x)  # (None,)
        kl_divergence = tfd.kl_divergence(posterior, self.prior)
        return -log_likelihood, kl_divergence

    def sample(self, cond, full_path=tf.constant(True)):
        """Draw one sample per row of ``cond``."""
        print(f'Tracing sample with cond={cond}')
        z = self.prior.sample(sample_shape=tf.shape(cond)[0])  # (None, z_dim)
        out_dist = self.decode_distribution(z=(z, cond))
        return tf.cond(full_path, true_fn=lambda: out_dist.sample(), false_fn=lambda: out_dist.mean())

    def sample_n(self, cond, n, full_path=True):
        """Draw ``n`` samples per row of ``cond``; returns (n, batch, ...)."""
        print(f'Tracing sample with cond={cond}, n={n}')
        batch_size = tf.shape(cond)[0]
        cond = tf.tile(cond, (n, 1))  # repeat conditioning n times along batch
        samples = self.sample(cond, full_path=full_path)  # (None * n, y_dim)
        shape = [tf.shape(samples)[k] for k in range(tf.rank(samples))]
        return tf.reshape(samples, shape=[n, batch_size] + shape[1:])
import tensorflow as tf
import tensorflow_probability as tfp
from rlutils.tf.future import get_adam_optimizer
tfd = tfp.distributions
tfl = tfp.layers
eps = 1e-6
class Flow(tf.keras.Model):
    """
    A flow is a function f that defines a forward (call) and backward path.

    Subclasses implement ``call`` — the forward transform, returning
    ``(z, log_det)`` per SequentialFlow's usage — and ``backward``, its
    inverse mapping.
    """

    def call(self, x, training=None, mask=None):
        raise NotImplementedError

    def backward(self, z, training):
        raise NotImplementedError
class SequentialFlow(Flow):
    """
    A sequence of flows. It is a flow by itself.

    Holds an ordered list of sub-flows plus a base prior; the forward pass
    accumulates per-flow log-determinants so ``log_prob`` follows the
    change-of-variables formula.
    """

    def __init__(self, lr=1e-3):
        super(SequentialFlow, self).__init__()
        self.flows = self._make_flow()
        self.prior = self._make_prior()
        self.logger = None
        # get_adam_optimizer is a project helper (rlutils.tf.future).
        self.compile(optimizer=get_adam_optimizer(lr=lr))

    def _make_flow(self):
        raise NotImplementedError

    def _make_prior(self):
        raise NotImplementedError

    def call(self, x, training=None, mask=None):
        """Forward pass: (z, total log|det J|) accumulated over sub-flows."""
        z, log_det = x, tf.zeros(shape=tf.shape(x)[0], dtype=tf.float32)
        for flow in self.flows:
            z, delta_log_det = flow(z, training=training)
            log_det += delta_log_det
        return z, log_det

    def backward(self, z, training):
        """Inverse pass: apply sub-flow inverses in reverse order."""
        x = z
        for flow in reversed(self.flows):
            x = flow.backward(x, training=training)
        return x

    def infer(self, x):
        """Map data to latent codes (forward pass, log-det discarded)."""
        return self(x, training=False)[0]

    def log_prob(self, x, training=False):
        """log p(x) = log p(z) + log|det J| (change of variables)."""
        print(f'Tracing log_prob with x:{x}, training:{training}')
        z, log_det = self(x, training=training)
        return log_det + self.prior.log_prob(z)

    def sample(self, n):
        """Draw ``n`` samples by inverting the flow on prior draws."""
        print(f'Tracing sample with n:{n}')
        z = self.prior.sample(sample_shape=n)
        x = self.backward(z, training=False)
        return x

    def _forward(self, x, training):
        """Mean negative log-likelihood over the batch."""
        print(f'Tracing _forward with x:{x}, training:{training}')
        loss = tf.reduce_mean(-self.log_prob(x, training=training), axis=0)
        return loss

    def train_step(self, data):
        """Keras hook: one NLL gradient step (adds layer regularization losses)."""
        print(f'Tracing train_step with data:{data}')
        with tf.GradientTape() as tape:
            loss = self._forward(data, training=True)
            final_loss = loss + sum(self.losses)
        gradients = tape.gradient(final_loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        return {'loss': loss}

    def test_step(self, data):
        loss = self._forward(data, training=False)
        return {'loss': loss}
class ConditionalFlowModel(SequentialFlow):
    """Conditional flow p(y|x) whose "prior" is conditioned on x.

    Here ``self.prior`` is expected to be callable as
    ``self.prior(x, training=...)`` and return a distribution over z
    (cf. ConditionalRealNVP._make_prior).
    """

    def __init__(self, lr=1e-3):
        super(ConditionalFlowModel, self).__init__(lr=lr)

    def log_prob(self, data, training=False):
        """log p(y|x) via the x-conditioned prior plus the flow's log-det."""
        x, y = data
        print(f'Tracing log_prob with x:{x}, y:{y}, training:{training}')
        prior = self.prior(x, training=training)
        z, log_det = self(y, training=training)
        return log_det + prior.log_prob(z)

    def sample(self, x):
        """Draw one y per row of x."""
        print(f'Tracing sample with x:{x}')
        z = self.prior(x, training=False).sample()
        y = self.backward(z, training=False)
        return y

    def sample_n(self, x, n):
        """Draw n samples per row of x; returns shape (n, batch, y_dim)."""
        print(f'Tracing sample with x:{x}, n:{n}')
        x = tf.tile(x, (n, 1))
        output = self.sample(x)
        # NOTE(review): relies on self.y_dim being set by a subclass
        # (e.g. ConditionalRealNVP) — confirm before standalone use.
        return tf.reshape(output, shape=(n, -1, self.y_dim))
import tensorflow as tf
import tensorflow_probability as tfp
from rlutils.tf.distributions import make_independent_normal_from_params
from rlutils.tf.nn.functional import build_mlp
from .base import Flow, SequentialFlow, ConditionalFlowModel
tfd = tfp.distributions
tfl = tfp.layers
class AffineCouplingFlow(Flow):
    """RealNVP-style affine coupling layer.

    The input is split into two halves: one half passes through unchanged and
    parameterizes an affine transform (scale + shift) of the other half.
    ``parity`` swaps which half is transformed, so stacked layers with
    alternating parity cover all dimensions. The log-scale output is
    tanh-squashed and rescaled by two learned scalars for stability.
    """

    def __init__(self, input_dim, parity, mlp_hidden=64, num_layers=3):
        super(AffineCouplingFlow, self).__init__()
        assert input_dim > 1, f'input_dim must be greater than 1. Got {input_dim}'
        self.parity = parity
        self.num_layers = num_layers
        # Learned global rescaling of the tanh-squashed log-scale output.
        self.scale = tf.Variable(initial_value=0., dtype=tf.float32, trainable=True, name='scale')
        self.scale_shift = tf.Variable(initial_value=0., dtype=tf.float32, trainable=True, name='scale_shift')
        self.left_dim = input_dim // 2
        self.right_dim = input_dim - self.left_dim
        # Conditioner net: maps the pass-through half to (s, t) for the other half.
        if not self.parity:
            self.net = build_mlp(input_dim=self.left_dim, output_dim=self.right_dim * 2, mlp_hidden=mlp_hidden,
                                 batch_norm=False, num_layers=self.num_layers)
        else:
            self.net = build_mlp(input_dim=self.right_dim, output_dim=self.left_dim * 2, mlp_hidden=mlp_hidden,
                                 batch_norm=False, num_layers=self.num_layers)

    def call(self, x, training=None, mask=None):
        """Forward transform; returns (z, log|det J|) with log-det = sum(log_s)."""
        x0, x1 = x[:, :self.left_dim], x[:, self.left_dim:]
        if self.parity:
            x0, x1 = x1, x0
        s, t = tf.split(self.net(x0, training=training), 2, axis=-1)
        log_s = tf.tanh(s) * self.scale + self.scale_shift
        z0 = x0  # identity on the conditioning half
        z1 = tf.exp(log_s) * x1 + t
        if self.parity:
            z0, z1 = z1, z0
        z = tf.concat((z0, z1), axis=-1)
        return z, tf.reduce_sum(log_s, axis=-1)

    def backward(self, z, training=None):
        """Exact inverse of ``call`` (no log-det returned here)."""
        z0, z1 = z[:, :self.left_dim], z[:, self.left_dim:]
        if self.parity:
            z0, z1 = z1, z0
        s, t = tf.split(self.net(z0, training=training), 2, axis=-1)
        log_s = tf.tanh(s) * self.scale + self.scale_shift
        x0 = z0
        x1 = (z1 - t) * tf.exp(-log_s)
        if self.parity:
            x0, x1 = x1, x0
        x = tf.concat((x0, x1), axis=-1)
        return x
class RealNVP(SequentialFlow):
    """RealNVP normalizing flow: stacked affine coupling layers with
    alternating parity over a standard-normal base distribution."""

    def __init__(self, x_dim, num_layers=4, num_layers_coupling=3, mlp_hidden=64, lr=1e-3):
        self.x_dim = x_dim
        self.num_layers = num_layers
        self.mlp_hidden = mlp_hidden
        self.num_layers_coupling = num_layers_coupling
        super(RealNVP, self).__init__(lr=lr)
        self.build(input_shape=tf.TensorShape([None, self.x_dim]))

    def _make_flow(self):
        # Each round adds a parity=True then a parity=False coupling layer,
        # so every dimension gets transformed.
        return [
            AffineCouplingFlow(input_dim=self.x_dim,
                               parity=parity,
                               mlp_hidden=self.mlp_hidden,
                               num_layers=self.num_layers_coupling)
            for _ in range(self.num_layers)
            for parity in (True, False)
        ]

    def _make_prior(self):
        loc = tf.zeros(shape=self.x_dim)
        scale = tf.ones(shape=self.x_dim)
        return tfd.Independent(tfd.Normal(loc=loc, scale=scale),
                               reinterpreted_batch_ndims=1)
class ConditionalRealNVP(RealNVP, ConditionalFlowModel):
    """
    We consider conditional flow via fixed prior dependent on x from x -> y' -> y.
    Note that we assume the input actions are raw actions before Tanh!
    """

    def __init__(self, x_dim, y_dim, mlp_hidden=128, num_layers=4, lr=1e-3, num_layers_coupling=3):
        # y_dim must be set before RealNVP.__init__ builds the flow/prior,
        # since _make_prior (below) reads it.
        self.y_dim = y_dim
        super(ConditionalRealNVP, self).__init__(x_dim=x_dim, mlp_hidden=mlp_hidden,
                                                 num_layers=num_layers, lr=lr,
                                                 num_layers_coupling=num_layers_coupling)

    def _make_prior(self):
        # Unlike RealNVP, the "prior" is a network x -> Normal params wrapped
        # in a DistributionLambda, so calling it yields a distribution — as
        # ConditionalFlowModel.log_prob / sample expect.
        model = build_mlp(input_dim=self.x_dim, output_dim=self.y_dim * 2, mlp_hidden=self.mlp_hidden,
                          num_layers=self.num_layers_coupling, batch_norm=False)
        model.add(tfl.DistributionLambda(make_distribution_fn=make_independent_normal_from_params,
                                         convert_to_tensor_fn=tfd.Distribution.sample))
        return model
import os
import pprint
import random
from abc import abstractmethod, ABC
import numpy as np
import rlutils.gym
import rlutils.infra as rl_infra
from rlutils.logx import EpochLogger, setup_logger_kwargs
from rlutils.replay_buffers import PyUniformReplayBuffer, GAEBuffer
from tqdm.auto import trange
class BaseRunner(ABC):
    """Skeleton experiment runner: seeding, environment/agent/logger setup
    and the epoch x steps-per-epoch training loop.

    Subclasses implement ``run_one_step`` and may override the lifecycle
    hooks (``on_epoch_begin`` / ``on_epoch_end`` / ``on_train_begin`` /
    ``on_train_end``).
    """

    def __init__(self, seed, steps_per_epoch, epochs, exp_name=None, logger_path='data'):
        self.exp_name = exp_name
        self.logger_path = logger_path
        self.steps_per_epoch = steps_per_epoch
        self.epochs = epochs
        self.seed = seed
        self.global_step = 1
        self.total_steps = steps_per_epoch * epochs
        # The Seeder derives all component seeds from the master seed.
        self.seeder = rl_infra.Seeder(seed=seed)
        self.timer = rl_infra.StopWatch()
        self.agent = None
        self.setup_global_seed()

    def setup_logger(self, config, tensorboard=False):
        """Create the EpochLogger, persist ``config`` and wire the logger into
        the timer and agent. Call after setup_env/setup_agent (uses env_name
        and the agent's class name when exp_name is None)."""
        if self.exp_name is None:
            self.exp_name = f'{self.env_name}_{self.agent.__class__.__name__}_test'
        assert self.exp_name is not None, 'Call setup_env before setup_logger if exp passed by the contructor is None.'
        logger_kwargs = setup_logger_kwargs(exp_name=self.exp_name, data_dir=self.logger_path, seed=self.seed)
        self.logger = EpochLogger(**logger_kwargs, tensorboard=tensorboard)
        self.logger.save_config(config)
        self.timer.set_logger(logger=self.logger)
        self.agent.set_logger(logger=self.logger)

    def setup_global_seed(self):
        """Seed the global numpy/random RNGs and record the seeds used."""
        self.seeds_info = {}
        # we set numpy seed first and use it to generate other seeds
        global_np_seed = self.seeder.generate_seed()
        global_random_seed = self.seeder.generate_seed()
        np.random.seed(global_np_seed)
        random.seed(global_random_seed)
        self.seeds_info['global_np'] = global_np_seed
        self.seeds_info['global_random'] = global_random_seed

    @property
    def seeds(self):
        """Mapping of component name -> seed actually used (for reproducibility)."""
        return self.seeds_info

    @abstractmethod
    def run_one_step(self, t):
        """Perform one training step; ``t`` is the step index within the epoch."""
        raise NotImplementedError

    def on_epoch_begin(self, epoch):
        pass

    def on_epoch_end(self, epoch):
        pass

    def on_train_begin(self):
        pass

    def on_train_end(self):
        pass

    def setup_env(self,
                  env_name,
                  env_fn=None,
                  num_parallel_env=1,
                  asynchronous=False,
                  num_test_episodes=None):
        """Create a (possibly vectorized) training environment and seed it."""
        import gym  # local import: gym is only needed once an env is created
        self.env_name = env_name
        if env_fn is None:
            env_fn = lambda: gym.make(env_name)
        self.env_fn = env_fn
        self.env = rlutils.gym.utils.create_vector_env(env_fn=env_fn,
                                                       normalize_action_space=True,
                                                       num_parallel_env=num_parallel_env,
                                                       asynchronous=asynchronous)
        env_seed = self.seeder.generate_seed()
        env_action_space_seed = self.seeder.generate_seed()
        self.env.seed(env_seed)
        self.env.action_space.seed(env_action_space_seed)
        self.seeds_info['env'] = env_seed
        self.seeds_info['env_action_space'] = env_action_space_seed
        self.num_test_episodes = num_test_episodes
        self.asynchronous = asynchronous

    def setup_agent(self, agent_cls, **kwargs):
        """Instantiate the agent from the single-env observation/action spaces."""
        self.agent = agent_cls(obs_spec=self.env.single_observation_space,
                               act_spec=self.env.single_action_space,
                               **kwargs)

    def run(self):
        """Main loop: epochs x steps_per_epoch calls to run_one_step."""
        self.on_train_begin()
        for i in range(1, self.epochs + 1):
            self.on_epoch_begin(i)
            for t in trange(self.steps_per_epoch, desc=f'Epoch {i}/{self.epochs}'):
                self.run_one_step(t)
                self.global_step += 1
            self.on_epoch_end(i)
        self.on_train_end()

    @classmethod
    def main(cls, *args, **kwargs):
        raise NotImplementedError

    def save_checkpoint(self, path=None):
        pass

    def load_checkpoint(self, path=None):
        pass

    def save_agent(self, path=None):
        """Save agent weights; defaults to <logger_dir>/agent.tf."""
        if path is None:
            path = os.path.join(self.logger.output_dir, 'agent.tf')
        self.agent.save_weights(path)

    def load_agent(self, path=None):
        """Load agent weights; defaults to <logger_dir>/agent.tf."""
        if path is None:
            path = os.path.join(self.logger.output_dir, 'agent.tf')
        self.agent.load_weights(path)
class OnPolicyRunner(BaseRunner):
    """Runner for on-policy algorithms backed by a GAE buffer.

    Each "step" collects a fixed batch of environment transitions with the
    current policy and then performs one update from the fresh data.
    """

    def setup_logger(self, config, tensorboard=False):
        """Also wire the logger into the sampler and updater."""
        super(OnPolicyRunner, self).setup_logger(config=config, tensorboard=tensorboard)
        self.sampler.set_logger(self.logger)
        self.updater.set_logger(self.logger)

    def setup_replay_buffer(self, max_length, gamma, lam):
        """GAE buffer sized to one sampling round; gamma/lam are the GAE params."""
        self.replay_buffer = GAEBuffer.from_vec_env(self.env, max_length=max_length, gamma=gamma, lam=lam)

    def setup_sampler(self, num_steps):
        """Trajectory sampler collecting ``num_steps`` steps per env per round."""
        self.num_steps = num_steps
        self.sampler = rl_infra.samplers.TrajectorySampler(env=self.env)

    def setup_updater(self):
        self.updater = rl_infra.OnPolicyUpdater(agent=self.agent, replay_buffer=self.replay_buffer)

    def run_one_step(self, t):
        """Collect a fresh batch of transitions, then update the agent."""
        self.sampler.sample(num_steps=self.num_steps,
                            collect_fn=(self.agent.act_batch, self.agent.value_net.predict),
                            replay_buffer=self.replay_buffer)
        self.updater.update(self.global_step)

    def on_epoch_end(self, epoch):
        """Dump one row of tabular diagnostics per epoch."""
        self.logger.log_tabular('Epoch', epoch)
        self.sampler.log_tabular()
        self.updater.log_tabular()
        self.timer.log_tabular()
        self.logger.dump_tabular()

    def on_train_begin(self):
        self.sampler.reset()
        self.updater.reset()
        self.timer.start()

    @classmethod
    def main(cls, env_name, env_fn=None, seed=0, num_parallel_envs=5, agent_cls=None, agent_kwargs=None,
             batch_size=5000, epochs=200, gamma=0.99, lam=0.97, logger_path: str = None):
        """End-to-end entry point: build runner, env, agent, buffer and train.

        FIX: ``agent_kwargs`` previously defaulted to a mutable ``{}``
        (shared across calls — the classic mutable-default pitfall). It now
        defaults to None and is normalized before ``config = locals()`` so
        the logged config is unchanged.
        """
        if agent_kwargs is None:
            agent_kwargs = {}
        # Instantiate environment
        assert batch_size % num_parallel_envs == 0
        num_steps_per_sample = batch_size // num_parallel_envs
        config = locals()
        runner = cls(seed=seed, steps_per_epoch=1,
                     epochs=epochs, exp_name=None, logger_path=logger_path)
        runner.setup_env(env_name=env_name, env_fn=env_fn, num_parallel_env=num_parallel_envs,
                         asynchronous=False, num_test_episodes=None)
        runner.setup_agent(agent_cls=agent_cls, **agent_kwargs)
        runner.setup_replay_buffer(max_length=num_steps_per_sample, gamma=gamma, lam=lam)
        runner.setup_sampler(num_steps=num_steps_per_sample)
        runner.setup_updater()
        runner.setup_logger(config)
        runner.run()
class OffPolicyRunner(BaseRunner):
    """Runner for off-policy algorithms (e.g. SAC/TD3): interacts one env step
    at a time, stores transitions in a uniform replay buffer, and starts
    updating after a warm-up period. Evaluation uses a dedicated tester env."""

    def setup_logger(self, config, tensorboard=False):
        super(OffPolicyRunner, self).setup_logger(config=config, tensorboard=tensorboard)
        # Route sampler/tester/updater diagnostics through the shared logger.
        self.sampler.set_logger(self.logger)
        self.tester.set_logger(self.logger)
        self.updater.set_logger(self.logger)

    def setup_tester(self, num_test_episodes):
        # Separate evaluation env(s) with their own seed, one per test episode.
        test_env_seed = self.seeder.generate_seed()
        self.seeds_info['test_env'] = test_env_seed
        self.num_test_episodes = num_test_episodes
        self.tester = rl_infra.Tester(env_fn=self.env_fn, num_parallel_env=num_test_episodes,
                                      asynchronous=self.asynchronous, seed=test_env_seed)

    def setup_replay_buffer(self,
                            replay_size,
                            batch_size):
        self.seeds_info['replay_buffer'] = self.seeder.generate_seed()
        self.replay_buffer = PyUniformReplayBuffer.from_vec_env(self.env, capacity=replay_size,
                                                                batch_size=batch_size,
                                                                seed=self.seeds_info['replay_buffer'])

    def setup_sampler(self, start_steps):
        # For the first `start_steps` env interactions, actions are drawn
        # uniformly at random to fill the buffer with diverse data.
        self.start_steps = start_steps
        self.sampler = rl_infra.samplers.BatchSampler(env=self.env)

    def setup_updater(self, update_after, policy_delay, update_per_step, update_every):
        self.update_after = update_after
        self.updater = rl_infra.OffPolicyUpdater(agent=self.agent,
                                                 replay_buffer=self.replay_buffer,
                                                 policy_delay=policy_delay,
                                                 update_per_step=update_per_step,
                                                 update_every=update_every)

    def run_one_step(self, t):
        if self.sampler.total_env_steps < self.start_steps:
            # Warm-up phase: uniform random actions.
            self.sampler.sample(num_steps=1,
                                collect_fn=lambda o: np.asarray(self.env.action_space.sample()),
                                replay_buffer=self.replay_buffer)
        else:
            self.sampler.sample(num_steps=1,
                                collect_fn=lambda obs: self.agent.act_batch_explore(obs),
                                replay_buffer=self.replay_buffer)
        # Update handling
        if self.sampler.total_env_steps >= self.update_after:
            self.updater.update(self.global_step)

    def on_epoch_end(self, epoch):
        # Evaluate deterministically, then dump all diagnostics for the epoch.
        self.tester.test_agent(get_action=lambda obs: self.agent.act_batch_test(obs),
                               name=self.agent.__class__.__name__,
                               num_test_episodes=self.num_test_episodes)
        # Log info about epoch
        self.logger.log_tabular('Epoch', epoch)
        self.tester.log_tabular()
        self.sampler.log_tabular()
        self.updater.log_tabular()
        self.timer.log_tabular()
        self.logger.dump_tabular()

    def on_train_begin(self):
        self.sampler.reset()
        self.updater.reset()
        self.timer.start()

    @classmethod
    def main(cls,
             env_name,
             env_fn=None,
             exp_name=None,
             steps_per_epoch=10000,
             epochs=100,
             start_steps=10000,
             update_after=5000,
             update_every=1,
             update_per_step=1,
             policy_delay=1,
             batch_size=256,
             num_parallel_env=1,
             num_test_episodes=30,
             seed=1,
             # agent args
             agent_cls=None,
             agent_kwargs=None,
             # replay
             replay_size=int(1e6),
             logger_path=None
             ):
        """One-call entry point: builds env, agent, replay buffer, sampler,
        tester and updater, then trains for `epochs` epochs."""
        # Fix: the default used to be a shared mutable dict ({}); use None as
        # sentinel. Normalized before locals() so the logged config is a dict,
        # matching the previous behavior.
        if agent_kwargs is None:
            agent_kwargs = {}
        config = locals()
        runner = cls(seed=seed, steps_per_epoch=steps_per_epoch, epochs=epochs,
                     exp_name=exp_name, logger_path=logger_path)
        runner.setup_env(env_name=env_name, env_fn=env_fn, num_parallel_env=num_parallel_env,
                         asynchronous=False, num_test_episodes=num_test_episodes)
        runner.setup_agent(agent_cls=agent_cls, **agent_kwargs)
        runner.setup_replay_buffer(replay_size=replay_size,
                                   batch_size=batch_size)
        runner.setup_sampler(start_steps=start_steps)
        runner.setup_tester(num_test_episodes=num_test_episodes)
        runner.setup_updater(update_after=update_after,
                             policy_delay=policy_delay,
                             update_per_step=update_per_step,
                             update_every=update_every)
        runner.setup_logger(config=config, tensorboard=False)
        pprint.pprint(runner.seeds_info)
        runner.run()
class OfflineRunner(OffPolicyRunner):
    """Offline RL runner: trains purely from a fixed (e.g. d4rl) dataset.
    The environment is only created for evaluation; each training step is a
    single gradient update with no environment interaction."""
    def run_one_step(self, t):
        # Pure offline training: no sampling, just one updater call per step.
        self.updater.update(self.global_step)
    def setup_sampler(self, start_steps):
        # create a dummy sampler
        # NOTE(review): rl_infra.samplers.Sampler looks like the ABC with
        # abstract methods — instantiating it would raise TypeError; confirm
        # this is a concrete class in rl_infra.
        self.sampler = rl_infra.samplers.Sampler(env=self.test_env)
    def setup_env(self,
                  env_name,
                  env_fn=None,
                  num_parallel_env=1,
                  asynchronous=False,
                  num_test_episodes=None):
        # Only test environments are built; there is no training env offline.
        import gym
        self.env_name = env_name
        if env_fn is None:
            env_fn = lambda: gym.make(env_name)
        self.env_fn = env_fn
        test_env_seed = self.seeder.generate_seed()
        test_env_action_space_seed = self.seeder.generate_seed()
        self.seeds_info['test_env'] = test_env_seed
        self.seeds_info['test_env_action_space'] = test_env_action_space_seed
        if num_test_episodes is not None:
            self.test_env = rlutils.gym.utils.create_vector_env(env_fn=env_fn,
                                                                normalize_action_space=True,
                                                                num_parallel_env=num_test_episodes,
                                                                asynchronous=asynchronous)
            self.test_env.seed(test_env_seed)
            self.test_env.action_space.seed(test_env_action_space_seed)
    def setup_replay_buffer(self,
                            batch_size,
                            dataset=None,
                            reward_scale=True):
        # Load a d4rl-style transition dataset into a uniform replay buffer.
        # `dataset`, if given, must already use the keys obs/act/next_obs/rew/done.
        def rescale(x):
            # Min-max rescale to [0, 1].
            return (x - np.min(x)) / (np.max(x) - np.min(x))
        if dataset is None:
            # modify d4rl keys
            import d4rl
            self.dummy_env = self.env_fn()
            dataset = d4rl.qlearning_dataset(env=self.dummy_env)
            dataset['obs'] = dataset.pop('observations').astype(np.float32)
            dataset['act'] = dataset.pop('actions').astype(np.float32)
            dataset['next_obs'] = dataset.pop('next_observations').astype(np.float32)
            dataset['rew'] = dataset.pop('rewards').astype(np.float32)
            dataset['done'] = dataset.pop('terminals').astype(np.float32)
        if reward_scale:
            EpochLogger.log('Using reward scale', color='red')
            # max(rew - min(rew)) is algebraically the reward range
            # max(rew) - min(rew), i.e. the denominator used by rescale().
            self.agent.reward_scale_factor = np.max(dataset['rew'] - np.min(dataset['rew']))
            EpochLogger.log(f'The scale factor is {self.agent.reward_scale_factor:.2f}')
            dataset['rew'] = rescale(dataset['rew'])
        replay_size = dataset['obs'].shape[0]
        EpochLogger.log(f'Dataset size: {replay_size}')
        self.replay_buffer = PyUniformReplayBuffer.from_data_dict(
            data=dataset,
            batch_size=batch_size
        )
from abc import ABC, abstractmethod
import numpy as np
import rlutils.np as rln
from rlutils.gym.vector import VectorEnv
from tqdm.auto import trange
class Sampler(ABC):
    """Abstract interface for collecting transitions from a vectorized
    environment into a replay buffer."""
    def __init__(self, env: VectorEnv):
        self.env = env
    def reset(self):
        # Reset internal counters/state at the start of training. No-op by default.
        pass
    def set_logger(self, logger):
        # Attach the shared EpochLogger-style logger used by log_tabular/store.
        self.logger = logger
    def log_tabular(self):
        # Log sampler diagnostics at epoch end. No-op by default.
        pass
    @abstractmethod
    def sample(self, num_steps, collect_fn, replay_buffer):
        """Collect `num_steps` of data using `collect_fn` (policy callable(s))
        and store the transitions into `replay_buffer`."""
        pass
    @property
    @abstractmethod
    def total_env_steps(self):
        """Total number of environment steps taken so far (across all envs)."""
        pass
class TrajectorySampler(Sampler):
    """On-policy sampler: collects whole trajectories and finishes each path
    in the GAE buffer with the correct bootstrap value."""
    def reset(self):
        self._global_env_step = 0
    def log_tabular(self):
        self.logger.log_tabular('EpRet', with_min_and_max=True)
        self.logger.log_tabular('EpLen', average_only=True)
        self.logger.log_tabular('VVals', with_min_and_max=True)
        self.logger.log_tabular('TotalEnvInteracts', self.total_env_steps)
    @property
    def total_env_steps(self):
        return self._global_env_step
    def sample(self, num_steps, collect_fn, replay_buffer):
        """ Only collect dataset. No computation """
        # collect_fn is a pair: (actor returning (action, logp, value), value_fn).
        # Each call starts fresh episodes in every parallel env.
        self.obs = self.env.reset()
        self.ep_ret = np.zeros(shape=self.env.num_envs, dtype=np.float32)
        self.ep_len = np.zeros(shape=self.env.num_envs, dtype=np.int32)
        actor_fn, value_fn = collect_fn
        for t in trange(num_steps, desc='Sampling'):
            act, logp, val = actor_fn(self.obs)
            # NOTE(review): isinstance(act, np.float32) is only True for a
            # scalar, never for an ndarray — presumably act.dtype was meant,
            # so the clip branch looks unreachable for batched actions; confirm.
            if isinstance(act, np.float32):
                act_taken = np.clip(act, -1., 1.)
            else:
                act_taken = act
            obs2, rew, dones, infos = self.env.step(act_taken)
            # Store the un-clipped action so log-probs stay consistent.
            replay_buffer.store(self.obs, act, rew, val, logp)
            self.logger.store(VVals=val)
            self.ep_ret += rew
            self.ep_len += 1
            # There are four cases there:
            # 1. if done is False. Bootstrap (truncated due to trajectory length)
            # 2. if done is True, if TimeLimit.truncated not in info. Don't bootstrap (didn't truncate)
            # 3. if done is True, if TimeLimit.truncated in info, if it is True, Bootstrap (true truncated)
            # 4. if done is True, if TimeLimit.truncated in info, if it is False. Don't bootstrap (same time)
            if t == num_steps - 1:
                time_truncated_dones = np.array([info.get('TimeLimit.truncated', False) for info in infos],
                                                dtype=np.bool_)
                # need to finish path for all the environments
                last_vals = value_fn(obs2)
                # Bootstrap where the episode was cut off (not done, or time-truncated).
                last_vals = last_vals * np.logical_or(np.logical_not(dones), time_truncated_dones)
                replay_buffer.finish_path(dones=np.ones(shape=self.env.num_envs, dtype=np.bool_),
                                          last_vals=last_vals)
                self.logger.store(EpRet=self.ep_ret[dones], EpLen=self.ep_len[dones])
                self.obs = None
            elif np.any(dones):
                time_truncated_dones = np.array([info.get('TimeLimit.truncated', False) for info in infos],
                                                dtype=np.bool_)
                # Bootstrap only for time-truncated episodes; true terminals get 0.
                last_vals = value_fn(obs2) * time_truncated_dones
                replay_buffer.finish_path(dones=dones,
                                          last_vals=last_vals)
                self.logger.store(EpRet=self.ep_ret[dones], EpLen=self.ep_len[dones])
                self.ep_ret[dones] = 0.
                self.ep_len[dones] = 0
                self.obs = self.env.reset_done()
            else:
                self.obs = obs2
        self._global_env_step += num_steps * self.env.num_envs
class BatchSampler(Sampler):
    """Step-wise sampler for off-policy training: advances all parallel
    environments one step per iteration and stores the transitions."""
    @property
    def total_env_steps(self):
        return self._global_env_step
    def reset(self):
        self._global_env_step = 0
        self.o = self.env.reset()
        self.ep_ret = np.zeros(shape=self.env.num_envs)
        self.ep_len = np.zeros(shape=self.env.num_envs, dtype=np.int64)
    def log_tabular(self):
        self.logger.log_tabular('EpRet', with_min_and_max=True)
        self.logger.log_tabular('EpLen', average_only=True)
        self.logger.log_tabular('TotalEnvInteracts', self._global_env_step)
    def sample(self, num_steps, collect_fn, replay_buffer):
        """Run `num_steps` synchronized steps; `collect_fn` maps a batch of
        observations to a batch of actions."""
        for _ in range(num_steps):
            a = collect_fn(self.o)
            assert not np.any(np.isnan(a)), f'NAN action: {a}'
            # Step the env
            o2, r, d, infos = self.env.step(a)
            self.ep_ret += r
            self.ep_len += 1
            # Fix: np.bool was a deprecated alias of builtin bool and was
            # removed in NumPy 1.24 (AttributeError); use np.bool_ as the
            # rest of this package does.
            timeouts = rln.gather_dict_key(infos=infos, key='TimeLimit.truncated', default=False, dtype=np.bool_)
            # Ignore the "done" signal if it comes from hitting the time
            # horizon (that is, when it's an artificial terminal signal
            # that isn't based on the agent's state)
            true_d = np.logical_and(d, np.logical_not(timeouts))
            # Store experience to replay buffer
            replay_buffer.add(dict(
                obs=self.o,
                act=a,
                rew=r,
                next_obs=o2,
                done=true_d
            ))
            # Super critical, easy to overlook step: make sure to update
            # most recent observation!
            self.o = o2
            # End of trajectory handling
            if np.any(d):
                self.logger.store(EpRet=self.ep_ret[d], EpLen=self.ep_len[d])
                self.ep_ret[d] = 0
                self.ep_len[d] = 0
                self.o = self.env.reset_done()
        # Fix: count every step taken; the old `+= self.env.num_envs` only
        # counted one step per call and undercounted whenever num_steps > 1
        # (TrajectorySampler already uses this form).
        self._global_env_step += num_steps * self.env.num_envs
import multiprocessing as mp
import sys
import time
from copy import deepcopy
from enum import Enum
import numpy as np
from gym import logger
from gym.error import (AlreadyPendingCallError, NoAsyncCallError,
ClosedEnvironmentError)
from gym.vector.utils import (create_shared_memory, create_empty_array,
write_to_shared_memory, read_from_shared_memory,
concatenate, CloudpickleWrapper, clear_mpi_env_vars)
from .vector_env import VectorEnv
__all__ = ['AsyncVectorEnv']
class AsyncState(Enum):
    # State machine for the async request/response protocol: each *_async call
    # moves the env away from DEFAULT and the matching *_wait call moves it
    # back. close_extras() derives the wait-method name as '<value>_wait'.
    DEFAULT = 'default'
    WAITING_RESET = 'reset'
    WAITING_RESET_OBS = 'reset_obs'
    WAITING_RESET_DONE = 'reset_done'
    WAITING_STEP = 'step'
    # NOTE(review): no 'step_multiple_wait' method is visible in this file —
    # this state appears unused here; confirm before removing.
    WAITING_STEP_MULTIPLE = 'step_multiple'
class AsyncVectorEnv(VectorEnv):
    """Vectorized environment running each sub-environment in its own
    subprocess, communicating over pipes (optionally with observations in
    shared memory).

    Extends the usual async protocol with `reset_done` (reset only the envs
    whose last step returned done), `reset_obs` (force envs into given
    observations/states), and masked stepping via `step_async(actions, mask)`.
    """
    def __init__(self, env_fns, observation_space=None, action_space=None,
                 shared_memory=True, copy=True, context=None, daemon=True, worker=None):
        try:
            ctx = mp.get_context(context)
        except AttributeError:
            # Python 2 fallback: mp has no get_context.
            logger.warn('Context switching for `multiprocessing` is not '
                        'available in Python 2. Using the default context.')
            ctx = mp
        self.env_fns = env_fns
        self.shared_memory = shared_memory
        self.copy = copy
        if (observation_space is None) or (action_space is None):
            # Spawn a throwaway env just to read the spaces.
            dummy_env = env_fns[0]()
            observation_space = observation_space or dummy_env.observation_space
            action_space = action_space or dummy_env.action_space
            dummy_env.close()
            del dummy_env
        super(AsyncVectorEnv, self).__init__(num_envs=len(env_fns),
            observation_space=observation_space, action_space=action_space)
        if self.shared_memory:
            # Workers write observations directly into this shared buffer;
            # self.observations is a numpy view onto it.
            _obs_buffer = create_shared_memory(self.single_observation_space,
                n=self.num_envs, ctx=ctx)
            self.observations = read_from_shared_memory(_obs_buffer,
                self.single_observation_space, n=self.num_envs)
        else:
            _obs_buffer = None
            self.observations = create_empty_array(
                self.single_observation_space, n=self.num_envs, fn=np.zeros)
        self.parent_pipes, self.processes = [], []
        self.error_queue = ctx.Queue()
        target = _worker_shared_memory if self.shared_memory else _worker
        target = worker or target
        with clear_mpi_env_vars():
            for idx, env_fn in enumerate(self.env_fns):
                parent_pipe, child_pipe = ctx.Pipe()
                process = ctx.Process(target=target,
                    name='Worker<{0}>-{1}'.format(type(self).__name__, idx),
                    args=(idx, CloudpickleWrapper(env_fn), child_pipe,
                    parent_pipe, _obs_buffer, self.error_queue))
                self.parent_pipes.append(parent_pipe)
                self.processes.append(process)
                process.daemon = daemon
                process.start()
                # Parent keeps only its end of the pipe.
                child_pipe.close()
        self._state = AsyncState.DEFAULT
        self._check_observation_spaces()
        # Last step's rewards/dones; dones also drive reset_done_async.
        self.rewards = np.zeros(shape=[self.num_envs], dtype=np.float32)
        self.dones = np.zeros(shape=[self.num_envs], dtype=np.bool_)
    def seed(self, seeds=None):
        # Seed every sub-environment; an int is expanded to seed+i per env.
        self._assert_is_running()
        if seeds is None:
            seeds = [None for _ in range(self.num_envs)]
        if isinstance(seeds, int):
            seeds = [seeds + i for i in range(self.num_envs)]
        assert len(seeds) == self.num_envs
        if self._state != AsyncState.DEFAULT:
            raise AlreadyPendingCallError('Calling `seed` while waiting '
                'for a pending call to `{0}` to complete.'.format(
                self._state.value), self._state.value)
        for pipe, seed in zip(self.parent_pipes, seeds):
            pipe.send(('seed', seed))
        _, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
        self._raise_if_errors(successes)
    """
    reset done
    """
    def reset_done_async(self):
        # Send 'reset_done' only to envs whose last step returned done=True.
        self._assert_is_running()
        if self._state != AsyncState.DEFAULT:
            raise AlreadyPendingCallError('Calling `reset_done_async` while waiting '
                'for a pending call to `{0}` to complete'.format(
                self._state.value), self._state.value)
        for i, pipe in enumerate(self.parent_pipes):
            if self.dones[i]:
                pipe.send(('reset_done', None))
        self._state = AsyncState.WAITING_RESET_DONE
    def reset_done_wait(self, timeout=None):
        # Receive replies only from the envs that were actually reset;
        # observations of the others are left untouched.
        self._assert_is_running()
        if self._state != AsyncState.WAITING_RESET_DONE:
            raise NoAsyncCallError('Calling `reset_done_wait` without any prior '
                'call to `reset_done_async`.', AsyncState.WAITING_RESET_DONE.value)
        if not self._poll(timeout):
            self._state = AsyncState.DEFAULT
            raise mp.TimeoutError('The call to `reset_done_wait` has timed out after '
                '{0} second{1}.'.format(timeout, 's' if timeout > 1 else ''))
        successes = []
        for i, pipe in enumerate(self.parent_pipes):
            if self.dones[i]:
                result, success = pipe.recv()
                successes.append(success)
                if not self.shared_memory:
                    self.observations[i] = result
            else:
                successes.append(True)
        self._raise_if_errors(successes)
        self._state = AsyncState.DEFAULT
        return deepcopy(self.observations) if self.copy else self.observations
    """
    reset obs
    """
    def reset_obs_async(self, obs, mask=None):
        # Force the selected envs (mask defaults to all) into the given
        # observations/states via the env's reset_obs.
        self._assert_is_running()
        if self._state != AsyncState.DEFAULT:
            raise AlreadyPendingCallError('Calling `reset_obs_async` while waiting '
                'for a pending call to `{0}` to complete'.format(
                self._state.value), self._state.value)
        self._reset_mask = mask if mask is not None else np.ones(shape=(self.num_envs,), dtype=np.bool_)
        for i, (pipe, ob) in enumerate(zip(self.parent_pipes, obs)):
            if self._reset_mask[i]:
                pipe.send(('reset_obs', ob))
        self._state = AsyncState.WAITING_RESET_OBS
    def reset_obs_wait(self, timeout=None):
        self._assert_is_running()
        if self._state != AsyncState.WAITING_RESET_OBS:
            raise NoAsyncCallError('Calling `reset_obs_wait` without any prior '
                'call to `reset_obs_async`.', AsyncState.WAITING_RESET_OBS.value)
        if not self._poll(timeout):
            self._state = AsyncState.DEFAULT
            raise mp.TimeoutError('The call to `reset_wait` has timed out after '
                '{0} second{1}.'.format(timeout, 's' if timeout > 1 else ''))
        successes = []
        for i, pipe in enumerate(self.parent_pipes):
            if self._reset_mask[i]:
                result, success = pipe.recv()
                successes.append(success)
                if not self.shared_memory:
                    self.observations[i] = result
            else:
                successes.append(True)
        self._raise_if_errors(successes=successes)
        self._state = AsyncState.DEFAULT
        return deepcopy(self.observations) if self.copy else self.observations
    """
    reset
    """
    def reset_async(self):
        self._assert_is_running()
        if self._state != AsyncState.DEFAULT:
            raise AlreadyPendingCallError('Calling `reset_async` while waiting '
                'for a pending call to `{0}` to complete'.format(
                self._state.value), self._state.value)
        for pipe in self.parent_pipes:
            pipe.send(('reset', None))
        self._state = AsyncState.WAITING_RESET
    def reset_wait(self, timeout=None):
        """
        Parameters
        ----------
        timeout : int or float, optional
            Number of seconds before the call to `reset_wait` times out. If
            `None`, the call to `reset_wait` never times out.
        Returns
        -------
        observations : sample from `observation_space`
            A batch of observations from the vectorized environment.
        """
        self._assert_is_running()
        if self._state != AsyncState.WAITING_RESET:
            raise NoAsyncCallError('Calling `reset_wait` without any prior '
                'call to `reset_async`.', AsyncState.WAITING_RESET.value)
        if not self._poll(timeout):
            self._state = AsyncState.DEFAULT
            raise mp.TimeoutError('The call to `reset_wait` has timed out after '
                '{0} second{1}.'.format(timeout, 's' if timeout > 1 else ''))
        results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
        self._raise_if_errors(successes)
        self._state = AsyncState.DEFAULT
        if not self.shared_memory:
            concatenate(results, self.observations, self.single_observation_space)
        return deepcopy(self.observations) if self.copy else self.observations
    """
    step
    """
    def step_async(self, actions, mask=None):
        """
        Parameters
        ----------
        actions : iterable of samples from `action_space`
            List of actions.
        """
        # mask selects which envs actually step; the rest keep stale state.
        self._mask = mask if mask is not None else np.ones(shape=(self.num_envs,), dtype=np.bool_)
        self._assert_is_running()
        if self._state != AsyncState.DEFAULT:
            raise AlreadyPendingCallError('Calling `step_async` while waiting '
                'for a pending call to `{0}` to complete.'.format(
                self._state.value), self._state.value)
        for i, (pipe, action) in enumerate(zip(self.parent_pipes, actions)):
            if self._mask[i]:
                pipe.send(('step', action))
        self._state = AsyncState.WAITING_STEP
    def step_wait(self, timeout=None):
        """
        Parameters
        ----------
        timeout : int or float, optional
            Number of seconds before the call to `step_wait` times out. If
            `None`, the call to `step_wait` never times out.
        Returns
        -------
        observations : sample from `observation_space`
            A batch of observations from the vectorized environment.
        rewards : `np.ndarray` instance (dtype `np.float_`)
            A vector of rewards from the vectorized environment.
        dones : `np.ndarray` instance (dtype `np.bool_`)
            A vector whose entries indicate whether the episode has ended.
        infos : list of dict
            A list of auxiliary diagnostic information.
        """
        self._assert_is_running()
        if self._state != AsyncState.WAITING_STEP:
            raise NoAsyncCallError('Calling `step_wait` without any prior call '
                'to `step_async`.', AsyncState.WAITING_STEP.value)
        if not self._poll(timeout):
            self._state = AsyncState.DEFAULT
            raise mp.TimeoutError('The call to `step_wait` has timed out after '
                '{0} second{1}.'.format(timeout, 's' if timeout > 1 else ''))
        infos = []
        successes = []
        for i, pipe in enumerate(self.parent_pipes):
            if self._mask[i]:
                result, success = pipe.recv()
                successes.append(success)
                observation, reward, done, info = result
                self.rewards[i] = reward
                self.dones[i] = done
                infos.append(info)
                if not self.shared_memory:
                    self.observations[i] = observation
            else:
                # Masked-out envs did not step: rewards/dones keep their
                # previous values and the info is empty.
                infos.append({})
                successes.append(True)
        self._raise_if_errors(successes=successes)
        self._state = AsyncState.DEFAULT
        return (deepcopy(self.observations) if self.copy else self.observations,
                np.copy(self.rewards) if self.copy else self.rewards,
                np.array(self.dones, dtype=np.bool_) if self.copy else self.dones, infos)
    def close_extras(self, timeout=None, terminate=False):
        """
        Parameters
        ----------
        timeout : int or float, optional
            Number of seconds before the call to `close` times out. If `None`,
            the call to `close` never times out. If the call to `close` times
            out, then all processes are terminated.
        terminate : bool (default: `False`)
            If `True`, then the `close` operation is forced and all processes
            are terminated.
        """
        timeout = 0 if terminate else timeout
        try:
            if self._state != AsyncState.DEFAULT:
                logger.warn('Calling `close` while waiting for a pending '
                    'call to `{0}` to complete.'.format(self._state.value))
                # Drain the pending call: the wait method name is derived
                # from the AsyncState value ('<value>_wait').
                function = getattr(self, '{0}_wait'.format(self._state.value))
                function(timeout)
        except mp.TimeoutError:
            terminate = True
        if terminate:
            for process in self.processes:
                if process.is_alive():
                    process.terminate()
        else:
            # Graceful shutdown: tell every worker to exit, then drain replies.
            for pipe in self.parent_pipes:
                if (pipe is not None) and (not pipe.closed):
                    pipe.send(('close', None))
            for pipe in self.parent_pipes:
                if (pipe is not None) and (not pipe.closed):
                    pipe.recv()
        for pipe in self.parent_pipes:
            if pipe is not None:
                pipe.close()
        for process in self.processes:
            process.join()
    def _poll(self, timeout=None):
        # True iff every pipe has data available within the shared deadline.
        self._assert_is_running()
        if timeout is None:
            return True
        end_time = time.time() + timeout
        delta = None
        for pipe in self.parent_pipes:
            delta = max(end_time - time.time(), 0)
            if pipe is None:
                return False
            if pipe.closed or (not pipe.poll(delta)):
                return False
        return True
    def _check_observation_spaces(self):
        self._assert_is_running()
        for pipe in self.parent_pipes:
            pipe.send(('_check_observation_space', self.single_observation_space))
        same_spaces, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
        self._raise_if_errors(successes)
        if not all(same_spaces):
            raise RuntimeError('Some environments have an observation space '
                'different from `{0}`. In order to batch observations, the '
                'observation spaces from all environments must be '
                'equal.'.format(self.single_observation_space))
    def _assert_is_running(self):
        if self.closed:
            raise ClosedEnvironmentError('Trying to operate on `{0}`, after a '
                'call to `close()`.'.format(type(self).__name__))
    def _raise_if_errors(self, successes):
        # Any False success means a worker crashed: drain its traceback from
        # the error queue, shut its pipe and re-raise in the parent.
        if all(successes):
            return
        num_errors = self.num_envs - sum(successes)
        assert num_errors > 0
        for _ in range(num_errors):
            index, exctype, value = self.error_queue.get()
            logger.error('Received the following error from Worker-{0}: '
                '{1}: {2}'.format(index, exctype.__name__, value))
            logger.error('Shutting down Worker-{0}.'.format(index))
            self.parent_pipes[index].close()
            self.parent_pipes[index] = None
        logger.error('Raising the last exception back to the main process.')
        raise exctype(value)
def _worker(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
    """Subprocess loop (no shared memory): answer (command, data) messages on
    the pipe until 'close'; observations are sent back through the pipe.

    On any exception, push (index, exc_type, exc_value) to error_queue and
    reply (None, False) so the parent can raise.
    """
    assert shared_memory is None
    env = env_fn()
    # Close the parent's end inherited by this process.
    parent_pipe.close()
    try:
        while True:
            command, data = pipe.recv()
            if command == 'reset':
                observation = env.reset()
                pipe.send((observation, True))
            elif command == 'step':
                observation, reward, done, info = env.step(data)
                pipe.send(((observation, reward, done, info), True))
            elif command == 'seed':
                env.seed(data)
                pipe.send((None, True))
            elif command == 'close':
                pipe.send((None, True))
                break
            elif command == '_check_observation_space':
                pipe.send((data == env.observation_space, True))
            elif command == 'reset_done':
                # Parent only sends this to envs flagged done, so a plain
                # reset is sufficient here.
                observation = env.reset()
                pipe.send((observation, True))
            elif command == 'reset_obs':
                # Requires the wrapped env to implement reset_obs(obs).
                observation = env.reset_obs(data)
                pipe.send((observation, True))
            else:
                raise RuntimeError('Received unknown command `{0}`. Must '
                    'be one of {`reset`, `step`, `seed`, `close`, '
                    '`_check_observation_space`, `reset_done`, `reset_obs`}.'.format(command))
    except (KeyboardInterrupt, Exception):
        error_queue.put((index,) + sys.exc_info()[:2])
        pipe.send((None, False))
    finally:
        env.close()
def _worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
    """Subprocess loop (shared-memory variant): identical protocol to _worker,
    but observations are written into the shared buffer at slot `index` and
    the pipe reply carries None in the observation position."""
    assert shared_memory is not None
    env = env_fn()
    observation_space = env.observation_space
    # Close the parent's end inherited by this process.
    parent_pipe.close()
    try:
        while True:
            command, data = pipe.recv()
            if command == 'reset':
                observation = env.reset()
                write_to_shared_memory(index, observation, shared_memory,
                    observation_space)
                pipe.send((None, True))
            elif command == 'step':
                observation, reward, done, info = env.step(data)
                write_to_shared_memory(index, observation, shared_memory,
                    observation_space)
                pipe.send(((None, reward, done, info), True))
            elif command == 'seed':
                env.seed(data)
                pipe.send((None, True))
            elif command == 'close':
                pipe.send((None, True))
                break
            elif command == '_check_observation_space':
                pipe.send((data == observation_space, True))
            elif command == 'reset_done':
                observation = env.reset()
                write_to_shared_memory(index, observation, shared_memory,
                    observation_space)
                pipe.send((None, True))
            elif command == 'reset_obs':
                # Requires the wrapped env to implement reset_obs(obs).
                observation = env.reset_obs(data)
                write_to_shared_memory(index, observation, shared_memory,
                    observation_space)
                pipe.send((None, True))
            else:
                raise RuntimeError('Received unknown command `{0}`. Must '
                    'be one of {`reset`, `step`, `seed`, `close`, '
                    '`_check_observation_space`, `reset_done`, `reset_obs`}.'.format(command))
    except (KeyboardInterrupt, Exception):
        error_queue.put((index,) + sys.exc_info()[:2])
        pipe.send((None, False))
    finally:
        env.close()
import numpy as np
from gym.vector.utils import create_empty_array
from .vector_env import VectorEnv
__all__ = ['SyncVectorEnv']
class SyncVectorEnv(VectorEnv):
    """Vectorized environment that runs its sub-environments serially in the
    calling process.

    Parameters
    ----------
    env_fns : iterable of callable
        Factories that build the sub-environments.
    observation_space : `gym.spaces.Space` instance, optional
        Single-env observation space; taken from the first env when `None`.
    action_space : `gym.spaces.Space` instance, optional
        Single-env action space; taken from the first env when `None`.
    copy : bool (default: `True`)
        When `True`, `reset`/`step` return a copy of the observation batch.
    """
    def __init__(self, env_fns, observation_space=None, action_space=None,
                 copy=True):
        self.env_fns = env_fns
        self.envs = [make_env() for make_env in env_fns]
        self.copy = copy
        if observation_space is None or action_space is None:
            first_env = self.envs[0]
            observation_space = observation_space or first_env.observation_space
            action_space = action_space or first_env.action_space
        super(SyncVectorEnv, self).__init__(num_envs=len(env_fns),
            observation_space=observation_space, action_space=action_space)
        self._check_observation_spaces()
        # Batched observation buffer plus per-env reward/done scratch arrays.
        self.observations = create_empty_array(self.single_observation_space,
                                               n=self.num_envs, fn=np.zeros)
        self._rewards = np.zeros((self.num_envs,), dtype=np.float64)
        self._dones = np.zeros((self.num_envs,), dtype=np.bool_)
        self._actions = None
    def seed(self, seeds=None):
        # An int seed is expanded to seed+i per environment.
        if seeds is None:
            seeds = [None] * self.num_envs
        elif isinstance(seeds, int):
            seeds = [seeds + offset for offset in range(self.num_envs)]
        assert len(seeds) == self.num_envs
        for env, env_seed in zip(self.envs, seeds):
            env.seed(env_seed)
    """
    reset done
    """
    def reset_done_wait(self):
        # Reset only the environments whose last step ended the episode.
        for idx, env in enumerate(self.envs):
            if self._dones[idx]:
                self.observations[idx] = env.reset()
        return np.copy(self.observations) if self.copy else self.observations
    """
    reset obs
    """
    def reset_obs_async(self, obs, mask=None):
        self._reset_obs = obs
        self._reset_mask = np.ones(shape=(self.num_envs,), dtype=np.bool_) if mask is None else mask
    def reset_obs_wait(self, **kwargs):
        # Force the masked environments into the observations recorded by
        # reset_obs_async (requires env.reset_obs support).
        for idx, env in enumerate(self.envs):
            if self._reset_mask[idx]:
                self.observations[idx] = env.reset_obs(self._reset_obs[idx])
        return np.copy(self.observations) if self.copy else self.observations
    """
    reset
    """
    def reset_wait(self):
        for idx, env in enumerate(self.envs):
            self.observations[idx] = env.reset()
        return np.copy(self.observations) if self.copy else self.observations
    """
    step
    """
    def step_async(self, actions, mask=None):
        self._actions = actions
        self._mask = np.ones(shape=(self.num_envs,), dtype=np.bool_) if mask is None else mask
    def step_wait(self):
        # Step only the masked environments; the rest contribute an empty info
        # and keep their previous reward/done/observation entries.
        infos = []
        for idx, (env, action, active) in enumerate(zip(self.envs, self._actions, self._mask)):
            if not active:
                infos.append({})
                continue
            obs, rew, done, info = env.step(action)
            self.observations[idx] = obs
            self._rewards[idx] = rew
            self._dones[idx] = done
            infos.append(info)
        obs_batch = np.copy(self.observations) if self.copy else self.observations
        return obs_batch, np.copy(self._rewards), np.copy(self._dones), infos
    def close_extras(self, **kwargs):
        for env in self.envs:
            env.close()
    def _check_observation_spaces(self):
        # Every sub-env must expose exactly the single observation space.
        if all(env.observation_space == self.single_observation_space for env in self.envs):
            return True
        raise RuntimeError('Some environments have an observation space '
            'different from `{0}`. In order to batch observations, the '
            'observation spaces from all environments must be '
            'equal.'.format(self.single_observation_space))
try:
from collections.abc import Iterable
except ImportError:
Iterable = (tuple, list)
from .async_vector_env import AsyncVectorEnv
from .sync_vector_env import SyncVectorEnv
from .vector_env import VectorEnv
def make(id, num_envs=1, asynchronous=True, wrappers=None, **kwargs):
    """Create a vectorized environment from multiple copies of an environment,
    from its id
    Parameters
    ----------
    id : str
        The environment ID. This must be a valid ID from the registry.
    num_envs : int
        Number of copies of the environment.
    asynchronous : bool (default: `True`)
        If `True`, wraps the environments in an `AsyncVectorEnv` (which uses
        `multiprocessing` to run the environments in parallel). If `False`,
        wraps the environments in a `SyncVectorEnv`.
    wrappers : Callable or Iterable of Callables (default: `None`)
        If not `None`, then apply the wrappers to each internal
        environment during creation.
    **kwargs
        Extra keyword arguments forwarded to `gym.make` for each copy.
    Returns
    -------
    env : `gym.vector.VectorEnv` instance
        The vectorized environment.
    Example
    -------
    >>> import gym
    >>> env = gym.vector.make('CartPole-v1', 3)
    >>> env.reset()
    array([[-0.04456399, 0.04653909, 0.01326909, -0.02099827],
    [ 0.03073904, 0.00145001, -0.03088818, -0.03131252],
    [ 0.03468829, 0.01500225, 0.01230312, 0.01825218]],
    dtype=float32)
    """
    from gym.envs import make as make_

    def _make_env():
        env = make_(id, **kwargs)
        if wrappers is not None:
            if callable(wrappers):
                env = wrappers(env)
            elif isinstance(wrappers, Iterable) and all([callable(w) for w in wrappers]):
                for wrapper in wrappers:
                    env = wrapper(env)
            else:
                raise NotImplementedError
        return env

    env_fns = [_make_env for _ in range(num_envs)]
    # Fix: **kwargs are gym.make() arguments already consumed inside
    # _make_env; forwarding them to the vector-env constructors raised
    # TypeError for any env-construction kwarg.
    if asynchronous:
        return AsyncVectorEnv(env_fns, shared_memory=True, copy=False)
    return SyncVectorEnv(env_fns)
import inspect
import sys
import numpy as np
from .base import ModelBasedStaticFn
model_based_wrapper_dict = {}
class ReacherFn(ModelBasedStaticFn):
    # Static fns for Reacher-v2. No reward fn is provided (reward = False)
    # and no terminate_fn_* overrides appear here, so termination behavior
    # comes from the ModelBasedStaticFn base class —
    # NOTE(review): confirm the base-class default semantics for terminate=True.
    reward = False
    terminate = True
    env_name = ['Reacher-v2']
class HopperFn(ModelBasedStaticFn):
    """Static termination function for Hopper-v2.

    Mirrors gym's Hopper `done` condition: the next state must be finite
    (numpy branch), every state component after the height must satisfy
    |s| < 100, the torso height must exceed 0.7 and |torso angle| < 0.2.
    """
    reward = False
    terminate = True
    env_name = ['Hopper-v2']

    @staticmethod
    def terminate_fn_tf_batch(states, actions, next_states):
        import tensorflow as tf
        height = next_states[:, 0]
        angle = next_states[:, 1]
        # Fix: gym checks |s| < 100; the original dropped the abs() and so
        # never terminated on large negative state components.
        t1 = tf.reduce_all(tf.abs(next_states[:, 1:]) < 100., axis=-1)
        t2 = height > 0.7
        t3 = tf.abs(angle) < 0.2
        not_done = tf.logical_and(tf.logical_and(t1, t2), t3)
        return tf.logical_not(not_done)

    @staticmethod
    def terminate_fn_torch_batch(states, actions, next_states):
        import torch
        height = next_states[:, 0]
        angle = next_states[:, 1]
        # Fix: same missing abs() as the tf branch.
        t1 = torch.all(torch.abs(next_states[:, 1:]) < 100., dim=-1)
        t2 = height > 0.7
        t3 = torch.abs(angle) < 0.2
        not_done = t1 & t2 & t3
        return torch.logical_not(not_done)

    @staticmethod
    def terminate_fn_numpy_batch(states, actions, next_states):
        assert len(states.shape) == len(next_states.shape) == len(actions.shape) == 2
        height = next_states[:, 0]
        angle = next_states[:, 1]
        # Fix: the original computed np.abs(x < 100) — abs of a *boolean*
        # array, which is a no-op — so the lower bound -100 was silently
        # ignored. Gym checks np.abs(s) < 100.
        not_done = np.isfinite(next_states).all(axis=-1) \
                   * (np.abs(next_states[:, 1:]) < 100).all(axis=-1) \
                   * (height > .7) \
                   * (np.abs(angle) < .2)
        done = ~not_done
        return done
class Walker2dFn(ModelBasedStaticFn):
    """Static termination function for Walker2d-v2.

    The walker is alive while the torso height stays in (0.8, 2.0) and the
    torso angle stays in (-1.0, 1.0); `done` is the negation of that.
    """
    reward = False
    terminate = True
    env_name = ['Walker2d-v2']

    @staticmethod
    def terminate_fn_tf_batch(states, actions, next_states):
        import tensorflow as tf
        height = next_states[:, 0]
        angle = next_states[:, 1]
        t1 = tf.logical_and(height > 0.8, height < 2.0)
        t2 = tf.logical_and(angle > -1.0, angle < 1.0)
        not_done = tf.logical_and(t1, t2)
        return tf.logical_not(not_done)

    @staticmethod
    def terminate_fn_torch_batch(states, actions, next_states):
        import torch
        height = next_states[:, 0]
        angle = next_states[:, 1]
        # Bug fix: `&` binds tighter than comparisons in Python, so the
        # original `height > 0.8 & height < 2.0` parsed as
        # `height > (0.8 & height) < 2.0` and raised at runtime.
        t1 = (height > 0.8) & (height < 2.0)
        t2 = (angle > -1.0) & (angle < 1.0)
        not_done = t1 & t2
        return torch.logical_not(not_done)

    @staticmethod
    def terminate_fn_numpy_batch(states, actions, next_states):
        assert len(states.shape) == len(next_states.shape) == len(actions.shape) == 2
        height = next_states[:, 0]
        angle = next_states[:, 1]
        not_done = (height > 0.8) \
                   * (height < 2.0) \
                   * (angle > -1.0) \
                   * (angle < 1.0)
        done = ~not_done
        return done
class HalfCheetahFn(ModelBasedStaticFn):
    """Static functions for HalfCheetah-v2.

    No terminate_fn_* overrides are provided (HalfCheetah never terminates
    early), so the base-class default presumably applies — TODO confirm
    against ModelBasedStaticFn.
    """
    reward = False
    terminate = True
    env_name = ['HalfCheetah-v2']
class AntFn(ModelBasedStaticFn):
    """Static termination rule for Ant: the agent stays alive while the first
    observation dimension (torso height) remains inside [0.2, 1.0] (and, in
    the numpy backend, the full state is finite)."""
    reward = False
    terminate = True
    env_name = ['Ant-v2', 'AntTruncatedObs-v2']

    @staticmethod
    def terminate_fn_numpy_batch(states, actions, next_states):
        assert len(states.shape) == len(next_states.shape) == len(actions.shape) == 2
        torso_z = next_states[:, 0]
        # Boolean multiplication acts as element-wise AND here.
        alive = np.isfinite(next_states).all(axis=-1) * (torso_z >= 0.2) * (torso_z <= 1.0)
        return ~alive

    @staticmethod
    def terminate_fn_torch_batch(states, actions, next_states):
        import torch
        torso_z = next_states[:, 0]
        alive = torch.logical_and(torso_z >= 0.2, torso_z <= 1.0)
        return torch.logical_not(alive)

    @staticmethod
    def terminate_fn_tf_batch(states, actions, next_states):
        import tensorflow as tf
        torso_z = next_states[:, 0]
        alive = tf.logical_and(torso_z >= 0.2, torso_z <= 1.0)
        return tf.logical_not(alive)
class HumanoidFn(ModelBasedStaticFn):
    """Static termination rule for Humanoid-v2: done whenever the torso
    height (first observation dimension) leaves the interval (1.0, 2.0)."""
    reward = False
    terminate = True
    env_name = ['Humanoid-v2']

    @staticmethod
    def terminate_fn_numpy_batch(states, actions, next_states):
        assert len(states.shape) == len(next_states.shape) == len(actions.shape) == 2
        z = next_states[:, 0]
        # Equivalent to the original boolean addition (z < 1.0) + (z > 2.0).
        return np.logical_or(z < 1.0, z > 2.0)

    @staticmethod
    def terminate_fn_torch_batch(states, actions, next_states):
        z = next_states[:, 0]
        return (z < 1.0) | (z > 2.0)

    @staticmethod
    def terminate_fn_tf_batch(states, actions, next_states):
        import tensorflow as tf
        z = next_states[:, 0]
        return tf.logical_or(z < 1.0, z > 2.0)
def register():
    """Scan this module for wrapper classes and index them by environment id."""
    for _, member in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(member):
            # Avoid shadowing the outer loop variable (the original reused `name`).
            for env_id in member().env_name:
                model_based_wrapper_dict[env_id] = member
# Populate the registry exactly once, at import time.
if len(model_based_wrapper_dict) == 0:
    register()
import numpy as np
from .base import ModelBasedStaticFn
class InvertedPendulumBulletEnvFn(ModelBasedStaticFn):
    """Static reward/termination functions for InvertedPendulumBulletEnv-v0.

    The pole angle theta is recovered from its (cos, sin) encoding stored at
    observation indices 2 and 3.  The episode terminates once |theta| > 0.2,
    and the reward is -|theta| (as float32).
    """
    env_name = ['InvertedPendulumBulletEnv-v0']
    terminate = True
    reward = True

    @staticmethod
    def terminate_fn_numpy_batch(states, actions, next_states):
        angle = np.arctan2(next_states[:, 3], next_states[:, 2])
        return np.abs(angle) > .2

    @staticmethod
    def terminate_fn_torch_batch(states, actions, next_states):
        import torch
        angle = torch.atan2(next_states[:, 3], next_states[:, 2])
        return torch.abs(angle) > .2

    @staticmethod
    def terminate_fn_tf_batch(states, actions, next_states):
        import tensorflow as tf
        angle = tf.atan2(next_states[:, 3], next_states[:, 2])
        return tf.abs(angle) > .2

    @staticmethod
    def reward_fn_tf_batch(states, actions, next_states):
        import tensorflow as tf
        angle = tf.atan2(next_states[:, 3], next_states[:, 2])
        return -tf.cast(tf.abs(angle), tf.float32)

    @staticmethod
    def reward_fn_torch_batch(states, actions, next_states):
        import torch
        angle = torch.atan2(next_states[:, 3], next_states[:, 2])
        return -torch.abs(angle).float()

    @staticmethod
    def reward_fn_numpy_batch(states, actions, next_states):
        angle = np.arctan2(next_states[:, 3], next_states[:, 2])
        return -np.abs(angle).astype(np.float32)
class InvertedPendulumSwingupBulletEnvFn(ModelBasedStaticFn):
    """Static functions for InvertedPendulumSwingupBulletEnv-v0.

    The reward is -next_states[:, 2] in every backend; index 2 presumably
    holds the (negated) cos(theta) component of the observation — TODO
    confirm against the pybullet observation layout.  terminate=True, but
    no terminate_fn_* is overridden, so the base-class default applies.
    """
    env_name = ['InvertedPendulumSwingupBulletEnv-v0']
    reward = True
    terminate = True

    @staticmethod
    def reward_fn_tf_batch(states, actions, next_states):
        return -next_states[:, 2]

    @staticmethod
    def reward_fn_numpy_batch(states, actions, next_states):
        return -next_states[:, 2]

    @staticmethod
    def reward_fn_torch_batch(states, actions, next_states):
        return -next_states[:, 2]
class ReacherBulletEnvFn(ModelBasedStaticFn):
    """Static reward function for ReacherBulletEnv-v0.

    The reward mirrors the env's shaped reward: the decrease in the
    distance-to-target potential, minus an electricity cost on the applied
    torques and a stuck-joint penalty when |gamma| is pinned near 1.
    """
    env_name = ['ReacherBulletEnv-v0']
    terminate = True
    reward = True

    # Bug fix: the three reward functions below were plain functions; every
    # other class in this module declares them @staticmethod, and calling
    # them on an instance would have passed `self` as `states`.
    @staticmethod
    def reward_fn_tf_batch(states, actions, next_states):
        import tensorflow as tf
        old_to_target_vec = states[:, 2:4]
        to_target_vec = next_states[:, 2:4]
        theta_dot = next_states[:, 6]
        gamma = next_states[:, 7]
        gamma_dot = next_states[:, 8]
        # Potential is the (scaled) Euclidean distance to the target.
        old_potential = 100 * tf.sqrt(tf.reduce_sum(old_to_target_vec ** 2, axis=-1))
        potential = 100 * tf.sqrt(tf.reduce_sum(to_target_vec ** 2, axis=-1))
        electricity_cost = (
                0.10 * (tf.abs(actions[:, 0] * theta_dot) + tf.abs(actions[:, 1] * gamma_dot))
                + 0.01 * (tf.abs(actions[:, 0]) + tf.abs(actions[:, 1]))
        )
        stuck_joint_cost = 0.1 * tf.cast((tf.abs(tf.abs(gamma) - 1) < 0.01), dtype=tf.float32)
        cost = potential - old_potential + electricity_cost + stuck_joint_cost
        return -cost

    @staticmethod
    def reward_fn_numpy_batch(states, actions, next_states):
        old_to_target_vec = states[:, 2:4]
        to_target_vec = next_states[:, 2:4]
        theta_dot = next_states[:, 6]
        gamma = next_states[:, 7]
        gamma_dot = next_states[:, 8]
        old_potential = 100 * np.sqrt(np.sum(old_to_target_vec ** 2, axis=-1))
        potential = 100 * np.sqrt(np.sum(to_target_vec ** 2, axis=-1))
        electricity_cost = (
                0.10 * (np.abs(actions[:, 0] * theta_dot) + np.abs(actions[:, 1] * gamma_dot))
                + 0.01 * (np.abs(actions[:, 0]) + np.abs(actions[:, 1]))
        )
        stuck_joint_cost = 0.1 * (np.abs(np.abs(gamma) - 1) < 0.01).astype(np.float32)
        cost = potential - old_potential + electricity_cost + stuck_joint_cost
        return -cost

    @staticmethod
    def reward_fn_torch_batch(states, actions, next_states):
        import torch
        old_to_target_vec = states[:, 2:4]
        to_target_vec = next_states[:, 2:4]
        theta_dot = next_states[:, 6]
        gamma = next_states[:, 7]
        gamma_dot = next_states[:, 8]
        old_potential = 100 * torch.sqrt(torch.sum(old_to_target_vec ** 2, dim=-1))
        potential = 100 * torch.sqrt(torch.sum(to_target_vec ** 2, dim=-1))
        electricity_cost = (
                0.10 * (torch.abs(actions[:, 0] * theta_dot) + torch.abs(actions[:, 1] * gamma_dot))
                + 0.01 * (torch.abs(actions[:, 0]) + torch.abs(actions[:, 1]))
        )
        stuck_joint_cost = 0.1 * (torch.abs(torch.abs(gamma) - 1) < 0.01).float()
        cost = potential - old_potential + electricity_cost + stuck_joint_cost
        return -cost
class HopperBulletEnvFn(ModelBasedStaticFn):
    """Static termination rule for HopperBulletEnv-v0.

    The env's alive bonus is +1 if z > 0.8 and |pitch| < 1.0, else -1.  With
    initial_z = 1.25 and observations storing the height *offset* at index 0
    (pitch at index 7), this becomes: done when z - 1.25 <= -0.45 or
    |pitch| >= 1.0.
    """
    env_name = ['HopperBulletEnv-v0']
    terminate = True
    reward = False

    # the initial_z is 1.25
    @staticmethod
    def terminate_fn_tf_batch(states, actions, next_states):
        import tensorflow as tf
        # +1 if z > 0.8 and abs(pitch) < 1.0 else -1
        z = next_states[:, 0]
        p = next_states[:, 7]
        return tf.logical_or(z <= -0.45, tf.abs(p) >= 1.0)

    @staticmethod
    def terminate_fn_numpy_batch(states, actions, next_states):
        # +1 if z > 0.8 and abs(pitch) < 1.0 else -1
        z = next_states[:, 0]
        p = next_states[:, 7]
        return np.logical_or(z <= -0.45, np.abs(p) >= 1.0)

    @staticmethod
    def terminate_fn_torch_batch(states, actions, next_states):
        import torch
        # Bug fix: `|` binds tighter than `>=`/`<=`, so the original
        # `torch.abs(p) >= 1.0 | z <= -0.45` parsed as a chained comparison
        # against `1.0 | z` and failed at runtime.  Match the tf/numpy rule.
        z = next_states[:, 0]
        p = next_states[:, 7]
        return torch.logical_or(z <= -0.45, torch.abs(p) >= 1.0)
class Walker2DBulletEnvFn(HopperBulletEnvFn):
    # Walker2D shares the Hopper height/pitch termination rule.
    env_name = ['Walker2DBulletEnv-v0']
class HalfCheetahBulletEnvFn(HopperBulletEnvFn):
    # NOTE(review): inherits the Hopper height/pitch termination rule;
    # confirm this matches HalfCheetahBulletEnv's actual alive condition.
    env_name = ['HalfCheetahBulletEnv-v0']
    terminate = True
    reward = False
class AntBulletEnvFn(HopperBulletEnvFn):
    """Static termination rule for AntBulletEnv-v0.

    The env's alive bonus is +1 while z > 0.26; with the observation storing
    the height offset from the initial height (presumably 0.75 — TODO
    confirm), this corresponds to terminating once z <= -0.49.
    """
    env_name = ['AntBulletEnv-v0']
    terminate = True
    reward = False

    @staticmethod
    def terminate_fn_tf_batch(states, actions, next_states):
        # +1 if z > 0.26 else -1
        z = next_states[:, 0]
        # Pure comparison, so the same expression works for tf tensors.
        return z <= -0.49

    @staticmethod
    def terminate_fn_numpy_batch(states, actions, next_states):
        # +1 if z > 0.26 else -1
        z = next_states[:, 0]
        return z <= -0.49

    @staticmethod
    def terminate_fn_torch_batch(states, actions, next_states):
        # +1 if z > 0.26 else -1
        z = next_states[:, 0]
        return z <= -0.49
# retrieve all the class
import sys, inspect
# Registry mapping environment id -> static wrapper class; filled by register().
model_based_wrapper_dict = {}
def register():
    """Scan this module for wrapper classes and index them by environment id."""
    for _, member in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(member):
            # Avoid shadowing the outer loop variable (the original reused `name`).
            for env_id in member().env_name:
                model_based_wrapper_dict[env_id] = member
# Populate the registry exactly once, at import time.
if len(model_based_wrapper_dict) == 0:
    register()
class Schedule(object):
    """Abstract interface for a time-dependent scalar schedule."""

    def value(self, t):
        """Return the schedule's value at time step ``t``.

        Subclasses must override this method.
        """
        raise NotImplementedError()
class ExponentialScheduler(Schedule):
    """Exponentially decaying schedule.

    value(t) = minimum + (epsilon - minimum) * exp(-decay * t),
    i.e. it starts at ``epsilon`` at t = 0 and decays towards ``minimum``.
    """

    def __init__(self, epsilon=1.0, decay=1e-4, minimum=0.01):
        self.epsilon = epsilon  # initial value at t = 0
        self.decay = decay  # decay rate per time step
        self.minimum = minimum  # asymptotic lower bound

    def value(self, t):
        """See Schedule.value"""
        # Bug fix: this module never imports numpy, so the original
        # reference to `np.exp` raised NameError at call time.  numpy is
        # kept (over math.exp) so array-valued `t` still works.
        import numpy as np
        explore_p = self.minimum + (self.epsilon - self.minimum) * np.exp(-self.decay * t)
        return explore_p
class ConstantSchedule(Schedule):
    """Schedule that returns the same value at every time step."""

    def __init__(self, value):
        """Store ``value``, the constant output of the schedule."""
        self._v = value

    def value(self, t):
        """See Schedule.value"""
        return self._v
def linear_interpolation(l, r, alpha):
    """Return the point a fraction ``alpha`` of the way from ``l`` to ``r``."""
    span = r - l
    return l + alpha * span
class PiecewiseSchedule(Schedule):
    def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):
        """Piecewise schedule.
        endpoints: [(int, int)]
            list of pairs `(time, value)` meaning that the schedule should output
            `value` when `t==time`. All the values for time must be sorted in
            an increasing order. When t is between two times, e.g. `(time_a, value_a)`
            and `(time_b, value_b)`, such that `time_a <= t < time_b` then value outputs
            `interpolation(value_a, value_b, alpha)` where alpha is a fraction of
            time passed between `time_a` and `time_b` for time `t`.
        interpolation: lambda float, float, float: float
            a function that takes value to the left and to the right of t according
            to the `endpoints`. Alpha is the fraction of distance from left endpoint to
            right endpoint that t has covered. See linear_interpolation for example.
        outside_value: float
            value returned when `t` lies outside all the intervals specified in
            `endpoints`. If None, the value of the last endpoint is used instead.
        """
        idxes = [e[0] for e in endpoints]
        assert idxes == sorted(idxes)
        self._interpolation = interpolation
        if outside_value is None:
            # Default the out-of-range value to the last endpoint's value.
            self._outside_value = endpoints[-1][-1]
        else:
            self._outside_value = outside_value
        self._endpoints = endpoints

    def value(self, t):
        """See Schedule.value"""
        # Scan consecutive endpoint pairs for the interval containing t.
        for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
            if l_t <= t and t < r_t:
                alpha = float(t - l_t) / (r_t - l_t)
                return self._interpolation(l, r, alpha)
        # t does not belong to any of the pieces, so doom.
        assert self._outside_value is not None
        return self._outside_value
class LinearSchedule(Schedule):
    """Anneal linearly from ``initial_p`` to ``final_p`` over
    ``schedule_timesteps`` steps; after that, always return ``final_p``."""

    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        """
        Parameters
        ----------
        schedule_timesteps: int
            number of timesteps over which to anneal initial_p to final_p
        final_p: float
            output value after the annealing window
        initial_p: float
            output value at t = 0
        """
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p

    def value(self, t):
        """See Schedule.value"""
        # Fraction of the annealing window elapsed, clamped to 1.
        frac = min(1.0, float(t) / self.schedule_timesteps)
        return self.initial_p + frac * (self.final_p - self.initial_p)
import numpy as np
from rlutils.np.functional import discount_cumsum
from rlutils.np.functional import flatten_leading_dims
from .utils import combined_shape
class GAEBuffer(object):
    """
    A buffer for storing trajectories experienced by a PPO agent interacting
    with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
    for calculating the advantages of state-action pairs.

    All buffers are laid out as (num_envs, length, ...); `ptr` is the shared
    per-environment write position and `path_start_idx[i]` records where the
    current (unfinished) trajectory of environment i began.
    """

    def __init__(self, obs_shape, obs_dtype, act_shape, act_dtype, num_envs, length, gamma=0.99, lam=0.95):
        # combined_shape presumably yields (num_envs, length, *shape) — TODO confirm.
        self.obs_buf = np.zeros(shape=combined_shape(num_envs, (length, *obs_shape)), dtype=obs_dtype)
        self.act_buf = np.zeros(shape=combined_shape(num_envs, (length, *act_shape)), dtype=act_dtype)
        self.adv_buf = np.zeros(shape=(num_envs, length), dtype=np.float32)
        self.rew_buf = np.zeros(shape=(num_envs, length), dtype=np.float32)
        self.ret_buf = np.zeros(shape=(num_envs, length), dtype=np.float32)
        self.val_buf = np.zeros(shape=(num_envs, length), dtype=np.float32)
        self.logp_buf = np.zeros(shape=(num_envs, length), dtype=np.float32)
        self.gamma, self.lam = gamma, lam
        self.num_envs = num_envs
        self.max_size = length
        self.reset()

    @classmethod
    def from_vec_env(cls, vec_env, max_length, gamma, lam):
        """Build a buffer whose obs/act shapes and dtypes match a vectorized gym env."""
        obs_shape = vec_env.single_observation_space.shape
        obs_dtype = vec_env.single_observation_space.dtype
        act_shape = vec_env.single_action_space.shape
        act_dtype = vec_env.single_action_space.dtype
        buffer = cls(obs_shape=obs_shape, obs_dtype=obs_dtype, act_shape=act_shape, act_dtype=act_dtype,
                     num_envs=vec_env.num_envs, length=max_length, gamma=gamma, lam=lam)
        return buffer

    def reset(self):
        # Rewind the write pointer and mark every env's trajectory as starting at 0.
        self.ptr, self.path_start_idx = 0, np.zeros(shape=(self.num_envs), dtype=np.int32)

    def store(self, obs, act, rew, val, logp):
        """
        Append one timestep of agent-environment interaction to the buffer.
        """
        assert self.ptr < self.max_size  # buffer has to have room so you can store
        self.obs_buf[:, self.ptr] = obs
        self.act_buf[:, self.ptr] = act
        self.rew_buf[:, self.ptr] = rew
        self.val_buf[:, self.ptr] = val
        self.logp_buf[:, self.ptr] = logp
        self.ptr += 1

    def finish_path(self, dones, last_vals):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.
        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        """
        for i in range(self.num_envs):
            if dones[i]:
                path_slice = slice(self.path_start_idx[i], self.ptr)
                # Append the bootstrap value so deltas/returns see one extra step.
                rews = np.append(self.rew_buf[i, path_slice], last_vals[i])
                vals = np.append(self.val_buf[i, path_slice], last_vals[i])
                # the next two lines implement GAE-Lambda advantage calculation
                deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
                self.adv_buf[i, path_slice] = discount_cumsum(deltas, self.gamma * self.lam)
                # the next line computes rewards-to-go, to be targets for the value function
                self.ret_buf[i, path_slice] = discount_cumsum(rews, self.gamma)[:-1]
                self.path_start_idx[i] = self.ptr

    def get(self):
        """
        Call this at the end of an epoch to get all of the data from
        the buffer, with advantages appropriately normalized (shifted to have
        mean zero and std one). Also, resets some pointers in the buffer.
        """
        assert self.ptr == self.max_size  # buffer has to be full before you can get
        assert np.all(self.path_start_idx == self.ptr)
        self.reset()
        # ravel the data
        obs_buf = flatten_leading_dims(self.obs_buf, n_dims=2)
        act_buf = flatten_leading_dims(self.act_buf, n_dims=2)
        ret_buf = flatten_leading_dims(self.ret_buf, n_dims=2)
        adv_buf = flatten_leading_dims(self.adv_buf, n_dims=2)
        logp_buf = flatten_leading_dims(self.logp_buf, n_dims=2)
        # the next two lines implement the advantage normalization trick
        # NOTE(review): no epsilon is added, so a zero-variance advantage
        # batch would divide by zero — confirm this is acceptable.
        adv_mean, adv_std = np.mean(adv_buf), np.std(adv_buf)
        adv_buf = (adv_buf - adv_mean) / adv_std
        data = dict(obs=obs_buf, act=act_buf, ret=ret_buf,
                    adv=adv_buf, logp=logp_buf)
        return data
from abc import ABC, abstractmethod
from typing import Dict
import gym.spaces
import numpy as np
from gym.utils import seeding
from rlutils.np.functional import shuffle_dict_data
from .utils import combined_shape
class BaseReplayBuffer(ABC):
    """Abstract interface shared by all replay buffers.

    Concrete subclasses must implement ``__len__``, ``add``, ``capacity``
    and ``sample``; ``load`` and ``append`` are optional extensions and
    raise NotImplementedError by default.
    """

    def __init__(self, seed=None):
        self.set_seed(seed)

    def set_seed(self, seed=None):
        """(Re)initialize the buffer's RNG from ``seed`` via gym's seeding helper."""
        self.np_random, self.seed = seeding.np_random(seed)

    def reset(self):
        """Clear the buffer's contents.  No-op by default."""
        pass

    @abstractmethod
    def __len__(self):
        raise NotImplementedError

    @abstractmethod
    def add(self, data):
        raise NotImplementedError

    @property
    @abstractmethod
    def capacity(self):
        raise NotImplementedError

    @abstractmethod
    def sample(self):
        raise NotImplementedError

    def load(self, data):
        raise NotImplementedError

    def append(self, data):
        raise NotImplementedError

    def is_full(self):
        """True when the buffer holds exactly ``capacity`` items."""
        return len(self) == self.capacity

    def is_empty(self):
        """True when the buffer holds no items."""
        return len(self) <= 0
class PyReplayBuffer(BaseReplayBuffer):
    """
    A simple FIFO experience replay buffer backed by pre-allocated numpy arrays.

    Data is stored as a dict of arrays keyed by field name ('obs', 'act', ...),
    each shaped (capacity, *item_shape).  Writes via `add` wrap around once
    `capacity` items have been stored; `append` instead truncates.
    NOTE(review): `sample` is left abstract here, so this class is only
    instantiable through subclasses that define it — confirm intentional.
    """

    def __init__(self,
                 data_spec: Dict[str, gym.spaces.Space],
                 capacity,
                 batch_size,
                 seed=None,
                 **kwargs):
        """
        Args:
            data_spec: maps field name -> gym space describing one item's shape/dtype.
            capacity: maximum number of items held.
            batch_size: default sampling batch size.
            seed: RNG seed forwarded to BaseReplayBuffer.
        """
        super(PyReplayBuffer, self).__init__(seed=seed)
        self.max_size = capacity
        self.data_spec = data_spec
        # One pre-allocated array per field.
        self.storage = {key: np.zeros(combined_shape(self.capacity, item.shape), dtype=item.dtype)
                        for key, item in data_spec.items()}
        self.batch_size = batch_size
        self.reset()

    def reset(self):
        """Reset the write pointer and the current size."""
        self.ptr, self.size = 0, 0

    def __len__(self):
        return self.size

    def __getitem__(self, item):
        """ Make it compatible with Pytorch data loaders """
        return {key: data[item] for key, data in self.storage.items()}

    @property
    def capacity(self):
        return self.max_size

    @classmethod
    def from_data_dict(cls, data: Dict[str, np.ndarray], batch_size, shuffle=False, seed=None, **kwargs):
        """Build a full buffer sized to an in-memory dataset dict."""
        if shuffle:
            data = shuffle_dict_data(data)
        data_spec = {key: gym.spaces.Space(shape=item.shape[1:], dtype=item.dtype) for key, item in data.items()}
        capacity = list(data.values())[0].shape[0]
        replay_buffer = cls(data_spec=data_spec, capacity=capacity, batch_size=batch_size, seed=seed, **kwargs)
        replay_buffer.append(data=data)
        assert replay_buffer.is_full()
        return replay_buffer

    @classmethod
    def from_vec_env(cls, vec_env, capacity, batch_size, seed=None, **kwargs):
        """Create a standard (obs, act, next_obs, rew, done) buffer matching a vectorized env."""
        data_spec = {
            'obs': vec_env.single_observation_space,
            'act': vec_env.single_action_space,
            'next_obs': vec_env.single_observation_space,
            'rew': gym.spaces.Space(shape=None, dtype=np.float32),
            'done': gym.spaces.Space(shape=None, dtype=np.float32)
        }
        return cls(data_spec=data_spec, capacity=capacity, batch_size=batch_size, seed=seed, **kwargs)

    @classmethod
    def from_env(cls, env, capacity, batch_size, seed=None, **kwargs):
        """Create a standard (obs, act, next_obs, rew, done) buffer matching a single env."""
        data_spec = {
            'obs': env.observation_space,
            'act': env.action_space,
            'next_obs': env.observation_space,
            'rew': gym.spaces.Space(shape=None, dtype=np.float32),
            'done': gym.spaces.Space(shape=None, dtype=np.float32)
        }
        return cls(data_spec=data_spec, capacity=capacity, batch_size=batch_size, seed=seed, **kwargs)

    def append(self, data: Dict[str, np.ndarray]):
        """Bulk-load `data` sequentially, truncating what does not fit before `capacity`."""
        batch_size = list(data.values())[0].shape[0]
        for key, item in data.items():
            assert batch_size == item.shape[0], 'Mismatch batch size in the dataset'
        num_to_store = batch_size
        if self.ptr + batch_size > self.capacity:
            # Bug fix: the original printed this warning but then assigned the
            # full, untruncated `item` into a shorter slice, raising a numpy
            # broadcast error.  Truncate the data to the remaining space.
            num_to_store = self.capacity - self.ptr
            print(f'Truncated dataset due to limited capacity. Original size {batch_size}. '
                  f'Truncated size {num_to_store}')
        for key, item in data.items():
            self.storage[key][self.ptr:self.ptr + num_to_store] = item[:num_to_store]
        self.ptr = (self.ptr + num_to_store) % self.capacity
        self.size = min(self.size + num_to_store, self.capacity)

    def get(self):
        """Return every stored item, in storage order."""
        idxs = np.arange(self.size)
        return self.__getitem__(idxs)

    def add(self, data: Dict[str, np.ndarray]):
        """Add a batch of items, wrapping around FIFO-style when the end is reached.

        Assumes the batch size does not exceed `capacity`.
        """
        batch_size = list(data.values())[0].shape[0]
        for key, item in data.items():
            assert batch_size == item.shape[0], 'The batch size in the data is not consistent'
        wrap = self.ptr + batch_size > self.max_size
        if wrap:
            # Print once, instead of once per key as the original did.
            print('Reaches the end of the replay buffer')
        for key, item in data.items():
            if wrap:
                head = self.max_size - self.ptr
                self.storage[key][self.ptr:] = item[:head]
                self.storage[key][:batch_size - head] = item[head:]
            else:
                self.storage[key][self.ptr:self.ptr + batch_size] = item
        self.ptr = (self.ptr + batch_size) % self.capacity
        self.size = min(self.size + batch_size, self.capacity)
from collections import deque
try:
    import reverb
except ImportError:
    # Bug fix: the original bare `except:` also swallowed SystemExit,
    # KeyboardInterrupt and unrelated errors; only a missing package
    # should be tolerated here.
    print('Reverb is not installed.')
import tensorflow as tf
from .base import BaseReplayBuffer
class ReverbReplayBuffer(BaseReplayBuffer):
    """Uniform-sampling replay buffer backed by an in-process Deepmind Reverb server.

    Timesteps are appended to a single FIFO-evicted table and sampled back as
    fixed-length sequences of `update_horizon + frame_stack` steps.
    NOTE(review): BaseReplayBuffer.__init__ (seeding) is never invoked here —
    confirm intentional.
    """

    def __init__(self,
                 data_spec,
                 replay_capacity,
                 batch_size,
                 update_horizon=1,
                 frame_stack=1
                 ):
        """
        Args:
            data_spec: tf.TensorSpec
            replay_capacity (int): capacity of the replay buffer
            batch_size (int):
        """
        self.table_name = 'uniform_replay'
        # Uniform sampling with FIFO eviction; sampling is allowed as soon
        # as a single item is present (MinSize(1)).
        self.table = reverb.Table(
            name=self.table_name,
            sampler=reverb.selectors.Uniform(),
            remover=reverb.selectors.Fifo(),
            max_size=replay_capacity,
            rate_limiter=reverb.rate_limiters.MinSize(1),
        )
        self.server = reverb.Server(
            tables=[self.table]
        )
        self.client = reverb.Client(f'localhost:{self.server.port}')
        self.frame_stack = frame_stack
        # Each sampled sequence must span the n-step horizon plus the stacked frames.
        self.total_horizon = update_horizon + frame_stack
        self.replay_dataset = reverb.ReplayDataset(
            server_address=f'localhost:{self.server.port}',
            table=self.table_name,
            max_in_flight_samples_per_worker=10,
            sequence_length=self.total_horizon,
            dtypes=tf.nest.map_structure(lambda x: x.dtype, data_spec),
            shapes=tf.nest.map_structure(lambda x: x.shape, data_spec)
        )
        self.writer = self.client.writer(max_sequence_length=self.total_horizon)
        # Batch timesteps into sequences, then sequences into sampling batches.
        self.dataset = self.replay_dataset.batch(self.total_horizon).batch(batch_size).__iter__()
        # Timesteps appended so far, saturating at total_horizon: items are
        # only created once a full sequence has been buffered.
        self._num_items = 0
        self.replay_capacity = replay_capacity

    @property
    def capacity(self):
        return self.replay_capacity

    def __del__(self):
        # Close the writer so pending items are flushed when the buffer is GC'd.
        if hasattr(self, 'writer') and self.writer is not None:
            self.writer.close()

    def get_table_info(self):
        """Return the server-side info record for the replay table."""
        return self.client.server_info()[self.table_name]

    def __len__(self):
        return self.get_table_info().current_size

    def add(self, data, priority=1.0):
        """Append one timestep; once a full sequence exists, register it as a sampleable item."""
        self.writer.append(data=data)
        if self._num_items >= self.total_horizon:
            self.writer.create_item(table=self.table_name, num_timesteps=self.total_horizon, priority=priority)
        else:
            self._num_items += 1

    def sample(self):
        """Draw the next batch of sequences from the replay dataset."""
        return next(self.dataset).data
class ReverbTransitionReplayBuffer(ReverbReplayBuffer):
    """N-step transition replay built on ReverbReplayBuffer.

    Stores per-env (obs, act, rew, done) timesteps; `add` folds incoming
    rewards into discounted n-step returns using short sliding deques, and
    `sample` slices the stored sequences into flat
    (obs, act, next_obs, rew, done) transitions with optional frame stacking.
    """

    def __init__(self,
                 num_parallel_env,
                 obs_spec,
                 act_spec,
                 replay_capacity,
                 batch_size,
                 gamma=0.99,
                 update_horizon=1,
                 frame_stack=1,
                 ):
        assert replay_capacity % num_parallel_env == 0, 'replay_capacity must be divisible by num_parallel_env'
        assert batch_size % num_parallel_env == 0, 'batch_size must be divisible by num_parallel_env'
        self.obs_spec = obs_spec
        self.act_spec = act_spec
        # Re-declare the specs with a leading num_parallel_env batch axis.
        obs_spec = tf.TensorSpec(shape=[num_parallel_env] + obs_spec.shape, dtype=obs_spec.dtype)
        act_spec = tf.TensorSpec(shape=[num_parallel_env] + act_spec.shape, dtype=act_spec.dtype)
        data_spec = {
            'obs': obs_spec,
            'act': act_spec,
            'rew': tf.TensorSpec(shape=[num_parallel_env], dtype=tf.float32),
            'done': tf.TensorSpec(shape=[num_parallel_env], dtype=tf.float32)
        }
        super(ReverbTransitionReplayBuffer, self).__init__(data_spec=data_spec,
                                                           replay_capacity=replay_capacity // num_parallel_env,
                                                           batch_size=batch_size // num_parallel_env,
                                                           update_horizon=update_horizon,
                                                           frame_stack=frame_stack)
        self.gamma = gamma
        # Sliding windows over the last `update_horizon` rewards/done flags,
        # pre-filled with zeros; used to assemble n-step returns in add().
        self.rew_deque = deque(maxlen=update_horizon)
        self.done_deque = deque(maxlen=update_horizon)
        for _ in range(update_horizon):
            self.rew_deque.append(tf.zeros(shape=[num_parallel_env], dtype=tf.float32))
            self.done_deque.append(tf.zeros(shape=[num_parallel_env], dtype=tf.float32))
        # gamma_array[k] = gamma ** k, shape (update_horizon, 1), for discounting.
        self.gamma_array = tf.math.cumprod(tf.ones(shape=[update_horizon, 1], dtype=tf.float32) * self.gamma,
                                           exclusive=True, axis=0)
        # Permutation swapping the time and env axes when reshaping samples.
        self.out_perm = [0, 2, 1] + list(range(3, 3 + len(self.obs_spec.shape)))

    def add(self, data, priority=1.0):
        """For n-step return, we only know the reward for state s_t in s_{t+n-1}.
        Args:
            data: a dictionary contains obs, act, rew and done
            priority:
        Returns:
        """
        rew = tf.cast(data['rew'], dtype=tf.float32)
        done = tf.cast(data['done'], dtype=tf.float32)
        self.rew_deque.append(rew)
        self.done_deque.append(done)
        rew_queue = tf.stack(list(self.rew_deque), axis=0)  # (T, B)
        not_done_queue = 1. - tf.stack(list(self.done_deque), axis=0)  # (T, B)
        # Exclusive cumprod zeroes out rewards that lie beyond an episode boundary.
        not_done_cumprod = tf.math.cumprod(not_done_queue, exclusive=True, axis=0)  # (T, B)
        rew = tf.reduce_sum(rew_queue * self.gamma_array * not_done_cumprod, axis=0)
        done = 1 - tf.math.reduce_prod(not_done_queue, axis=0)
        data['rew'] = rew
        data['done'] = done
        super(ReverbTransitionReplayBuffer, self).add(data=data)

    @tf.function
    def sample(self):
        """Slice stored sequences into flat (obs, act, next_obs, rew, done) transitions."""
        print('Tracing sample in ReverbTransitionReplayBuffer')
        data = super(ReverbTransitionReplayBuffer, self).sample()
        obs_seq = data['obs']  # (None, update_horizon + frame_stack, B, ...)
        act_seq = data['act']  # (None, update_horizon + frame_stack, B, ...)
        rew_seq = data['rew']  # (None, update_horizon + frame_stack, B, ...)
        done_seq = data['done']  # (None, update_horizon + frame_stack, B, ...)
        obs_seq = tf.transpose(obs_seq, perm=self.out_perm)  # (None, B, update_horizon + frame_stack)
        obs_seq = tf.reshape(obs_seq, shape=[-1, self.total_horizon] + list(self.obs_spec.shape))
        obs = obs_seq[:, :self.frame_stack]  # (None * B, frame_stack, ...)
        next_obs = obs_seq[:, -self.frame_stack:]  # (None * B, frame_stack, ...)
        act = act_seq[:, self.frame_stack - 1]  # (None, B)
        rew = rew_seq[:, self.total_horizon - 2]  # (None, B)
        done = done_seq[:, self.total_horizon - 2]  # (None, B)
        act = tf.reshape(act, shape=[-1] + list(self.act_spec.shape))
        rew = tf.reshape(rew, shape=[-1])
        done = tf.reshape(done, shape=[-1])
        if self.frame_stack == 1:
            # Drop the singleton frame axis when no stacking is used.
            obs = tf.squeeze(obs, axis=1)
            next_obs = tf.squeeze(next_obs, axis=1)
        return {
            'obs': obs,
            'act': act,
            'next_obs': next_obs,
            'rew': rew,
            'done': done
        }
# Manual smoke test: fill the buffer with dummy transitions (episodes end
# every 4th step) and draw a few sample batches.
if __name__ == '__main__':
    num_parallel_env = 5
    replay_buffer = ReverbTransitionReplayBuffer(num_parallel_env=num_parallel_env,
                                                 obs_spec=tf.TensorSpec(shape=[1], dtype=tf.int32),
                                                 act_spec=tf.TensorSpec(shape=[1], dtype=tf.int32),
                                                 replay_capacity=1000,
                                                 batch_size=10,
                                                 update_horizon=2,
                                                 frame_stack=1)
    for i in range(100):
        replay_buffer.add(data={
            'obs': tf.convert_to_tensor([[i]] * num_parallel_env),
            'act': tf.convert_to_tensor([[i]] * num_parallel_env),
            'rew': tf.convert_to_tensor([i] * num_parallel_env, dtype=tf.float32),
            'done': tf.convert_to_tensor([False] * num_parallel_env) if i % 4 != 0 else tf.convert_to_tensor(
                [True] * num_parallel_env)
        })
    for _ in range(10):
        replay_buffer.sample()
from typing import Dict
import gym.spaces
import numpy as np
from .base import PyReplayBuffer
from .utils import segtree
# Smallest positive float32 increment; added to |priorities| so they stay strictly positive.
EPS = np.finfo(np.float32).eps.item()
class PyPrioritizedReplayBuffer(PyReplayBuffer):
    """
    Proportional prioritized experience replay (PER) built on PyReplayBuffer,
    using a segment tree for O(log n) prefix-sum sampling.

    Priorities are stored as priority ** alpha; `sample` also returns
    importance-sampling weights via the simplified (p_j / p_min) ** (-beta).
    """

    def __init__(self, data_spec: Dict[str, gym.spaces.Space], capacity, batch_size, alpha=0.6, seed=None):
        """
        Args:
            alpha: priority exponent; alpha = 0 recovers uniform sampling.
        """
        super(PyPrioritizedReplayBuffer, self).__init__(data_spec=data_spec,
                                                        capacity=capacity,
                                                        batch_size=batch_size,
                                                        seed=seed)
        self.alpha = alpha
        self.max_priority = 1.0
        self.min_priority = 1.0
        self.segtree = segtree.SegmentTree(size=capacity)

    def add(self, data: Dict[str, np.ndarray], priority=None):
        """Add a batch of items; new items default to the current max priority."""
        batch_size = list(data.values())[0].shape[0]
        if priority is None:
            priority = np.ones(shape=(batch_size,), dtype=np.float32) * self.max_priority
        assert np.all(priority > 0.), f'Priority must be all greater than zero. Got {priority}'
        # Bug fix: wrap indices around the circular buffer.  The original
        # np.arange(self.ptr, self.ptr + batch_size) ran past the segment
        # tree's bound once the write pointer neared capacity (its
        # __setitem__ asserts index < size), while the parent class wraps
        # its storage writes.
        idx = np.arange(self.ptr, self.ptr + batch_size) % self.capacity
        self.segtree[idx] = priority ** self.alpha
        self.max_priority = max(self.max_priority, np.max(priority))
        self.min_priority = min(self.min_priority, np.min(priority))
        super(PyPrioritizedReplayBuffer, self).add(data=data)

    def sample(self, beta=0.4):
        """Sample a batch proportionally to stored priorities.

        Returns:
            (data, idx): the batch dict (with an extra 'weights' entry of
            importance-sampling weights) and the sampled indices, to be
            passed back to update_priorities.
        """
        scalar = self.np_random.rand(self.batch_size) * self.segtree.reduce()
        idx = self.segtree.get_prefix_sum_idx(scalar)
        data = self.__getitem__(idx)
        # important sampling weight calculation
        # original formula: ((p_j/p_sum*N)**(-beta))/((p_min/p_sum*N)**(-beta))
        # simplified formula: (p_j/p_min)**(-beta)
        data['weights'] = (self.segtree[idx].astype(np.float32) / self.min_priority) ** (-beta)
        return data, idx

    def update_priorities(self, idx, priorities, min_priority=None, max_priority=None):
        """Refresh the priorities of the items at `idx` (e.g. with new TD errors)."""
        assert idx.shape == priorities.shape
        # Keep priorities strictly positive so they remain sampleable.
        priorities = np.abs(priorities) + EPS
        if min_priority is not None or max_priority is not None:
            priorities = np.clip(priorities, a_min=min_priority, a_max=max_priority)
        self.segtree[idx] = priorities ** self.alpha
        self.max_priority = max(self.max_priority, np.max(priorities))
        self.min_priority = min(self.min_priority, np.min(priorities))

    @classmethod
    def from_data_dict(cls, alpha=0.6, **kwargs):
        return super(PyPrioritizedReplayBuffer, cls).from_data_dict(alpha=alpha, **kwargs)

    @classmethod
    def from_vec_env(cls, alpha=0.6, **kwargs):
        return super(PyPrioritizedReplayBuffer, cls).from_vec_env(alpha=alpha, **kwargs)

    @classmethod
    def from_env(cls, alpha=0.6, **kwargs):
        return super(PyPrioritizedReplayBuffer, cls).from_env(alpha=alpha, **kwargs)
from typing import Union, Optional
import numpy as np
from numba import njit
class SegmentTree:
    """Implementation of Segment Tree.
    The segment tree stores an array ``arr`` with size ``n``. It supports value
    update and fast query of the sum for the interval ``[left, right)`` in
    O(log n) time. The detailed procedure is as follows:
    1. Pad the array to have length of power of 2, so that leaf nodes in the \
    segment tree have the same depth.
    2. Store the segment tree in a binary heap.
    :param int size: the size of segment tree.
    """

    def __init__(self, size: int) -> None:
        # Round capacity up to the next power of two so all leaves share a depth.
        bound = 1
        while bound < size:
            bound *= 2
        self._size = size
        self._bound = bound
        # Heap layout: node 1 is the root; leaves live at [bound, 2*bound).
        self._value = np.zeros([bound * 2])
        self._compile()

    def __len__(self) -> int:
        return self._size

    def __getitem__(
            self, index: Union[int, np.ndarray]
    ) -> Union[float, np.ndarray]:
        """Return self[index]."""
        return self._value[index + self._bound]

    def __setitem__(
            self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]
    ) -> None:
        """Update values in segment tree.
        Duplicate values in ``index`` are handled by numpy: later index
        overwrites previous ones.
        ::
            >>> a = np.array([1, 2, 3, 4])
            >>> a[[0, 1, 0, 1]] = [4, 5, 6, 7]
            >>> print(a)
            [6 7 3 4]
        """
        if isinstance(index, int):
            index, value = np.array([index]), np.array([value])
        assert np.all(0 <= index) and np.all(index < self._size)
        _setitem(self._value, index + self._bound, value)

    def reduce(self, start: int = 0, end: Optional[int] = None) -> float:
        """Return operation(value[start:end])."""
        if start == 0 and end is None:
            # Fast path: the root already holds the sum of all leaves.
            return self._value[1]
        if end is None:
            end = self._size
        if end < 0:
            end += self._size
        # Shift to an open interval over leaf positions in the heap layout.
        return _reduce(self._value, start + self._bound - 1, end + self._bound)

    def get_prefix_sum_idx(
            self, value: Union[float, np.ndarray]
    ) -> Union[int, np.ndarray]:
        r"""Find the index with given value.
        Return the minimum index for each ``v`` in ``value`` so that
        :math:`v \le \mathrm{sums}_i`, where
        :math:`\mathrm{sums}_i = \sum_{j = 0}^{i} \mathrm{arr}_j`.
        .. warning::
            Please make sure all of the values inside the segment tree are
            non-negative when using this function.
        """
        assert np.all(value >= 0.0) and np.all(value < self._value[1])
        single = False
        if not isinstance(value, np.ndarray):
            value = np.array([value])
            single = True
        index = _get_prefix_sum_idx(value, self._bound, self._value)
        return index.item() if single else index

    def _compile(self) -> None:
        # Pre-trigger numba JIT compilation of the kernels for the dtype
        # combinations used at runtime, so the first real call is fast.
        f64 = np.array([0, 1], dtype=np.float64)
        f32 = np.array([0, 1], dtype=np.float32)
        i64 = np.array([0, 1], dtype=np.int64)
        _setitem(f64, i64, f64)
        _setitem(f64, i64, f32)
        _reduce(f64, 0, 1)
        _get_prefix_sum_idx(f64, 1, f64)
        _get_prefix_sum_idx(f32, 1, f64)
@njit
def _setitem(tree: np.ndarray, index: np.ndarray, value: np.ndarray) -> None:
    """Assign leaf values and propagate partial sums up to the root.

    Numba version, 4x faster: 0.1 -> 0.024.
    """
    tree[index] = value
    # All leaves share the same depth, so inspecting a single index is
    # enough to know when the root (heap index 1) has been reached.
    while index[0] > 1:
        index //= 2
        left = index * 2
        tree[index] = tree[left] + tree[left + 1]
@njit
def _reduce(tree: np.ndarray, start: int, end: int) -> float:
    """Numba version, 2x faster: 0.009 -> 0.005.

    Sums the leaves strictly inside the exclusive heap-index interval
    (start, end), climbing one tree level per iteration.
    """
    # nodes in (start, end) should be aggregated
    result = 0.0
    while end - start > 1:  # (start, end) interval is not empty
        if start % 2 == 0:
            # start is a left child: its right sibling is inside the interval
            result += tree[start + 1]
        start //= 2
        if end % 2 == 1:
            # end is a right child: its left sibling is inside the interval
            result += tree[end - 1]
        end //= 2
    return result
@njit
def _get_prefix_sum_idx(
    value: np.ndarray, bound: int, sums: np.ndarray
) -> np.ndarray:
    """Numba version (v0.51), 5x speed up with size=100000 and bsz=64.

    vectorized np: 0.0923 (numpy best) -> 0.024 (now)
    for-loop: 0.2914 -> 0.019 (but not so stable)

    Vectorized top-down descent: each query walks from the root towards the
    leaf whose prefix sum first covers its target value.
    NOTE: ``value`` is consumed (mutated) in place during the descent.
    """
    # start every query at the root (heap index 1)
    index = np.ones(value.shape, dtype=np.int64)
    while index[0] < bound:  # all queries sit at the same depth
        index *= 2  # step down to the left child
        lsons = sums[index]
        direct = lsons < value  # 1 where the target lies in the right subtree
        value -= lsons * direct  # discount the skipped left-subtree mass
        index += direct
    index -= bound  # convert heap leaf index back to array index
    return index
from . import driver
import traceback
import weakref
class Engine(object):
    """
    TTS engine front end: queues commands for a platform specific driver
    through a proxy and dispatches driver notifications to registered
    callbacks.

    @ivar proxy: Proxy to a driver implementation
    @type proxy: L{DriverProxy}
    @ivar _connects: Map of topic name to list of subscribed callbacks
    @type _connects: dict
    @ivar _inLoop: Running an event loop or not
    @type _inLoop: bool
    @ivar _driverLoop: Using a driver event loop or not
    @type _driverLoop: bool
    @ivar _debug: Print exceptions or not
    @type _debug: bool
    """

    def __init__(self, driverName=None, debug=False):
        """
        Constructs a new TTS engine instance.

        @param driverName: Name of the platform specific driver to use. If
            None, selects the default driver for the operating system.
        @type: str
        @param debug: Debugging output enabled or not
        @type debug: bool
        """
        # weakref.proxy avoids a reference cycle between engine and driver
        self.proxy = driver.DriverProxy(weakref.proxy(self), driverName, debug)
        # initialize other vars
        self._connects = {}
        self._inLoop = False
        self._driverLoop = True
        self._debug = debug

    def _notify(self, topic, **kwargs):
        """
        Invokes callbacks for an event topic.

        @param topic: String event name
        @type topic: str
        @param kwargs: Values associated with the event
        @type kwargs: dict
        """
        for cb in self._connects.get(topic, []):
            try:
                cb(**kwargs)
            except Exception:
                # A misbehaving callback must not break notification
                # delivery; surface the traceback only in debug mode.
                if self._debug:
                    traceback.print_exc()

    def connect(self, topic, cb):
        """
        Registers a callback for an event topic. Valid topics and their
        associated values:

        started-utterance: name=<str>
        started-word: name=<str>, location=<int>, length=<int>
        finished-utterance: name=<str>, completed=<bool>
        error: name=<str>, exception=<exception>

        @param topic: Event topic name
        @type topic: str
        @param cb: Callback function
        @type cb: callable
        @return: Token to use to unregister
        @rtype: dict
        """
        arr = self._connects.setdefault(topic, [])
        arr.append(cb)
        return {'topic': topic, 'cb': cb}

    def disconnect(self, token):
        """
        Unregisters a callback for an event topic.

        @param token: Token of the callback to unregister
        @type token: dict
        """
        topic = token['topic']
        try:
            arr = self._connects[topic]
        except KeyError:
            # nothing registered for this topic; silently ignore
            return
        arr.remove(token['cb'])
        if len(arr) == 0:
            # drop the topic entry entirely once its last callback is gone
            del self._connects[topic]

    def say(self, text, name=None):
        """
        Adds an utterance to speak to the event queue.

        @param text: Text to speak
        @type text: unicode
        @param name: Name to associate with this utterance. Included in
            notifications about this utterance.
        @type name: str
        @return: An error message string when ``text`` is None, else None.
        """
        # Identity comparison for None (PEP 8); ``==`` could invoke a
        # custom __eq__ on the argument.
        # NOTE(review): the message mentions "empty" but empty strings are
        # still forwarded to the driver -- confirm intended behavior.
        if text is None:
            return "Argument value can't be none or empty"
        else:
            self.proxy.say(text, name)

    def stop(self):
        """
        Stops the current utterance and clears the event queue.
        """
        self.proxy.stop()

    def save_to_file(self, text, filename, name=None):
        '''
        Adds an utterance to speak to the event queue.

        @param text: Text to speak
        @type text: unicode
        @param filename: the name of file to save.
        @param name: Name to associate with this utterance. Included in
            notifications about this utterance.
        @type name: str
        '''
        self.proxy.save_to_file(text, filename, name)

    def isBusy(self):
        """
        @return: True if an utterance is currently being spoken, false if not
        @rtype: bool
        """
        return self.proxy.isBusy()

    def getProperty(self, name):
        """
        Gets the current value of a property. Valid names and values include:

        voices: List of L{voice.Voice} objects supported by the driver
        voice: String ID of the current voice
        rate: Integer speech rate in words per minute
        volume: Floating point volume of speech in the range [0.0, 1.0]

        Numeric values outside the valid range supported by the driver are
        clipped.

        @param name: Name of the property to fetch
        @type name: str
        @return: Value associated with the property
        @rtype: object
        @raise KeyError: When the property name is unknown
        """
        return self.proxy.getProperty(name)

    def setProperty(self, name, value):
        """
        Adds a property value to set to the event queue. Valid names and
        values include:

        voice: String ID of the voice
        rate: Integer speech rate in words per minute
        volume: Floating point volume of speech in the range [0.0, 1.0]

        Numeric values outside the valid range supported by the driver are
        clipped.

        @param name: Name of the property to set
        @type name: str
        @param value: Value to set for the property
        @type value: object
        @raise KeyError: When the property name is unknown
        """
        self.proxy.setProperty(name, value)

    def runAndWait(self):
        """
        Runs an event loop until all commands queued up until this method call
        complete. Blocks during the event loop and returns when the queue is
        cleared.

        @raise RuntimeError: When the loop is already running
        """
        if self._inLoop:
            raise RuntimeError('run loop already started')
        self._inLoop = True
        self._driverLoop = True
        self.proxy.runAndWait()

    def startLoop(self, useDriverLoop=True):
        """
        Starts an event loop to process queued commands and callbacks.

        @param useDriverLoop: If True, uses the run loop provided by the driver
            (the default). If False, assumes the caller will enter its own
            run loop which will pump any events for the TTS engine properly.
        @type useDriverLoop: bool
        @raise RuntimeError: When the loop is already running
        """
        if self._inLoop:
            raise RuntimeError('run loop already started')
        self._inLoop = True
        self._driverLoop = useDriverLoop
        self.proxy.startLoop(self._driverLoop)

    def endLoop(self):
        """
        Stops a running event loop.

        @raise RuntimeError: When the loop is not running
        """
        if not self._inLoop:
            raise RuntimeError('run loop not started')
        self.proxy.endLoop(self._driverLoop)
        self._inLoop = False

    def iterate(self):
        """
        Must be called regularly when using an external event loop.

        @raise RuntimeError: When no loop is running or the driver owns the
            loop
        """
        if not self._inLoop:
            raise RuntimeError('run loop not started')
        elif self._driverLoop:
            raise RuntimeError('iterate not valid in driver run loop')
        self.proxy.iterate()
from ..voice import Voice
import time
def buildDriver(proxy):
    '''
    Factory entry point: builds a new instance of a driver and returns it
    for use by the driver proxy.

    @param proxy: Proxy creating the driver
    @type proxy: L{driver.DriverProxy}
    @return: A newly constructed L{DummyDriver}
    '''
    dummy = DummyDriver(proxy)
    return dummy
class DummyDriver(object):
    '''
    Dummy speech engine implementation. Documents the interface, notifications,
    properties, and sequencing responsibilities of a driver implementation.

    @ivar _proxy: Driver proxy that manages this instance
    @type _proxy: L{driver.DriverProxy}
    @ivar _config: Dummy configuration
    @type _config: dict
    @ivar _looping: True when in the dummy event loop, False when not
    @type _looping: bool
    '''

    def __init__(self, proxy):
        '''
        Constructs the driver.

        @param proxy: Proxy creating the driver
        @type proxy: L{driver.DriverProxy}
        '''
        self._proxy = proxy
        self._looping = False
        # hold config values as if we had a real tts implementation that
        # supported them
        voices = [
            Voice('dummy.voice1', 'John Doe', ['en-US', 'en-GB'], 'male', 'adult'),
            Voice('dummy.voice2', 'Jane Doe', ['en-US', 'en-GB'], 'female', 'adult'),
            Voice('dummy.voice3', 'Jimmy Doe', ['en-US', 'en-GB'], 'male', 10)
        ]
        self._config = {
            'rate': 200,
            'volume': 1.0,
            'voice': voices[0],
            'voices': voices
        }

    def destroy(self):
        '''
        Optional method that will be called when the driver proxy is being
        destroyed. Can cleanup any resources to make sure the engine terminates
        properly.
        '''
        pass

    def startLoop(self):
        '''
        Starts a blocking run loop in which driver callbacks are properly
        invoked.

        @precondition: There was no previous successful call to L{startLoop}
            without an intervening call to L{endLoop}.
        '''
        first = True
        self._looping = True
        while self._looping:
            if first:
                # report idle exactly once so queued commands start flowing
                self._proxy.setBusy(False)
                first = False
            time.sleep(0.5)

    def endLoop(self):
        '''
        Stops a previously started run loop.

        @precondition: A previous call to L{startLoop} succeeded and there was
            no intervening call to L{endLoop}.
        '''
        self._looping = False

    def iterate(self):
        '''
        Iterates from within an external run loop.

        NOTE(review): the ``yield`` makes this a generator function, so the
        caller must iterate the returned generator for setBusy to execute --
        confirm against the proxy's usage.
        '''
        self._proxy.setBusy(False)
        yield

    def say(self, text):
        '''
        Speaks the given text. Generates the following notifications during
        output:

        started-utterance: When speech output has started
        started-word: When a word is about to be spoken. Includes the character
            "location" of the start of the word in the original utterance text
            and the "length" of the word in characters.
        finished-utterance: When speech output has finished. Includes a flag
            indicating if the entire utterance was "completed" or not.

        The proxy automatically adds any "name" associated with the utterance
        to the notifications on behalf of the driver.

        When starting to output an utterance, the driver must inform its proxy
        that it is busy by invoking L{driver.DriverProxy.setBusy} with a flag
        of True. When the utterance completes or is interrupted, the driver
        informs the proxy that it is no longer busy by invoking
        L{driver.DriverProxy.setBusy} with a flag of False.

        @param text: Unicode text to speak
        @type text: unicode
        '''
        self._proxy.setBusy(True)
        self._proxy.notify('started-utterance')
        i = 0
        for word in text.split(' '):
            self._proxy.notify('started-word', location=i, length=len(word))
            try:
                # advance to the character after the next space; the last
                # word has no trailing space, so the lookup simply fails
                i = text.index(' ', i + 1) + 1
            except Exception:
                pass
        self._proxy.notify('finished-utterance', completed=True)
        self._proxy.setBusy(False)

    def stop(self):
        '''
        Stops any current output. If an utterance was being spoken, the driver
        is still responsible for sending the closing finished-utterance
        notification documented above and resetting the busy state of the
        proxy.
        '''
        pass

    def getProperty(self, name):
        '''
        Gets a property value of the speech engine. The supported properties
        and their values are:

        voices: List of L{voice.Voice} objects supported by the driver
        voice: String ID of the current voice
        rate: Integer speech rate in words per minute
        volume: Floating point volume of speech in the range [0.0, 1.0]

        @param name: Property name
        @type name: str
        @raise KeyError: When the property name is unknown
        '''
        try:
            return self._config[name]
        except KeyError:
            raise KeyError('unknown property %s' % name)

    def setProperty(self, name, value):
        '''
        Sets one of the supported property values of the speech engine listed
        above. If a value is invalid, attempts to clip it / coerce so it is
        valid before giving up and firing an exception.

        @param name: Property name
        @type name: str
        @param value: Property value
        @type value: object
        @raise KeyError: When the property name is unknown
        @raise ValueError: When the value cannot be coerced to fit the property
        '''
        if name == 'voice':
            # filter() returns an iterator on Python 3 and is not
            # subscriptable; materialize the matches before indexing.
            matches = [v for v in self._config['voices'] if v.id == value]
            self._config['voice'] = matches[0]
        elif name == 'rate':
            self._config['rate'] = value
        elif name == 'volume':
            self._config['volume'] = value
        else:
            raise KeyError('unknown property %s' % name)
# Reinforcement Learning Zoo
[](https://rlzoo.readthedocs.io/en/latest/?badge=latest)
[](https://github.com/tensorflow/tensorflow/releases)
[](http://pepy.tech/project/rlzoo)
<br/>
<a href="https://deepreinforcementlearningbook.org" target="\_blank">
<div align="center">
<img src="docs/img/rlzoo-logo.png" width="40%"/>
</div>
<!-- <div align="center"><caption>Slack Invitation Link</caption></div> -->
</a>
<br/>
RLzoo is a collection of the most practical reinforcement learning algorithms, frameworks and applications. It is implemented with Tensorflow 2.0 and API of neural network layers in TensorLayer 2, to provide a hands-on fast-developing approach for reinforcement learning practices and benchmarks. It supports basic toy-tests like [OpenAI Gym](https://gym.openai.com/) and [DeepMind Control Suite](https://github.com/deepmind/dm_control) with very simple configurations. Moreover, RLzoo supports robot learning benchmark environment [RLBench](https://github.com/stepjam/RLBench) based on [Vrep](http://www.coppeliarobotics.com/)/[Pyrep](https://github.com/stepjam/PyRep) simulator. Other large-scale distributed training framework for more realistic scenarios with [Unity 3D](https://github.com/Unity-Technologies/ml-agents),
[Mujoco](http://www.mujoco.org/), [Bullet Physics](https://github.com/bulletphysics/bullet3), etc, will be supported in the future. A [Springer textbook](https://deepreinforcementlearningbook.org) is also provided, you can get the free PDF if your institute has Springer license.
Different from RLzoo for simple usage with **high-level APIs**, we also have a [RL tutorial](https://github.com/tensorlayer/tensorlayer/tree/master/examples/reinforcement_learning) that aims to make the reinforcement learning tutorial simple, transparent and straight-forward with **low-level APIs**, as this would not only benefits new learners of reinforcement learning, but also provide convenience for senior researchers to testify their new ideas quickly.
<!-- <em>Gym: Atari</em> <em>Gym: Box2D </em> <em>Gym: Classic Control </em> <em>Gym: MuJoCo </em>-->
<img src="https://github.com/tensorlayer/RLzoo/blob/master/gif/atari.gif" height=250 width=210 > <img src="https://github.com/tensorlayer/RLzoo/blob/master/gif/box2d.gif" height=250 width=210 > <img src="https://github.com/tensorlayer/RLzoo/blob/master/gif/classic.gif" height=250 width=210 > <img src="https://github.com/tensorlayer/RLzoo/blob/master/gif/mujoco.gif" height=250 width=210 >
<!-- <em>Gym: Robotics</em> <em>DeepMind Control Suite </em> <em>Gym: RLBench </em> -->
<img src="https://github.com/tensorlayer/RLzoo/blob/master/gif/robotics.gif" height=250 width=210 > <img src="https://github.com/tensorlayer/RLzoo/blob/master/gif/dmcontrol.gif" height=250 width=210 > <img src="https://github.com/tensorlayer/RLzoo/blob/master/gif/rlbench.gif" height=250 width=210 > <img src="https://github.com/tensorlayer/tensorlayer/blob/master/img/tl_transparent_logo.png" height=180 width=210 >
Please check our [Online Documentation](https://rlzoo.readthedocs.io). We suggest users to report bugs using Github issues. Users can also discuss how to use RLzoo in the following slack channel.
<br/>
<a href="https://join.slack.com/t/tensorlayer/shared_invite/enQtODk1NTQ5NTY1OTM5LTQyMGZhN2UzZDBhM2I3YjYzZDBkNGExYzcyZDNmOGQzNmYzNjc3ZjE3MzhiMjlkMmNiMmM3Nzc4ZDY2YmNkMTY" target="\_blank">
<div align="center">
<img src="https://github.com/tensorlayer/tensorlayer/raw/master/img/join_slack.png" width="40%"/>
</div>
</a>
<br/>
**Table of contents:**
- [Status](#status-release)
- [Installation](#installation)
- [Prerequisites](#prerequisites)
- [Usage](#usage)
- [Contents](#contents)
- [Algorithms](#algorithms)
- [Environments](#environments)
- [Configurations](#configuration)
- [Properties](#properties)
- [Troubleshooting](#troubleshooting)
- [Credits](#credits)
- [Citing](#citing)
## Status: Release
<details><summary><b>Current status</b> <i>[click to expand]</i></summary>
<div>
We are currently open to any suggestions or pull requests from the community to make RLzoo a better repository. Given the scope of this project, we expect there could be some issues over
the coming months after initial release. We will keep improving the potential problems and commit when significant changes are made in the future. Current default hyperparameters for each algorithm and each environment may not be optimal, so you can play around with those hyperparameters to achieve best performances. We will release a version with optimal hyperparameters and benchmark results for all algorithms in the future.
</div>
</details>
<details><summary><b>Version History</b> <i>[click to expand]</i></summary>
<div>
* 1.0.3 (Current version)
Changes:
* Fix bugs in SAC algorithm
* 1.0.1
Changes:
* Add [interactive training configuration](https://github.com/tensorlayer/RLzoo/blob/master/rlzoo/interactive/main.ipynb);
* Better support RLBench environment, with multi-head network architectures to support dictionary as observation type;
* Make the code cleaner.
* 0.0.1
</div>
</details>
## Installation
Ensure that you have **Python >=3.5** (Python 3.6 is needed if using DeepMind Control Suite).
Direct installation:
```
pip3 install rlzoo --upgrade
```
Install RLzoo from Git:
```
git clone https://github.com/tensorlayer/RLzoo.git
cd RLzoo
pip3 install .
```
## Prerequisites
```pip3 install -r requirements.txt```
<details><summary><b>List of prerequisites.</b> <i>[click to expand]</i></summary>
<div>
* tensorflow >= 2.0.0 or tensorflow-gpu >= 2.0.0a0
* tensorlayer >= 2.0.1
* tensorflow-probability
* tf-nightly-2.0-preview
* [Mujoco 2.0](http://www.mujoco.org/), [dm_control](https://github.com/deepmind/dm_control), [dm2gym](https://github.com/zuoxingdong/dm2gym) (if using DeepMind Control Suite environments)
* Vrep, PyRep, RLBench (if using RLBench environments, follows [here](http://www.coppeliarobotics.com/downloads.html), [here](https://github.com/stepjam/PyRep) and [here](https://github.com/stepjam/RLBench))
</div>
</details>
## Usage
For detailed usage, please check our [**online documentation**](https://rlzoo.readthedocs.io).
### Quick Start
Choose whatever environments with whatever RL algorithms supported in RLzoo, and enjoy the game by running following example in the root file of installed package:
```python
# in the root folder of rlzoo package
cd RLzoo
python run_rlzoo.py
```
What's in `run_rlzoo.py`?
```python
from rlzoo.common.env_wrappers import build_env
from rlzoo.common.utils import call_default_params
from rlzoo.algorithms import TD3 # import the algorithm to use
# choose an algorithm
AlgName = 'TD3'
# chose an environment
EnvName = 'Pendulum-v0'
# select a corresponding environment type
EnvType = 'classic_control'
# build an environment with wrappers
env = build_env(EnvName, EnvType)
# call default parameters for the algorithm and learning process
alg_params, learn_params = call_default_params(env, EnvType, AlgName)
# instantiate the algorithm
alg = eval(AlgName+'(**alg_params)')
# start the training
alg.learn(env=env, mode='train', render=False, **learn_params)
# test after training
alg.learn(env=env, mode='test', render=True, **learn_params)
```
The main script `run_rlzoo.py` follows (almost) the same structure for all algorithms on all environments, see the [**full list of examples**](./examples.md).
**General Descriptions:**
RLzoo provides at least two types of interfaces for running the learning algorithms, with (1) implicit configurations or (2) explicit configurations. Both of them start learning program through running a python script, instead of running a long command line with all configurations shortened to be arguments of it (e.g. in Openai Baseline). Our approaches are found to be more interpretable, flexible and convenient to apply in practice. According to the level of explicitness of learning configurations, we provided two different ways of setting learning configurations in python scripts: the first one with implicit configurations uses a `default.py` script to record all configurations for each algorithm, while the second one with explicit configurations exposes all configurations to the running scripts. Both of them can run any RL algorithms on any environments supported in our repository with a simple command line.
<details><summary><b>1. Implicit Configurations</b> <i>[click to expand]</i></summary>
<div>
RLzoo with **implicit configurations** means the configurations for learning are not explicitly contained in the main script for running (i.e. `run_rlzoo.py`), but in the `default.py` file in each algorithm folder (for example, `rlzoo/algorithms/sac/default.py` is the default parameters configuration for SAC algorithm). All configurations include (1) parameter values for the algorithm and learning process, (2) the network structures, (3) the optimizers, etc, are divided into configurations for the algorithm (stored in `alg_params`) and configurations for the learning process (stored in `learn_params`). Whenever you want to change the configurations for the algorithm or learning process, you can either go to the folder of each algorithm and modify parameters in `default.py`, or change the values in `alg_params` (a dictionary of configurations for the algorithm) and `learn_params` (a dictionary of configurations for the learning process) in `run_rlzoo.py` according to the keys.
#### Common Interface:
```python
from rlzoo.common.env_wrappers import build_env
from rlzoo.common.utils import call_default_params
from rlzoo.algorithms import *
# choose an algorithm
AlgName = 'TD3'
# chose an environment
EnvName = 'Pendulum-v0'
# select a corresponding environment type
EnvType = ['classic_control', 'atari', 'box2d', 'mujoco', 'robotics', 'dm_control', 'rlbench'][0]
# build an environment with wrappers
env = build_env(EnvName, EnvType)
# call default parameters for the algorithm and learning process
alg_params, learn_params = call_default_params(env, EnvType, AlgName)
# instantiate the algorithm
alg = eval(AlgName+'(**alg_params)')
# start the training
alg.learn(env=env, mode='train', render=False, **learn_params)
# test after training
alg.learn(env=env, mode='test', render=True, **learn_params)
```
```python
# in the root folder of rlzoo package
cd rlzoo
python run_rlzoo.py
```
</div>
</details>
<details><summary><b>2. Explicit Configurations</b> <i>[click to expand]</i></summary>
<div>
RLzoo with **explicit configurations** means the configurations for learning, including parameter values for the algorithm and the learning process, the network structures used in the algorithms and the optimizers etc, are explicitly displayed in the main script for running. And the main scripts for demonstration are under the folder of each algorithm, for example, `./rlzoo/algorithms/sac/run_sac.py` can be called with `python algorithms/sac/run_sac.py` from the file `./rlzoo` to run the learning process same as in above implicit configurations.
#### A Quick Example
```python
import gym
from rlzoo.common.utils import make_env, set_seed
from rlzoo.algorithms import AC
from rlzoo.common.value_networks import ValueNetwork
from rlzoo.common.policy_networks import StochasticPolicyNetwork
''' load environment '''
env = gym.make('CartPole-v0').unwrapped
obs_space = env.observation_space
act_space = env.action_space
# reproducible
seed = 2
set_seed(seed, env)
''' build networks for the algorithm '''
num_hidden_layer = 4 #number of hidden layers for the networks
hidden_dim = 64 # dimension of hidden layers for the networks
with tf.name_scope('AC'):
with tf.name_scope('Critic'):
# choose the critic network, can be replaced with customized network
critic = ValueNetwork(obs_space, hidden_dim_list=num_hidden_layer * [hidden_dim])
with tf.name_scope('Actor'):
# choose the actor network, can be replaced with customized network
actor = StochasticPolicyNetwork(obs_space, act_space, hidden_dim_list=num_hidden_layer * [hidden_dim], output_activation=tf.nn.tanh)
net_list = [actor, critic] # list of the networks
''' choose optimizers '''
a_lr, c_lr = 1e-4, 1e-2 # a_lr: learning rate of the actor; c_lr: learning rate of the critic
a_optimizer = tf.optimizers.Adam(a_lr)
c_optimizer = tf.optimizers.Adam(c_lr)
optimizers_list=[a_optimizer, c_optimizer] # list of optimizers
# intialize the algorithm model, with algorithm parameters passed in
model = AC(net_list, optimizers_list)
'''
full list of arguments for the algorithm
----------------------------------------
net_list: a list of networks (value and policy) used in the algorithm, from common functions or customization
optimizers_list: a list of optimizers for all networks and differentiable variables
gamma: discounted factor of reward
action_range: scale of action values
'''
# start the training process, with learning parameters passed in
model.learn(env, train_episodes=500, max_steps=200,
save_interval=50, mode='train', render=False)
'''
full list of parameters for training
---------------------------------------
env: learning environment
train_episodes: total number of episodes for training
test_episodes: total number of episodes for testing
max_steps: maximum number of steps for one episode
save_interval: time steps for saving the weights and plotting the results
mode: 'train' or 'test'
render: if true, visualize the environment
'''
# test after training
model.learn(env, test_episodes=100, max_steps=200, mode='test', render=True)
```
In the package folder, we provides examples with explicit configurations for each algorithm.
```python
# in the root folder of rlzoo package
cd rlzoo
python algorithms/<ALGORITHM_NAME>/run_<ALGORITHM_NAME>.py
# for example: run actor-critic
python algorithms/ac/run_ac.py
```
</div>
</details>
### Interactive Configurations
We also provide an interactive learning configuration with Jupyter Notebook and *ipywidgets*, where you can select the algorithm, environment, and general learning settings with simple clicking on dropdown lists and sliders! A video demonstrating the usage is as following. The interactive mode can be used with [`rlzoo/interactive/main.ipynb`](https://github.com/tensorlayer/RLzoo/blob/master/rlzoo/interactive/main.ipynb) by running `$ jupyter notebook` to open it.

## Contents
### Algorithms
Choices for `AlgName`: 'DQN', 'AC', 'A3C', 'DDPG', 'TD3', 'SAC', 'PG', 'TRPO', 'PPO', 'DPPO'
| Algorithms | Papers |
| --------------- | -------|
|**Value-based**||
| Q-learning | [Technical note: Q-learning. Watkins et al. 1992](http://www.gatsby.ucl.ac.uk/~dayan/papers/cjch.pdf)|
| Deep Q-Network (DQN)| [Human-level control through deep reinforcement learning, Mnih et al. 2015.](https://www.nature.com/articles/nature14236/) |
| Prioritized Experience Replay | [Schaul et al. Prioritized experience replay. Schaul et al. 2015.](https://arxiv.org/abs/1511.05952) |
|Dueling DQN|[Dueling network architectures for deep reinforcement learning. Wang et al. 2015.](https://arxiv.org/abs/1511.06581)|
|Double DQN|[Deep reinforcement learning with double q-learning. Van et al. 2016.](https://arxiv.org/abs/1509.06461)|
|Retrace|[Safe and efficient off-policy reinforcement learning. Munos et al. 2016: ](https://arxiv.org/pdf/1606.02647.pdf)|
|Noisy DQN|[Noisy networks for exploration. Fortunato et al. 2017.](https://arxiv.org/pdf/1706.10295.pdf)|
| Distributed DQN (C51)| [A distributional perspective on reinforcement learning. Bellemare et al. 2017.](https://arxiv.org/pdf/1707.06887.pdf) |
|**Policy-based**||
|REINFORCE(PG) | [Simple statistical gradient-following algorithms for connectionist reinforcement learning. Ronald J. Williams 1992.](https://link.springer.com/article/10.1007/BF00992696)|
| Trust Region Policy Optimization (TRPO)| [Abbeel et al. Trust region policy optimization. Schulman et al.2015.](https://arxiv.org/pdf/1502.05477.pdf) |
| Proximal Policy Optimization (PPO) | [Proximal policy optimization algorithms. Schulman et al. 2017.](https://arxiv.org/abs/1707.06347) |
|Distributed Proximal Policy Optimization (DPPO)|[Emergence of locomotion behaviours in rich environments. Heess et al. 2017.](https://arxiv.org/abs/1707.02286)|
|**Actor-Critic**||
|Actor-Critic (AC)| [Actor-critic algorithms. Konda er al. 2000.](https://papers.nips.cc/paper/1786-actor-critic-algorithms.pdf)|
| Asynchronous Advantage Actor-Critic (A3C)| [Asynchronous methods for deep reinforcement learning. Mnih et al. 2016.](https://arxiv.org/pdf/1602.01783.pdf) |
| Deep Deterministic Policy Gradient (DDPG) | [Continuous Control With Deep Reinforcement Learning, Lillicrap et al. 2016](https://arxiv.org/pdf/1509.02971.pdf) |
|Twin Delayed DDPG (TD3)|[Addressing function approximation error in actor-critic methods. Fujimoto et al. 2018.](https://arxiv.org/pdf/1802.09477.pdf)|
|Soft Actor-Critic (SAC)|[Soft actor-critic algorithms and applications. Haarnoja et al. 2018.](https://arxiv.org/abs/1812.05905)|
### Environments
Choices for `EnvType`: 'atari', 'box2d', 'classic_control', 'mujoco', 'robotics', 'dm_control', 'rlbench'
* [**OpenAI Gym**](https://gym.openai.com/envs):
* Atari
* Box2D
* Classic control
* MuJoCo
* Robotics
* [**DeepMind Control Suite**](https://github.com/deepmind/dm_control)
* [**RLBench**](https://github.com/stepjam/RLBench)
<details><summary><b>Some notes on environment usage.</b> <i>[click to expand]</i></summary>
<div>
* Make sure the name of environment matches the type of environment in the main script. The types of environments include: 'atari', 'box2d', 'classic_control', 'mujoco', 'robotics', 'dm_control', 'rlbench'.
* When using the DeepMind Control Suite, install the [dm2gym](https://github.com/zuoxingdong/dm2gym) package with: `pip install dm2gym`
* When using the RLBench environments, please add the path of your local rlbench repository to python:
```export PYTHONPATH=PATH_TO_YOUR_LOCAL_RLBENCH_REPO```
* A dictionary of all different environments is stored in `./rlzoo/common/env_list.py`
* Full list of environments in RLBench is [here](https://github.com/stepjam/RLBench/blob/master/rlbench/tasks/__init__.py).
* Installation of Vrep->PyRep->RLBench follows [here](http://www.coppeliarobotics.com/downloads.html)->[here](https://github.com/stepjam/PyRep)->[here](https://github.com/stepjam/RLBench).
</div>
</details>
## Configurations:
The supported configurations for RL algorithms with corresponding environments in RLzoo are listed in the following table.
| Algorithms | Action Space | Policy | Update | Envs |
| -------------------------- | ------------------- | ------------- | ---------- | ------------------------------------------------------------ |
| DQN (double, dueling, PER) | Discrete Only | -- | Off-policy | Atari, Classic Control |
| AC | Discrete/Continuous | Stochastic | On-policy | All |
| PG | Discrete/Continuous | Stochastic | On-policy | All |
| DDPG | Continuous | Deterministic | Off-policy | Classic Control, Box2D, Mujoco, Robotics, DeepMind Control, RLBench |
| TD3 | Continuous | Deterministic | Off-policy | Classic Control, Box2D, Mujoco, Robotics, DeepMind Control, RLBench |
| SAC | Continuous | Stochastic | Off-policy | Classic Control, Box2D, Mujoco, Robotics, DeepMind Control, RLBench |
| A3C | Discrete/Continuous | Stochastic | On-policy | Atari, Classic Control, Box2D, Mujoco, Robotics, DeepMind Control |
| PPO | Discrete/Continuous | Stochastic | On-policy | All |
| DPPO | Discrete/Continuous | Stochastic | On-policy | Atari, Classic Control, Box2D, Mujoco, Robotics, DeepMind Control |
| TRPO | Discrete/Continuous | Stochastic | On-policy | All |
## Properties
<details><summary><b>1. Automatic model construction</b> <i>[click to expand]</i></summary>
<div>
We aim to make it easy to configure for all components within RL, including replacing the networks, optimizers, etc. We also provide automatically adaptive policies and value functions in the common functions: for the observation space, the vector state or the raw-pixel (image) state are supported automatically according to the shape of the space; for the action space, the discrete action or continuous action are supported automatically according to the shape of the space as well. The deterministic or stochastic property of policy needs to be chosen according to each algorithm. Some environments with raw-pixel based observation (e.g. Atari, RLBench) may be hard to train, be patient and play around with the hyperparameters!
</div>
</details>
<details><summary><b>2. Simple and flexible API</b> <i>[click to expand]</i></summary>
<div>
As described in the Section of Usage, we provide at least two ways of deploying RLzoo: implicit configuration and explicit configuration process. We ensure the maximum flexibility for different use cases with this design.
</div>
</details>
<details><summary><b>3. Sufficient support for DRL algorithms and environments</b> <i>[click to expand]</i></summary>
<div>
As shown in above algorithms and environments tables.
</div>
</details>
<details><summary><b>4. Interactive reinforcement learning configuration.</b> <i>[click to expand]</i></summary>
<div>
As shown in the interactive use case in Section of Usage, a jupyter notebook is provided for more intuitively configuring the whole process of deploying the learning process ([`rlzoo/interactive/main.ipynb`](https://github.com/tensorlayer/RLzoo/blob/master/rlzoo/interactive/main.ipynb))
</div>
</details>
## Troubleshooting
* If you meet the error *'AttributeError: module 'tensorflow' has no attribute 'contrib''* when running the code after installing tensorflow-probability, try:
`pip install --upgrade tf-nightly-2.0-preview tfp-nightly`
* When trying to use RLBench environments, *'No module named rlbench'* can be caused by no RLBench package installed at your local or a mistake in the python path. You should add `export PYTHONPATH=/home/quantumiracle/research/vrep/PyRep/RLBench` every time you try to run the learning script with RLBench environment or add it to you `~/.bashrc` file once for all.
* If you meet the error that the Qt platform is not loaded correctly when using DeepMind Control Suite environments, it's probably caused by your Ubuntu system not being version 14.04 or 16.04. Check [here](https://github.com/deepmind/dm_control).
## Credits
Our core contributors include:
[Zihan Ding](https://github.com/quantumiracle?tab=repositories),
[Tianyang Yu](https://github.com/Tokarev-TT-33),
[Yanhua Huang](https://github.com/Officium),
[Hongming Zhang](https://github.com/initial-h),
[Hao Dong](https://github.com/zsdonghao)
## Citing
```
@misc{RLzoo,
author = {Zihan Ding and Tianyang Yu and Yanhua Huang and Hongming Zhang and Hao Dong},
title = {Reinforcement Learning Algorithms Zoo},
year = {2019},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\url{https://github.com/tensorlayer/RLzoo}},
}
```
## Other Resources
<br/>
<a href="https://deepreinforcementlearningbook.org" target="\_blank">
<div align="center">
<img src="http://deep-reinforcement-learning-book.github.io/assets/images/cover_v1.png" width="20%"/>
</div>
<!-- <div align="center"><caption>Slack Invitation Link</caption></div> -->
</a>
<br/>
<br/>
<a href="https://deepreinforcementlearningbook.org" target="\_blank">
<div align="center">
<img src="docs/img/logo.png" width="80%"/>
</div>
<!-- <div align="center"><caption>Slack Invitation Link</caption></div> -->
</a>
<br/>
| /rlzoo-1.0.4.tar.gz/rlzoo-1.0.4/README.md | 0.933051 | 0.986244 | README.md | pypi |
import argparse
from pathlib import Path
from typing import List, Optional
import cv2
import numpy as np
import onnxruntime as rt
from huggingface_hub.file_download import hf_hub_download
SCALE: int = 255
def get_mask(
    session_infer: rt.InferenceSession,
    img: np.ndarray,
    size_infer: int = 1024,
):
    """Run the segmentation model on an RGB image and return its alpha mask.

    The image is normalized to [0, 1], resized (aspect ratio preserved) onto
    a centered square canvas of side ``size_infer``, fed to the ONNX session,
    and the resulting mask is cropped back out and resized to the original
    image size.  Returns a float array of shape (h, w, 1).
    """
    normalized = (img / SCALE).astype(np.float32)
    height, width = normalized.shape[:-1]
    # Fit the longer side to size_infer, scaling the shorter side to match.
    if height > width:
        scaled_h, scaled_w = size_infer, int(size_infer * width / height)
    else:
        scaled_h, scaled_w = int(size_infer * height / width), size_infer
    pad_h = size_infer - scaled_h
    pad_w = size_infer - scaled_w
    top, left = pad_h // 2, pad_w // 2
    canvas = np.zeros([size_infer, size_infer, 3], dtype=np.float32)
    canvas[top:top + scaled_h, left:left + scaled_w] = cv2.resize(
        normalized, (scaled_w, scaled_h)
    )
    # HWC -> CHW, then prepend the batch axis expected by the model.
    batch = np.transpose(canvas, (2, 0, 1))[np.newaxis, :]
    raw = session_infer.run(None, {"img": batch})[0][0]
    # CHW -> HWC, drop the padding, and restore the original resolution.
    mask = np.transpose(raw, (1, 2, 0))[top:top + scaled_h, left:left + scaled_w]
    return cv2.resize(mask, (width, height))[:, :, np.newaxis]
def save_image(
    *,
    img,
    output_dir: Path,
    path_original: Path,
    out_format,
):
    """Write ``img`` into ``output_dir`` as a PNG named after the original.

    Existing files are never overwritten: a numeric suffix is appended
    (``name.0.png``, ``name.1.png``, ...) until an unused name is found.

    Raises:
        FileExistsError: when the output directory is the directory of the
            input image (would risk clobbering sources).
    """
    if path_original.parent == output_dir:
        raise FileExistsError(
            f"Output directory should not be the same directory of the input image: {output_dir}"
        )
    output_dir.mkdir(
        exist_ok=True,
        parents=True,
    )
    candidate: Path = output_dir / f"{path_original.stem}.png"
    suffix_index: int = 0
    while candidate.exists():
        candidate = output_dir / f"{path_original.stem}.{suffix_index}.png"
        suffix_index += 1
    cv2.imwrite(str(candidate), cv2.cvtColor(img, out_format))
def operation(
    *,
    model_repo_id: str,
    model_filename: str,
    targets: List[str],
    output_matted: Optional[Path],
    output_dir: Optional[Path],
    alpha_min: float,
    alpha_max: float,
    force_cpu: bool,
) -> None:
    """Download the ONNX matting model, run it on every target image, and
    write the composited RGBA image and/or the matte.

    Args:
        model_repo_id: Hugging Face Hub repository holding the model.
        model_filename: name of the ONNX file inside that repository.
        targets: paths of the images to process.
        output_matted: directory for the matte images, or None to skip.
        output_dir: directory for the composited images, or None to skip.
        alpha_min: mask values below this become fully transparent.
        alpha_max: mask values above this become fully opaque.
        force_cpu: skip CUDA even when onnxruntime reports it available.

    Raises:
        ValueError: when neither output directory is given.
    """
    if output_matted is None and output_dir is None:
        raise ValueError("No output directory names are given")
    # Resolve (and locally cache) the model file from the Hugging Face Hub.
    session_infer_path = hf_hub_download(
        repo_id=model_repo_id,
        filename=model_filename,
    )
    # Prefer CUDA when available unless the caller forces CPU.
    providers: List[str] = ["CPUExecutionProvider"]
    if not force_cpu and "CUDAExecutionProvider" in rt.get_available_providers():
        providers = ["CUDAExecutionProvider"]
    session_infer = rt.InferenceSession(
        session_infer_path,
        providers=providers,
    )
    for path in targets:
        # cv2 loads BGR; convert to RGB before inference.
        img = cv2.cvtColor(cv2.imread(path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
        mask = get_mask(session_infer, img)
        # Clamp near-transparent / near-opaque alpha values to hard 0 / 1.
        mask[mask < alpha_min] = 0.0
        mask[mask > alpha_max] = 1.0
        # Composite over white, then reuse `mask` (rescaled to 0..255) as the
        # alpha channel.  Statement order matters: `mask` is mutated in place.
        img_after = (mask * img + SCALE * (1 - mask)).astype(np.uint8)
        mask = (mask * SCALE).astype(np.uint8)
        img_after = np.concatenate([img_after, mask], axis=2, dtype=np.uint8)
        # Expand the single-channel matte to 3 channels for saving.
        mask = mask.repeat(3, axis=2)
        if output_dir:
            save_image(
                img=img_after,
                output_dir=output_dir,
                path_original=Path(path),
                out_format=cv2.COLOR_BGRA2RGBA,
            )
        if output_matted:
            save_image(
                img=mask,
                output_dir=output_matted,
                path_original=Path(path),
                out_format=cv2.COLOR_BGR2RGB,
            )
def get_opts():
    """Build the CLI parser and return ``(known_opts, leftover_args)``.

    Leftover (positional) arguments are the image paths to process; they are
    returned by ``parse_known_args`` rather than declared explicitly.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-repo-id", default="skytnt/anime-seg")
    parser.add_argument("--model-filename", default="isnetis.onnx")
    parser.add_argument("-o", "--output", type=Path)
    parser.add_argument("--matted", type=Path)
    parser.add_argument("--alpha-min", type=float, default=0.0)
    parser.add_argument("--alpha-max", type=float, default=1.0)
    parser.add_argument("--cpu", action="store_true", help="Force to use CPU")
    return parser.parse_known_args()
def main() -> None:
    """CLI entry point: parse arguments and hand them to operation()."""
    options, leftover_targets = get_opts()
    operation(
        model_repo_id=options.model_repo_id,
        model_filename=options.model_filename,
        targets=leftover_targets,
        output_dir=options.output,
        output_matted=options.matted,
        alpha_min=options.alpha_min,
        alpha_max=options.alpha_max,
        force_cpu=options.cpu,
    )
if __name__ == "__main__":
main() | /rm_anime_bg-0.2.0-py3-none-any.whl/rm_anime_bg/cli.py | 0.739893 | 0.318989 | cli.py | pypi |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.
    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)
    def calculate_mean(self):
        """Function to calculate the mean of the data set.
        Updates self.mean as a side effect.
        Args:
            None
        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean
    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.
        Also refreshes self.mean (via calculate_mean) as a side effect.
        Args:
            sample (bool): whether the data represents a sample or population
        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n-1 for a sample, n for a population.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev
    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.
        Args:
            None
        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')
    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.
        Args:
            x (float): point for calculating the probability density function
        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
    def plot_histogram_pdf(self, n_spaces = 50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range
        Args:
            n_spaces (int): number of data points
        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        mu = self.mean
        sigma = self.stdev
        min_range = min(self.data)
        max_range = max(self.data)
        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces
        x = []
        y = []
        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))
        # make the plots
        fig, axes = plt.subplots(2,sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # FIX: this line used to read axes[0].set_ylabel('Density'), a
        # copy-paste duplicate that re-labeled the top subplot and left the
        # pdf subplot without a y-axis label.
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y
    def __add__(self, other):
        """Function to add together two Gaussian distributions.
        Means add; variances add, so the stdev is the root of the summed
        squared stdevs (assumes the two distributions are independent).
        Args:
            other (Gaussian): Gaussian instance
        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result
    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance
        Args:
            None
        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
import subprocess
# List of problem letters that fail to render correctly in the current version of simple
PROBLEM_LETTERS = "ěščřžýáíéúů"
# The current version of this module tries to prevent SAS from crashing by
# editing the texts displayed on screen and adding "." after letters that won't render without it.
# Widgets
class Label():
    """
    Text label widget; also the base class for every text based widget.
    When fontSize or justify is given, the matching directive line is
    emitted immediately before this widget's own line.
    """
    def __init__(self, x: int, y: int, w: int, h: int, text="", id="", fontSize=None, justify=None):
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.text = diacriticsRepair(text)
        self.id = id
        self.fontSize = fontSize
        self.justify = justify
    def toStr(self, type: str) -> str:
        """
        Shared serializer for all text based child classes: translates the
        widget into the string form understood by SAS.
        """
        id_part = ":" + self.id if self.id != "" else ""
        text_part = " " + self.text if self.text != "" else ""
        geometry = " ".join(str(value) for value in (self.x, self.y, self.w, self.h))
        rendered = type + id_part + " " + geometry + text_part
        # Paragraphs and textareas are bracketed in the SAS protocol.
        if type in ("paragraph", "textarea"):
            rendered = "[" + rendered + "]"
        # Directives are prepended, so the final order is: justify, fontsize, widget.
        if self.fontSize is not None:
            rendered = str(FontSize(self.fontSize)) + "\n" + rendered
        if self.justify is not None:
            rendered = str(Justify(self.justify)) + "\n" + rendered
        return rendered
    def __str__(self):
        return self.toStr("label")
class Paragraph(Label):
    """
    Paragraph widget; serialization comes from Label, bracketed as a paragraph.
    """
    def __str__(self):
        return self.toStr("paragraph")
class Button(Label):
    """
    Button widget; serialization comes from Label with the "button" tag.
    """
    def __str__(self):
        return self.toStr("button")
class TextInput(Label):
    """
    Single-line text input widget; serialization comes from Label.
    """
    def __str__(self):
        return self.toStr("textinput")
class TextArea(Label):
    """
    Multi-line text area widget; serialization comes from Label, bracketed.
    """
    def __str__(self):
        return self.toStr("textarea")
class Image():
    """
    Widget representing an image loaded from a file path.

    Args mirror the other widgets: position (x, y), size (w, h), plus the
    path to the image source and the widget id.
    """
    def __init__(self, x: int, y: int, w: int, h: int, path: str, id: str):
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        # FIX: removed a dead `i = 0` assignment that was never used.
        self.path = path
        self.id = id
    def __str__(self):
        tempId = self.id
        if tempId != "":
            tempId = ":" + self.id
        toReturn = "image" + tempId + " " + str(self.x) + " " + str(self.y) + " " + str(self.w) + " " + str(self.h) + " " + self.path
        return toReturn
class Range():
    """
    Slider ("range") widget with a minimum, maximum and the value shown
    when first rendered.
    """
    def __init__(self, x: int, y: int, w: int, h: int, min: int, max: int, value: int, id: str):
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.min = min
        self.max = max
        self.value = value
        self.id = id
    def __str__(self):
        prefix = "range" if self.id == "" else "range:" + self.id
        fields = [prefix, self.x, self.y, self.w, self.h, self.min, self.max, self.value]
        return " ".join(str(field) for field in fields)
# Directives
class FontSize():
    """
    Directive: sets the font size for every text based widget that follows,
    until another FontSize() appears.
    """
    def __init__(self, size):
        self.size = size
    def __str__(self):
        return "@fontsize {}".format(self.size)
class Justify():
    """
    Directive: sets the text alignment ("left", "right" or "center") for
    every text based widget that follows, until another Justify() appears.
    """
    def __init__(self, justify):
        self.justify = justify
    def __str__(self):
        return "@justify {}".format(self.justify)
class Timeout():
    """
    Directive: number of seconds SAS waits before exiting.
    """
    def __init__(self, timeout):
        self.timeout = timeout
    def __str__(self):
        return "@timeout {}".format(self.timeout)
class NoClear():
    """
    Directive: tells SAS not to clear the display between renders.
    """
    def __str__(self):
        return "@noclear"
# Helper functions
def diacriticsRepair(text: str, problem_letters=None) -> str:
    """
    Works around SAS rendering issues with certain diacritic letters:
    uppercase problem letters are lowercased, and when the text ends with a
    problem letter a "." is appended so the last letter renders at all.

    Args:
        text: the text to repair.
        problem_letters: string of lowercase letters considered problematic;
            defaults to the module-level PROBLEM_LETTERS (new, backward
            compatible parameter).

    Returns:
        The repaired text.
    """
    if text == "":
        return ""
    letters = PROBLEM_LETTERS if problem_letters is None else problem_letters
    upper_letters = letters.upper()
    chars = list(text)
    for position, letter in enumerate(chars):
        if letter in upper_letters:
            chars[position] = letter.lower()
    repaired = "".join(chars)
    if repaired[-1] in letters:
        repaired = repaired + "."
    return repaired
def parseOutput(output):
    """
    Parses the output of SAS.

    Recognized formats:
        "selected: <id>"        -> returns [id, None]
        "input: <id> : <text>"  -> returns [id, text]
        "range: <id> : <value>" -> returns [id, value]

    A single trailing whitespace character (typically the newline) is
    stripped from the id / value.

    Returns:
        A [id, value] list, or None when the output is not recognized
        (previously this fell through implicitly; now explicit).
    """
    if output[:9] == "selected:":
        button = output[10:]
        # Guard against empty payloads before indexing the last character.
        if button and button[-1].isspace():
            button = button[:-1]
        return [button, None]
    if output[:6] == "input:" or output[:6] == "range:":
        # `value` was previously named `input`, shadowing the builtin.
        name, value = output[7:].split(" : ", 1)
        if value and value[-1].isspace():
            value = value[:-1]
        return [name, value]
    return None
def passToSimple(input, encoding="utf-8", simplePath = "/opt/bin/simple"):
    """
    Renders widgets by piping their textual form to the `simple` binary and
    parses whatever SAS prints back.

    Args:
        input: a ready-made string, a list of widgets, or a single widget.
        encoding: encoding used to decode the subprocess output.
        simplePath: path to the `simple` executable.

    Returns:
        The [id, value] list from parseOutput(), or None if unrecognized.
    """
    if type(input) is str:
        # FIX: this line used to read `toPass == input` (a comparison, not an
        # assignment), leaving toPass unbound and raising NameError whenever a
        # plain string was passed in.
        toPass = input
    elif type(input) is list:
        toPass = "\n".join(map(str, input))
    else:
        toPass = str(input)
    print(toPass)
    # NOTE(review): shell=True with string interpolation means widget text
    # containing quotes or shell metacharacters can break the command line or
    # inject into it -- consider feeding the payload via stdin instead.
    result = subprocess.run(['''echo "''' + toPass + '''" | ''' + simplePath], stdout=subprocess.PIPE, shell=True, text=True, encoding=encoding)
    output = str(result.stdout)
    return parseOutput(output)
# Interface class
class Scene():
    """
    Represents one scene to be displayed: holds the widgets that will be
    passed to `simple` and the last input received back from it.
    """
    def __init__(self, noClear = False, timeOut=None, simplePath = "/opt/bin/simple", encoding="utf-8") -> None:
        self.widgets = []
        self.input = []
        self.simplePath = simplePath
        self.encoding = encoding
        # Directives go first so they apply to the whole scene.
        if noClear:
            self.widgets.append(NoClear())
        if timeOut:
            self.widgets.append(Timeout(timeOut))
    def add(self, toDisplay):
        """
        Appends a single widget, or extends with a list of widgets.
        """
        if type(toDisplay) is list:
            self.widgets = [*self.widgets, *toDisplay]
        else:
            self.widgets.append(toDisplay)
    def display(self):
        """
        Sends all widgets to SAS and stores whatever it returns in self.input.
        """
        self.input = passToSimple(self.widgets, simplePath=self.simplePath, encoding=self.encoding)
    def remove(self, id):
        """
        Removes the first widget whose id matches; silently does nothing
        when no widget matches.
        """
        for index, candidate in enumerate(self.widgets):
            if hasattr(candidate, "id") and candidate.id == id:
                del self.widgets[index]
                break
import boto3
import json
from pkg_resources import resource_filename
def get_region_name(region_code):
    """Translate an AWS region code (e.g. "eu-west-1") into the human
    readable description used by the Pricing API (e.g. "EU (Ireland)")."""
    endpoint_file = resource_filename("botocore", "data/endpoints.json")
    with open(endpoint_file, "r") as endpoints:
        endpoint_data = json.load(endpoints)
    description = endpoint_data["partitions"][0]["regions"][region_code]["description"]
    # The Pricing API spells European regions "EU (...)", not "Europe (...)".
    return description.replace("Europe", "EU")
def get_ec2_instance_hourly_price(
    region_code,
    instance_type,
    operating_system,
    session=None,
    preinstalled_software="NA",
    tenancy="Shared",
    is_byol=False,
):
    """
    Look up the on-demand hourly USD price of an EC2 instance type via the
    AWS Pricing API.

    Args:
        region_code: AWS region code, e.g. "us-east-1".
        instance_type: EC2 instance type, e.g. "m5.large".
        operating_system: Pricing API OS name, e.g. "Linux".
        session: optional boto3 Session; falls back to the default boto3
            session when None (FIX: previously `session=None` crashed with
            AttributeError because `session.client` was called directly).
        preinstalled_software: Pricing API "preInstalledSw" value.
        tenancy: "Shared", "Dedicated" or "Host".
        is_byol: whether a bring-your-own-license model applies.

    Returns:
        The hourly price as a float, or None when no matching product exists.
    """
    region_name = get_region_name(region_code)
    license_model = "Bring your own license" if is_byol else "No License required"
    capacity_status = "AllocatedHost" if tenancy == "Host" else "Used"
    filters = [
        {"Type": "TERM_MATCH", "Field": "termType", "Value": "OnDemand"},
        {"Type": "TERM_MATCH", "Field": "capacitystatus", "Value": capacity_status},
        {"Type": "TERM_MATCH", "Field": "location", "Value": region_name},
        {"Type": "TERM_MATCH", "Field": "instanceType", "Value": instance_type},
        {"Type": "TERM_MATCH", "Field": "tenancy", "Value": tenancy},
        {"Type": "TERM_MATCH", "Field": "operatingSystem", "Value": operating_system},
        {"Type": "TERM_MATCH", "Field": "preInstalledSw", "Value": preinstalled_software},
        {"Type": "TERM_MATCH", "Field": "licenseModel", "Value": license_model},
    ]
    # The Pricing API only lives in a few regions; us-east-1 always works.
    client_source = session if session is not None else boto3
    pricing_client = client_source.client("pricing", region_name="us-east-1")
    response = pricing_client.get_products(ServiceCode="AmazonEC2", Filters=filters)
    # NOTE(review): only the first page of results is inspected; presumably the
    # filters narrow this to a single product -- confirm if pagination matters.
    for price_entry in response["PriceList"]:
        product = json.loads(price_entry)
        for on_demand in product["terms"]["OnDemand"].values():
            for price_dimensions in on_demand["priceDimensions"].values():
                return float(price_dimensions["pricePerUnit"]["USD"])
    return None
def get_price_for_instance_with_seconds(duration=None, region=None, instance_type=None, session=None):
    """Return the on-demand Linux cost in USD for running an instance.

    Args:
        duration: run time in seconds.
        region: AWS region code, e.g. ``"eu-west-1"``.
        instance_type: EC2 instance type, e.g. ``"m5.xlarge"``.
        session: optional ``boto3.Session`` forwarded to the pricing lookup.

    Returns:
        The cost rounded to 2 decimal places, or ``None`` if no price
        could be found for the given region/instance type.
    """
    hour_price = get_ec2_instance_hourly_price(
        region_code=region,
        session=session,
        instance_type=instance_type,
        operating_system="Linux",
    )
    # The pricing lookup returns None when no product matches; propagate
    # that instead of raising TypeError on None * duration.
    if hour_price is None:
        return None
    return round(hour_price * duration / 3600, 2)
port_service_map = {1: 'tcpmux', 2: 'compressnet', 3: 'compressnet', 5: 'rje', 7: 'echo', 9: 'discard', 11: 'systat',
13: 'daytime', 17: 'qotd', 18: 'msp', 19: 'chargen', 20: 'ftp-data', 21: 'ftp', 22: 'ssh',
23: 'telnet', 25: 'smtp', 27: 'nsw-fe', 29: 'msg-icp', 31: 'msg-auth', 33: 'dsp', 37: 'time',
38: 'rap', 39: 'rlp', 41: 'graphics', 42: 'name', 43: 'nicname', 44: 'mpm-flags', 45: 'mpm',
46: 'mpm-snd', 47: 'ni-ftp', 48: 'auditd', 49: 'tacacs', 50: 're-mail-ck', 52: 'xns-time',
53: 'dns', 54: 'xns-ch', 55: 'isi-gl', 56: 'xns-auth', 58: 'xns-mail', 61: 'ni-mail', 62: 'acas',
63: 'whoispp', 64: 'covia', 65: 'tacacs-ds', 66: 'sql-net', 67: 'dhcp/bootps', 68: 'dhcp/bootpc',
69: 'tftp', 70: 'gopher', 71: 'netrjs-1', 72: 'netrjs-2', 73: 'netrjs-3', 74: 'netrjs-4',
76: 'deos', 78: 'vettcp', 79: 'finger', 80: 'http', 82: 'xfer', 83: 'mit-ml-dev', 84: 'ctf',
85: 'mit-ml-dev', 86: 'mfcobol', 88: 'kerberos', 89: 'su-mit-tg', 90: 'dnsix', 91: 'mit-dov',
92: 'npp', 93: 'dcp', 94: 'objcall', 95: 'supdup', 96: 'dixie', 97: 'swift-rvf', 98: 'tacnews',
99: 'metagram', 101: 'hostname', 102: 'iso-tsap', 103: 'gppitnp', 104: 'acr-nema', 105: 'cso',
107: 'rtelnet', 108: 'snagas', 109: 'pop2', 110: 'pop3', 111: 'sunrpc', 112: 'mcidas', 113: 'ident',
115: 'sftp', 116: 'ansanotify', 117: 'uucp-path', 118: 'sqlserv', 119: 'nntp', 120: 'cfdptkt',
121: 'erpc', 122: 'smakynet', 123: 'ntp', 124: 'ansatrader', 125: 'locus-map', 126: 'nxedit',
127: 'locus-con', 128: 'gss-xlicen', 129: 'pwdgen', 130: 'cisco-fna', 131: 'cisco-tna',
132: 'cisco-sys', 133: 'statsrv', 134: 'ingres-net', 135: 'epmap', 136: 'profile',
137: 'netbios-ns', 138: 'netbios-dgm', 139: 'netbios-ssn', 140: 'emfis-data', 141: 'emfis-cntl',
142: 'bl-idm', 143: 'imap', 144: 'uma', 145: 'uaac', 146: 'iso-tp0', 147: 'iso-ip', 148: 'jargon',
149: 'aed-512', 150: 'sql-net', 151: 'hems', 152: 'bftp', 153: 'sgmp', 154: 'netsc-prod',
155: 'netsc-dev', 156: 'sqlsrv', 157: 'knet-cmp', 158: 'pcmail-srv', 159: 'nss-routing',
160: 'sgmp-traps', 161: 'snmp', 162: 'snmptrap', 163: 'cmip-man', 164: 'cmip-agent',
165: 'xns-courier', 166: 's-net', 167: 'namp', 168: 'rsvd', 169: 'send', 170: 'print-srv',
171: 'multiplex', 172: 'cl-1', 173: 'xyplex-mux', 174: 'mailq', 175: 'vmnet', 176: 'genrad-mux',
177: 'xdmcp', 178: 'nextstep', 179: 'bgp', 180: 'ris', 181: 'unify', 182: 'audit', 183: 'ocbinder',
184: 'ocserver', 185: 'remote-kis', 186: 'kis', 187: 'aci', 188: 'mumps', 189: 'qft', 190: 'gacp',
191: 'prospero', 192: 'osu-nms', 193: 'srmp', 194: 'irc', 195: 'dn6-nlm-aud', 196: 'dn6-smm-red',
197: 'dls', 198: 'dls-mon', 199: 'smux', 200: 'src', 201: 'at-rtmp', 202: 'at-nbp', 203: 'at-3',
204: 'at-echo', 205: 'at-5', 206: 'at-zis', 207: 'at-7', 208: 'at-8', 209: 'qmtp', 210: 'z39-50',
212: 'anet', 213: 'ipx', 214: 'vmpwscs', 215: 'softpc', 217: 'dbase', 218: 'mpp', 219: 'uarps',
220: 'imap3', 221: 'fln-spx', 222: 'rsh-spx', 223: 'cdc', 224: 'masqdialer', 242: 'direct',
243: 'sur-meas', 244: 'inbusiness', 245: 'link', 246: 'dsp3270', 247: 'subntbcst-tftp',
248: 'bhfhs', 256: 'rap', 257: 'set', 259: 'esro-gen', 260: 'openport', 261: 'nsiiops',
262: 'arcisdms', 263: 'hdap', 264: 'bgmp', 265: 'x-bone-ctl', 266: 'sst', 267: 'td-service',
268: 'td-replica', 269: 'manet', 270: 'gist', 271: 'pt-tls', 280: 'http-mgmt', 281: 'personal-link',
282: 'cableport-ax', 283: 'rescap', 284: 'corerjd', 286: 'fxp', 287: 'k-block',
308: 'novastorbakcup', 309: 'entrusttime', 310: 'bhmds', 311: 'asip-webadmin', 312: 'vslmp',
313: 'magenta-logic', 314: 'opalis-robot', 315: 'dpsi', 316: 'decauth', 317: 'zannet',
318: 'pkix-timestamp', 319: 'ptp-event', 320: 'ptp-general', 321: 'pip', 322: 'rtsps',
323: 'rpki-rtr', 324: 'rpki-rtr-tls', 333: 'texar', 344: 'pdap', 345: 'pawserv', 346: 'zserv',
347: 'fatserv', 348: 'csi-sgwp', 349: 'mftp', 350: 'matip-type-a', 351: 'matip-type-b',
352: 'dtag-ste-sb', 353: 'ndsauth', 354: 'bh611', 355: 'datex-asn', 356: 'cloanto-net-1',
357: 'bhevent', 358: 'shrinkwrap', 359: 'nsrmp', 360: 'scoi2odialog', 361: 'semantix',
362: 'srssend', 363: 'rsvp-tunnel', 364: 'aurora-cmgr', 365: 'dtk', 366: 'odmr',
367: 'mortgageware', 368: 'qbikgdp', 369: 'rpc2portmap', 370: 'codaauth2', 371: 'clearcase',
372: 'ulistproc', 373: 'legent-1', 374: 'legent-2', 375: 'hassle', 376: 'nip', 377: 'tnETOS',
378: 'dsETOS', 379: 'is99c', 380: 'is99s', 381: 'hp-collector', 382: 'hp-managed-node',
383: 'hp-alarm-mgr', 384: 'arns', 385: 'ibm-app', 386: 'asa', 387: 'aurp', 388: 'unidata-ldm',
389: 'ldap', 390: 'uis', 391: 'synotics-relay', 392: 'synotics-broker', 393: 'meta5',
394: 'embl-ndt', 395: 'netcp', 396: 'netware-ip', 397: 'mptn', 398: 'kryptolan', 399: 'iso-tsap-c2',
400: 'osb-sd', 401: 'ups', 402: 'genie', 403: 'decap', 404: 'nced', 405: 'ncld', 406: 'imsp',
407: 'timbuktu', 408: 'prm-sm', 409: 'prm-nm', 410: 'decladebug', 411: 'rmt', 412: 'synoptics-trap',
413: 'smsp', 414: 'infoseek', 415: 'bnet', 416: 'silverplatter', 417: 'onmux', 418: 'hyper-g',
419: 'ariel1', 420: 'smpte', 421: 'ariel2', 422: 'ariel3', 423: 'opc-job-start',
424: 'opc-job-track', 425: 'icad-el', 426: 'smartsdp', 427: 'svrloc', 428: 'ocs-cmu',
429: 'ocs-amu', 430: 'utmpsd', 431: 'utmpcd', 432: 'iasd', 433: 'nnsp', 434: 'mobileip-agent',
435: 'mobilip-mn', 436: 'dna-cml', 437: 'comscm', 438: 'dsfgw', 439: 'dasp', 440: 'sgcp',
441: 'decvms-sysmgt', 442: 'cvc-hostd', 443: 'https', 444: 'snpp', 445: 'microsoft-ds',
446: 'ddm-rdb', 447: 'ddm-dfm', 448: 'ddm-ssl', 449: 'as-servermap', 450: 'tserver',
451: 'sfs-smp-net', 452: 'sfs-config', 453: 'creativeserver', 454: 'contentserver',
455: 'creativepartnr', 456: 'macon-tcp', 457: 'scohelp', 458: 'appleqtc', 459: 'ampr-rcmd',
460: 'skronk', 461: 'datasurfsrv', 462: 'datasurfsrvsec', 463: 'alpes', 464: 'kpasswd', 465: 'urd',
466: 'digital-vrc', 467: 'mylex-mapd', 468: 'photuris', 469: 'rcp', 470: 'scx-proxy', 471: 'mondex',
472: 'ljk-login', 473: 'hybrid-pop', 474: 'tn-tl-w1', 475: 'tcpnethaspsrv', 476: 'tn-tl-fd1',
477: 'ss7ns', 478: 'spsc', 479: 'iafserver', 480: 'iafdbase', 481: 'ph', 482: 'bgs-nsi',
483: 'ulpnet', 484: 'integra-sme', 485: 'powerburst', 486: 'avian', 487: 'saft', 488: 'gss-http',
489: 'nest-protocol', 490: 'micom-pfs', 491: 'go-login', 492: 'ticf-1', 493: 'ticf-2',
494: 'pov-ray', 495: 'intecourier', 496: 'pim-rp-disc', 497: 'retrospect', 498: 'siam',
499: 'iso-ill', 500: 'isakmp', 501: 'stmf', 502: 'mbap', 503: 'intrinsa', 504: 'citadel',
505: 'mailbox-lm', 506: 'ohimsrv', 507: 'crs', 508: 'xvttp', 509: 'snare', 510: 'fcp',
511: 'passgo', 512: 'exec', 513: 'login', 514: 'shell', 515: 'printer', 516: 'videotex',
517: 'talk', 518: 'ntalk', 519: 'utime', 520: 'efs', 521: 'ripng', 522: 'ulp', 523: 'ibm-db2',
524: 'ncp', 525: 'timed', 526: 'tempo', 527: 'stx', 528: 'custix', 529: 'irc-serv', 530: 'courier',
531: 'conference', 532: 'netnews', 533: 'netwall', 534: 'windream', 535: 'iiop', 536: 'opalis-rdv',
537: 'nmsp', 538: 'gdomap', 539: 'apertus-ldp', 540: 'uucp', 541: 'uucp-rlogin', 542: 'commerce',
543: 'klogin', 544: 'kshell', 545: 'appleqtcsrvr', 546: 'dhcpv6-client', 547: 'dhcpv6-server',
548: 'afpovertcp', 549: 'idfp', 550: 'new-rwho', 551: 'cybercash', 552: 'devshr-nts', 553: 'pirp',
554: 'rtsp', 555: 'dsf', 556: 'remotefs', 557: 'openvms-sysipc', 558: 'sdnskmp', 559: 'teedtap',
560: 'rmonitor', 561: 'monitor', 562: 'chshell', 563: 'nntps', 565: 'whoami', 566: 'streettalk',
567: 'banyan-rpc', 568: 'ms-shuttle', 569: 'ms-rome', 570: 'meter', 571: 'meter', 572: 'sonar',
573: 'banyan-vip', 574: 'ftp-agent', 575: 'vemmi', 576: 'ipcd', 577: 'vnas', 578: 'ipdd',
579: 'decbsrv', 580: 'sntp-heartbeat', 581: 'bdp', 582: 'scc-security', 583: 'philips-vc',
584: 'keyserver', 586: 'password-chg', 587: 'submission', 588: 'cal', 589: 'eyelink',
590: 'tns-cml', 591: 'http-alt', 592: 'eudora-set', 593: 'http-rpc-epmap', 594: 'tpip',
595: 'cab-protocol', 596: 'smsd', 597: 'ptcnameservice', 598: 'sco-websrvrmg3', 599: 'acp',
600: 'ipcserver', 601: 'syslog-conn', 602: 'xmlrpc-beep', 603: 'idxp', 604: 'tunnel',
605: 'soap-beep', 606: 'urm', 607: 'nqs', 608: 'sift-uft', 609: 'npmp-trap', 610: 'npmp-local',
611: 'npmp-gui', 612: 'hmmp-ind', 613: 'hmmp-op', 614: 'sshell', 615: 'sco-inetmgr',
616: 'sco-sysmgr', 617: 'sco-dtmgr', 618: 'dei-icda', 619: 'compaq-evm', 620: 'sco-websrvrmgr',
621: 'escp-ip', 622: 'collaborator', 623: 'oob-ws-http', 624: 'cryptoadmin', 625: 'dec-dlm',
626: 'asia', 627: 'passgo-tivoli', 628: 'qmqp', 630: 'rda', 631: 'ipp', 632: 'bmpp',
633: 'servstat', 634: 'ginad', 635: 'rlzdbase', 636: 'ldaps', 637: 'lanserver', 638: 'mcns-sec',
639: 'msdp', 640: 'entrust-sps', 641: 'repcmd', 642: 'esro-emsdp', 643: 'sanity', 644: 'dwr',
645: 'pssc', 646: 'ldp', 647: 'dhcp-failover', 648: 'rrp', 649: 'cadview-3d', 650: 'obex',
651: 'ieee-mms', 652: 'hello-port', 653: 'repscmd', 654: 'aodv', 655: 'tinc', 656: 'spmp',
657: 'rmc', 658: 'tenfold', 660: 'mac-srvr-admin', 661: 'hap', 662: 'pftp', 663: 'purenoise',
664: 'oob-ws-https', 665: 'sun-dr', 666: 'mdqs', 667: 'disclose', 668: 'mecomm', 669: 'meregister',
670: 'vacdsm-sws', 671: 'vacdsm-app', 672: 'vpps-qua', 673: 'cimplex', 674: 'acap', 675: 'dctp',
676: 'vpps-via', 677: 'vpp', 678: 'ggf-ncp', 679: 'mrm', 680: 'entrust-aaas', 681: 'entrust-aams',
682: 'xfr', 683: 'corba-iiop', 684: 'corba-iiop-ssl', 685: 'mdc-portmapper', 686: 'hcp-wismar',
687: 'asipregistry', 688: 'realm-rusd', 689: 'nmap', 690: 'vatp', 691: 'msexch-routing',
692: 'hyperwave-isp', 693: 'connendp', 694: 'ha-cluster', 695: 'ieee-mms-ssl', 696: 'rushd',
697: 'uuidgen', 698: 'olsr', 699: 'accessnetwork', 700: 'epp', 701: 'lmp', 702: 'iris-beep',
704: 'elcsd', 705: 'agentx', 706: 'silc', 707: 'borland-dsj', 709: 'entrust-kmsh',
710: 'entrust-ash', 711: 'cisco-tdp', 712: 'tbrpf', 713: 'iris-xpc', 714: 'iris-xpcs',
715: 'iris-lwz', 716: 'pana', 729: 'netviewdm1', 730: 'netviewdm2', 731: 'netviewdm3', 741: 'netgw',
742: 'netrcs', 744: 'flexlm', 747: 'fujitsu-dev', 748: 'ris-cm', 749: 'kerberos-adm', 750: 'rfile',
751: 'pump', 752: 'qrh', 753: 'rrh', 754: 'tell', 758: 'nlogin', 759: 'con', 760: 'ns', 761: 'rxe',
762: 'quotad', 763: 'cycleserv', 764: 'omserv', 765: 'webster', 767: 'phonebook', 769: 'vid',
770: 'cadlock', 771: 'rtip', 772: 'cycleserv2', 773: 'submit', 774: 'rpasswd', 775: 'entomb',
776: 'wpages', 777: 'multiling-http', 780: 'wpgs', 800: 'mdbs-daemon', 801: 'device', 802: 'mbap-s',
810: 'fcp-udp', 828: 'itm-mcell-s', 829: 'pkix-3-ca-ra', 830: 'netconf-ssh', 831: 'netconf-beep',
832: 'netconfsoaphttp', 833: 'netconfsoapbeep', 847: 'dhcp-failover2', 848: 'gdoi', 853: 'domain-s',
860: 'iscsi', 861: 'owamp-control', 862: 'twamp-control', 873: 'rsync', 886: 'iclcnet-locate',
887: 'iclcnet-svinfo', 888: 'accessbuilder', 900: 'omginitialrefs', 901: 'smpnameres',
902: 'ideafarm-door', 903: 'ideafarm-panic', 910: 'kink', 911: 'xact-backup', 912: 'apex-mesh',
913: 'apex-edge', 989: 'ftps-data', 990: 'ftps', 991: 'nas', 992: 'telnets', 993: 'imaps',
995: 'pop3s', 996: 'vsinet', 997: 'maitrd', 998: 'busboy', 999: 'garcon', 1000: 'cadlock2',
1010: 'surf', 1021: 'exp1', 1022: 'exp2', 1025: 'blackjack', 1026: 'cap', 1029: 'solid-mux',
1033: 'netinfo-local', 1034: 'activesync', 1035: 'mxxrlogin', 1036: 'nsstp', 1037: 'ams',
1038: 'mtqp', 1039: 'sbl', 1040: 'netarx', 1041: 'danf-ak2', 1042: 'afrog', 1043: 'boinc-client',
1044: 'dcutility', 1045: 'fpitp', 1046: 'wfremotertm', 1047: 'neod1', 1048: 'neod2',
1049: 'td-postman', 1050: 'cma', 1051: 'optima-vnet', 1052: 'ddt', 1053: 'remote-as',
1054: 'brvread', 1055: 'ansyslmd', 1056: 'vfo', 1057: 'startron', 1058: 'nim', 1059: 'nimreg',
1060: 'polestar', 1061: 'kiosk', 1062: 'veracity', 1063: 'kyoceranetdev', 1064: 'jstel',
1065: 'syscomlan', 1066: 'fpo-fns', 1067: 'instl-boots', 1068: 'instl-bootc',
1069: 'cognex-insight', 1070: 'gmrupdateserv', 1071: 'bsquare-voip', 1072: 'cardax',
1073: 'bridgecontrol', 1074: 'warmspotMgmt', 1075: 'rdrmshc', 1076: 'dab-sti-c', 1077: 'imgames',
1078: 'avocent-proxy', 1079: 'asprovatalk', 1080: 'socks', 1081: 'pvuniwien', 1082: 'amt-esd-prot',
1083: 'ansoft-lm-1', 1084: 'ansoft-lm-2', 1085: 'webobjects', 1086: 'cplscrambler-lg',
1087: 'cplscrambler-in', 1088: 'cplscrambler-al', 1089: 'ff-annunc', 1090: 'ff-fms', 1091: 'ff-sm',
1092: 'obrpd', 1093: 'proofd', 1094: 'rootd', 1095: 'nicelink', 1096: 'cnrprotocol',
1097: 'sunclustermgr', 1098: 'rmiactivation', 1099: 'rmiregistry', 1100: 'mctp',
1101: 'pt2-discover', 1102: 'adobeserver-1', 1103: 'adobeserver-2', 1104: 'xrl', 1105: 'ftranhc',
1106: 'isoipsigport-1', 1107: 'isoipsigport-2', 1108: 'ratio-adp', 1110: 'webadmstart',
1111: 'lmsocialserver', 1112: 'icp', 1113: 'ltp-deepspace', 1114: 'mini-sql', 1115: 'ardus-trns',
1116: 'ardus-cntl', 1117: 'ardus-mtrns', 1118: 'sacred', 1119: 'bnetgame', 1120: 'bnetfile',
1121: 'rmpp', 1122: 'availant-mgr', 1123: 'murray', 1124: 'hpvmmcontrol', 1125: 'hpvmmagent',
1126: 'hpvmmdata', 1127: 'kwdb-commn', 1128: 'saphostctrl', 1129: 'saphostctrls', 1130: 'casp',
1131: 'caspssl', 1132: 'kvm-via-ip', 1133: 'dfn', 1134: 'aplx', 1135: 'omnivision',
1136: 'hhb-gateway', 1137: 'trim', 1138: 'encrypted-admin', 1139: 'evm', 1140: 'autonoc',
1141: 'mxomss', 1142: 'edtools', 1143: 'imyx', 1144: 'fuscript', 1145: 'x9-icue',
1146: 'audit-transfer', 1147: 'capioverlan', 1148: 'elfiq-repl', 1149: 'bvtsonar', 1150: 'blaze',
1151: 'unizensus', 1152: 'winpoplanmess', 1153: 'c1222-acse', 1154: 'resacommunity', 1155: 'nfa',
1156: 'iascontrol-oms', 1157: 'iascontrol', 1158: 'dbcontrol-oms', 1159: 'oracle-oms', 1160: 'olsv',
1161: 'health-polling', 1162: 'health-trap', 1163: 'sddp', 1164: 'qsm-proxy', 1165: 'qsm-gui',
1166: 'qsm-remote', 1167: 'cisco-ipsla', 1168: 'vchat', 1169: 'tripwire', 1170: 'atc-lm',
1171: 'atc-appserver', 1172: 'dnap', 1173: 'd-cinema-rrp', 1174: 'fnet-remote-ui', 1175: 'dossier',
1176: 'indigo-server', 1177: 'dkmessenger', 1178: 'sgi-storman', 1179: 'b2n', 1180: 'mc-client',
1182: 'accelenet', 1183: 'llsurfup-http', 1184: 'llsurfup-https', 1185: 'catchpole',
1186: 'mysql-cluster', 1187: 'alias', 1188: 'hp-webadmin', 1189: 'unet', 1190: 'commlinx-avl',
1191: 'gpfs', 1192: 'caids-sensor', 1193: 'fiveacross', 1194: 'openvpn', 1195: 'rsf-1',
1196: 'netmagic', 1197: 'carrius-rshell', 1198: 'cajo-discovery', 1199: 'dmidi', 1200: 'scol',
1201: 'nucleus-sand', 1202: 'caiccipc', 1203: 'ssslic-mgr', 1204: 'ssslog-mgr', 1205: 'accord-mgc',
1206: 'anthony-data', 1207: 'metasage', 1208: 'seagull-ais', 1209: 'ipcd3', 1210: 'eoss',
1211: 'groove-dpp', 1212: 'lupa', 1213: 'mpc-lifenet', 1214: 'kazaa', 1215: 'scanstat-1',
1216: 'etebac5', 1217: 'hpss-ndapi', 1218: 'aeroflight-ads', 1219: 'aeroflight-ret',
1220: 'qt-serveradmin', 1221: 'sweetware-apps', 1222: 'nerv', 1223: 'tgp', 1224: 'vpnz',
1225: 'slinkysearch', 1226: 'stgxfws', 1227: 'dns2go', 1228: 'florence', 1229: 'zented',
1230: 'periscope', 1231: 'menandmice-lpm', 1232: 'first-defense', 1233: 'univ-appserver',
1234: 'search-agent', 1235: 'mosaicsyssvc1', 1236: 'bvcontrol', 1237: 'tsdos390', 1238: 'hacl-qs',
1239: 'nmsd', 1240: 'instantia', 1241: 'nessus', 1242: 'nmasoverip', 1243: 'serialgateway',
1244: 'isbconference1', 1245: 'isbconference2', 1246: 'payrouter', 1247: 'visionpyramid',
1248: 'hermes', 1249: 'mesavistaco', 1250: 'swldy-sias', 1251: 'servergraph', 1252: 'bspne-pcc',
1253: 'q55-pcc', 1254: 'de-noc', 1255: 'de-cache-query', 1256: 'de-server', 1257: 'shockwave2',
1258: 'opennl', 1259: 'opennl-voice', 1260: 'ibm-ssd', 1261: 'mpshrsv', 1262: 'qnts-orb',
1263: 'dka', 1264: 'prat', 1265: 'dssiapi', 1266: 'dellpwrappks', 1267: 'epc',
1268: 'propel-msgsys', 1269: 'watilapp', 1270: 'opsmgr', 1271: 'excw', 1272: 'cspmlockmgr',
1273: 'emc-gateway', 1274: 't1distproc', 1275: 'ivcollector', 1277: 'miva-mqs',
1278: 'dellwebadmin-1', 1279: 'dellwebadmin-2', 1280: 'pictrography', 1281: 'healthd',
1282: 'emperion', 1283: 'productinfo', 1284: 'iee-qfx', 1285: 'neoiface', 1286: 'netuitive',
1287: 'routematch', 1288: 'navbuddy', 1289: 'jwalkserver', 1290: 'winjaserver', 1291: 'seagulllms',
1292: 'dsdn', 1293: 'pkt-krb-ipsec', 1294: 'cmmdriver', 1295: 'ehtp', 1296: 'dproxy',
1297: 'sdproxy', 1298: 'lpcp', 1299: 'hp-sci', 1300: 'h323hostcallsc', 1301: 'ci3-software-1',
1302: 'ci3-software-2', 1303: 'sftsrv', 1304: 'boomerang', 1305: 'pe-mike', 1306: 're-conn-proto',
1307: 'pacmand', 1308: 'odsi', 1309: 'jtag-server', 1310: 'husky', 1311: 'rxmon',
1312: 'sti-envision', 1313: 'bmc-patroldb', 1314: 'pdps', 1315: 'els', 1316: 'exbit-escp',
1317: 'vrts-ipcserver', 1318: 'krb5gatekeeper', 1319: 'amx-icsp', 1320: 'amx-axbnet', 1321: 'pip',
1322: 'novation', 1323: 'brcd', 1324: 'delta-mcp', 1325: 'dx-instrument', 1326: 'wimsic',
1327: 'ultrex', 1328: 'ewall', 1329: 'netdb-export', 1330: 'streetperfect', 1331: 'intersan',
1332: 'pcia-rxp-b', 1333: 'passwrd-policy', 1334: 'writesrv', 1335: 'digital-notary',
1336: 'ischat', 1337: 'menandmice-dns', 1338: 'wmc-log-svc', 1339: 'kjtsiteserver', 1340: 'naap',
1341: 'qubes', 1342: 'esbroker', 1343: 're101', 1344: 'icap', 1345: 'vpjp', 1346: 'alta-ana-lm',
1347: 'bbn-mmc', 1348: 'bbn-mmx', 1349: 'sbook', 1350: 'editbench', 1351: 'equationbuilder',
1352: 'lotusnote', 1353: 'relief', 1355: 'intuitive-edge', 1356: 'cuillamartin', 1357: 'pegboard',
1358: 'connlcli', 1359: 'ftsrv', 1360: 'mimer', 1361: 'linx', 1362: 'timeflies',
1363: 'ndm-requester', 1364: 'ndm-server', 1365: 'adapt-sna', 1366: 'netware-csp', 1367: 'dcs',
1368: 'screencast', 1369: 'gv-us', 1370: 'us-gv', 1371: 'fc-cli', 1372: 'fc-ser',
1373: 'chromagrafx', 1374: 'molly', 1375: 'bytex', 1376: 'ibm-pps', 1377: 'cichlid', 1378: 'elan',
1379: 'dbreporter', 1380: 'telesis-licman', 1381: 'apple-licman', 1382: 'udt-os', 1383: 'gwha',
1384: 'os-licman', 1385: 'atex-elmd', 1386: 'checksum', 1387: 'cadsi-lm', 1388: 'objective-dbc',
1389: 'iclpv-dm', 1390: 'iclpv-sc', 1391: 'iclpv-sas', 1392: 'iclpv-pm', 1393: 'iclpv-nls',
1394: 'iclpv-nlc', 1395: 'iclpv-wsm', 1396: 'dvl-activemail', 1397: 'audio-activmail',
1398: 'video-activmail', 1399: 'cadkey-licman', 1400: 'cadkey-tablet', 1401: 'goldleaf-licman',
1402: 'prm-sm-np', 1403: 'prm-nm-np', 1404: 'igi-lm', 1405: 'ibm-res', 1406: 'netlabs-lm',
1407: 'tibet-server', 1408: 'sophia-lm', 1409: 'here-lm', 1410: 'hiq', 1411: 'af', 1412: 'innosys',
1413: 'innosys-acl', 1414: 'ibm-mqseries', 1415: 'dbstar', 1416: 'novell-lu6-2',
1417: 'timbuktu-srv1', 1418: 'timbuktu-srv2', 1419: 'timbuktu-srv3', 1420: 'timbuktu-srv4',
1421: 'gandalf-lm', 1422: 'autodesk-lm', 1423: 'essbase', 1424: 'hybrid', 1425: 'zion-lm',
1426: 'sais', 1427: 'mloadd', 1428: 'informatik-lm', 1429: 'nms', 1430: 'tpdu', 1431: 'rgtp',
1432: 'blueberry-lm', 1433: 'ms-sql-s', 1434: 'ms-sql-m', 1435: 'ibm-cics', 1436: 'saism',
1437: 'tabula', 1438: 'eicon-server', 1439: 'eicon-x25', 1440: 'eicon-slp', 1441: 'cadis-1',
1442: 'cadis-2', 1443: 'ies-lm', 1444: 'marcam-lm', 1445: 'proxima-lm', 1446: 'ora-lm',
1447: 'apri-lm', 1448: 'oc-lm', 1449: 'peport', 1450: 'dwf', 1451: 'infoman', 1452: 'gtegsc-lm',
1453: 'genie-lm', 1454: 'interhdl-elmd', 1455: 'esl-lm', 1456: 'dca', 1457: 'valisys-lm',
1458: 'nrcabq-lm', 1459: 'proshare1', 1460: 'proshare2', 1461: 'ibm-wrless-lan', 1462: 'world-lm',
1463: 'nucleus', 1464: 'msl-lmd', 1465: 'pipes', 1466: 'oceansoft-lm', 1467: 'csdmbase',
1468: 'csdm', 1469: 'aal-lm', 1470: 'uaiact', 1471: 'csdmbase', 1472: 'csdm', 1473: 'openmath',
1474: 'telefinder', 1475: 'taligent-lm', 1476: 'clvm-cfg', 1477: 'ms-sna-server',
1478: 'ms-sna-base', 1479: 'dberegister', 1480: 'pacerforum', 1481: 'airs', 1482: 'miteksys-lm',
1483: 'afs', 1484: 'confluent', 1485: 'lansource', 1486: 'nms-topo-serv', 1487: 'localinfosrvr',
1488: 'docstor', 1489: 'dmdocbroker', 1490: 'insitu-conf', 1492: 'stone-design-1',
1493: 'netmap-lm', 1494: 'ica', 1495: 'cvc', 1496: 'liberty-lm', 1497: 'rfx-lm',
1498: 'sybase-sqlany', 1499: 'fhc', 1500: 'vlsi-lm', 1501: 'saiscm', 1502: 'shivadiscovery',
1503: 'imtc-mcs', 1504: 'evb-elm', 1505: 'funkproxy', 1506: 'utcd', 1507: 'symplex',
1508: 'diagmond', 1509: 'robcad-lm', 1510: 'mvx-lm', 1512: 'wins', 1513: 'fujitsu-dtc',
1514: 'fujitsu-dtcns', 1515: 'ifor-protocol', 1516: 'vpad', 1517: 'vpac', 1518: 'vpvd',
1519: 'vpvc', 1520: 'atm-zip-office', 1521: 'ncube-lm', 1522: 'ricardo-lm', 1523: 'cichild-lm',
1524: 'ingreslock', 1525: 'orasrv', 1526: 'pdap-np', 1527: 'tlisrv', 1529: 'coauthor',
1530: 'rap-service', 1531: 'rap-listen', 1532: 'miroconnect', 1533: 'virtual-places',
1534: 'micromuse-lm', 1535: 'ampr-info', 1536: 'ampr-inter', 1537: 'sdsc-lm',
1539: 'intellistor-lm', 1540: 'rds', 1541: 'rds2', 1542: 'gridgen-elmd', 1543: 'simba-cs',
1544: 'aspeclmd', 1545: 'vistium-share', 1546: 'abbaccuray', 1547: 'laplink', 1548: 'axon-lm',
1549: 'shivahose', 1551: 'hecmtl-db', 1552: 'pciarray', 1553: 'sna-cs', 1554: 'caci-lm',
1555: 'livelan', 1556: 'veritas-pbx', 1557: 'arbortext-lm', 1558: 'xingmpeg', 1559: 'web2host',
1560: 'asci-val', 1561: 'facilityview', 1562: 'pconnectmgr', 1563: 'cadabra-lm',
1564: 'pay-per-view', 1565: 'winddlb', 1566: 'corelvideo', 1567: 'jlicelmd', 1568: 'tsspmap',
1569: 'ets', 1570: 'orbixd', 1571: 'rdb-dbs-disp', 1572: 'chip-lm', 1573: 'itscomm-ns',
1574: 'mvel-lm', 1575: 'oraclenames', 1576: 'moldflow-lm', 1577: 'hypercube-lm', 1578: 'jacobus-lm',
1579: 'ioc-sea-lm', 1580: 'tn-tl-r1', 1581: 'mil-2045-47001', 1582: 'msims', 1583: 'simbaexpress',
1584: 'tn-tl-fd2', 1585: 'intv', 1586: 'ibm-abtact', 1587: 'pra-elmd', 1588: 'triquest-lm',
1589: 'vqp', 1590: 'gemini-lm', 1591: 'ncpm-pm', 1592: 'commonspace', 1593: 'mainsoft-lm',
1594: 'sixtrak', 1595: 'radio', 1596: 'radio-sm', 1597: 'orbplus-iiop', 1598: 'picknfs',
1599: 'simbaservices', 1600: 'issd', 1601: 'aas', 1602: 'inspect', 1603: 'picodbc',
1604: 'icabrowser', 1605: 'slp', 1606: 'slm-api', 1607: 'stt', 1608: 'smart-lm', 1609: 'isysg-lm',
1610: 'taurus-wh', 1611: 'ill', 1612: 'netbill-trans', 1613: 'netbill-keyrep', 1614: 'netbill-cred',
1615: 'netbill-auth', 1616: 'netbill-prod', 1617: 'nimrod-agent', 1618: 'skytelnet',
1619: 'xs-openstorage', 1620: 'faxportwinport', 1621: 'softdataphone', 1622: 'ontime',
1623: 'jaleosnd', 1624: 'udp-sr-port', 1625: 'svs-omagent', 1626: 'shockwave', 1627: 't128-gateway',
1628: 'lontalk-norm', 1629: 'lontalk-urgnt', 1630: 'oraclenet8cman', 1631: 'visitview',
1632: 'pammratc', 1633: 'pammrpc', 1634: 'loaprobe', 1635: 'edb-server1', 1636: 'isdc',
1637: 'islc', 1638: 'ismc', 1639: 'cert-initiator', 1640: 'cert-responder', 1641: 'invision',
1642: 'isis-am', 1643: 'isis-ambc', 1644: 'saiseh', 1645: 'sightline', 1646: 'sa-msg-port',
1647: 'rsap', 1648: 'concurrent-lm', 1649: 'kermit', 1650: 'nkd', 1651: 'shiva-confsrvr',
1652: 'xnmp', 1653: 'alphatech-lm', 1654: 'stargatealerts', 1655: 'dec-mbadmin',
1656: 'dec-mbadmin-h', 1657: 'fujitsu-mmpdc', 1658: 'sixnetudr', 1659: 'sg-lm',
1660: 'skip-mc-gikreq', 1661: 'netview-aix-1', 1662: 'netview-aix-2', 1663: 'netview-aix-3',
1664: 'netview-aix-4', 1665: 'netview-aix-5', 1666: 'netview-aix-6', 1667: 'netview-aix-7',
1668: 'netview-aix-8', 1669: 'netview-aix-9', 1670: 'netview-aix-10', 1671: 'netview-aix-11',
1672: 'netview-aix-12', 1673: 'proshare-mc-1', 1674: 'proshare-mc-2', 1675: 'pdp', 1676: 'netcomm1',
1677: 'groupwise', 1678: 'prolink', 1679: 'darcorp-lm', 1680: 'microcom-sbp', 1681: 'sd-elmd',
1682: 'lanyon-lantern', 1683: 'ncpm-hip', 1684: 'snaresecure', 1685: 'n2nremote', 1686: 'cvmon',
1687: 'nsjtp-ctrl', 1688: 'nsjtp-data', 1689: 'firefox', 1690: 'ng-umds', 1691: 'empire-empuma',
1692: 'sstsys-lm', 1693: 'rrirtr', 1694: 'rrimwm', 1695: 'rrilwm', 1696: 'rrifmm', 1697: 'rrisat',
1698: 'rsvp-encap-1', 1699: 'rsvp-encap-2', 1700: 'mps-raft', 1701: 'l2f', 1702: 'deskshare',
1703: 'hb-engine', 1704: 'bcs-broker', 1705: 'slingshot', 1706: 'jetform', 1707: 'vdmplay',
1708: 'gat-lmd', 1709: 'centra', 1710: 'impera', 1711: 'pptconference', 1712: 'registrar',
1713: 'conferencetalk', 1714: 'sesi-lm', 1715: 'houdini-lm', 1716: 'xmsg', 1717: 'fj-hdnet',
1718: 'h323gatedisc', 1719: 'h323gatestat', 1720: 'h323hostcall', 1721: 'caicci', 1722: 'hks-lm',
1723: 'pptp', 1724: 'csbphonemaster', 1725: 'iden-ralp', 1726: 'iberiagames', 1727: 'winddx',
1728: 'telindus', 1729: 'citynl', 1730: 'roketz', 1731: 'msiccp', 1732: 'proxim', 1733: 'siipat',
1734: 'cambertx-lm', 1735: 'privatechat', 1736: 'street-stream', 1737: 'ultimad', 1738: 'gamegen1',
1739: 'webaccess', 1740: 'encore', 1741: 'cisco-net-mgmt', 1743: 'cinegrfx-lm', 1744: 'ncpm-ft',
1745: 'remote-winsock', 1746: 'ftrapid-1', 1747: 'ftrapid-2', 1748: 'oracle-em1',
1749: 'aspen-services', 1750: 'sslp', 1751: 'swiftnet', 1752: 'lofr-lm', 1753: 'predatar-comms',
1754: 'oracle-em2', 1755: 'ms-streaming', 1756: 'capfast-lmd', 1757: 'cnhrp', 1758: 'tftp-mcast',
1759: 'spss-lm', 1760: 'www-ldap-gw', 1761: 'cft-0', 1762: 'cft-1', 1763: 'cft-2', 1764: 'cft-3',
1765: 'cft-4', 1766: 'cft-5', 1767: 'cft-6', 1768: 'cft-7', 1769: 'bmc-net-adm',
1770: 'bmc-net-svc', 1771: 'vaultbase', 1772: 'essweb-gw', 1773: 'kmscontrol',
1774: 'global-dtserv', 1775: 'vdab', 1776: 'femis', 1777: 'powerguardian', 1778: 'prodigy-intrnet',
1779: 'pharmasoft', 1780: 'dpkeyserv', 1781: 'answersoft-lm', 1782: 'hp-hcip', 1784: 'finle-lm',
1785: 'windlm', 1786: 'funk-logger', 1787: 'funk-license', 1788: 'psmond', 1789: 'hello',
1790: 'nmsp', 1791: 'ea1', 1792: 'ibm-dt-2', 1793: 'rsc-robot', 1794: 'cera-bcm', 1795: 'dpi-proxy',
1796: 'vocaltec-admin', 1797: 'uma', 1798: 'etp', 1799: 'netrisk', 1800: 'ansys-lm', 1801: 'msmq',
1802: 'concomp1', 1803: 'hp-hcip-gwy', 1804: 'enl', 1805: 'enl-name', 1806: 'musiconline',
1807: 'fhsp', 1808: 'oracle-vp2', 1809: 'oracle-vp1', 1810: 'jerand-lm', 1811: 'scientia-sdb',
1812: 'radius', 1813: 'radius-acct', 1814: 'tdp-suite', 1815: 'mmpft', 1816: 'harp',
1817: 'rkb-oscs', 1818: 'etftp', 1819: 'plato-lm', 1820: 'mcagent', 1821: 'donnyworld',
1822: 'es-elmd', 1823: 'unisys-lm', 1824: 'metrics-pas', 1825: 'direcpc-video', 1826: 'ardt',
1827: 'asi', 1828: 'itm-mcell-u', 1829: 'optika-emedia', 1830: 'net8-cman', 1831: 'myrtle',
1832: 'tht-treasure', 1833: 'udpradio', 1834: 'ardusuni', 1835: 'ardusmul', 1836: 'ste-smsc',
1837: 'csoft1', 1838: 'talnet', 1839: 'netopia-vo1', 1840: 'netopia-vo2', 1841: 'netopia-vo3',
1842: 'netopia-vo4', 1843: 'netopia-vo5', 1844: 'direcpc-dll', 1845: 'altalink',
1846: 'tunstall-pnc', 1847: 'slp-notify', 1848: 'fjdocdist', 1849: 'alpha-sms', 1850: 'gsi',
1851: 'ctcd', 1852: 'virtual-time', 1853: 'vids-avtp', 1854: 'buddy-draw', 1855: 'fiorano-rtrsvc',
1856: 'fiorano-msgsvc', 1857: 'datacaptor', 1858: 'privateark', 1859: 'gammafetchsvr',
1860: 'sunscalar-svc', 1861: 'lecroy-vicp', 1862: 'mysql-cm-agent', 1863: 'msnp',
1864: 'paradym-31port', 1865: 'entp', 1866: 'swrmi', 1867: 'udrive', 1868: 'viziblebrowser',
1869: 'transact', 1870: 'sunscalar-dns', 1871: 'canocentral0', 1872: 'canocentral1',
1873: 'fjmpjps', 1874: 'fjswapsnp', 1875: 'westell-stats', 1876: 'ewcappsrv', 1877: 'hp-webqosdb',
1878: 'drmsmc', 1879: 'nettgain-nms', 1880: 'vsat-control', 1881: 'ibm-mqseries2', 1882: 'ecsqdmn',
1883: 'mqtt', 1884: 'idmaps', 1885: 'vrtstrapserver', 1886: 'leoip', 1887: 'filex-lport',
1888: 'ncconfig', 1889: 'unify-adapter', 1890: 'wilkenlistener', 1891: 'childkey-notif',
1892: 'childkey-ctrl', 1893: 'elad', 1894: 'o2server-port', 1896: 'b-novative-ls',
1897: 'metaagent', 1898: 'cymtec-port', 1899: 'mc2studios', 1900: 'ssdp', 1901: 'fjicl-tep-a',
1902: 'fjicl-tep-b', 1903: 'linkname', 1904: 'fjicl-tep-c', 1905: 'sugp', 1906: 'tpmd',
1907: 'intrastar', 1908: 'dawn', 1909: 'global-wlink', 1910: 'ultrabac', 1911: 'mtp',
1912: 'rhp-iibp', 1913: 'armadp', 1914: 'elm-momentum', 1915: 'facelink', 1916: 'persona',
1917: 'noagent', 1918: 'can-nds', 1919: 'can-dch', 1920: 'can-ferret', 1921: 'noadmin',
1922: 'tapestry', 1923: 'spice', 1924: 'xiip', 1925: 'discovery-port', 1926: 'egs',
1927: 'videte-cipc', 1928: 'emsd-port', 1929: 'bandwiz-system', 1930: 'driveappserver',
1931: 'amdsched', 1932: 'ctt-broker', 1933: 'xmapi', 1934: 'xaapi', 1935: 'macromedia-fcs',
1936: 'jetcmeserver', 1937: 'jwserver', 1938: 'jwclient', 1939: 'jvserver', 1940: 'jvclient',
1941: 'dic-aida', 1942: 'res', 1943: 'beeyond-media', 1944: 'close-combat', 1945: 'dialogic-elmd',
1946: 'tekpls', 1947: 'sentinelsrm', 1948: 'eye2eye', 1949: 'ismaeasdaqlive',
1950: 'ismaeasdaqtest', 1951: 'bcs-lmserver', 1952: 'mpnjsc', 1953: 'rapidbase', 1954: 'abr-api',
1955: 'abr-secure', 1956: 'vrtl-vmf-ds', 1957: 'unix-status', 1958: 'dxadmind', 1959: 'simp-all',
1960: 'nasmanager', 1961: 'bts-appserver', 1962: 'biap-mp', 1963: 'webmachine',
1964: 'solid-e-engine', 1965: 'tivoli-npm', 1966: 'slush', 1967: 'sns-quote', 1968: 'lipsinc',
1969: 'lipsinc1', 1970: 'netop-rc', 1971: 'netop-school', 1972: 'intersys-cache', 1973: 'dlsrap',
1974: 'drp', 1975: 'tcoflashagent', 1976: 'tcoregagent', 1977: 'tcoaddressbook', 1978: 'unisql',
1979: 'unisql-java', 1980: 'pearldoc-xact', 1981: 'p2pq', 1982: 'estamp', 1983: 'lhtp', 1984: 'bb',
1985: 'hsrp', 1986: 'licensedaemon', 1987: 'tr-rsrb-p1', 1988: 'tr-rsrb-p2', 1989: 'tr-rsrb-p3',
1990: 'stun-p1', 1991: 'stun-p2', 1992: 'stun-p3', 1993: 'snmp-tcp-port', 1994: 'stun-port',
1995: 'perf-port', 1996: 'tr-rsrb-port', 1997: 'gdp-port', 1998: 'x25-svc-port',
1999: 'tcp-id-port', 2000: 'cisco-sccp', 2001: 'dc', 2002: 'globe', 2003: 'brutus', 2004: 'mailbox',
2005: 'berknet', 2006: 'invokator', 2007: 'dectalk', 2008: 'conf', 2009: 'news', 2010: 'search',
2011: 'raid-cc', 2012: 'ttyinfo', 2013: 'raid-am', 2014: 'troff', 2015: 'cypress',
2016: 'bootserver', 2017: 'cypress-stat', 2018: 'terminaldb', 2019: 'whosockami',
2020: 'xinupageserver', 2021: 'servexec', 2022: 'down', 2023: 'xinuexpansion3',
2024: 'xinuexpansion4', 2025: 'ellpack', 2026: 'scrabble', 2027: 'shadowserver',
2028: 'submitserver', 2029: 'hsrpv6', 2030: 'device2', 2031: 'mobrien-chat', 2032: 'blackboard',
2033: 'glogger', 2034: 'scoremgr', 2035: 'imsldoc', 2036: 'e-dpnet', 2037: 'applus',
2038: 'objectmanager', 2039: 'prizma', 2040: 'lam', 2041: 'interbase', 2042: 'isis',
2043: 'isis-bcast', 2044: 'rimsl', 2045: 'cdfunc', 2046: 'sdfunc', 2047: 'dls', 2048: 'dls-monitor',
2049: 'shilp', 2050: 'av-emb-config', 2051: 'epnsdp', 2052: 'clearvisn', 2053: 'lot105-ds-upd',
2054: 'weblogin', 2055: 'iop', 2056: 'omnisky', 2057: 'rich-cp', 2058: 'newwavesearch',
2059: 'bmc-messaging', 2060: 'teleniumdaemon', 2061: 'netmount', 2062: 'icg-swp',
2063: 'icg-bridge', 2064: 'icg-iprelay', 2065: 'dlsrpn', 2066: 'aura', 2067: 'dlswpn',
2068: 'avauthsrvprtcl', 2069: 'event-port', 2070: 'ah-esp-encap', 2071: 'acp-port', 2072: 'msync',
2073: 'gxs-data-port', 2074: 'vrtl-vmf-sa', 2075: 'newlixengine', 2076: 'newlixconfig',
2077: 'tsrmagt', 2078: 'tpcsrvr', 2079: 'idware-router', 2080: 'autodesk-nlm',
2081: 'kme-trap-port', 2082: 'infowave', 2083: 'radsec', 2084: 'sunclustergeo', 2085: 'ada-cip',
2086: 'gnunet', 2087: 'eli', 2088: 'ip-blf', 2089: 'sep', 2090: 'lrp', 2091: 'prp',
2092: 'descent3', 2093: 'nbx-cc', 2094: 'nbx-au', 2095: 'nbx-ser', 2096: 'nbx-dir',
2097: 'jetformpreview', 2098: 'dialog-port', 2099: 'h2250-annex-g', 2100: 'amiganetfs',
2101: 'rtcm-sc104', 2102: 'zephyr-srv', 2103: 'zephyr-clt', 2104: 'zephyr-hm', 2105: 'minipay',
2106: 'mzap', 2107: 'bintec-admin', 2108: 'comcam', 2109: 'ergolight', 2110: 'umsp', 2111: 'dsatp',
2112: 'idonix-metanet', 2113: 'hsl-storm', 2114: 'newheights', 2115: 'kdm', 2116: 'ccowcmr',
2117: 'mentaclient', 2118: 'mentaserver', 2119: 'gsigatekeeper', 2120: 'qencp',
2121: 'scientia-ssdb', 2122: 'caupc-remote', 2123: 'gtp-control', 2124: 'elatelink',
2125: 'lockstep', 2126: 'pktcable-cops', 2127: 'index-pc-wb', 2128: 'net-steward', 2129: 'cs-live',
2130: 'xds', 2131: 'avantageb2b', 2132: 'solera-epmap', 2133: 'zymed-zpp', 2134: 'avenue',
2135: 'gris', 2136: 'appworxsrv', 2137: 'connect', 2138: 'unbind-cluster', 2139: 'ias-auth',
2140: 'ias-reg', 2141: 'ias-admind', 2142: 'tdmoip', 2143: 'lv-jc', 2144: 'lv-ffx', 2145: 'lv-pici',
2146: 'lv-not', 2147: 'lv-auth', 2148: 'veritas-ucl', 2149: 'acptsys', 2150: 'dynamic3d',
2151: 'docent', 2152: 'gtp-user', 2153: 'ctlptc', 2154: 'stdptc', 2155: 'brdptc', 2156: 'trp',
2157: 'xnds', 2158: 'touchnetplus', 2159: 'gdbremote', 2160: 'apc-2160', 2161: 'apc-2161',
2162: 'navisphere', 2163: 'navisphere-sec', 2164: 'ddns-v3', 2165: 'x-bone-api', 2166: 'iwserver',
2167: 'raw-serial', 2168: 'easy-soft-mux', 2169: 'brain', 2170: 'eyetv', 2171: 'msfw-storage',
2172: 'msfw-s-storage', 2173: 'msfw-replica', 2174: 'msfw-array', 2175: 'airsync', 2176: 'rapi',
2177: 'qwave', 2178: 'bitspeer', 2179: 'vmrdp', 2180: 'mc-gt-srv', 2181: 'eforward',
2182: 'cgn-stat', 2183: 'cgn-config', 2184: 'nvd', 2185: 'onbase-dds', 2186: 'gtaua', 2187: 'ssmc',
2188: 'radware-rpm', 2189: 'radware-rpm-s', 2190: 'tivoconnect', 2191: 'tvbus', 2192: 'asdis',
2193: 'drwcs', 2197: 'mnp-exchange', 2198: 'onehome-remote', 2199: 'onehome-help', 2200: 'ici',
2201: 'ats', 2202: 'imtc-map', 2203: 'b2-runtime', 2204: 'b2-license', 2205: 'jps', 2206: 'hpocbus',
2207: 'hpssd', 2208: 'hpiod', 2209: 'rimf-ps', 2210: 'noaaport', 2211: 'emwin',
2212: 'leecoposserver', 2213: 'kali', 2214: 'rpi', 2215: 'ipcore', 2216: 'vtu-comms',
2217: 'gotodevice', 2218: 'bounzza', 2219: 'netiq-ncap', 2220: 'netiq', 2221: 'ethernet-ip-s',
2223: 'rockwell-csp2', 2224: 'efi-mg', 2225: 'rcip-itu', 2226: 'di-drm', 2227: 'di-msg',
2228: 'ehome-ms', 2229: 'datalens', 2230: 'queueadm', 2231: 'wimaxasncp', 2232: 'ivs-video',
2233: 'infocrypt', 2234: 'directplay', 2235: 'sercomm-wlink', 2236: 'nani', 2237: 'optech-port1-lm',
2238: 'aviva-sna', 2239: 'imagequery', 2240: 'recipe', 2241: 'ivsd', 2242: 'foliocorp',
2243: 'magicom', 2244: 'nmsserver', 2245: 'hao', 2246: 'pc-mta-addrmap', 2247: 'antidotemgrsvr',
2248: 'ums', 2249: 'rfmp', 2250: 'remote-collab', 2251: 'dif-port', 2252: 'njenet-ssl',
2253: 'dtv-chan-req', 2254: 'seispoc', 2255: 'vrtp', 2256: 'pcc-mfp', 2257: 'simple-tx-rx',
2258: 'rcts', 2260: 'apc-2260', 2261: 'comotionmaster', 2262: 'comotionback', 2263: 'ecwcfg',
2264: 'apx500api-1', 2265: 'apx500api-2', 2266: 'mfserver', 2267: 'ontobroker', 2268: 'amt',
2269: 'mikey', 2270: 'starschool', 2271: 'mmcals', 2272: 'mmcal', 2273: 'mysql-im',
2274: 'pcttunnell', 2275: 'ibridge-data', 2276: 'ibridge-mgmt', 2277: 'bluectrlproxy', 2278: 's3db',
2279: 'xmquery', 2280: 'lnvpoller', 2281: 'lnvconsole', 2282: 'lnvalarm', 2283: 'lnvstatus',
2284: 'lnvmaps', 2285: 'lnvmailmon', 2286: 'nas-metering', 2287: 'dna', 2288: 'netml',
2289: 'dict-lookup', 2290: 'sonus-logging', 2291: 'eapsp', 2292: 'mib-streaming', 2293: 'npdbgmngr',
2294: 'konshus-lm', 2295: 'advant-lm', 2296: 'theta-lm', 2297: 'd2k-datamover1',
2298: 'd2k-datamover2', 2299: 'pc-telecommute', 2300: 'cvmmon', 2301: 'cpq-wbem',
2302: 'binderysupport', 2303: 'proxy-gateway', 2304: 'attachmate-uts', 2305: 'mt-scaleserver',
2306: 'tappi-boxnet', 2307: 'pehelp', 2308: 'sdhelp', 2309: 'sdserver', 2310: 'sdclient',
2311: 'messageservice', 2312: 'wanscaler', 2313: 'iapp', 2314: 'cr-websystems', 2315: 'precise-sft',
2316: 'sent-lm', 2317: 'attachmate-g32', 2318: 'cadencecontrol', 2319: 'infolibria',
2320: 'siebel-ns', 2321: 'rdlap', 2322: 'ofsd', 2324: 'cosmocall', 2325: 'ansysli', 2326: 'idcp',
2327: 'xingcsm', 2328: 'netrix-sftm', 2329: 'nvd', 2330: 'tscchat', 2331: 'agentview',
2332: 'rcc-host', 2333: 'snapp', 2334: 'ace-client', 2335: 'ace-proxy', 2336: 'appleugcontrol',
2337: 'ideesrv', 2338: 'norton-lambert', 2340: 'wrs-registry', 2341: 'xiostatus',
2342: 'manage-exec', 2343: 'nati-logos', 2344: 'fcmsys', 2345: 'dbm', 2346: 'redstorm-join',
2347: 'redstorm-find', 2348: 'redstorm-info', 2349: 'redstorm-diag', 2350: 'psbserver',
2351: 'psrserver', 2352: 'pslserver', 2353: 'pspserver', 2354: 'psprserver', 2355: 'psdbserver',
2356: 'gxtelmd', 2357: 'unihub-server', 2358: 'futrix', 2359: 'flukeserver', 2360: 'nexstorindltd',
2361: 'tl1', 2362: 'digiman', 2363: 'mediacntrlnfsd', 2364: 'oi-2000', 2365: 'dbref',
2366: 'qip-login', 2367: 'service-ctrl', 2368: 'opentable', 2370: 'l3-hbmon', 2371: 'hp-rda',
2372: 'lanmessenger', 2373: 'remographlm', 2374: 'hydra', 2375: 'docker', 2376: 'docker-s',
2379: 'etcd-client', 2380: 'etcd-server', 2381: 'compaq-https', 2382: 'ms-olap3', 2383: 'ms-olap4',
2384: 'sd-request', 2385: 'sd-data', 2386: 'virtualtape', 2387: 'vsamredirector',
2388: 'mynahautostart', 2389: 'ovsessionmgr', 2390: 'rsmtp', 2392: 'tacticalauth', 2393: 'ms-olap1',
2394: 'ms-olap2', 2395: 'lan900-remote', 2396: 'wusage', 2397: 'ncl', 2398: 'orbiter',
2399: 'fmpro-fdal', 2400: 'opequus-server', 2401: 'cvspserver', 2402: 'taskmaster2000',
2403: 'taskmaster2000', 2404: 'iec-104', 2405: 'trc-netpoll', 2406: 'jediserver', 2407: 'orion',
2408: 'railgun-webaccl', 2409: 'sns-protocol', 2410: 'vrts-registry', 2411: 'netwave-ap-mgmt',
2412: 'cdn', 2413: 'orion-rmi-reg', 2414: 'beeyond', 2415: 'codima-rtp', 2416: 'rmtserver',
2417: 'composit-server', 2418: 'cas', 2419: 'attachmate-s2s', 2420: 'dslremote-mgmt',
2421: 'g-talk', 2422: 'crmsbits', 2423: 'rnrp', 2424: 'kofax-svr', 2425: 'fjitsuappmgr',
2426: 'vcmp', 2427: 'mgcp-gateway', 2428: 'ott', 2429: 'ft-role', 2430: 'venus', 2431: 'venus-se',
2432: 'codasrv', 2433: 'codasrv-se', 2434: 'pxc-epmap', 2435: 'optilogic', 2436: 'topx',
2437: 'unicontrol', 2438: 'msp', 2439: 'sybasedbsynch', 2440: 'spearway', 2441: 'pvsw-inet',
2442: 'netangel', 2443: 'powerclientcsf', 2444: 'btpp2sectrans', 2445: 'dtn1', 2446: 'bues-service',
2447: 'ovwdb', 2448: 'hpppssvr', 2449: 'ratl', 2450: 'netadmin', 2451: 'netchat',
2452: 'snifferclient', 2453: 'madge-ltd', 2454: 'indx-dds', 2455: 'wago-io-system',
2456: 'altav-remmgt', 2457: 'rapido-ip', 2458: 'griffin', 2459: 'community', 2460: 'ms-theater',
2461: 'qadmifoper', 2462: 'qadmifevent', 2463: 'lsi-raid-mgmt', 2464: 'direcpc-si', 2465: 'lbm',
2466: 'lbf', 2467: 'high-criteria', 2468: 'qip-msgd', 2469: 'mti-tcs-comm', 2470: 'taskman-port',
2471: 'seaodbc', 2472: 'c3', 2473: 'aker-cdp', 2474: 'vitalanalysis', 2475: 'ace-server',
2476: 'ace-svr-prop', 2477: 'ssm-cvs', 2478: 'ssm-cssps', 2479: 'ssm-els', 2480: 'powerexchange',
2481: 'giop', 2482: 'giop-ssl', 2483: 'ttc', 2484: 'ttc-ssl', 2485: 'netobjects1',
2486: 'netobjects2', 2487: 'pns', 2488: 'moy-corp', 2489: 'tsilb', 2490: 'qip-qdhcp',
2491: 'conclave-cpp', 2492: 'groove', 2493: 'talarian-mqs', 2494: 'bmc-ar', 2495: 'fast-rem-serv',
2496: 'dirgis', 2497: 'quaddb', 2498: 'odn-castraq', 2499: 'unicontrol', 2500: 'rtsserv',
2501: 'rtsclient', 2502: 'kentrox-prot', 2503: 'nms-dpnss', 2504: 'wlbs', 2505: 'ppcontrol',
2506: 'jbroker', 2507: 'spock', 2508: 'jdatastore', 2509: 'fjmpss', 2510: 'fjappmgrbulk',
2511: 'metastorm', 2512: 'citrixima', 2513: 'citrixadmin', 2514: 'facsys-ntp',
2515: 'facsys-router', 2516: 'maincontrol', 2517: 'call-sig-trans', 2518: 'willy',
2519: 'globmsgsvc', 2520: 'pvsw', 2521: 'adaptecmgr', 2522: 'windb', 2523: 'qke-llc-v3',
2524: 'optiwave-lm', 2525: 'ms-v-worlds', 2526: 'ema-sent-lm', 2527: 'iqserver', 2528: 'ncr-ccl',
2529: 'utsftp', 2530: 'vrcommerce', 2531: 'ito-e-gui', 2532: 'ovtopmd', 2533: 'snifferserver',
2534: 'combox-web-acc', 2535: 'madcap', 2536: 'btpp2audctr1', 2537: 'upgrade', 2538: 'vnwk-prapi',
2539: 'vsiadmin', 2540: 'lonworks', 2541: 'lonworks2', 2542: 'udrawgraph', 2543: 'reftek',
2544: 'novell-zen', 2545: 'sis-emt', 2546: 'vytalvaultbrtp', 2547: 'vytalvaultvsmp',
2548: 'vytalvaultpipe', 2549: 'ipass', 2550: 'ads', 2551: 'isg-uda-server', 2552: 'call-logging',
2553: 'efidiningport', 2554: 'vcnet-link-v10', 2555: 'compaq-wcp', 2556: 'nicetec-nmsvc',
2557: 'nicetec-mgmt', 2558: 'pclemultimedia', 2559: 'lstp', 2560: 'labrat', 2561: 'mosaixcc',
2562: 'delibo', 2563: 'cti-redwood', 2564: 'hp-3000-telnet', 2565: 'coord-svr', 2566: 'pcs-pcw',
2567: 'clp', 2568: 'spamtrap', 2569: 'sonuscallsig', 2570: 'hs-port', 2571: 'cecsvc', 2572: 'ibp',
2573: 'trustestablish', 2574: 'blockade-bpsp', 2575: 'hl7', 2576: 'tclprodebugger',
2577: 'scipticslsrvr', 2578: 'rvs-isdn-dcp', 2579: 'mpfoncl', 2580: 'tributary', 2581: 'argis-te',
2582: 'argis-ds', 2583: 'mon', 2584: 'cyaserv', 2585: 'netx-server', 2586: 'netx-agent',
2587: 'masc', 2588: 'privilege', 2589: 'quartus-tcl', 2590: 'idotdist', 2591: 'maytagshuffle',
2592: 'netrek', 2593: 'mns-mail', 2594: 'dts', 2595: 'worldfusion1', 2596: 'worldfusion2',
2597: 'homesteadglory', 2598: 'citriximaclient', 2599: 'snapd', 2600: 'hpstgmgr',
2601: 'discp-client', 2602: 'discp-server', 2603: 'servicemeter', 2604: 'nsc-ccs', 2605: 'nsc-posa',
2606: 'netmon', 2607: 'connection', 2608: 'wag-service', 2609: 'system-monitor', 2610: 'versa-tek',
2611: 'lionhead', 2612: 'qpasa-agent', 2613: 'smntubootstrap', 2614: 'neveroffline',
2615: 'firepower', 2616: 'appswitch-emp', 2617: 'cmadmin', 2618: 'priority-e-com', 2619: 'bruce',
2620: 'lpsrecommender', 2621: 'miles-apart', 2622: 'metricadbc', 2623: 'lmdp', 2624: 'aria',
2625: 'blwnkl-port', 2626: 'gbjd816', 2627: 'moshebeeri', 2628: 'dict', 2629: 'sitaraserver',
2630: 'sitaramgmt', 2631: 'sitaradir', 2632: 'irdg-post', 2633: 'interintelli',
2634: 'pk-electronics', 2635: 'backburner', 2636: 'solve', 2637: 'imdocsvc', 2638: 'sybaseanywhere',
2639: 'aminet', 2640: 'sai-sentlm', 2641: 'hdl-srv', 2642: 'tragic', 2643: 'gte-samp',
2644: 'travsoft-ipx-t', 2645: 'novell-ipx-cmd', 2646: 'and-lm', 2647: 'syncserver',
2648: 'upsnotifyprot', 2649: 'vpsipport', 2650: 'eristwoguns', 2651: 'ebinsite',
2652: 'interpathpanel', 2653: 'sonus', 2654: 'corel-vncadmin', 2655: 'unglue', 2656: 'kana',
2657: 'sns-dispatcher', 2658: 'sns-admin', 2659: 'sns-query', 2660: 'gcmonitor', 2661: 'olhost',
2662: 'bintec-capi', 2663: 'bintec-tapi', 2664: 'patrol-mq-gm', 2665: 'patrol-mq-nm',
2666: 'extensis', 2667: 'alarm-clock-s', 2668: 'alarm-clock-c', 2669: 'toad', 2670: 'tve-announce',
2671: 'newlixreg', 2672: 'nhserver', 2673: 'firstcall42', 2674: 'ewnn', 2675: 'ttc-etap',
2676: 'simslink', 2677: 'gadgetgate1way', 2678: 'gadgetgate2way', 2679: 'syncserverssl',
2680: 'pxc-sapxom', 2681: 'mpnjsomb', 2683: 'ncdloadbalance', 2684: 'mpnjsosv', 2685: 'mpnjsocl',
2686: 'mpnjsomg', 2687: 'pq-lic-mgmt', 2688: 'md-cg-http', 2689: 'fastlynx', 2690: 'hp-nnm-data',
2691: 'itinternet', 2692: 'admins-lms', 2694: 'pwrsevent', 2695: 'vspread', 2696: 'unifyadmin',
2697: 'oce-snmp-trap', 2698: 'mck-ivpip', 2699: 'csoft-plusclnt', 2700: 'tqdata',
2701: 'sms-rcinfo', 2702: 'sms-xfer', 2703: 'sms-chat', 2704: 'sms-remctrl', 2705: 'sds-admin',
2706: 'ncdmirroring', 2707: 'emcsymapiport', 2708: 'banyan-net', 2709: 'supermon',
2710: 'sso-service', 2711: 'sso-control', 2712: 'aocp', 2713: 'raventbs', 2714: 'raventdm',
2715: 'hpstgmgr2', 2716: 'inova-ip-disco', 2717: 'pn-requester', 2718: 'pn-requester2',
2719: 'scan-change', 2720: 'wkars', 2721: 'smart-diagnose', 2722: 'proactivesrvr',
2723: 'watchdog-nt', 2724: 'qotps', 2725: 'msolap-ptp2', 2726: 'tams', 2727: 'mgcp-callagent',
2728: 'sqdr', 2729: 'tcim-control', 2730: 'nec-raidplus', 2731: 'fyre-messanger', 2732: 'g5m',
2733: 'signet-ctf', 2734: 'ccs-software', 2735: 'netiq-mc', 2736: 'radwiz-nms-srv',
2737: 'srp-feedback', 2738: 'ndl-tcp-ois-gw', 2739: 'tn-timing', 2740: 'alarm', 2741: 'tsb',
2742: 'tsb2', 2743: 'murx', 2744: 'honyaku', 2745: 'urbisnet', 2746: 'cpudpencap',
2747: 'fjippol-swrly', 2748: 'fjippol-polsvr', 2749: 'fjippol-cnsl', 2750: 'fjippol-port1',
2751: 'fjippol-port2', 2752: 'rsisysaccess', 2753: 'de-spot', 2754: 'apollo-cc', 2755: 'expresspay',
2756: 'simplement-tie', 2757: 'cnrp', 2758: 'apollo-status', 2759: 'apollo-gms', 2760: 'sabams',
2761: 'dicom-iscl', 2762: 'dicom-tls', 2763: 'desktop-dna', 2764: 'data-insurance',
2765: 'qip-audup', 2766: 'compaq-scp', 2767: 'uadtc', 2768: 'uacs', 2769: 'exce', 2770: 'veronica',
2771: 'vergencecm', 2772: 'auris', 2773: 'rbakcup1', 2774: 'rbakcup2', 2775: 'smpp',
2776: 'ridgeway1', 2777: 'ridgeway2', 2778: 'gwen-sonya', 2779: 'lbc-sync', 2780: 'lbc-control',
2781: 'whosells', 2782: 'everydayrc', 2783: 'aises', 2784: 'www-dev', 2785: 'aic-np',
2786: 'aic-oncrpc', 2787: 'piccolo', 2788: 'fryeserv', 2789: 'media-agent', 2790: 'plgproxy',
2791: 'mtport-regist', 2792: 'f5-globalsite', 2793: 'initlsmsad', 2795: 'livestats',
2796: 'ac-tech', 2797: 'esp-encap', 2798: 'tmesis-upshot', 2799: 'icon-discover', 2800: 'acc-raid',
2801: 'igcp', 2802: 'veritas-tcp1', 2803: 'btprjctrl', 2804: 'dvr-esm', 2805: 'wta-wsp-s',
2806: 'cspuni', 2807: 'cspmulti', 2808: 'j-lan-p', 2809: 'corbaloc', 2810: 'netsteward',
2811: 'gsiftp', 2812: 'atmtcp', 2813: 'llm-pass', 2814: 'llm-csv', 2815: 'lbc-measure',
2816: 'lbc-watchdog', 2817: 'nmsigport', 2818: 'rmlnk', 2819: 'fc-faultnotify', 2820: 'univision',
2821: 'vrts-at-port', 2822: 'ka0wuc', 2823: 'cqg-netlan', 2824: 'cqg-netlan-1',
2826: 'slc-systemlog', 2827: 'slc-ctrlrloops', 2828: 'itm-lm', 2829: 'silkp1', 2830: 'silkp2',
2831: 'silkp3', 2832: 'silkp4', 2833: 'glishd', 2834: 'evtp', 2835: 'evtp-data', 2836: 'catalyst',
2837: 'repliweb', 2838: 'starbot', 2839: 'nmsigport', 2840: 'l3-exprt', 2841: 'l3-ranger',
2842: 'l3-hawk', 2843: 'pdnet', 2844: 'bpcp-poll', 2845: 'bpcp-trap', 2846: 'aimpp-hello',
2847: 'aimpp-port-req', 2848: 'amt-blc-port', 2849: 'fxp', 2850: 'metaconsole', 2851: 'webemshttp',
2852: 'bears-01', 2853: 'ispipes', 2854: 'infomover', 2855: 'msrp', 2856: 'cesdinv',
2857: 'simctlp', 2858: 'ecnp', 2859: 'activememory', 2860: 'dialpad-voice1', 2861: 'dialpad-voice2',
2862: 'ttg-protocol', 2863: 'sonardata', 2864: 'astromed-main', 2865: 'pit-vpn', 2866: 'iwlistener',
2867: 'esps-portal', 2868: 'npep-messaging', 2869: 'icslap', 2870: 'daishi', 2871: 'msi-selectplay',
2872: 'radix', 2874: 'dxmessagebase1', 2875: 'dxmessagebase2', 2876: 'sps-tunnel',
2877: 'bluelance', 2878: 'aap', 2879: 'ucentric-ds', 2880: 'synapse', 2881: 'ndsp', 2882: 'ndtp',
2883: 'ndnp', 2884: 'flashmsg', 2885: 'topflow', 2886: 'responselogic', 2887: 'aironetddp',
2888: 'spcsdlobby', 2889: 'rsom', 2890: 'cspclmulti', 2891: 'cinegrfx-elmd', 2892: 'snifferdata',
2893: 'vseconnector', 2894: 'abacus-remote', 2895: 'natuslink', 2896: 'ecovisiong6-1',
2897: 'citrix-rtmp', 2898: 'appliance-cfg', 2899: 'powergemplus', 2900: 'quicksuite',
2901: 'allstorcns', 2902: 'netaspi', 2903: 'suitcase', 2904: 'm2ua', 2905: 'm3ua', 2906: 'caller9',
2907: 'webmethods-b2b', 2908: 'mao', 2909: 'funk-dialout', 2910: 'tdaccess', 2911: 'blockade',
2912: 'epicon', 2913: 'boosterware', 2914: 'gamelobby', 2915: 'tksocket', 2916: 'elvin-server',
2917: 'elvin-client', 2918: 'kastenchasepad', 2919: 'roboer', 2920: 'roboeda', 2921: 'cesdcdman',
2922: 'cesdcdtrn', 2923: 'wta-wsp-wtp-s', 2924: 'precise-vip', 2926: 'mobile-file-dl',
2927: 'unimobilectrl', 2928: 'redstone-cpss', 2929: 'amx-webadmin', 2930: 'amx-weblinx',
2931: 'circle-x', 2932: 'incp', 2935: 'qtp', 2936: 'otpatch', 2937: 'pnaconsult-lm',
2938: 'sm-pas-1', 2939: 'sm-pas-2', 2940: 'sm-pas-3', 2941: 'sm-pas-4', 2942: 'sm-pas-5',
2943: 'ttnrepository', 2944: 'megaco-h248', 2945: 'h248-binary', 2946: 'fjsvmpor', 2947: 'gpsd',
2948: 'wap-push', 2949: 'wap-pushsecure', 2950: 'esip', 2951: 'ottp', 2952: 'mpfwsas',
2953: 'ovalarmsrv', 2954: 'ovalarmsrv-cmd', 2955: 'csnotify', 2956: 'ovrimosdbman', 2957: 'jmact5',
2958: 'jmact6', 2959: 'rmopagt', 2960: 'dfoxserver', 2961: 'boldsoft-lm', 2962: 'iph-policy-cli',
2963: 'iph-policy-adm', 2964: 'bullant-srap', 2965: 'bullant-rap', 2966: 'idp-infotrieve',
2967: 'ssc-agent', 2968: 'enpp', 2969: 'essp', 2970: 'index-net', 2971: 'netclip',
2972: 'pmsm-webrctl', 2973: 'svnetworks', 2974: 'signal', 2975: 'fjmpcm', 2976: 'cns-srv-port',
2977: 'ttc-etap-ns', 2978: 'ttc-etap-ds', 2979: 'h263-video', 2980: 'wimd', 2981: 'mylxamport',
2982: 'iwb-whiteboard', 2983: 'netplan', 2984: 'hpidsadmin', 2985: 'hpidsagent', 2986: 'stonefalls',
2987: 'identify', 2988: 'hippad', 2989: 'zarkov', 2990: 'boscap', 2991: 'wkstn-mon', 2992: 'avenyo',
2993: 'veritas-vis1', 2994: 'veritas-vis2', 2995: 'idrs', 2996: 'vsixml', 2997: 'rebol',
2998: 'realsecure', 2999: 'remoteware-un', 3000: 'hbci', 3001: 'origo-native', 3002: 'exlm-agent',
3003: 'cgms', 3004: 'csoftragent', 3005: 'geniuslm', 3006: 'ii-admin', 3007: 'lotusmtap',
3008: 'midnight-tech', 3009: 'pxc-ntfy', 3010: 'gw', 3011: 'trusted-web', 3012: 'twsdss',
3013: 'gilatskysurfer', 3014: 'broker-service', 3015: 'nati-dstp', 3016: 'notify-srvr',
3017: 'event-listener', 3018: 'srvc-registry', 3019: 'resource-mgr', 3020: 'cifs',
3021: 'agriserver', 3022: 'csregagent', 3023: 'magicnotes', 3024: 'nds-sso', 3025: 'arepa-raft',
3026: 'agri-gateway', 3030: 'arepa-cas', 3031: 'eppc', 3032: 'redwood-chat', 3033: 'pdb',
3034: 'osmosis-aeea', 3035: 'fjsv-gssagt', 3036: 'hagel-dump', 3037: 'hp-san-mgmt',
3038: 'santak-ups', 3039: 'cogitate', 3040: 'tomato-springs', 3041: 'di-traceware', 3042: 'journee',
3043: 'brp', 3044: 'epp', 3045: 'responsenet', 3046: 'di-ase', 3047: 'hlserver', 3048: 'pctrader',
3049: 'nsws', 3050: 'gds-db', 3051: 'galaxy-server', 3052: 'apc-3052', 3053: 'dsom-server',
3054: 'amt-cnf-prot', 3055: 'policyserver', 3056: 'cdl-server', 3057: 'goahead-fldup',
3058: 'videobeans', 3059: 'qsoft', 3060: 'interserver', 3061: 'cautcpd', 3062: 'ncacn-ip-tcp',
3063: 'ncadg-ip-udp', 3064: 'rprt', 3065: 'slinterbase', 3066: 'netattachsdmp', 3067: 'fjhpjp',
3068: 'ls3bcast', 3069: 'ls3', 3070: 'mgxswitch', 3071: 'csd-mgmt-port', 3072: 'csd-monitor',
3073: 'vcrp', 3074: 'xbox', 3075: 'orbix-locator', 3076: 'orbix-config', 3077: 'orbix-loc-ssl',
3078: 'orbix-cfg-ssl', 3079: 'lv-frontpanel', 3080: 'stm-pproc', 3081: 'tl1-lv', 3082: 'tl1-raw',
3083: 'tl1-telnet', 3084: 'itm-mccs', 3085: 'pcihreq', 3086: 'jdl-dbkitchen', 3087: 'asoki-sma',
3088: 'xdtp', 3089: 'ptk-alink', 3090: 'stss', 3093: 'rapidmq-center', 3094: 'rapidmq-reg',
3095: 'panasas', 3096: 'ndl-aps', 3097: 'itu-bicc-stc', 3098: 'umm-port', 3099: 'chmd',
3100: 'opcon-xps', 3101: 'hp-pxpib', 3102: 'slslavemon', 3103: 'autocuesmi', 3104: 'autocuelog',
3105: 'cardbox', 3106: 'cardbox-http', 3107: 'business', 3108: 'geolocate', 3109: 'personnel',
3110: 'sim-control', 3111: 'wsynch', 3112: 'ksysguard', 3113: 'cs-auth-svr', 3114: 'ccmad',
3115: 'mctet-master', 3116: 'mctet-gateway', 3117: 'mctet-jserv', 3118: 'pkagent',
3119: 'd2000kernel', 3120: 'd2000webserver', 3121: 'pcmk-remote', 3122: 'vtr-emulator',
3123: 'edix', 3124: 'beacon-port', 3125: 'a13-an', 3127: 'ctx-bridge', 3128: 'ndl-aas',
3129: 'netport-id', 3130: 'icpv2', 3131: 'netbookmark', 3132: 'ms-rule-engine',
3133: 'prism-deploy', 3134: 'ecp', 3135: 'peerbook-port', 3136: 'grubd', 3137: 'rtnt-1',
3138: 'rtnt-2', 3139: 'incognitorv', 3140: 'ariliamulti', 3141: 'vmodem', 3142: 'rdc-wh-eos',
3143: 'seaview', 3144: 'tarantella', 3145: 'csi-lfap', 3146: 'bears-02', 3147: 'rfio',
3148: 'nm-game-admin', 3149: 'nm-game-server', 3150: 'nm-asses-admin', 3151: 'nm-assessor',
3152: 'feitianrockey', 3153: 's8-client-port', 3154: 'ccmrmi', 3155: 'jpegmpeg', 3156: 'indura',
3157: 'e3consultants', 3158: 'stvp', 3159: 'navegaweb-port', 3160: 'tip-app-server', 3161: 'doc1lm',
3162: 'sflm', 3163: 'res-sap', 3164: 'imprs', 3165: 'newgenpay', 3166: 'sossecollector',
3167: 'nowcontact', 3168: 'poweronnud', 3169: 'serverview-as', 3170: 'serverview-asn',
3171: 'serverview-gf', 3172: 'serverview-rm', 3173: 'serverview-icc', 3174: 'armi-server',
3175: 't1-e1-over-ip', 3176: 'ars-master', 3177: 'phonex-port', 3178: 'radclientport',
3179: 'h2gf-w-2m', 3180: 'mc-brk-srv', 3181: 'bmcpatrolagent', 3182: 'bmcpatrolrnvu',
3183: 'cops-tls', 3184: 'apogeex-port', 3185: 'smpppd', 3186: 'iiw-port', 3187: 'odi-port',
3188: 'brcm-comm-port', 3189: 'pcle-infex', 3190: 'csvr-proxy', 3191: 'csvr-sslproxy',
3192: 'firemonrcc', 3193: 'spandataport', 3194: 'magbind', 3195: 'ncu-1', 3196: 'ncu-2',
3197: 'embrace-dp-s', 3198: 'embrace-dp-c', 3199: 'dmod-workspace', 3200: 'tick-port',
3201: 'cpq-tasksmart', 3202: 'intraintra', 3203: 'netwatcher-mon', 3204: 'netwatcher-db',
3205: 'isns', 3206: 'ironmail', 3207: 'vx-auth-port', 3208: 'pfu-prcallback',
3209: 'netwkpathengine', 3210: 'flamenco-proxy', 3211: 'avsecuremgmt', 3212: 'surveyinst',
3213: 'neon24x7', 3214: 'jmq-daemon-1', 3215: 'jmq-daemon-2', 3216: 'ferrari-foam', 3217: 'unite',
3218: 'smartpackets', 3219: 'wms-messenger', 3220: 'xnm-ssl', 3221: 'xnm-clear-text', 3222: 'glbp',
3223: 'digivote', 3224: 'aes-discovery', 3225: 'fcip-port', 3226: 'isi-irp', 3227: 'dwnmshttp',
3228: 'dwmsgserver', 3229: 'global-cd-port', 3230: 'sftdst-port', 3231: 'vidigo', 3232: 'mdtp',
3233: 'whisker', 3234: 'alchemy', 3235: 'mdap-port', 3236: 'apparenet-ts', 3237: 'apparenet-tps',
3238: 'apparenet-as', 3239: 'apparenet-ui', 3240: 'triomotion', 3241: 'sysorb', 3242: 'sdp-id-port',
3243: 'timelot', 3244: 'onesaf', 3245: 'vieo-fe', 3246: 'dvt-system', 3247: 'dvt-data',
3248: 'procos-lm', 3249: 'ssp', 3250: 'hicp', 3251: 'sysscanner', 3252: 'dhe', 3253: 'pda-data',
3254: 'pda-sys', 3255: 'semaphore', 3256: 'cpqrpm-agent', 3257: 'cpqrpm-server',
3258: 'ivecon-port', 3259: 'epncdp2', 3260: 'iscsi-target', 3261: 'winshadow', 3262: 'necp',
3263: 'ecolor-imager', 3264: 'ccmail', 3265: 'altav-tunnel', 3266: 'ns-cfg-server',
3267: 'ibm-dial-out', 3268: 'msft-gc', 3269: 'msft-gc-ssl', 3270: 'verismart', 3271: 'csoft-prev',
3272: 'user-manager', 3273: 'sxmp', 3274: 'ordinox-server', 3275: 'samd', 3276: 'maxim-asics',
3277: 'awg-proxy', 3278: 'lkcmserver', 3279: 'admind', 3280: 'vs-server', 3281: 'sysopt',
3282: 'datusorb', 3285: 'plato', 3286: 'e-net', 3287: 'directvdata', 3288: 'cops', 3289: 'enpc',
3290: 'caps-lm', 3291: 'sah-lm', 3292: 'cart-o-rama', 3293: 'fg-fps', 3294: 'fg-gip',
3295: 'dyniplookup', 3296: 'rib-slm', 3297: 'cytel-lm', 3298: 'deskview', 3299: 'pdrncs',
3300: 'ceph', 3302: 'mcs-fastmail', 3303: 'opsession-clnt', 3304: 'opsession-srvr',
3305: 'odette-ftp', 3306: 'mysql', 3307: 'opsession-prxy', 3308: 'tns-server', 3309: 'tns-adv',
3310: 'dyna-access', 3311: 'mcns-tel-ret', 3312: 'appman-server', 3313: 'uorb', 3314: 'uohost',
3315: 'cdid', 3316: 'aicc-cmi', 3317: 'vsaiport', 3318: 'ssrip', 3319: 'sdt-lmd',
3320: 'officelink2000', 3321: 'vnsstr', 3322 - 3325: 'active-net', 3326: 'sftu', 3327: 'bbars',
3328: 'egptlm', 3329: 'hp-device-disc', 3330: 'mcs-calypsoicf', 3331: 'mcs-messaging',
3332: 'mcs-mailsvr', 3333: 'dec-notes', 3334: 'directv-web', 3335: 'directv-soft',
3336: 'directv-tick', 3337: 'directv-catlg', 3338: 'anet-b', 3339: 'anet-l', 3340: 'anet-m',
3341: 'anet-h', 3342: 'webtie', 3343: 'ms-cluster-net', 3344: 'bnt-manager', 3345: 'influence',
3346: 'trnsprntproxy', 3347: 'phoenix-rpc', 3348: 'pangolin-laser', 3349: 'chevinservices',
3350: 'findviatv', 3351: 'btrieve', 3352: 'ssql', 3353: 'fatpipe', 3354: 'suitjd',
3355: 'ordinox-dbase', 3356: 'upnotifyps', 3357: 'adtech-test', 3358: 'mpsysrmsvr',
3359: 'wg-netforce', 3360: 'kv-server', 3361: 'kv-agent', 3362: 'dj-ilm', 3363: 'nati-vi-server',
3364: 'creativeserver', 3365: 'contentserver', 3366: 'creativepartnr',
3367 - 3371: 'satvid-datalnk', 3372: 'tip2', 3373: 'lavenir-lm', 3374: 'cluster-disc',
3375: 'vsnm-agent', 3376: 'cdbroker', 3377: 'cogsys-lm', 3378: 'wsicopy', 3379: 'socorfs',
3380: 'sns-channels', 3381: 'geneous', 3382: 'fujitsu-neat', 3383: 'esp-lm', 3384: 'hp-clic',
3385: 'qnxnetman', 3386: 'gprs-data', 3387: 'backroomnet', 3388: 'cbserver', 3389: 'ms-wbt-server',
3390: 'dsc', 3391: 'savant', 3392: 'efi-lm', 3393: 'd2k-tapestry1', 3394: 'd2k-tapestry2',
3395: 'dyna-lm', 3396: 'printer-agent', 3397: 'cloanto-lm', 3398: 'mercantile', 3399: 'csms',
3400: 'csms2', 3401: 'filecast', 3402: 'fxaengine-net', 3405: 'nokia-ann-ch1',
3406: 'nokia-ann-ch2', 3407: 'ldap-admin', 3409: 'networklens', 3410: 'networklenss',
3411: 'biolink-auth', 3412: 'xmlblaster', 3413: 'svnet', 3414: 'wip-port', 3415: 'bcinameservice',
3416: 'commandport', 3417: 'csvr', 3418: 'rnmap', 3419: 'softaudit', 3420: 'ifcp-port',
3421: 'bmap', 3422: 'rusb-sys-port', 3423: 'xtrm', 3424: 'xtrms', 3425: 'agps-port',
3426: 'arkivio', 3427: 'websphere-snmp', 3428: 'twcss', 3429: 'gcsp', 3430: 'ssdispatch',
3431: 'ndl-als', 3432: 'osdcp', 3433: 'opnet-smp', 3434: 'opencm', 3435: 'pacom', 3436: 'gc-config',
3437: 'autocueds', 3438: 'spiral-admin', 3439: 'hri-port', 3440: 'ans-console',
3441: 'connect-client', 3442: 'connect-server', 3443: 'ov-nnm-websrv', 3444: 'denali-server',
3445: 'monp', 3447: 'directnet', 3448: 'dnc-port', 3449: 'hotu-chat', 3450: 'castorproxy',
3451: 'asam', 3452: 'sabp-signal', 3453: 'pscupd', 3454: 'mira', 3455: 'prsvp', 3456: 'vat',
3457: 'vat-control', 3458: 'd3winosfi', 3459: 'integral', 3460: 'edm-manager', 3461: 'edm-stager',
3462: 'edm-std-notify', 3463: 'edm-adm-notify', 3464: 'edm-mgr-sync', 3465: 'edm-mgr-cntrl',
3466: 'workflow', 3467: 'rcst', 3468: 'ttcmremotectrl', 3469: 'pluribus', 3470: 'jt400',
3471: 'jt400-ssl', 3472: 'jaugsremotec-1', 3473: 'jaugsremotec-2', 3474: 'ttntspauto',
3475: 'genisar-port', 3476: 'nppmp', 3477: 'ecomm', 3478: 'stun', 3479: 'twrpc', 3480: 'plethora',
3481: 'cleanerliverc', 3482: 'vulture', 3483: 'slim-devices', 3484: 'gbs-stp', 3485: 'celatalk',
3486: 'ifsf-hb-port', 3487: 'ltctcp', 3488: 'fs-rh-srv', 3489: 'dtp-dia', 3490: 'colubris',
3491: 'swr-port', 3492: 'tvdumtray-port', 3493: 'nut', 3494: 'ibm3494', 3495: 'seclayer-tcp',
3496: 'seclayer-tls', 3497: 'ipether232port', 3498: 'dashpas-port', 3499: 'sccip-media',
3500: 'rtmp-port', 3501: 'isoft-p2p', 3502: 'avinstalldisc', 3503: 'lsp-ping', 3504: 'ironstorm',
3505: 'ccmcomm', 3506: 'apc-3506', 3507: 'nesh-broker', 3508: 'interactionweb', 3509: 'vt-ssl',
3510: 'xss-port', 3511: 'webmail-2', 3512: 'aztec', 3513: 'arcpd', 3514: 'must-p2p',
3515: 'must-backplane', 3516: 'smartcard-port', 3518: 'artifact-msg', 3519: 'nvmsgd',
3520: 'galileolog', 3521: 'mc3ss', 3522: 'nssocketport', 3523: 'odeumservlink', 3524: 'ecmport',
3525: 'eisport', 3526: 'starquiz-port', 3527: 'beserver-msg-q', 3528: 'jboss-iiop',
3529: 'jboss-iiop-ssl', 3530: 'gf', 3531: 'joltid', 3532: 'raven-rmp', 3533: 'raven-rdp',
3534: 'urld-port', 3535: 'ms-la', 3536: 'snac', 3537: 'ni-visa-remote', 3538: 'ibm-diradm',
3539: 'ibm-diradm-ssl', 3540: 'pnrp-port', 3541: 'voispeed-port', 3542: 'hacl-monitor',
3543: 'qftest-lookup', 3544: 'teredo', 3545: 'camac', 3547: 'symantec-sim', 3548: 'interworld',
3549: 'tellumat-nms', 3550: 'ssmpp', 3551: 'apcupsd', 3552: 'taserver', 3553: 'rbr-discovery',
3554: 'questnotify', 3555: 'razor', 3556: 'sky-transport', 3557: 'personalos-001', 3558: 'mcp-port',
3559: 'cctv-port', 3560: 'iniserve-port', 3561: 'bmc-onekey', 3562: 'sdbproxy', 3563: 'watcomdebug',
3564: 'esimport', 3565: 'm2pa', 3566: 'quest-data-hub', 3567: 'dof-eps', 3568: 'dof-tunnel-sec',
3569: 'mbg-ctrl', 3570: 'mccwebsvr-port', 3571: 'megardsvr-port', 3572: 'megaregsvrport',
3573: 'tag-ups-1', 3574: 'dmaf-server', 3575: 'ccm-port', 3576: 'cmc-port', 3577: 'config-port',
3578: 'data-port', 3579: 'ttat3lb', 3580: 'nati-svrloc', 3581: 'kfxaclicensing', 3582: 'press',
3583: 'canex-watch', 3584: 'u-dbap', 3585: 'emprise-lls', 3586: 'emprise-lsc', 3587: 'p2pgroup',
3588: 'sentinel', 3589: 'isomair', 3590: 'wv-csp-sms', 3591: 'gtrack-server', 3592: 'gtrack-ne',
3593: 'bpmd', 3594: 'mediaspace', 3595: 'shareapp', 3596: 'iw-mmogame', 3597: 'a14', 3598: 'a15',
3599: 'quasar-server', 3600: 'trap-daemon', 3601: 'visinet-gui', 3602: 'infiniswitchcl',
3603: 'int-rcv-cntrl', 3604: 'bmc-jmx-port', 3605: 'comcam-io', 3606: 'splitlock',
3607: 'precise-i3', 3608: 'trendchip-dcp', 3609: 'cpdi-pidas-cm', 3610: 'echonet',
3611: 'six-degrees', 3612: 'hp-dataprotect', 3613: 'alaris-disc', 3614: 'sigma-port',
3615: 'start-network', 3616: 'cd3o-protocol', 3617: 'sharp-server', 3618: 'aairnet-1',
3619: 'aairnet-2', 3620: 'ep-pcp', 3621: 'ep-nsp', 3622: 'ff-lr-port', 3623: 'haipe-discover',
3624: 'dist-upgrade', 3625: 'volley', 3626: 'bvcdaemon-port', 3627: 'jamserverport',
3628: 'ept-machine', 3629: 'escvpnet', 3630: 'cs-remote-db', 3631: 'cs-services', 3632: 'distcc',
3633: 'wacp', 3634: 'hlibmgr', 3635: 'sdo', 3636: 'servistaitsm', 3637: 'scservp',
3638: 'ehp-backup', 3639: 'xap-ha', 3640: 'netplay-port1', 3641: 'netplay-port2',
3642: 'juxml-port', 3643: 'audiojuggler', 3644: 'ssowatch', 3645: 'cyc', 3646: 'xss-srv-port',
3647: 'splitlock-gw', 3648: 'fjcp', 3649: 'nmmp', 3650: 'prismiq-plugin', 3651: 'xrpc-registry',
3652: 'vxcrnbuport', 3653: 'tsp', 3654: 'vaprtm', 3655: 'abatemgr', 3656: 'abatjss',
3657: 'immedianet-bcn', 3658: 'ps-ams', 3659: 'apple-sasl', 3660: 'can-nds-ssl',
3661: 'can-ferret-ssl', 3662: 'pserver', 3663: 'dtp', 3664: 'ups-engine', 3665: 'ent-engine',
3666: 'eserver-pap', 3667: 'infoexch', 3668: 'dell-rm-port', 3669: 'casanswmgmt', 3670: 'smile',
3671: 'efcp', 3672: 'lispworks-orb', 3673: 'mediavault-gui', 3674: 'wininstall-ipc',
3675: 'calltrax', 3676: 'va-pacbase', 3677: 'roverlog', 3678: 'ipr-dglt', 3680: 'npds-tracker',
3681: 'bts-x73', 3682: 'cas-mapi', 3683: 'bmc-ea', 3684: 'faxstfx-port', 3685: 'dsx-agent',
3686: 'tnmpv2', 3687: 'simple-push', 3688: 'simple-push-s', 3689: 'daap', 3690: 'svn',
3691: 'magaya-network', 3692: 'intelsync', 3693: 'easl', 3695: 'bmc-data-coll', 3696: 'telnetcpcd',
3697: 'nw-license', 3698: 'sagectlpanel', 3699: 'kpn-icw', 3700: 'lrs-paging', 3701: 'netcelera',
3702: 'ws-discovery', 3703: 'adobeserver-3', 3704: 'adobeserver-4', 3705: 'adobeserver-5',
3706: 'rt-event', 3707: 'rt-event-s', 3708: 'sun-as-iiops', 3709: 'ca-idms', 3710: 'portgate-auth',
3711: 'edb-server2', 3712: 'sentinel-ent', 3713: 'tftps', 3714: 'delos-dms', 3715: 'anoto-rendezv',
3716: 'wv-csp-sms-cir', 3717: 'wv-csp-udp-cir', 3718: 'opus-services', 3719: 'itelserverport',
3720: 'ufastro-instr', 3721: 'xsync', 3722: 'xserveraid', 3723: 'sychrond', 3724: 'blizwow',
3725: 'na-er-tip', 3726: 'array-manager', 3727: 'e-mdu', 3728: 'e-woa', 3729: 'fksp-audit',
3730: 'client-ctrl', 3731: 'smap', 3732: 'm-wnn', 3733: 'multip-msg', 3734: 'synel-data',
3735: 'pwdis', 3736: 'rs-rmi', 3737: 'xpanel', 3738: 'versatalk', 3739: 'launchbird-lm',
3740: 'heartbeat', 3741: 'wysdma', 3742: 'cst-port', 3743: 'ipcs-command', 3744: 'sasg',
3745: 'gw-call-port', 3746: 'linktest', 3747: 'linktest-s', 3748: 'webdata', 3749: 'cimtrak',
3750: 'cbos-ip-port', 3751: 'gprs-cube', 3752: 'vipremoteagent', 3753: 'nattyserver',
3754: 'timestenbroker', 3755: 'sas-remote-hlp', 3756: 'canon-capt', 3757: 'grf-port',
3758: 'apw-registry', 3759: 'exapt-lmgr', 3760: 'adtempusclient', 3761: 'gsakmp', 3762: 'gbs-smp',
3763: 'xo-wave', 3764: 'mni-prot-rout', 3765: 'rtraceroute', 3766: 'sitewatch-s',
3767: 'listmgr-port', 3768: 'rblcheckd', 3769: 'haipe-otnk', 3770: 'cindycollab',
3771: 'paging-port', 3772: 'ctp', 3773: 'ctdhercules', 3774: 'zicom', 3775: 'ispmmgr',
3776: 'dvcprov-port', 3777: 'jibe-eb', 3778: 'c-h-it-port', 3779: 'cognima', 3780: 'nnp',
3781: 'abcvoice-port', 3782: 'iso-tp0s', 3783: 'bim-pem', 3784: 'bfd-control', 3785: 'bfd-echo',
3786: 'upstriggervsw', 3787: 'fintrx', 3788: 'isrp-port', 3789: 'remotedeploy',
3790: 'quickbooksrds', 3791: 'tvnetworkvideo', 3792: 'sitewatch', 3793: 'dcsoftware', 3794: 'jaus',
3795: 'myblast', 3796: 'spw-dialer', 3797: 'idps', 3798: 'minilock', 3799: 'radius-dynauth',
3800: 'pwgpsi', 3801: 'ibm-mgr', 3802: 'vhd', 3803: 'soniqsync', 3804: 'iqnet-port',
3805: 'tcpdataserver', 3806: 'wsmlb', 3807: 'spugna', 3808: 'sun-as-iiops-ca', 3809: 'apocd',
3810: 'wlanauth', 3811: 'amp', 3812: 'neto-wol-server', 3813: 'rap-ip', 3814: 'neto-dcs',
3815: 'lansurveyorxml', 3816: 'sunlps-http', 3817: 'tapeware', 3818: 'crinis-hb', 3819: 'epl-slp',
3820: 'scp', 3821: 'pmcp', 3822: 'acp-discovery', 3823: 'acp-conduit', 3824: 'acp-policy',
3825: 'ffserver', 3826: 'warmux', 3827: 'netmpi', 3828: 'neteh', 3829: 'neteh-ext',
3830: 'cernsysmgmtagt', 3831: 'dvapps', 3832: 'xxnetserver', 3833: 'aipn-auth', 3834: 'spectardata',
3835: 'spectardb', 3836: 'markem-dcp', 3837: 'mkm-discovery', 3838: 'sos', 3839: 'amx-rms',
3840: 'flirtmitmir', 3841: 'shiprush-db-svr', 3842: 'nhci', 3843: 'quest-agent', 3844: 'rnm',
3845: 'v-one-spp', 3846: 'an-pcp', 3847: 'msfw-control', 3848: 'item', 3849: 'spw-dnspreload',
3850: 'qtms-bootstrap', 3851: 'spectraport', 3852: 'sse-app-config', 3853: 'sscan',
3854: 'stryker-com', 3855: 'opentrac', 3856: 'informer', 3857: 'trap-port', 3858: 'trap-port-mom',
3859: 'nav-port', 3860: 'sasp', 3861: 'winshadow-hd', 3862: 'giga-pocket', 3863: 'asap-tcp',
3864: 'asap-tcp-tls', 3865: 'xpl', 3866: 'dzdaemon', 3867: 'dzoglserver', 3868: 'diameter',
3869: 'ovsam-mgmt', 3870: 'ovsam-d-agent', 3871: 'avocent-adsap', 3872: 'oem-agent',
3873: 'fagordnc', 3874: 'sixxsconfig', 3875: 'pnbscada', 3876: 'dl-agent', 3877: 'xmpcr-interface',
3878: 'fotogcad', 3879: 'appss-lm', 3880: 'igrs', 3881: 'idac', 3882: 'msdts1', 3883: 'vrpn',
3884: 'softrack-meter', 3885: 'topflow-ssl', 3886: 'nei-management', 3887: 'ciphire-data',
3888: 'ciphire-serv', 3889: 'dandv-tester', 3890: 'ndsconnect', 3891: 'rtc-pm-port',
3892: 'pcc-image-port', 3893: 'cgi-starapi', 3894: 'syam-agent', 3895: 'syam-smc', 3896: 'sdo-tls',
3897: 'sdo-ssh', 3898: 'senip', 3899: 'itv-control', 3900: 'udt-os', 3901: 'nimsh', 3902: 'nimaux',
3903: 'charsetmgr', 3904: 'omnilink-port', 3905: 'mupdate', 3906: 'topovista-data',
3907: 'imoguia-port', 3908: 'hppronetman', 3909: 'surfcontrolcpa', 3910: 'prnrequest',
3911: 'prnstatus', 3912: 'gbmt-stars', 3913: 'listcrt-port', 3914: 'listcrt-port-2', 3915: 'agcat',
3916: 'wysdmc', 3917: 'aftmux', 3918: 'pktcablemmcops', 3919: 'hyperip', 3920: 'exasoftport1',
3921: 'herodotus-net', 3922: 'sor-update', 3923: 'symb-sb-port', 3924: 'mpl-gprs-port', 3925: 'zmp',
3926: 'winport', 3927: 'natdataservice', 3928: 'netboot-pxe', 3929: 'smauth-port',
3930: 'syam-webserver', 3931: 'msr-plugin-port', 3932: 'dyn-site', 3933: 'plbserve-port',
3934: 'sunfm-port', 3935: 'sdp-portmapper', 3936: 'mailprox', 3937: 'dvbservdsc',
3938: 'dbcontrol-agent', 3939: 'aamp', 3940: 'xecp-node', 3941: 'homeportal-web', 3942: 'srdp',
3943: 'tig', 3944: 'sops', 3945: 'emcads', 3946: 'backupedge', 3947: 'ccp', 3948: 'apdap',
3949: 'drip', 3950: 'namemunge', 3951: 'pwgippfax', 3952: 'i3-sessionmgr', 3953: 'xmlink-connect',
3954: 'adrep', 3955: 'p2pcommunity', 3956: 'gvcp', 3957: 'mqe-broker', 3958: 'mqe-agent',
3959: 'treehopper', 3960: 'bess', 3961: 'proaxess', 3962: 'sbi-agent', 3963: 'thrp',
3964: 'sasggprs', 3965: 'ati-ip-to-ncpe', 3966: 'bflckmgr', 3967: 'ppsms', 3968: 'ianywhere-dbns',
3969: 'landmarks', 3970: 'lanrevagent', 3971: 'lanrevserver', 3972: 'iconp', 3973: 'progistics',
3974: 'citysearch', 3975: 'airshot', 3976: 'opswagent', 3977: 'opswmanager', 3978: 'secure-cfg-svr',
3979: 'smwan', 3980: 'acms', 3981: 'starfish', 3982: 'eis', 3983: 'eisp', 3984: 'mapper-nodemgr',
3985: 'mapper-mapethd', 3986: 'mapper-ws-ethd', 3987: 'centerline', 3988: 'dcs-config',
3989: 'bv-queryengine', 3990: 'bv-is', 3991: 'bv-smcsrv', 3992: 'bv-ds', 3993: 'bv-agent',
3995: 'iss-mgmt-ssl', 3996: 'abcsoftware', 3997: 'agentsease-db', 3998: 'dnx', 3999: 'nvcnet',
4000: 'terabase', 4001: 'newoak', 4002: 'pxc-spvr-ft', 4003: 'pxc-splr-ft', 4004: 'pxc-roid',
4005: 'pxc-pin', 4006: 'pxc-spvr', 4007: 'pxc-splr', 4008: 'netcheque', 4009: 'chimera-hwm',
4010: 'samsung-unidex', 4011: 'altserviceboot', 4012: 'pda-gate', 4013: 'acl-manager',
4014: 'taiclock', 4015: 'talarian-mcast1', 4016: 'talarian-mcast2', 4017: 'talarian-mcast3',
4018: 'talarian-mcast4', 4019: 'talarian-mcast5', 4020: 'trap', 4021: 'nexus-portal', 4022: 'dnox',
4023: 'esnm-zoning', 4024: 'tnp1-port', 4025: 'partimage', 4026: 'as-debug', 4027: 'bxp',
4028: 'dtserver-port', 4029: 'ip-qsig', 4030: 'jdmn-port', 4031: 'suucp', 4032: 'vrts-auth-port',
4033: 'sanavigator', 4034: 'ubxd', 4035: 'wap-push-http', 4036: 'wap-push-https', 4037: 'ravehd',
4038: 'fazzt-ptp', 4039: 'fazzt-admin', 4040: 'yo-main', 4041: 'houston', 4042: 'ldxp',
4043: 'nirp', 4044: 'ltp', 4045: 'npp', 4046: 'acp-proto', 4047: 'ctp-state', 4049: 'wafs',
4050: 'cisco-wafs', 4051: 'cppdp', 4052: 'interact', 4053: 'ccu-comm-1', 4054: 'ccu-comm-2',
4055: 'ccu-comm-3', 4056: 'lms', 4057: 'wfm', 4058: 'kingfisher', 4059: 'dlms-cosem',
4060: 'dsmeter-iatc', 4061: 'ice-location', 4062: 'ice-slocation', 4063: 'ice-router',
4064: 'ice-srouter', 4065: 'avanti-cdp', 4066: 'pmas', 4067: 'idp', 4068: 'ipfltbcst',
4069: 'minger', 4070: 'tripe', 4071: 'aibkup', 4072: 'zieto-sock', 4073: 'iRAPP',
4074: 'cequint-cityid', 4075: 'perimlan', 4076: 'seraph', 4077: 'ascomalarm', 4078: 'cssp',
4079: 'santools', 4080: 'lorica-in', 4081: 'lorica-in-sec', 4082: 'lorica-out',
4083: 'lorica-out-sec', 4084: 'fortisphere-vm', 4085: 'ezmessagesrv', 4086: 'ftsync',
4087: 'applusservice', 4088: 'npsp', 4089: 'opencore', 4090: 'omasgport', 4091: 'ewinstaller',
4092: 'ewdgs', 4093: 'pvxpluscs', 4094: 'sysrqd', 4095: 'xtgui', 4096: 'bre', 4097: 'patrolview',
4098: 'drmsfsd', 4099: 'dpcp', 4100: 'igo-incognito', 4101: 'brlp-0', 4102: 'brlp-1',
4103: 'brlp-2', 4104: 'brlp-3', 4105: 'shofar', 4106: 'synchronite', 4107: 'j-ac', 4108: 'accel',
4109: 'izm', 4110: 'g2tag', 4111: 'xgrid', 4112: 'apple-vpns-rp', 4113: 'aipn-reg',
4114: 'jomamqmonitor', 4115: 'cds', 4116: 'smartcard-tls', 4117: 'hillrserv', 4118: 'netscript',
4119: 'assuria-slm', 4121: 'e-builder', 4122: 'fprams', 4123: 'z-wave', 4124: 'tigv2',
4125: 'opsview-envoy', 4126: 'ddrepl', 4127: 'unikeypro', 4128: 'nufw', 4129: 'nuauth',
4130: 'fronet', 4131: 'stars', 4132: 'nuts-dem', 4133: 'nuts-bootp', 4134: 'nifty-hmi',
4135: 'cl-db-attach', 4136: 'cl-db-request', 4137: 'cl-db-remote', 4138: 'nettest', 4139: 'thrtx',
4140: 'cedros-fds', 4141: 'oirtgsvc', 4142: 'oidocsvc', 4143: 'oidsr', 4145: 'vvr-control',
4146: 'tgcconnect', 4147: 'vrxpservman', 4148: 'hhb-handheld', 4149: 'agslb',
4151: 'menandmice-noh', 4152: 'idig-mux', 4153: 'mbl-battd', 4154: 'atlinks', 4155: 'bzr',
4156: 'stat-results', 4157: 'stat-scanner', 4158: 'stat-cc', 4159: 'nss', 4160: 'jini-discovery',
4161: 'omscontact', 4162: 'omstopology', 4163: 'silverpeakpeer', 4164: 'silverpeakcomm',
4165: 'altcp', 4166: 'joost', 4167: 'ddgn', 4168: 'pslicser', 4169: 'iadt', 4170: 'd-cinema-csp',
4171: 'ml-svnet', 4172: 'pcoip', 4173: 'mma-discovery', 4174: 'smcluster', 4175: 'bccp',
4176: 'tl-ipcproxy', 4177: 'wello', 4178: 'storman', 4180: 'httpx', 4181: 'macbak',
4182: 'pcptcpservice', 4183: 'cyborgnet', 4184: 'universe-suite', 4185: 'wcpp',
4186: 'boxbackupstore', 4187: 'csc-proxy', 4188: 'vatata', 4189: 'pcep', 4190: 'sieve',
4191: 'dsmipv6', 4192: 'azeti', 4193: 'pvxplusio', 4199: 'eims-admin',
4200 - 4299: 'vrml-multi-use', 4300: 'corelccam', 4301: 'd-data', 4302: 'd-data-control',
4303: 'srcp', 4304: 'owserver', 4305: 'batman', 4306: 'pinghgl', 4307: 'visicron-vs',
4308: 'compx-lockview', 4309: 'dserver', 4310: 'mirrtex', 4311: 'p6ssmc', 4312: 'pscl-mgt',
4313: 'perrla', 4314: 'choiceview-agt', 4316: 'choiceview-clt', 4320: 'fdt-rcatp', 4321: 'rwhois',
4322: 'trim-event', 4323: 'trim-ice', 4325: 'geognosisman', 4326: 'geognosis', 4327: 'jaxer-web',
4328: 'jaxer-manager', 4329: 'publiqare-sync', 4330: 'dey-sapi', 4331: 'ktickets-rest',
4333: 'ahsp', 4334: 'netconf-ch-ssh', 4335: 'netconf-ch-tls', 4336: 'restconf-ch-tls', 4340: 'gaia',
4341: 'lisp-data', 4342: 'lisp-cons', 4343: 'unicall', 4344: 'vinainstall', 4345: 'm4-network-as',
4346: 'elanlm', 4347: 'lansurveyor', 4348: 'itose', 4349: 'fsportmap', 4350: 'net-device',
4351: 'plcy-net-svcs', 4352: 'pjlink', 4353: 'f5-iquery', 4354: 'qsnet-trans', 4355: 'qsnet-workst',
4356: 'qsnet-assist', 4357: 'qsnet-cond', 4358: 'qsnet-nucl', 4359: 'omabcastltkm',
4360: 'matrix-vnet', 4361: 'nacnl', 4362: 'afore-vdp-disc', 4366: 'shadowstream', 4368: 'wxbrief',
4369: 'epmd', 4370: 'elpro-tunnel', 4371: 'l2c-control', 4372: 'l2c-data', 4373: 'remctl',
4374: 'psi-ptt', 4375: 'tolteces', 4376: 'bip', 4377: 'cp-spxsvr', 4378: 'cp-spxdpy', 4379: 'ctdb',
4389: 'xandros-cms', 4390: 'wiegand', 4391: 'apwi-imserver', 4392: 'apwi-rxserver',
4393: 'apwi-rxspooler', 4394: 'apwi-disc', 4395: 'omnivisionesx', 4396: 'fly', 4400: 'ds-srv',
4401: 'ds-srvr', 4402: 'ds-clnt', 4403: 'ds-user', 4404: 'ds-admin', 4405: 'ds-mail',
4406: 'ds-slp', 4407: 'nacagent', 4408: 'slscc', 4409: 'netcabinet-com', 4410: 'itwo-server',
4411: 'found', 4412: 'smallchat', 4413: 'avi-nms', 4414: 'updog', 4415: 'brcd-vr-req',
4416: 'pjj-player', 4417: 'workflowdir', 4425: 'netrockey6', 4426: 'beacon-port-2', 4427: 'drizzle',
4428: 'omviserver', 4429: 'omviagent', 4430: 'rsqlserver', 4431: 'wspipe', 4432: 'l-acoustics',
4433: 'vop', 4441: 'netblox', 4442: 'saris', 4443: 'pharos', 4444: 'krb524', 4445: 'upnotifyp',
4446: 'n1-fwp', 4447: 'n1-rmgmt', 4448: 'asc-slmd', 4449: 'privatewire', 4450: 'camp',
4451: 'ctisystemmsg', 4452: 'ctiprogramload', 4453: 'nssalertmgr', 4454: 'nssagentmgr',
4455: 'prchat-user', 4456: 'prchat-server', 4457: 'prRegister', 4458: 'mcp', 4484: 'hpssmgmt',
4485: 'assyst-dr', 4486: 'icms', 4487: 'prex-tcp', 4488: 'awacs-ice', 4500: 'ipsec-nat-t',
4502: 'a25-fap-fgw', 4534: 'armagetronad', 4535: 'ehs', 4536: 'ehs-ssl', 4537: 'wssauthsvc',
4538: 'swx-gate', 4545: 'worldscores', 4546: 'sf-lm', 4547: 'lanner-lm', 4548: 'synchromesh',
4549: 'aegate', 4550: 'gds-adppiw-db', 4551: 'ieee-mih', 4552: 'menandmice-mon', 4553: 'icshostsvc',
4554: 'msfrs', 4555: 'rsip', 4556: 'dtn-bundle', 4557: 'mtcevrunqss', 4558: 'mtcevrunqman',
4559: 'hylafax', 4563: 'amahi-anywhere', 4566: 'kwtc', 4567: 'tram', 4568: 'bmc-reporting',
4569: 'iax', 4570: 'deploymentmap', 4590: 'rid', 4591: 'l3t-at-an', 4592: 'hrpd-ith-at-an',
4593: 'ipt-anri-anri', 4594: 'ias-session', 4595: 'ias-paging', 4596: 'ias-neighbor',
4597: 'a21-an-1xbs', 4598: 'a16-an-an', 4599: 'a17-an-an', 4600: 'piranha1', 4601: 'piranha2',
4602: 'mtsserver', 4603: 'menandmice-upg', 4604: 'irp', 4605: 'sixchat', 4658: 'playsta2-app',
4659: 'playsta2-lob', 4660: 'smaclmgr', 4661: 'kar2ouche', 4662: 'oms', 4663: 'noteit', 4664: 'ems',
4665: 'contclientms', 4666: 'eportcomm', 4667: 'mmacomm', 4668: 'mmaeds', 4669: 'eportcommdata',
4670: 'light', 4671: 'acter', 4672: 'rfa', 4673: 'cxws', 4674: 'appiq-mgmt', 4675: 'dhct-status',
4676: 'dhct-alerts', 4677: 'bcs', 4678: 'traversal', 4679: 'mgesupervision', 4680: 'mgemanagement',
4681: 'parliant', 4682: 'finisar', 4683: 'spike', 4684: 'rfid-rp1', 4685: 'autopac', 4686: 'msp-os',
4687: 'nst', 4688: 'mobile-p2p', 4689: 'altovacentral', 4690: 'prelude', 4691: 'mtn',
4692: 'conspiracy', 4700: 'netxms-agent', 4701: 'netxms-mgmt', 4702: 'netxms-sync',
4703: 'npqes-test', 4704: 'assuria-ins', 4725: 'truckstar', 4726: 'a26-fap-fgw', 4727: 'fcis',
4728: 'capmux', 4729: 'gsmtap', 4730: 'gearman', 4731: 'remcap', 4732: 'ohmtrigger',
4733: 'resorcs', 4737: 'ipdr-sp', 4738: 'solera-lpn', 4739: 'ipfix', 4740: 'ipfixs',
4741: 'lumimgrd', 4742: 'sicct', 4743: 'openhpid', 4744: 'ifsp', 4745: 'fmp', 4747: 'buschtrommel',
4749: 'profilemac', 4750: 'ssad', 4751: 'spocp', 4752: 'snap', 4753: 'simon', 4784: 'bfd-multi-ctl',
4785: 'cncp', 4786: 'smart-install', 4787: 'sia-ctrl-plane', 4788: 'xmcp', 4789: 'vxlan',
4790: 'vxlan-gpe', 4791: 'roce', 4800: 'iims', 4801: 'iwec', 4802: 'ilss', 4803: 'notateit',
4804: 'aja-ntv4-disc', 4827: 'htcp', 4837: 'varadero-0', 4838: 'varadero-1', 4839: 'varadero-2',
4840: 'opcua-tcp', 4841: 'quosa', 4842: 'gw-asv', 4843: 'opcua-tls', 4844: 'gw-log',
4845: 'wcr-remlib', 4846: 'contamac-icm', 4847: 'wfc', 4848: 'appserv-http', 4849: 'appserv-https',
4850: 'sun-as-nodeagt', 4851: 'derby-repli', 4867: 'unify-debug', 4868: 'phrelay',
4869: 'phrelaydbg', 4870: 'cc-tracking', 4871: 'wired', 4876: 'tritium-can', 4877: 'lmcs',
4878: 'inst-discovery', 4879: 'wsdl-event', 4880: 'hislip', 4881: 'socp-t', 4882: 'socp-c',
4883: 'wmlserver', 4884: 'hivestor', 4885: 'abbs', 4894: 'lyskom', 4899: 'radmin-port',
4900: 'hfcs', 4901: 'flr-agent', 4902: 'magiccontrol', 4912: 'lutap', 4913: 'lutcp', 4914: 'bones',
4915: 'frcs', 4936: 'an-signaling', 4937: 'atsc-mh-ssc', 4940: 'eq-office-4940',
4941: 'eq-office-4941', 4942: 'eq-office-4942', 4949: 'munin', 4950: 'sybasesrvmon',
4951: 'pwgwims', 4952: 'sagxtsds', 4953: 'dbsyncarbiter', 4969: 'ccss-qmm', 4970: 'ccss-qsm',
4980: 'ctxs-vpp', 4984: 'webyast', 4985: 'gerhcs', 4986: 'mrip', 4987: 'smar-se-port1',
4988: 'smar-se-port2', 4989: 'parallel', 4990: 'busycal', 4991: 'vrt', 4999: 'hfcs-manager',
5000: 'commplex-main', 5001: 'commplex-link', 5002: 'rfe', 5003: 'fmpro-internal',
5004: 'avt-profile-1', 5005: 'avt-profile-2', 5006: 'wsm-server', 5007: 'wsm-server-ssl',
5008: 'synapsis-edge', 5009: 'winfs', 5010: 'telelpathstart', 5011: 'telelpathattack', 5012: 'nsp',
5013: 'fmpro-v6', 5014: 'onpsocket', 5015: 'fmwp', 5020: 'zenginkyo-1', 5021: 'zenginkyo-2',
5022: 'mice', 5023: 'htuilsrv', 5024: 'scpi-telnet', 5025: 'scpi-raw', 5026: 'strexec-d',
5027: 'strexec-s', 5028: 'qvr', 5029: 'infobright', 5030: 'surfpass', 5031: 'dmp',
5032: 'signacert-agent', 5033: 'jtnetd-server', 5034: 'jtnetd-status', 5042: 'asnaacceler8db',
5043: 'swxadmin', 5044: 'lxi-evntsvc', 5045: 'osp', 5046: 'vpm-udp', 5047: 'iscape', 5048: 'texai',
5049: 'ivocalize', 5050: 'mmcc', 5051: 'ita-agent', 5052: 'ita-manager', 5053: 'rlm',
5054: 'rlm-admin', 5055: 'unot', 5056: 'intecom-ps1', 5057: 'intecom-ps2', 5058: 'locus-disc',
5059: 'sds', 5060: 'sip', 5061: 'sips', 5062: 'na-localise', 5063: 'csrpc', 5064: 'ca-1',
5065: 'ca-2', 5066: 'stanag-5066', 5067: 'authentx', 5068: 'bitforestsrv', 5069: 'i-net-2000-npr',
5070: 'vtsas', 5071: 'powerschool', 5072: 'ayiya', 5073: 'tag-pm', 5074: 'alesquery',
5075: 'pvaccess', 5078: 'pixelpusher', 5079: 'cp-spxrpts', 5080: 'onscreen', 5081: 'sdl-ets',
5082: 'qcp', 5083: 'qfp', 5084: 'llrp', 5085: 'encrypted-llrp', 5086: 'aprigo-cs', 5087: 'biotic',
5090: 'car', 5091: 'cxtp', 5092: 'magpie', 5093: 'sentinel-lm', 5094: 'hart-ip',
5099: 'sentlm-srv2srv', 5100: 'socalia', 5101: 'talarian-tcp', 5102: 'oms-nonsecure',
5103: 'actifio-c2c', 5104: 'tinymessage', 5105: 'hughes-ap', 5106: 'actifioudsagent',
5107: 'actifioreplic', 5111: 'taep-as-svc', 5112: 'pm-cmdsvr', 5114: 'ev-services',
5115: 'autobuild', 5116: 'emb-proj-cmd', 5117: 'gradecam', 5120: 'barracuda-bbs', 5133: 'nbt-pc',
5134: 'ppactivation', 5135: 'erp-scale', 5136: 'minotaur-sa', 5137: 'ctsd', 5145: 'rmonitor-secure',
5146: 'social-alarm', 5150: 'atmp', 5151: 'esri-sde', 5152: 'sde-discovery', 5153: 'toruxserver',
5154: 'bzflag', 5155: 'asctrl-agent', 5156: 'rugameonline', 5157: 'mediat', 5161: 'snmpssh',
5162: 'snmpssh-trap', 5163: 'sbackup', 5164: 'vpa', 5165: 'ife-icorp', 5166: 'winpcs',
5167: 'scte104', 5168: 'scte30', 5172: 'pcoip-mgmt', 5190: 'aol', 5191: 'aol-1', 5192: 'aol-2',
5193: 'aol-3', 5194: 'cpscomm', 5195: 'ampl-lic', 5196: 'ampl-tableproxy', 5197: 'tunstall-lwp',
5200: 'targus-getdata', 5201: 'targus-getdata1', 5202: 'targus-getdata2', 5203: 'targus-getdata3',
5209: 'nomad', 5215: 'noteza', 5222: 'xmpp-client', 5223: 'hpvirtgrp', 5224: 'hpvirtctrl',
5225: 'hp-server', 5226: 'hp-status', 5227: 'perfd', 5228: 'hpvroom', 5229: 'jaxflow',
5230: 'jaxflow-data', 5231: 'crusecontrol', 5232: 'csedaemon', 5233: 'enfs', 5234: 'eenet',
5235: 'galaxy-network', 5236: 'padl2sim', 5237: 'mnet-discovery', 5245: 'downtools',
5246: 'capwap-control', 5247: 'capwap-data', 5248: 'caacws', 5249: 'caaclang2', 5250: 'soagateway',
5251: 'caevms', 5252: 'movaz-ssc', 5253: 'kpdp', 5254: 'logcabin', 5269: 'xmpp-server',
5270: 'cartographerxmp', 5271: 'cuelink', 5272: 'pk', 5280: 'xmpp-bosh', 5281: 'undo-lm',
5282: 'transmit-port', 5298: 'presence', 5299: 'nlg-data', 5300: 'hacl-hb', 5301: 'hacl-gs',
5302: 'hacl-cfg', 5303: 'hacl-probe', 5304: 'hacl-local', 5305: 'hacl-test', 5306: 'sun-mc-grp',
5307: 'sco-aip', 5308: 'cfengine', 5309: 'jprinter', 5310: 'outlaws', 5312: 'permabit-cs',
5313: 'rrdp', 5314: 'opalis-rbt-ipc', 5315: 'hacl-poll', 5316: 'hpbladems', 5317: 'hpdevms',
5318: 'pkix-cmc', 5320: 'bsfserver-zn', 5321: 'bsfsvr-zn-ssl', 5343: 'kfserver', 5344: 'xkotodrcp',
5349: 'stuns', 5350: 'pcp-multicast', 5351: 'pcp', 5352: 'dns-llq', 5353: 'mdns',
5354: 'mdnsresponder', 5355: 'llmnr', 5356: 'ms-smlbiz', 5357: 'wsdapi', 5358: 'wsdapi-s',
5359: 'ms-alerter', 5360: 'ms-sideshow', 5361: 'ms-s-sideshow', 5362: 'serverwsd2',
5363: 'net-projection', 5364: 'kdnet', 5397: 'stresstester', 5398: 'elektron-admin',
5399: 'securitychase', 5400: 'excerpt', 5401: 'excerpts', 5402: 'mftp', 5403: 'hpoms-ci-lstn',
5404: 'hpoms-dps-lstn', 5405: 'netsupport', 5406: 'systemics-sox', 5407: 'foresyte-clear',
5408: 'foresyte-sec', 5409: 'salient-dtasrv', 5410: 'salient-usrmgr', 5411: 'actnet',
5412: 'continuus', 5413: 'wwiotalk', 5414: 'statusd', 5415: 'ns-server', 5416: 'sns-gateway',
5417: 'sns-agent', 5418: 'mcntp', 5419: 'dj-ice', 5420: 'cylink-c', 5421: 'netsupport2',
5422: 'salient-mux', 5423: 'virtualuser', 5424: 'beyond-remote', 5425: 'br-channel',
5426: 'devbasic', 5427: 'sco-peer-tta', 5428: 'telaconsole', 5429: 'base', 5430: 'radec-corp',
5431: 'park-agent', 5432: 'postgresql', 5433: 'pyrrho', 5434: 'sgi-arrayd', 5435: 'sceanics',
5436: 'pmip6-cntl', 5437: 'pmip6-data', 5443: 'spss', 5445: 'smbdirect', 5453: 'surebox',
5454: 'apc-5454', 5455: 'apc-5455', 5456: 'apc-5456', 5461: 'silkmeter', 5462: 'ttl-publisher',
5463: 'ttlpriceproxy', 5464: 'quailnet', 5465: 'netops-broker', 5470: 'apsolab-col',
5471: 'apsolab-cols', 5472: 'apsolab-tag', 5473: 'apsolab-tags', 5474: 'apsolab-rpc',
5475: 'apsolab-data', 5500: 'fcp-addr-srvr1', 5501: 'fcp-addr-srvr2', 5502: 'fcp-srvr-inst1',
5503: 'fcp-srvr-inst2', 5504: 'fcp-cics-gw1', 5505: 'checkoutdb', 5506: 'amc',
5507: 'psl-management', 5553: 'sgi-eventmond', 5554: 'sgi-esphttp', 5555: 'personal-agent',
5556: 'freeciv', 5557: 'farenet', 5565: 'hpe-dp-bura', 5566: 'westec-connect',
5567: 'dof-dps-mc-sec', 5568: 'sdt', 5569: 'rdmnet-ctrl', 5573: 'sdmmp', 5574: 'lsi-bobcat',
5575: 'ora-oap', 5579: 'fdtracks', 5580: 'tmosms0', 5581: 'tmosms1', 5582: 'fac-restore',
5583: 'tmo-icon-sync', 5584: 'bis-web', 5585: 'bis-sync', 5586: 'att-mt-sms', 5597: 'ininmessaging',
5598: 'mctfeed', 5599: 'esinstall', 5600: 'esmmanager', 5601: 'esmagent', 5602: 'a1-msc',
5603: 'a1-bs', 5604: 'a3-sdunode', 5605: 'a4-sdunode', 5618: 'efr', 5627: 'ninaf', 5628: 'htrust',
5629: 'symantec-sfdb', 5630: 'precise-comm', 5631: 'pcanywheredata', 5632: 'pcanywherestat',
5633: 'beorl', 5634: 'xprtld', 5635: 'sfmsso', 5636: 'sfm-db-server', 5637: 'cssc', 5638: 'flcrs',
5639: 'ics', 5646: 'vfmobile', 5670: 'filemq', 5671: 'amqps', 5672: 'amqp', 5673: 'jms',
5674: 'hyperscsi-port', 5675: 'v5ua', 5676: 'raadmin', 5677: 'questdb2-lnchr', 5678: 'rrac',
5679: 'dccm', 5680: 'auriga-router', 5681: 'ncxcp', 5682: 'brightcore', 5683: 'coap', 5684: 'coaps',
5687: 'gog-multiplayer', 5688: 'ggz', 5689: 'qmvideo', 5693: 'rbsystem', 5696: 'kmip',
5713: 'proshareaudio', 5714: 'prosharevideo', 5715: 'prosharedata', 5716: 'prosharerequest',
5717: 'prosharenotify', 5718: 'dpm', 5719: 'dpm-agent', 5720: 'ms-licensing', 5721: 'dtpt',
5722: 'msdfsr', 5723: 'omhs', 5724: 'omsdk', 5725: 'ms-ilm', 5726: 'ms-ilm-sts', 5727: 'asgenf',
5728: 'io-dist-data', 5729: 'openmail', 5730: 'unieng', 5741: 'ida-discover1',
5742: 'ida-discover2', 5743: 'watchdoc-pod', 5744: 'watchdoc', 5745: 'fcopy-server',
5746: 'fcopys-server', 5747: 'tunatic', 5748: 'tunalyzer', 5750: 'rscd', 5755: 'openmailg',
5757: 'x500ms', 5766: 'openmailns', 5767: 's-openmail', 5768: 'openmailpxy', 5769: 'spramsca',
5770: 'spramsd', 5771: 'netagent', 5777: 'dali-port', 5780: 'vts-rpc', 5784: 'ibar',
5786: 'cisco-redu', 5787: 'waascluster', 5793: 'xtreamx', 5794: 'spdp', 5813: 'icmpd',
5814: 'spt-automation', 5841: 'shiprush-d-ch', 5842: 'reversion', 5859: 'wherehoo',
5863: 'ppsuitemsg', 5868: 'diameters', 5883: 'jute', 5900: 'rfb', 5910: 'cm', 5911: 'cpdlc',
5912: 'fis', 5913: 'ads-c', 5963: 'indy', 5968: 'mppolicy-v5', 5969: 'mppolicy-mgr',
5984: 'couchdb', 5985: 'wsman', 5986: 'wsmans', 5987: 'wbem-rmi', 5988: 'wbem-http',
5989: 'wbem-https', 5990: 'wbem-exp-https', 5991: 'nuxsl', 5992: 'consul-insight', 5993: 'cim-rs',
5999: 'cvsup', 6000 - 6063: 'x11', 6064: 'ndl-ahp-svc', 6065: 'winpharaoh', 6066: 'ewctsp',
6068: 'gsmp-ancp', 6069: 'trip', 6070: 'messageasap', 6071: 'ssdtp', 6072: 'diagnose-proc',
6073: 'directplay8', 6074: 'max', 6075: 'dpm-acm', 6076: 'msft-dpm-cert', 6077: 'iconstructsrv',
6080: 'gue', 6081: 'geneve', 6082: 'p25cai', 6083: 'miami-bcast', 6084: 'reload-config',
6085: 'konspire2b', 6086: 'pdtp', 6087: 'ldss', 6088: 'doglms', 6099: 'raxa-mgmt',
6100: 'synchronet-db', 6101: 'synchronet-rtc', 6102: 'synchronet-upd', 6103: 'rets', 6104: 'dbdb',
6105: 'primaserver', 6106: 'mpsserver', 6107: 'etc-control', 6108: 'sercomm-scadmin',
6109: 'globecast-id', 6110: 'softcm', 6111: 'spc', 6112: 'dtspcd', 6113: 'dayliteserver',
6114: 'wrspice', 6115: 'xic', 6116: 'xtlserv', 6117: 'daylitetouch', 6118: 'tipc', 6121: 'spdy',
6122: 'bex-webadmin', 6123: 'backup-express', 6124: 'pnbs', 6130: 'damewaremobgtwy',
6133: 'nbt-wol', 6140: 'pulsonixnls', 6141: 'meta-corp', 6142: 'aspentec-lm', 6143: 'watershed-lm',
6144: 'statsci1-lm', 6145: 'statsci2-lm', 6146: 'lonewolf-lm', 6147: 'montage-lm',
6148: 'ricardo-lm', 6149: 'tal-pod', 6159: 'efb-aci', 6160: 'ecmp', 6161: 'patrol-ism',
6162: 'patrol-coll', 6163: 'pscribe', 6200: 'lm-x', 6201: 'thermo-calc', 6209: 'qmtps',
6222: 'radmind', 6241: 'jeol-nsdtp-1', 6242: 'jeol-nsdtp-2', 6243: 'jeol-nsdtp-3',
6244: 'jeol-nsdtp-4', 6251: 'tl1-raw-ssl', 6252: 'tl1-ssh', 6253: 'crip', 6267: 'gld', 6268: 'grid',
6269: 'grid-alt', 6300: 'bmc-grx', 6301: 'bmc-ctd-ldap', 6306: 'ufmp', 6315: 'scup',
6316: 'abb-escp', 6317: 'nav-data-cmd', 6320: 'repsvc', 6321: 'emp-server1', 6322: 'emp-server2',
6324: 'hrd-ncs', 6325: 'dt-mgmtsvc', 6326: 'dt-vra', 6343: 'sflow', 6344: 'streletz',
6346: 'gnutella-svc', 6347: 'gnutella-rtr', 6350: 'adap', 6355: 'pmcs', 6360: 'metaedit-mu',
6363: 'ndn', 6370: 'metaedit-se', 6379: 'redis', 6382: 'metatude-mds', 6389: 'clariion-evr01',
6390: 'metaedit-ws', 6400: 'boe-cms', 6401: 'boe-was', 6402: 'boe-eventsrv', 6403: 'boe-cachesvr',
6404: 'boe-filesvr', 6405: 'boe-pagesvr', 6406: 'boe-processsvr', 6407: 'boe-resssvr1',
6408: 'boe-resssvr2', 6409: 'boe-resssvr3', 6410: 'boe-resssvr4', 6417: 'faxcomservice',
6418: 'syserverremote', 6419: 'svdrp', 6420: 'nim-vdrshell', 6421: 'nim-wan', 6432: 'pgbouncer',
6442: 'tarp', 6443: 'sun-sr-https', 6444: 'sge-qmaster', 6445: 'sge-execd', 6446: 'mysql-proxy',
6455: 'skip-cert-recv', 6456: 'skip-cert-send', 6471: 'lvision-lm', 6480: 'sun-sr-http',
6481: 'servicetags', 6482: 'ldoms-mgmt', 6484: 'sun-sr-jms', 6485: 'sun-sr-iiop',
6486: 'sun-sr-iiops', 6487: 'sun-sr-iiop-aut', 6488: 'sun-sr-jmx', 6489: 'sun-sr-admin',
6500: 'boks', 6501: 'boks-servc', 6502: 'boks-servm', 6503: 'boks-clntd', 6505: 'badm-priv',
6506: 'badm-pub', 6507: 'bdir-priv', 6508: 'bdir-pub', 6509: 'mgcs-mfp-port', 6510: 'mcer-port',
6511: 'dccp-udp', 6513: 'netconf-tls', 6514: 'syslog-tls', 6515: 'elipse-rec', 6543: 'lds-distrib',
6544: 'lds-dump', 6547: 'apc-6547', 6548: 'apc-6548', 6549: 'apc-6549', 6550: 'fg-sysupdate',
6551: 'sum', 6558: 'xdsxdm', 6566: 'sane-port', 6568: 'canit-store', 6579: 'affiliate',
6580: 'parsec-master', 6581: 'parsec-peer', 6582: 'parsec-game', 6583: 'joaJewelSuite',
6600: 'mshvlm', 6601: 'mstmg-sstp', 6602: 'wsscomfrmwk', 6619: 'odette-ftps', 6620: 'kftp-data',
6621: 'kftp', 6622: 'mcftp', 6623: 'ktelnet', 6624: 'datascaler-db', 6625: 'datascaler-ctl',
6626: 'wago-service', 6627: 'nexgen', 6628: 'afesc-mc', 6632: 'mxodbc-connect',
6633: 'cisco-vpath-tun', 6634: 'mpls-pm', 6635: 'mpls-udp', 6636: 'mpls-udp-dtls', 6640: 'ovsdb',
6653: 'openflow', 6655: 'pcs-sf-ui-man', 6656: 'emgmsg', 6657: 'palcom-disc', 6665 - 6669: 'ircu',
6670: 'vocaltec-gold', 6671: 'p4p-portal', 6672: 'vision-server', 6673: 'vision-elmd', 6678: 'vfbp',
6679: 'osaut', 6687: 'clever-ctrace', 6688: 'clever-tcpip', 6689: 'tsa', 6690: 'cleverdetect',
6696: 'babel', 6697: 'ircs-u', 6701: 'kti-icad-srvr', 6702: 'e-design-net', 6703: 'e-design-web',
6704: 'frc-hp', 6705: 'frc-mp', 6706: 'frc-lp', 6714: 'ibprotocol', 6715: 'fibotrader-com',
6716: 'princity-agent', 6767: 'bmc-perf-agent', 6768: 'bmc-perf-mgrd', 6769: 'adi-gxp-srvprt',
6770: 'plysrv-http', 6771: 'plysrv-https', 6777: 'ntz-tracker', 6778: 'ntz-p2p-storage',
6784: 'bfd-lag', 6785: 'dgpf-exchg', 6786: 'smc-jmx', 6787: 'smc-admin', 6788: 'smc-http',
6789: 'smc-https', 6790: 'hnmp', 6791: 'hnm', 6801: 'acnet', 6817: 'pentbox-sim', 6831: 'ambit-lm',
6841: 'netmo-default', 6842: 'netmo-http', 6850: 'iccrushmore', 6868: 'acctopus-cc', 6888: 'muse',
6901: 'jetstream', 6935: 'ethoscan', 6936: 'xsmsvc', 6946: 'bioserver', 6951: 'otlp',
6961: 'jmact3', 6962: 'jmevt2', 6963: 'swismgr1', 6964: 'swismgr2', 6965: 'swistrap',
6966: 'swispol', 6969: 'acmsoda', 6970: 'conductor', 6998: 'iatp-highpri', 6999: 'iatp-normalpri',
7000: 'afs3-fileserver', 7001: 'afs3-callback', 7002: 'afs3-prserver', 7003: 'afs3-vlserver',
7004: 'afs3-kaserver', 7005: 'afs3-volser', 7006: 'afs3-errors', 7007: 'afs3-bos',
7008: 'afs3-update', 7009: 'afs3-rmtsys', 7010: 'ups-onlinet', 7011: 'talon-disc',
7012: 'talon-engine', 7013: 'microtalon-dis', 7014: 'microtalon-com', 7015: 'talon-webserver',
7018: 'fisa-svc', 7019: 'doceri-ctl', 7020: 'dpserve', 7021: 'dpserveadmin', 7022: 'ctdp',
7023: 'ct2nmcs', 7024: 'vmsvc', 7025: 'vmsvc-2', 7030: 'op-probe', 7031: 'iposplanet',
7040: 'quest-disc', 7070: 'arcp', 7071: 'iwg1', 7073: 'martalk', 7080: 'empowerid',
7088: 'zixi-transport', 7095: 'jdp-disc', 7099: 'lazy-ptop', 7100: 'font-service', 7101: 'elcn',
7107: 'aes-x170', 7121: 'virprot-lm', 7128: 'scenidm', 7129: 'scenccs', 7161: 'cabsm-comm',
7162: 'caistoragemgr', 7163: 'cacsambroker', 7164: 'fsr', 7165: 'doc-server', 7166: 'aruba-server',
7167: 'casrmagent', 7168: 'cnckadserver', 7169: 'ccag-pib', 7170: 'nsrp', 7171: 'drm-production',
7172: 'metalbend', 7173: 'zsecure', 7174: 'clutild', 7181: 'janus-disc', 7200: 'fodms',
7201: 'dlip', 7227: 'ramp', 7228: 'citrixupp', 7229: 'citrixuppg', 7235: 'aspcoordination',
7236: 'display', 7237: 'pads', 7262: 'cnap', 7272: 'watchme-7272', 7273: 'oma-rlp',
7274: 'oma-rlp-s', 7275: 'oma-ulp', 7276: 'oma-ilp', 7277: 'oma-ilp-s', 7278: 'oma-dcdocbs',
7279: 'ctxlic', 7280: 'itactionserver1', 7281: 'itactionserver2', 7282: 'mzca-action',
7283: 'genstat', 7300 - 7359: 'swx', 7365: 'lcm-server', 7391: 'mindfilesys',
7392: 'mrssrendezvous', 7393: 'nfoldman', 7394: 'fse', 7395: 'winqedit', 7397: 'hexarc',
7400: 'rtps-discovery', 7401: 'rtps-dd-ut', 7402: 'rtps-dd-mt', 7410: 'ionixnetmon',
7411: 'daqstream', 7421: 'mtportmon', 7426: 'pmdmgr', 7427: 'oveadmgr', 7428: 'ovladmgr',
7429: 'opi-sock', 7430: 'xmpv7', 7431: 'pmd', 7437: 'faximum', 7443: 'oracleas-https',
7471: 'sttunnel', 7473: 'rise', 7474: 'neo4j', 7491: 'telops-lmd', 7500: 'silhouette',
7501: 'ovbus', 7508: 'adcp', 7509: 'acplt', 7510: 'ovhpas', 7511: 'pafec-lm', 7542: 'saratoga',
7543: 'atul', 7544: 'nta-ds', 7545: 'nta-us', 7546: 'cfs', 7547: 'cwmp', 7548: 'tidp',
7549: 'nls-tl', 7550: 'cloudsignaling', 7551: 'controlone-con', 7560: 'sncp', 7563: 'cfw',
7566: 'vsi-omega', 7569: 'dell-eql-asm', 7570: 'aries-kfinder', 7574: 'coherence', 7588: 'sun-lm',
7606: 'mipi-debug', 7624: 'indi', 7626: 'simco', 7627: 'soap-http', 7628: 'zen-pawn', 7629: 'xdas',
7630: 'hawk', 7631: 'tesla-sys-msg', 7633: 'pmdfmgt', 7648: 'cuseeme', 7672: 'imqstomp',
7673: 'imqstomps', 7674: 'imqtunnels', 7675: 'imqtunnel', 7676: 'imqbrokerd',
7677: 'sun-user-https', 7680: 'pando-pub', 7683: 'dmt', 7689: 'collaber', 7697: 'klio',
7700: 'em7-secom', 7707: 'sync-em7', 7708: 'scinet', 7720: 'medimageportal',
7724: 'nsdeepfreezectl', 7725: 'nitrogen', 7726: 'freezexservice', 7727: 'trident-data',
7728: 'osvr', 7734: 'smip', 7738: 'aiagent', 7741: 'scriptview', 7742: 'msss', 7743: 'sstp-1',
7744: 'raqmon-pdu', 7747: 'prgp', 7775: 'inetfs', 7777: 'cbt', 7778: 'interwise', 7779: 'vstat',
7781: 'accu-lmgr', 7784: 's-bfd', 7786: 'minivend', 7787: 'popup-reminders', 7789: 'office-tools',
7794: 'q3ade', 7797: 'pnet-conn', 7798: 'pnet-enc', 7799: 'altbsdp', 7800: 'asr',
7801: 'ssp-client', 7802: 'vns-tp', 7810: 'rbt-wanopt', 7845: 'apc-7845', 7846: 'apc-7846',
7847: 'csoauth', 7869: 'mobileanalyzer', 7870: 'rbt-smc', 7871: 'mdm', 7872: 'mipv6tls',
7878: 'owms', 7880: 'pss', 7887: 'ubroker', 7900: 'mevent', 7901: 'tnos-sp', 7902: 'tnos-dp',
7903: 'tnos-dps', 7913: 'qo-secure', 7932: 't2-drm', 7933: 't2-brm', 7962: 'generalsync',
7967: 'supercell', 7979: 'micromuse-ncps', 7980: 'quest-vista', 7981: 'sossd-collect',
7982: 'sossd-agent', 7997: 'pushns', 7998: 'usicontentpush', 7999: 'irdmi2', 8000: 'irdmi',
8001: 'vcom-tunnel', 8002: 'teradataordbms', 8003: 'mcreport', 8005: 'mxi', 8008: 'http-alt',
8019: 'qbdb', 8020: 'intu-ec-svcdisc', 8021: 'intu-ec-client', 8022: 'oa-system',
8025: 'ca-audit-da', 8026: 'ca-audit-ds', 8032: 'pro-ed', 8033: 'mindprint', 8034: 'vantronix-mgmt',
8040: 'ampify', 8042: 'fs-agent', 8043: 'fs-server', 8044: 'fs-mgmt', 8051: 'rocrail',
8052: 'senomix01', 8053: 'senomix02', 8054: 'senomix03', 8055: 'senomix04', 8056: 'senomix05',
8057: 'senomix06', 8058: 'senomix07', 8059: 'senomix08', 8060: 'aero', 8066: 'toad-bi-appsrvr',
8067: 'infi-async', 8074: 'gadugadu', 8080: 'http-alt', 8081: 'sunproxyadmin', 8082: 'us-cli',
8083: 'us-srv', 8086: 'd-s-n', 8087: 'simplifymedia', 8088: 'radan-http', 8091: 'jamlink',
8097: 'sac', 8100: 'xprint-server', 8101: 'ldoms-migr', 8102: 'kz-migr', 8115: 'mtl8000-matrix',
8116: 'cp-cluster', 8117: 'purityrpc', 8118: 'privoxy', 8121: 'apollo-data', 8122: 'apollo-admin',
8128: 'paycash-online', 8129: 'paycash-wbp', 8130: 'indigo-vrmi', 8131: 'indigo-vbcp',
8132: 'dbabble', 8140: 'puppet', 8148: 'isdd', 8149: 'eor-game', 8153: 'quantastor', 8160: 'patrol',
8161: 'patrol-snmp', 8162: 'lpar2rrd', 8181: 'intermapper', 8182: 'vmware-fdm', 8183: 'proremote',
8184: 'itach', 8190: 'gcp-rphy', 8191: 'limnerpressure', 8192: 'spytechphone', 8194: 'blp1',
8195: 'blp2', 8199: 'vvr-data', 8200: 'trivnet1', 8201: 'trivnet2', 8202: 'aesop',
8204: 'lm-perfworks', 8205: 'lm-instmgr', 8206: 'lm-dta', 8207: 'lm-sserver', 8208: 'lm-webwatcher',
8230: 'rexecj', 8231: 'hncp-udp-port', 8232: 'hncp-dtls-port', 8243: 'synapse-nhttps',
8276: 'pando-sec', 8280: 'synapse-nhttp', 8282: 'libelle', 8292: 'blp3', 8293: 'hiperscan-id',
8294: 'blp4', 8300: 'tmi', 8301: 'amberon', 8313: 'hub-open-net', 8320: 'tnp-discover', 8321: 'tnp',
8322: 'garmin-marine', 8351: 'server-find', 8376: 'cruise-enum', 8377: 'cruise-swroute',
8378: 'cruise-config', 8379: 'cruise-diags', 8380: 'cruise-update', 8383: 'm2mservices',
8384: 'marathontp', 8400: 'cvd', 8401: 'sabarsd', 8402: 'abarsd', 8403: 'admind', 8404: 'svcloud',
8405: 'svbackup', 8415: 'dlpx-sp', 8416: 'espeech', 8417: 'espeech-rtp', 8442: 'cybro-a-bus',
8443: 'pcsync-https', 8444: 'pcsync-http', 8445: 'copy', 8450: 'npmp', 8457: 'nexentamv',
8470: 'cisco-avp', 8471: 'pim-port', 8472: 'otv', 8473: 'vp2p', 8474: 'noteshare', 8500: 'fmtp',
8501: 'cmtp-mgt', 8502: 'ftnmtp', 8503: 'lsp-self-ping', 8554: 'rtsp-alt', 8555: 'd-fence',
8567: 'dof-tunnel', 8600: 'asterix', 8609: 'canon-cpp-disc', 8610: 'canon-mfnp',
8611: 'canon-bjnp1', 8612: 'canon-bjnp2', 8613: 'canon-bjnp3', 8614: 'canon-bjnp4', 8615: 'imink',
8665: 'monetra', 8666: 'monetra-admin', 8675: 'msi-cps-rm', 8686: 'sun-as-jmxrmi',
8688: 'openremote-ctrl', 8699: 'vnyx', 8711: 'nvc', 8732: 'dtp-net', 8733: 'ibus',
8750: 'dey-keyneg', 8763: 'mc-appserver', 8764: 'openqueue', 8765: 'ultraseek-http', 8766: 'amcs',
8770: 'dpap', 8778: 'uec', 8786: 'msgclnt', 8787: 'msgsrvr', 8793: 'acd-pm', 8800: 'sunwebadmin',
8804: 'truecm', 8873: 'dxspider', 8880: 'cddbp-alt', 8881: 'galaxy4d', 8883: 'secure-mqtt',
8888: 'ddi-tcp-1', 8889: 'ddi-tcp-2', 8890: 'ddi-tcp-3', 8891: 'ddi-tcp-4', 8892: 'ddi-tcp-5',
8893: 'ddi-tcp-6', 8894: 'ddi-tcp-7', 8899: 'ospf-lite', 8900: 'jmb-cds1', 8901: 'jmb-cds2',
8910: 'manyone-http', 8911: 'manyone-xml', 8912: 'wcbackup', 8913: 'dragonfly', 8937: 'twds',
8953: 'ub-dns-control', 8954: 'cumulus-admin', 8980: 'nod-provider', 8981: 'nod-client',
8989: 'sunwebadmins', 8990: 'http-wmap', 8991: 'https-wmap', 8997: 'oracle-ms-ens',
8998: 'canto-roboflow', 8999: 'bctp', 9000: 'cslistener', 9001: 'etlservicemgr', 9002: 'dynamid',
9005: 'golem', 9007: 'ogs-client', 9008: 'ogs-server', 9009: 'pichat', 9010: 'sdr', 9020: 'tambora',
9021: 'panagolin-ident', 9022: 'paragent', 9023: 'swa-1', 9024: 'swa-2', 9025: 'swa-3',
9026: 'swa-4', 9050: 'versiera', 9051: 'fio-cmgmt', 9080: 'glrpc', 9082: 'lcs-ap',
9083: 'emc-pp-mgmtsvc', 9084: 'aurora', 9085: 'ibm-rsyscon', 9086: 'net2display', 9087: 'classic',
9088: 'sqlexec', 9089: 'sqlexec-ssl', 9090: 'websm', 9091: 'xmltec-xmlmail', 9093: 'copycat',
9100: 'hp-pdl-datastr', 9101: 'bacula-dir', 9102: 'bacula-fd', 9103: 'bacula-sd', 9104: 'peerwire',
9105: 'xadmin', 9106: 'astergate', 9107: 'astergatefax', 9119: 'mxit', 9122: 'grcmp', 9123: 'grcp',
9131: 'dddp', 9160: 'apani1', 9161: 'apani2', 9162: 'apani3', 9163: 'apani4', 9164: 'apani5',
9191: 'sun-as-jpda', 9200: 'wap-wsp', 9201: 'wap-wsp-wtp', 9202: 'wap-wsp-s', 9203: 'wap-wsp-wtp-s',
9204: 'wap-vcard', 9205: 'wap-vcal', 9206: 'wap-vcard-s', 9207: 'wap-vcal-s', 9208: 'rjcdb-vcards',
9209: 'almobile-system', 9210: 'oma-mlp', 9211: 'oma-mlp-s', 9212: 'serverviewdbms',
9213: 'serverstart', 9214: 'ipdcesgbs', 9215: 'insis', 9216: 'acme', 9217: 'fsc-port',
9222: 'teamcoherence', 9255: 'mon', 9277: 'traingpsdata', 9278: 'pegasus', 9279: 'pegasus-ctl',
9280: 'pgps', 9281: 'swtp-port1', 9282: 'swtp-port2', 9283: 'callwaveiam', 9284: 'visd',
9285: 'n2h2server', 9286: 'n2receive', 9287: 'cumulus', 9292: 'armtechdaemon', 9293: 'storview',
9294: 'armcenterhttp', 9295: 'armcenterhttps', 9300: 'vrace', 9306: 'sphinxql', 9312: 'sphinxapi',
9318: 'secure-ts', 9321: 'guibase', 9343: 'mpidcmgr', 9344: 'mphlpdmc', 9345: 'rancher',
9346: 'ctechlicensing', 9374: 'fjdmimgr', 9380: 'boxp', 9387: 'd2dconfig', 9388: 'd2ddatatrans',
9389: 'adws', 9390: 'otp', 9396: 'fjinvmgr', 9397: 'mpidcagt', 9400: 'sec-t4net-srv',
9401: 'sec-t4net-clt', 9402: 'sec-pc2fax-srv', 9418: 'git', 9443: 'tungsten-https',
9444: 'wso2esb-console', 9445: 'mindarray-ca', 9450: 'sntlkeyssrvr', 9500: 'ismserver',
9522: 'sma-spw', 9535: 'mngsuite', 9536: 'laes-bf', 9555: 'trispen-sra', 9592: 'ldgateway',
9593: 'cba8', 9594: 'msgsys', 9595: 'pds', 9596: 'mercury-disc', 9597: 'pd-admin', 9598: 'vscp',
9599: 'robix', 9600: 'micromuse-ncpw', 9612: 'streamcomm-ds', 9614: 'iadt-tls',
9616: 'erunbook-agent', 9617: 'erunbook-server', 9618: 'condor', 9628: 'odbcpathway',
9629: 'uniport', 9630: 'peoctlr', 9631: 'peocoll', 9632: 'mc-comm', 9640: 'pqsflows',
9666: 'zoomcp', 9667: 'xmms2', 9668: 'tec5-sdctp', 9694: 'client-wakeup', 9695: 'ccnx',
9700: 'board-roar', 9747: 'l5nas-parchan', 9750: 'board-voip', 9753: 'rasadv',
9762: 'tungsten-http', 9800: 'davsrc', 9801: 'sstp-2', 9802: 'davsrcs', 9875: 'sapv1', 9876: 'sd',
9878: 'kca-service', 9888: 'cyborg-systems', 9889: 'gt-proxy', 9898: 'monkeycom',
9899: 'sctp-tunneling', 9900: 'iua', 9901: 'enrp', 9902: 'enrp-sctp-tls', 9903: 'multicast-ping',
9909: 'domaintime', 9911: 'sype-transport', 9925: 'xybrid-cloud', 9950: 'apc-9950',
9951: 'apc-9951', 9952: 'apc-9952', 9953: 'acis', 9954: 'hinp', 9955: 'alljoyn-stm',
9956: 'alljoyn', 9966: 'odnsp', 9978: 'xybrid-rt', 9987: 'dsm-scm-target', 9988: 'nsesrvr',
9990: 'osm-appsrvr', 9991: 'osm-oev', 9992: 'palace-1', 9993: 'palace-2', 9994: 'palace-3',
9995: 'palace-4', 9996: 'palace-5', 9997: 'palace-6', 9998: 'distinct32', 9999: 'distinct',
10000: 'ndmp', 10001: 'scp-config', 10002: 'documentum', 10003: 'documentum-s', 10004: 'emcrmirccd',
10005: 'emcrmird', 10006: 'netapp-sync', 10007: 'mvs-capacity', 10008: 'octopus', 10009: 'swdtp-sv',
10010: 'rxapi', 10050: 'zabbix-agent', 10051: 'zabbix-trapper', 10055: 'qptlmd', 10080: 'amanda',
10081: 'famdc', 10100: 'itap-ddtp', 10101: 'ezmeeting-2', 10102: 'ezproxy-2', 10103: 'ezrelay',
10104: 'swdtp', 10107: 'bctp-server', 10110: 'nmea-0183', 10111: 'nmea-onenet',
10113: 'netiq-endpoint', 10114: 'netiq-qcheck', 10115: 'netiq-endpt', 10116: 'netiq-voipa',
10117: 'iqrm', 10125: 'cimple', 10128: 'bmc-perf-sd', 10129: 'bmc-gms', 10160: 'qb-db-server',
10161: 'snmptls', 10162: 'snmptls-trap', 10200: 'trisoap', 10201: 'rsms', 10252: 'apollo-relay',
10253: 'eapol-relay', 10260: 'axis-wimp-port', 10288: 'blocks', 10321: 'cosir', 10439: 'bngsync',
10500: 'hip-nat-t', 10548: 'serverdocs', 10631: 'printopia', 10800: 'gap', 10805: 'lpdg',
10809: 'nbd', 10810: 'nmc-disc', 10860: 'helix', 10880: 'bveapi', 10933: 'octopustentacle',
10990: 'rmiaux', 11000: 'irisa', 11001: 'metasys', 10023: 'cefd-vmp', 11095: 'weave',
11103: 'origo-sync', 11104: 'netapp-icmgmt', 11105: 'netapp-icdata', 11106: 'sgi-lk',
11108: 'myq-termlink', 11109: 'sgi-dmfmgr', 11110: 'sgi-soap', 11111: 'vce', 11112: 'dicom',
11161: 'suncacao-snmp', 11162: 'suncacao-jmxmp', 11163: 'suncacao-rmi', 11164: 'suncacao-csa',
11165: 'suncacao-websvc', 11171: 'snss', 11172: 'oemcacao-jmxmp', 11173: 't5-straton',
11174: 'oemcacao-rmi', 11175: 'oemcacao-websvc', 11201: 'smsqp', 11202: 'dcsl-backup',
11208: 'wifree', 11211: 'memcache', 11319: 'imip', 11320: 'imip-channels', 11321: 'arena-server',
11367: 'atm-uhas', 11371: 'hkp', 11430: 'lsdp', 11489: 'asgcypresstcps', 11600: 'tempest-port',
11623: 'emc-xsw-dconfig', 11720: 'h323callsigalt', 11723: 'emc-xsw-dcache', 11751: 'intrepid-ssl',
11796: 'lanschool', 11876: 'xoraya', 11877: 'x2e-disc', 11967: 'sysinfo-sp', 11997: 'wmereceiving',
11998: 'wmedistribution', 11999: 'wmereporting', 12000: 'entextxid', 12001: 'entextnetwk',
12002: 'entexthigh', 12003: 'entextmed', 12004: 'entextlow', 12005: 'dbisamserver1',
12006: 'dbisamserver2', 12007: 'accuracer', 12008: 'accuracer-dbms', 12009: 'ghvpn',
12010: 'edbsrvr', 12012: 'vipera', 12013: 'vipera-ssl', 12109: 'rets-ssl', 12121: 'nupaper-ss',
12168: 'cawas', 12172: 'hivep', 12300: 'linogridengine', 12302: 'rads', 12321: 'warehouse-sss',
12322: 'warehouse', 12345: 'italk', 12753: 'tsaf', 12865: 'netperf', 13160: 'i-zipqd',
13216: 'bcslogc', 13217: 'rs-pias', 13218: 'emc-vcas-tcp', 13223: 'powwow-client',
13224: 'powwow-server', 13400: 'doip-data', 13720: 'bprd', 13721: 'bpdbm', 13722: 'bpjava-msvc',
13724: 'vnetd', 13782: 'bpcd', 13783: 'vopied', 13785: 'nbdb', 13786: 'nomdb',
13818: 'dsmcc-config', 13819: 'dsmcc-session', 13820: 'dsmcc-passthru', 13821: 'dsmcc-download',
13822: 'dsmcc-ccp', 13823: 'bmdss', 13894: 'ucontrol', 13929: 'dta-systems', 13930: 'medevolve',
14000: 'scotty-ft', 14001: 'sua', 14002: 'scotty-disc', 14033: 'sage-best-com1',
14034: 'sage-best-com2', 14141: 'vcs-app', 14142: 'icpp', 14145: 'gcm-app', 14149: 'vrts-tdd',
14150: 'vcscmd', 14154: 'vad', 14250: 'cps', 14414: 'ca-web-update', 14936: 'hde-lcesrvr-1',
14937: 'hde-lcesrvr-2', 15000: 'hydap', 15002: 'onep-tls', 15118: 'v2g-secc', 15345: 'xpilot',
15555: 'cisco-snat', 15660: 'bex-xr', 15740: 'ptp', 15999: 'programmar', 16000: 'fmsas',
16001: 'fmsascon', 16002: 'gsms', 16003: 'alfin', 16020: 'jwpc', 16021: 'jwpc-bin',
16161: 'sun-sea-port', 16162: 'solaris-audit', 16309: 'etb4j', 16310: 'pduncs', 16311: 'pdefmns',
16360: 'netserialext1', 16361: 'netserialext2', 16367: 'netserialext3', 16368: 'netserialext4',
16384: 'connected', 16385: 'rdgs', 16619: 'xoms', 16665: 'axon-tunnel', 16666: 'vtp',
16789: 'cadsisvr', 16900: 'newbay-snc-mc', 16950: 'sgcip', 16991: 'intel-rci-mp',
16992: 'amt-soap-http', 16993: 'amt-soap-https', 16994: 'amt-redir-tcp', 16995: 'amt-redir-tls',
17007: 'isode-dua', 17184: 'vestasdlp', 17185: 'soundsvirtual', 17219: 'chipper', 17220: 'avtp',
17221: 'avdecc', 17222: 'cpsp', 17223: 'isa100-gci', 17224: 'trdp-pd', 17225: 'trdp-md',
17234: 'integrius-stp', 17235: 'ssh-mgmt', 17500: 'db-lsp', 17555: 'ailith', 17729: 'ea',
17754: 'zep', 17755: 'zigbee-ip', 17756: 'zigbee-ips', 17777: 'sw-orion', 18000: 'biimenu',
18104: 'radpdf', 18136: 'racf', 18181: 'opsec-cvp', 18182: 'opsec-ufp', 18183: 'opsec-sam',
18184: 'opsec-lea', 18185: 'opsec-omi', 18186: 'ohsc', 18187: 'opsec-ela', 18241: 'checkpoint-rtm',
18242: 'iclid', 18243: 'clusterxl', 18262: 'gv-pf', 18463: 'ac-cluster', 18634: 'rds-ib',
18635: 'rds-ip', 18769: 'ique', 18881: 'infotos', 18888: 'apc-necmp', 19000: 'igrid',
19007: 'scintilla', 19020: 'j-link', 19191: 'opsec-uaa', 19194: 'ua-secureagent', 19283: 'keysrvr',
19315: 'keyshadow', 19398: 'mtrgtrans', 19410: 'hp-sco', 19411: 'hp-sca', 19412: 'hp-sessmon',
19539: 'fxuptp', 19540: 'sxuptp', 19541: 'jcp', 19788: 'mle', 19998: 'iec-104-sec',
19999: 'dnp-sec', 20000: 'dnp', 20001: 'microsan', 20002: 'commtact-http', 20003: 'commtact-https',
20005: 'openwebnet', 20012: 'ss-idi-disc', 20013: 'ss-idi', 20014: 'opendeploy', 20034: 'nburn-id',
20046: 'tmophl7mts', 20048: 'mountd', 20049: 'nfsrdma', 20167: 'tolfab', 20202: 'ipdtp-port',
20222: 'ipulse-ics', 20480: 'emwavemsg', 20670: 'track', 20999: 'athand-mmp', 21000: 'irtrans',
21010: 'notezilla-lan', 21553: 'rdm-tfs', 21554: 'dfserver', 21590: 'vofr-gateway', 21800: 'tvpm',
21845: 'webphone', 21846: 'netspeak-is', 21847: 'netspeak-cs', 21848: 'netspeak-acd',
21849: 'netspeak-cps', 22000: 'snapenetio', 22001: 'optocontrol', 22002: 'optohost002',
22003: 'optohost003', 22004: 'optohost004', 22005: 'optohost004', 22125: 'dcap', 22128: 'gsidcap',
22222: 'easyengine', 22273: 'wnn6', 22305: 'cis', 22335: 'shrewd-control', 22343: 'cis-secure',
22347: 'wibukey', 22350: 'codemeter', 22351: 'codemeter-cmwan', 22537: 'caldsoft-backup',
22555: 'vocaltec-wconf', 22763: 'talikaserver', 22800: 'aws-brf', 22951: 'brf-gw',
23000: 'inovaport1', 23001: 'inovaport2', 23002: 'inovaport3', 23003: 'inovaport4',
23004: 'inovaport5', 23005: 'inovaport6', 23053: 'gntp', 23272: 's102', 23333: 'elxmgmt',
23400: 'novar-dbase', 23401: 'novar-alarm', 23402: 'novar-global', 23456: 'aequus',
23457: 'aequus-alt', 23546: 'areaguard-neo', 24000: 'med-ltp', 24001: 'med-fsp-rx',
24002: 'med-fsp-tx', 24003: 'med-supp', 24004: 'med-ovw', 24005: 'med-ci', 24006: 'med-net-svc',
24242: 'filesphere', 24249: 'vista-4gl', 24321: 'ild', 24322: 'hid', 24386: 'intel-rci',
24465: 'tonidods', 24554: 'binkp', 24577: 'bilobit', 24666: 'sdtvwcam', 24676: 'canditv',
24677: 'flashfiler', 24678: 'proactivate', 24680: 'tcc-http', 24754: 'cslg', 24850: 'assoc-disc',
24922: 'find', 25000: 'icl-twobase1', 25001: 'icl-twobase2', 25002: 'icl-twobase3',
25003: 'icl-twobase4', 25004: 'icl-twobase5', 25005: 'icl-twobase6', 25006: 'icl-twobase7',
25007: 'icl-twobase8', 25008: 'icl-twobase9', 25009: 'icl-twobase10', 25471: 'rna',
25576: 'sauterdongle', 25604: 'idtp', 25793: 'vocaltec-hos', 25900: 'tasp-net', 25901: 'niobserver',
25902: 'nilinkanalyst', 25903: 'niprobe', 25954: 'bf-game', 25955: 'bf-master', 26000: 'quake',
26133: 'scscp', 26208: 'wnn6-ds', 26257: 'cockroach', 26260: 'ezproxy', 26261: 'ezmeeting',
26262: 'k3software-svr', 26263: 'k3software-cli', 26486: 'exoline-tcp', 26487: 'exoconfig',
26489: 'exonet', 27000 - 27009: 'flex-lm', 27345: 'imagepump', 27442: 'jesmsjc',
27504: 'kopek-httphead', 27782: 'ars-vista', 27876: 'astrolink', 27999: 'tw-auth-key',
28000: 'nxlmd', 28001: 'pqsp', 28119: 'a27-ran-ran', 28200: 'voxelstorm', 28240: 'siemensgsm',
29118: 'sgsap', 29167: 'otmp', 29168: 'sbcap', 29169: 'iuhsctpassoc', 29999: 'bingbang',
30000: 'ndmps', 30001: 'pago-services1', 30002: 'pago-services2', 30003: 'amicon-fpsu-ra',
30004: 'amicon-fpsu-s', 30100: 'rwp', 30260: 'kingdomsonline', 30832: 'samsung-disc',
30999: 'ovobs', 31020: 'autotrac-acp', 31029: 'yawn', 31400: 'pace-licensed', 31416: 'xqosd',
31457: 'tetrinet', 31620: 'lm-mon', 31685: 'dsx-monitor', 31765: 'gamesmith-port',
31948: 'iceedcp-tx', 31949: 'iceedcp-rx', 32034: 'iracinghelper', 32249: 't1distproc60',
32483: 'apm-link', 32635: 'sec-ntb-clnt', 32767: 'filenet-powsrm', 32768: 'filenet-tms',
32769: 'filenet-rpc', 32770: 'filenet-nch', 32771: 'filenet-rmi', 32772: 'filenet-pa',
32773: 'filenet-cm', 32774: 'filenet-re', 32775: 'filenet-pch', 32776: 'filenet-peior',
32777: 'filenet-obrok', 32801: 'mlsn', 32811: 'retp', 32896: 'idmgratm', 33060: 'mysqlx',
33123: 'aurora-balaena', 33331: 'diamondport', 33333: 'dgi-serv', 33334: 'speedtrace',
33434: 'traceroute', 33656: 'snip-slave', 34249: 'turbonote-2', 34378: 'p-net-local',
34379: 'p-net-remote', 34567: 'dhanalakshmi', 34962: 'profinet-rt', 34963: 'profinet-rtm',
34964: 'profinet-cm', 34980: 'ethercat', 35000: 'heathview', 35001: 'rt-viewer', 35002: 'rt-sound',
35003: 'rt-devicemapper', 35004: 'rt-classmanager', 35005: 'rt-labtracker', 35006: 'rt-helper',
35354: 'kitim', 35355: 'altova-lm', 35356: 'guttersnex', 35357: 'openstack-id', 36001: 'allpeers',
36411: 'wlcp', 36412: 's1-control', 36422: 'x2-control', 36423: 'slmap', 36424: 'nq-ap',
36443: 'm2ap', 36444: 'm3ap', 36462: 'xw-control', 36524: 'febooti-aw', 36602: 'observium-agent',
36700: 'mapx', 36865: 'kastenxpipe', 37475: 'neckar', 37483: 'gdrive-sync', 37601: 'eftp',
37654: 'unisys-eportal', 38000: 'ivs-database', 38001: 'ivs-insertion', 38002: 'cresco-control',
38201: 'galaxy7-data', 38202: 'fairview', 38203: 'agpolicy', 38800: 'sruth',
38865: 'secrmmsafecopya', 39681: 'turbonote-1', 40000: 'safetynetp', 40023: 'k-patentssensor',
40404: 'sptx', 40841: 'cscp', 40842: 'csccredir', 40843: 'csccfirewall', 40853: 'ortec-disc',
41111: 'fs-qos', 41121: 'tentacle', 41230: 'z-wave-s', 41794: 'crestron-cip', 41795: 'crestron-ctp',
41796: 'crestron-cips', 41797: 'crestron-ctps', 42508: 'candp', 42509: 'candrp', 42510: 'caerpc',
43000: 'recvr-rc', 43188: 'reachout', 43189: 'ndm-agent-port', 43190: 'ip-provision',
43191: 'noit-transport', 43210: 'shaperai', 43439: 'eq3-update', 43440: 'ew-mgmt',
43441: 'ciscocsdb', 44123: 'z-wave-tunnel', 44321: 'pmcd', 44322: 'pmcdproxy', 44323: 'pmwebapi',
44444: 'cognex-dataman', 44544: 'domiq', 44553: 'rbr-debug', 44600: 'asihpi', 44900: 'm3da',
45000: 'asmp', 45001: 'asmps', 45002: 'rs-status', 45045: 'synctest', 45054: 'invision-ag',
45678: 'eba', 45824: 'dai-shell', 45825: 'qdb2service', 45966: 'ssr-servermgr', 46336: 'inedo',
46998: 'spremotetablet', 46999: 'mediabox', 47000: 'mbus', 47001: 'winrm', 47100: 'jvl-mactalk',
47557: 'dbbrowse', 47624: 'directplaysrvr', 47806: 'ap', 47808: 'bacnet', 47809: 'presonus-ucnet',
48000: 'nimcontroller', 48001: 'nimspooler', 48002: 'nimhub', 48003: 'nimgtw', 48004: 'nimbusdb',
48005: 'nimbusdbctrl', 48050: 'weandsf', 48128: 'isnetserv', 48129: 'blp5', 48556: 'com-bardac-dw',
48619: 'iqobject', 48653: 'robotraconteur', 49000: 'matahari'} | /rm-sec-toolkit-0.2.4.tar.gz/rm-sec-toolkit-0.2.4/rmsectkf/core/network/port_service_map.py | 0.408631 | 0.316805 | port_service_map.py | pypi |
import numpy as np
import pandas as pd
from scipy.optimize import minimize
def vol_risk_parity(stockMeans, covar, b=None):
    """Volatility risk-parity portfolio.

    Finds long-only weights summing to one such that every asset's
    (optionally budget-scaled) component standard deviation is equal, by
    minimising the sum of squared deviations of the component SDs from
    their mean.

    Parameters:
    - stockMeans: expected return per asset (array-like)
    - covar: covariance matrix of asset returns (n x n)
    - b: optional risk-budget vector; component SDs are divided by it
      inside the objective when supplied

    Returns:
    pd.DataFrame with columns 'Weight', 'cEr' (weight-scaled expected
    return) and 'CSD' (raw component standard deviation).
    """
    n_assets = len(stockMeans)

    def portfolio_vol(weights):
        # sqrt(w' Sigma w)
        vec = np.array(weights)
        return np.sqrt(vec.dot(covar).dot(vec))

    def component_sd(weights, budgets=None, last=False):
        # Euler decomposition: csd_i = w_i * (Sigma w)_i / vol.
        vec = np.array(weights)
        csd = vec * (covar.dot(vec)) / portfolio_vol(weights)
        if last:
            # Raw (unscaled) component SDs, used for reporting.
            return csd
        if budgets is not None:
            csd = csd / budgets
        return csd

    def objective(weights):
        # Squared dispersion of the (scaled) component SDs around their
        # mean; large multiplier helps SLSQP converge.
        csd = component_sd(weights, b)
        spread = csd - np.sum(csd) / n_assets
        return 1.0e5 * np.sum(spread * spread)

    result = minimize(
        objective,
        [1 / n_assets] * n_assets,
        method='SLSQP',
        bounds=[(0, None)] * n_assets,
        constraints={'type': 'eq', 'fun': lambda weights: np.sum(weights) - 1},
    )
    best_w = result.x

    return pd.DataFrame({
        'Weight': best_w,
        'cEr': stockMeans * best_w,
        'CSD': component_sd(best_w, b, True),
    })
def es_risk_parity(stock, stockMeans, simReturn, b=None):
    """Expected-shortfall (ES) risk-parity portfolio.

    Searches for long-only weights summing to one such that each asset's
    component ES contribution (optionally scaled by the budget vector
    ``b``) is equalised across assets.

    Parameters:
    - stock: asset labels (becomes the index of the result)
    - stockMeans: expected return per asset
    - simReturn: simulated return scenarios, shape (n_scenarios, n_assets)
    - b: optional risk-budget vector used to scale contributions

    Returns:
    pd.DataFrame indexed by stock with columns 'Weight', 'cEr' and 'CES'.
    """
    # internal ES function
    def _ES(*w):
        def ES(a, alpha=0.05):
            # Expected shortfall at level alpha: mean of the losses at or
            # below the interpolated alpha-quantile, sign-flipped so it is
            # reported as a positive risk number.
            x = np.sort(a)
            nup = int(np.ceil(a.size * alpha))
            ndn = int(np.floor(a.size * alpha))
            v = 0.5 * (x[nup] + x[ndn])
            es = np.mean(x[x <= v])
            return -es
        # Portfolio scenario returns for weight vector w.
        r = simReturn @ w
        return ES(r)
    # Function for the component ES
    def CES(w, b=None, last = False):
        x = list(w)
        n = len(x)
        ces = np.empty(n)
        es = _ES(*x)
        e = 1e-6
        for i in range(n):
            # Forward finite difference of ES w.r.t. w[i], scaled by w[i]
            # (Euler decomposition of the homogeneous risk measure).
            old = x[i]
            x[i] = x[i] + e
            ces[i] = old * (_ES(*x) - es) / e
            x[i] = old
        if last:
            # Raw component ES for reporting (no budget scaling).
            return ces
        if b is not None:
            ces /= b
        return ces
    # SSE of the Component ES
    def SSE_CES(*w):
        # Squared dispersion of the (scaled) contributions around their
        # mean; the 1e3 multiplier aids optimizer convergence.
        ces = CES(*w,b)
        ces = [x - sum(ces) / len(ces) for x in ces]
        return 1e3 * (sum([x ** 2 for x in ces]))
    n = len(stock)
    w0 = np.full(n, 1/n)
    bounds = [(0, None)] * n
    res = minimize(SSE_CES, w0, method='SLSQP', bounds=bounds, constraints=[{'type': 'eq', 'fun': lambda w: sum(w) - 1}])
    w = res.x
    # Compute RPWeights
    ES_RPWeights = pd.DataFrame({
        'Stock': stock,
        'Weight': w,
        'cEr': stockMeans * w,
        'CES': CES(w, b, True)
    }).set_index('Stock')
    return ES_RPWeights
import numpy as np
import pandas as pd
from . import cov_matrix
from scipy.stats import t, norm
from scipy.optimize import minimize
# Multivariate Normal Simulation
def multivariate_normal_simulation(covariance_matrix, n_samples, method='direct', mean = 0, explained_variance=1.0, seed=1234):
    """Draw multivariate normal samples from a covariance matrix.

    Parameters:
    - covariance_matrix (np.array): covariance of the target distribution
    - n_samples (int): number of samples to draw
    - method (str): 'direct' (Cholesky factor of the full matrix) or
      'pca' (simulate in the reduced eigenspace)
    - mean: additive mean, applied in the 'direct' branch
    - explained_variance (float): fraction of variance to retain under 'pca'
    - seed (int): numpy RNG seed for reproducibility

    Returns:
    np.array of shape (n_samples, n_variables) with the simulated draws.
    """
    np.random.seed(seed)
    dim = covariance_matrix.shape[0]
    if method == 'direct':
        # Factor the covariance and colour iid standard normals with it.
        root = cov_matrix.chol_psd(covariance_matrix)
        z = np.random.normal(size=(dim, n_samples))
        return np.transpose(np.dot(root, z) + mean)
    elif method == 'pca':
        vals, vecs = np.linalg.eigh(covariance_matrix)
        # Discard numerically zero / negative eigenvalues.
        keep = vals > 1e-8
        vals = vals[keep]
        vecs = vecs[:, keep]
        # Order components from largest to smallest eigenvalue.
        order = np.argsort(vals)[::-1]
        vals = vals[order]
        vecs = vecs[:, order]
        ratio = np.cumsum(vals) / np.sum(vals)
        # If the caller asked for 100%, cap at the achievable cumulative
        # ratio (floating point can leave it slightly below 1.0).
        if explained_variance == 1.0:
            explained_variance = ratio[-1]
        # Smallest number of components reaching the requested variance.
        n_components = np.where(ratio >= explained_variance)[0][0] + 1
        vecs = vecs[:, :n_components]
        vals = vals[:n_components]
        z = np.random.normal(size=(n_components, n_samples))
        # Colour the normals with B = V sqrt(Lambda).
        loading = np.dot(vecs, np.diag(np.sqrt(vals)))
        return np.transpose(np.dot(loading, z))
# Fitting T
# MLE fitted T distribution
def Fitting_t_MLE(returns):
    """Fit a Student-t distribution to returns by maximum likelihood.

    Minimises the negative t log-likelihood subject to df > 1 and
    scale >= 0, starting from df=10 and the sample mean / std.

    Returns the tuple (df, loc, scale).
    """
    def neg_log_likelihood(params, data):
        # params = (df, loc, scale)
        return -1 * np.sum(t.logpdf(data, df=params[0], loc=params[1], scale=params[2]))

    cons = (
        {"type": "ineq", "fun": lambda p: p[0] - 1},  # df > 1
        {"type": "ineq", "fun": lambda p: p[2]},      # scale >= 0
    )
    fit = minimize(
        neg_log_likelihood,
        x0=[10, np.mean(returns), np.std(returns)],
        args=returns,
        constraints=cons,
    )
    return fit.x[0], fit.x[1], fit.x[2]
def gaussian_copula(returns, fitting_model=None, n_sample=10000, seed=12345):
    """Simulate joint returns with a Gaussian copula.

    Each column of ``returns`` is fitted with a marginal distribution
    ('t' = Student-t via MLE, 'n' = normal), mapped to uniforms through
    its CDF, correlated via a PCA normal simulation driven by the Spearman
    correlation matrix, and finally mapped back through the fitted inverse
    CDFs.

    Parameters:
    - returns: DataFrame of historical returns (one column per asset)
    - fitting_model: per-column model codes ('t' or 'n'); defaults to all 't'
    - n_sample: number of joint scenarios to draw
    - seed: RNG seed for reproducibility

    Returns:
    (sim_returns, parameters): the simulated-returns DataFrame and a
    DataFrame of fitted marginal parameters indexed by (stock, model).
    """
    tickers = returns.columns.tolist()
    n_assets = len(tickers)
    if fitting_model is None:
        fitting_model = np.full(n_assets, 't')
    # Fit each marginal and transform observations to uniforms via its CDF.
    fitted_params = []
    uniforms = pd.DataFrame()
    for i, ticker in enumerate(tickers):
        if fitting_model[i] == 't':
            params = Fitting_t_MLE(returns[ticker])
            model = 't'
        elif fitting_model[i] == 'n':
            params = norm.fit(returns[ticker])
            model = 'n'
        fitted_params.append(params)
        if model == 't':
            uniforms[ticker] = t.cdf(returns[ticker], df=params[0], loc=params[1], scale=params[2])
        else:
            uniforms[ticker] = norm.cdf(returns[ticker], loc=params[0], scale=params[1])
    # Spearman correlation of the uniforms drives the Gaussian copula.
    np.random.seed(seed)
    spearman = uniforms.corr(method='spearman')
    draws = multivariate_normal_simulation(spearman, n_sample, method='pca', seed=seed)
    draws = pd.DataFrame(draws, columns=tickers)
    # Map the correlated normals back to uniforms...
    copula_u = pd.DataFrame()
    for ticker in tickers:
        copula_u[ticker] = norm.cdf(draws[ticker], loc=0, scale=1)
    # ...and through each fitted marginal's inverse CDF to returns.
    sim_returns = pd.DataFrame()
    for i, ticker in enumerate(tickers):
        if fitting_model[i] == 't':
            sim_returns[ticker] = t.ppf(copula_u[ticker], df=fitted_params[i][0], loc=fitted_params[i][1], scale=fitted_params[i][2])
        elif fitting_model[i] == 'n':
            sim_returns[ticker] = norm.ppf(copula_u[ticker], loc=fitted_params[i][0], scale=fitted_params[i][1])
    return sim_returns, pd.DataFrame(fitted_params, index=[tickers, fitting_model])
import numpy as np
import pandas as pd
def risk_contrib(w, covar):
    """Per-asset risk contribution: w_i * (Sigma w)_i / portfolio volatility.

    The contributions sum to the portfolio standard deviation (Euler
    decomposition).
    """
    marginal = covar.dot(w)
    total_vol = np.sqrt(w.dot(marginal))
    return w * marginal / total_vol
def expost_attribution(w, upReturns):
    """Ex-post return and volatility attribution for a drifting portfolio.

    Starting from weights ``w``, drifts the weights through each period of
    ``upReturns``, computes per-period portfolio returns, and attributes
    the total geometric return to each stock with the Carino smoothing
    factor. Volatility is attributed via the regression beta of each
    weighted stock return on the portfolio return.

    Parameters:
    - w: initial portfolio weights, aligned with the columns of upReturns
    - upReturns: DataFrame of per-period simple returns, one column per
      stock. NOTE: mutated in place — a 'Portfolio' column is appended.

    Returns:
    DataFrame with rows 'TotalReturn', 'Return Attribution' and
    'Vol Attribution', one column per stock plus 'Portfolio'.
    """
    _stocks = list(upReturns.columns)
    n = upReturns.shape[0]
    pReturn = np.empty(n)
    weights = np.empty((n, len(w)))
    lastW = np.copy(w)
    matReturns = upReturns[_stocks].values
    for i in range(n):
        # Save current weights, then drift them by the period's returns.
        weights[i, :] = lastW
        lastW = lastW * (1.0 + matReturns[i, :])
        # Portfolio gross return is the sum of the drifted weights.
        pR = np.sum(lastW)
        # Renormalize so the weights sum to 1 again.
        lastW = lastW / pR
        pReturn[i] = pR - 1
    # Record the per-period portfolio return alongside the stock returns.
    upReturns['Portfolio'] = pReturn
    # Total geometric return over the window.
    totalRet = np.exp(np.sum(np.log(pReturn + 1))) - 1
    # Carino scaling factors linearise the geometric linking of returns.
    k = np.log(totalRet + 1) / totalRet
    carinoK = np.log(1.0 + pReturn) / pReturn / k
    # Per-period, per-stock return attribution.
    attrib = pd.DataFrame(matReturns * (weights * carinoK[:, np.newaxis]), columns=_stocks)
    # Assemble the output table. upReturns already contains 'Portfolio'
    # at this point, so iterate its columns directly (the previous code
    # appended 'Portfolio' a second time, duplicating the last iteration).
    Attribution = pd.DataFrame({'Value': ["TotalReturn", "Return Attribution"]})
    for s in list(upReturns.columns):
        # Total return of the stock (or portfolio) over the period.
        tr = np.exp(np.sum(np.log(upReturns[s] + 1))) - 1
        # Attributed return; for the portfolio itself this is the total.
        atr = attrib[s].sum() if s != 'Portfolio' else tr
        Attribution[s] = [tr, atr]
    # Volatility attribution: regress the weighted stock returns on the
    # portfolio return and scale the betas by the portfolio volatility.
    Y = matReturns * weights
    X = np.column_stack((np.ones((pReturn.shape[0], 1)), pReturn))
    B = (np.linalg.inv(X.T @ X) @ X.T @ Y)[1, :]
    cSD = B * np.std(pReturn)
    return pd.concat([Attribution,
                      pd.DataFrame({"Value": ["Vol Attribution"],
                                    **{_stocks[i]: [cSD[i]] for i in range(len(_stocks))},
                                    "Portfolio": [np.std(pReturn)]})
                      ], ignore_index=True)
def expost_factor(w, upReturns, upFfData, Betas):
    """Ex-post factor-based return and volatility attribution.

    Drifts the initial weights ``w`` through each period of ``upReturns``,
    maps portfolio weights into factor exposures via ``Betas``, and
    attributes the total geometric return to each factor (plus a residual
    'Alpha') using Carino smoothing. Volatility is attributed by the
    regression betas of the weighted factor returns on the portfolio
    return.

    Parameters:
    - w: initial portfolio weights, aligned with the columns of upReturns
    - upReturns: DataFrame of per-period stock returns
    - upFfData: DataFrame of per-period factor returns.
      NOTE: mutated in place — 'Alpha' and 'Portfolio' columns are added.
    - Betas: stocks x factors exposure matrix

    Returns:
    DataFrame with rows 'TotalReturn', 'Return Attribution' and
    'Vol Attribution', one column per factor plus 'Alpha' and 'Portfolio'.
    """
    stocks = upReturns.columns
    factors = list(upFfData.columns)
    n = upReturns.shape[0]
    m = len(stocks)
    pReturn = np.empty(n)
    residReturn = np.empty(n)
    weights = np.empty((n, len(w)))
    factorWeights = np.empty((n, len(factors)))
    lastW = w.copy()
    matReturns = upReturns[stocks].to_numpy()
    ffReturns = upFfData[factors].to_numpy()
    for i in range(n):
        # Save Current Weights in Matrix
        weights[i,:] = lastW
        # Factor exposure of the current portfolio weights.
        factorWeights[i,:] = Betas.T @ lastW
        # Update Weights by return
        lastW = lastW * (1.0 + matReturns[i,:])
        # Portfolio return is the sum of the updated weights
        pR = np.sum(lastW)
        # Normalize the weights back so sum = 1
        lastW = lastW / pR
        # Store the return
        pReturn[i] = pR - 1
        # Residual: portfolio return not explained by the factor returns.
        residReturn[i] = (pR-1) - factorWeights[i,:] @ ffReturns[i,:]
    # Set the portfolio return in the Update Return DataFrame
    upFfData["Alpha"] = residReturn
    upFfData["Portfolio"] = pReturn
    # Calculate the total geometric return over the window.
    totalRet = np.exp(np.sum(np.log(pReturn + 1))) - 1
    # Calculate the Carino K (linearises geometric linking of returns).
    k = np.log(totalRet + 1) / totalRet
    # Carino k_t is the ratio scaled by 1/K
    carinoK = np.log(1.0 + pReturn) / pReturn / k
    # Calculate the per-period, per-factor return attribution.
    attrib = pd.DataFrame(ffReturns * (factorWeights * carinoK[:, np.newaxis]), columns=factors)
    attrib["Alpha"] = residReturn * carinoK
    # Set up a DataFrame for output.
    Attribution = pd.DataFrame({"Value": ["TotalReturn", "Return Attribution"]})
    newFactors = factors[:]
    newFactors.append('Alpha')
    ss = factors[:]
    ss.append('Alpha')
    ss.append('Portfolio')
    # Loop over the factors (plus Alpha and Portfolio)
    for s in ss:
        # Total return of the factor over the period
        tr = np.exp(np.sum(np.log(upFfData[s] + 1))) - 1
        # Attribution Return (total portfolio return if we are updating the portfolio column)
        atr = sum(attrib[s]) if s != "Portfolio" else tr
        # Set the values
        Attribution[s] = [tr, atr]
    # Realized Volatility Attribution
    # Y is the factor returns scaled by exposure, plus the residual column
    Y = np.hstack((ffReturns * factorWeights, residReturn[:,np.newaxis]))
    # Set up X with the Portfolio Return
    X = np.hstack((np.ones((n,1)), pReturn[:,np.newaxis]))
    # Calculate the Beta and discard the intercept
    B = np.linalg.inv(X.T @ X) @ X.T @ Y
    B = B[1,:]
    # Component SD is Beta times the standard Deviation of the portfolio
    cSD = B * np.std(pReturn)
    # Sanity check: the component SDs must sum to the portfolio SD.
    assert np.isclose(np.sum(cSD), np.std(pReturn))
    # Add the Vol attribution to the output
    Expost_Attribution = pd.concat([Attribution,
                            pd.DataFrame({"Value": "Vol Attribution", **{newFactors[i]:cSD[i] for i in range(len(newFactors))}, "Portfolio":np.std(pReturn)}, index=[0])
                            ])
    return Expost_Attribution
import numpy as np
# Exponentially Weighted Covariance Matrix
def exp_weighted_cov(returns, lambda_=0.97):
    """Exponentially weighted covariance via a recursive update.

    Seeds the recursion with the sample covariance and blends in the outer
    product of each successive demeaned observation:
        cov_t = lambda * cov_{t-1} + (1 - lambda) * x_t x_t'

    Parameters:
    - returns: DataFrame of returns (rows = time, columns = assets)
    - lambda_: decay factor in (0, 1), default 0.97

    Returns:
    np.array covariance matrix.
    """
    values = returns.values
    demeaned = values - values.mean(axis=0)
    # Seed with the ordinary sample covariance (columns as variables).
    cov = np.cov(values, rowvar=False)
    # Blend in each observation from t=1 onward.
    for row in demeaned[1:]:
        cov = lambda_ * cov + (1 - lambda_) * np.outer(row, row)
    return cov
# Exponentially Weighted Matrix
def exp_weighted_matrix(returns, lambda_=0.97):
    """Diagonal matrix of normalized exponential weights.

    The most recent observation (last row) gets the largest weight; the
    weight for an observation of age t is proportional to
    (1 - lambda) * lambda**t.

    Parameters:
    - returns: DataFrame of returns (only its row count is used)
    - lambda_: decay factor in (0, 1), default 0.97

    Returns:
    np.array diagonal matrix whose diagonal sums to 1.
    """
    n_obs = returns.values.shape[0]
    # Age 0 = newest observation (last row), so reverse the decay series.
    raw = (1 - lambda_) * lambda_ ** np.arange(n_obs)[::-1]
    return np.diag(raw / raw.sum())
# Cholesky Factorization
def chol_psd(cov_matrix):
    """Cholesky factorization that tolerates PSD (rank-deficient) matrices.

    Unlike np.linalg.cholesky, a pivot that is numerically zero (within
    -1e-8 of zero) is clamped to exactly zero and the remainder of that
    column is zeroed instead of raising.

    Parameters:
    - cov_matrix: square numpy array, assumed positive semi-definite

    Returns:
    Lower-triangular root such that root @ root.T reproduces cov_matrix.
    """
    n = cov_matrix.shape[0]
    root = np.zeros_like(cov_matrix)
    for j in range(n):
        # Pivot: diagonal entry minus the squared norm of the row so far.
        pivot = cov_matrix[j, j] - np.dot(root[j, :j], root[j, :j])
        # Clamp tiny negatives caused by round-off to exactly zero.
        if 0 >= pivot >= -1e-8:
            pivot = 0.0
        root[j, j] = np.sqrt(pivot)
        if root[j, j] == 0.0:
            # Zero eigenvalue: the rest of this column must be zero.
            root[j + 1:, j] = 0.0
        else:
            inv_pivot = 1.0 / root[j, j]
            for i in range(j + 1, n):
                root[i, j] = (cov_matrix[i, j] - np.dot(root[i, :j], root[j, :j])) * inv_pivot
    return root
# Dealing with Non-PSD Matrices - Rebonato and Jackel
def near_psd(matrix, epsilon=0.0):
    """Rebonato-Jackel near-PSD adjustment of a correlation/covariance matrix.

    Floors the eigenvalues at ``epsilon``, rescales the eigenvectors so the
    reconstruction keeps a unit diagonal (in correlation space), and
    restores the original variances when a covariance matrix was supplied.

    Parameters:
    - matrix: square 2-D numpy array (covariance or correlation)
    - epsilon: floor applied to the eigenvalues, default 0.0

    Returns:
    2-D numpy array, the adjusted (near) PSD matrix.
    """
    n = matrix.shape[0]
    adjusted = matrix.copy()
    # Convert a covariance input to correlation form first.
    inv_sd = None
    if np.count_nonzero(np.diag(adjusted) == 1.0) != n:
        inv_sd = np.diag(1 / np.sqrt(np.diag(adjusted)))
        adjusted = inv_sd @ adjusted @ inv_sd
    # Eigen-decompose, floor the eigenvalues, and rebuild with a row
    # scaling chosen so the reconstruction has a unit diagonal.
    vals, vecs = np.linalg.eigh(adjusted)
    vals = np.maximum(vals, epsilon)
    scale = np.diag(np.sqrt(np.reciprocal((vecs ** 2) @ vals)))
    B = scale @ vecs @ np.diag(np.sqrt(vals))
    adjusted = B @ B.T
    # Restore the original variances if we normalized above.
    if inv_sd is not None:
        sd = np.diag(1 / np.diag(inv_sd))
        adjusted = sd @ adjusted @ sd
    return adjusted
# Dealing with Non-PSD Matrices - Higham
def Pu(matrix):
    """Higham's first projection (assuming a diagonal weight matrix):
    return a copy with the main diagonal replaced by ones."""
    projected = matrix.copy()
    for row_idx, row in enumerate(projected):
        if row_idx < len(row):
            row[row_idx] = 1
    return projected
def Ps(matrix, weight):
    """Higham's second projection: clip negative eigenvalues to zero in the
    weight-scaled space, then map back with the same sqrt(weight) scaling."""
    sqrt_w = np.sqrt(weight)
    scaled = sqrt_w @ matrix @ sqrt_w
    vals, vecs = np.linalg.eigh(scaled)
    clipped = np.maximum(vals, 0)
    return sqrt_w @ vecs @ np.diagflat(clipped) @ vecs.T @ sqrt_w
def Frobenius_Norm(matrix_1, matrix_2):
    """Squared Frobenius distance between two equally-sized square matrices.

    Note: despite the name this returns the *squared* norm (no square
    root); Higham_psd consumes it that way for its convergence check.
    """
    diff = np.asarray(matrix_1) - np.asarray(matrix_2)
    return np.sum(diff * diff)
def Higham_psd(matrix, weight = None, epsilon = 1e-9, max_iter = 1000, tolerance = 1e-8):
    """
    Calculates the nearest positive semi-definite (PSD) matrix to a given
    non-PSD matrix by Higham's alternating-projections method, iterating
    between the PSD projection (Ps) and the unit-diagonal projection (Pu)
    with a running correction term (Delta_S).
    Parameters:
    - matrix: The input covariance matrix, a 2-dimensional numpy array
    - weight: Assume weight is a diagonal matrix, if unweighted, set 𝑊 = 𝐼
    - epsilon: Used to check the smallest eigenvalue from the result
    - max_iter: Restriction on the maximum iteration loops
    - tolerance: A small non-negative value used to restrict the distance for the original matrix, default value is 1e-8
    Returns:
    The output of this function is a 2-dimensional numpy array that represents a nearest PSD matrix.
    """
    if weight is None:
        weight = np.identity(len(matrix))
    norml = np.inf
    Yk = matrix.copy()
    Delta_S = np.zeros_like(Yk)
    # Convert a covariance input to correlation form (unit diagonal) first.
    invSD = None
    if np.count_nonzero(np.diag(Yk) == 1.0) != matrix.shape[0]:
        invSD = np.diag(1 / np.sqrt(np.diag(Yk)))
        Yk = np.matmul(np.matmul(invSD, Yk), invSD)
    Y0 = Yk.copy()
    for i in range(max_iter):
        # Subtract last step's correction before projecting onto the PSD
        # cone, accumulate the new correction, then restore the unit
        # diagonal (Dykstra-style correction scheme).
        Rk = Yk - Delta_S
        Xk = Ps(Rk, weight)
        Delta_S = Xk - Rk
        Yk = Pu(Xk)
        # Stop once the (squared) distance to Y0 stabilises and the
        # smallest eigenvalue is numerically non-negative.
        norm = Frobenius_Norm(Yk, Y0)
        minEigVal = np.real(np.linalg.eigvals(Yk)).min()
        if abs(norm - norml) < tolerance and minEigVal > -epsilon:
            break
        else:
            norml = norm
    # Undo the correlation scaling so the result is in covariance form.
    if invSD is not None:
        invSD = np.diag(1 / np.diag(invSD))
        Yk = np.matmul(np.matmul(invSD, Yk), invSD)
    return Yk
# Check the matrix is PSD or not
def is_psd(matrix):
    """Return True when every eigenvalue of ``matrix`` is numerically
    non-negative (>= -1e-8), i.e. the matrix is positive semi-definite."""
    return np.all(np.linalg.eigvalsh(matrix) >= -1e-8)
def missing_cov(x, skipMiss=True, fun=np.corrcoef):
    """Compute an (m, m) covariance/correlation matrix for data with NaNs.

    Parameters:
    - x: (n, m) array; observations in rows, variables in columns, may contain NaN
    - skipMiss: if True, drop every row containing any NaN and estimate once
      (complete-case); otherwise estimate each pair (i, j) from the rows
      where both columns are present (pairwise)
    - fun: estimator taking variables-in-rows data (np.corrcoef / np.cov style)

    Returns an (m, m) numpy array.
    """
    n, m = x.shape
    nMiss = np.sum(np.isnan(x), axis=0)
    # Nothing missing: estimate directly.
    if np.sum(nMiss) == 0:
        # Bug fix: transpose so variables are rows, matching the (m, m)
        # result of the missing-data branches below. The original passed
        # `x` unchanged, which yields an (n, n) matrix of *row* statistics.
        return fun(x.T)
    # For each column, the set of row indices where it is NaN.
    idxMissing = [set(np.where(np.isnan(x[:, i]))[0]) for i in range(m)]
    if skipMiss:
        # Complete-case: keep only rows with no missing value in any column.
        rows = set(range(n))
        for c in range(m):
            rows -= idxMissing[c]
        rows = sorted(rows)
        return fun(x[rows, :].T)
    else:
        # Pairwise: each cell uses the rows where both of its columns exist.
        out = np.empty((m, m))
        for i in range(m):
            for j in range(i + 1):
                rows = sorted(set(range(n)) - idxMissing[i] - idxMissing[j])
                out[i, j] = fun(x[rows, :][:, [i, j]].T)[0, 1]
                if i != j:
                    out[j, i] = out[i, j]
        return out
# Sentinels used for unbounded interval endpoints.
INFINITY = float('inf')
NEGATIVE_INFINITY = -INFINITY

class IntervalSet:
    """An ordered collection of disjoint Interval objects with set algebra
    (complement, intersection, union, difference)."""
    __slots__ = ('intervals', 'size')

    def __init__(self, intervals, disjoint=False):
        # Pass disjoint=True only when the caller guarantees the intervals
        # are already sorted and non-overlapping; otherwise they are merged.
        self.intervals = intervals
        if not disjoint:
            self.intervals = union_overlapping(self.intervals)
        self.size = sum(i.size for i in self.intervals)

    def __repr__(self):
        return repr(self.intervals)

    def __iter__(self):
        return iter(self.intervals)

    # Python 2 truth protocol: a set is truthy when it covers any extent.
    # NOTE(review): no __bool__ alias, so under Python 3 this is ignored.
    def __nonzero__(self):
        return self.size != 0

    def __sub__(self, other):
        # Set difference via A & ~B.
        return self.intersect( other.complement() )

    def complement(self):
        """Return the IntervalSet covering everything this one does not.

        Assumes self.intervals is sorted; sweeps a cursor across the real
        line emitting the gaps, including the unbounded tails.
        """
        complementary = []
        cursor = NEGATIVE_INFINITY
        for interval in self.intervals:
            if cursor < interval.start:
                complementary.append( Interval(cursor, interval.start) )
            cursor = interval.end
        if cursor < INFINITY:
            complementary.append( Interval(cursor, INFINITY) )
        return IntervalSet(complementary, disjoint=True)

    def intersect(self, other): #XXX The last major bottleneck. Factorial-time hell.
        # Then again, this function is entirely unused...
        # Pairwise-intersects every interval of self with every interval of
        # other (O(n*m)); empty intersections are filtered out.
        if (not self) or (not other):
            return IntervalSet([])
        #earliest = max(self.intervals[0].start, other.intervals[0].start)
        #latest = min(self.intervals[-1].end, other.intervals[-1].end)
        #mine = [i for i in self.intervals if i.start >= earliest and i.end <= latest]
        #theirs = [i for i in other.intervals if i.start >= earliest and i.end <= latest]
        intersections = [x for x in (i.intersect(j)
                                     for i in self.intervals
                                     for j in other.intervals)
                         if x]
        return IntervalSet(intersections, disjoint=True)

    def intersect_interval(self, interval):
        """Intersect every member with a single Interval."""
        intersections = [x for x in (i.intersect(interval)
                                     for i in self.intervals)
                         if x]
        return IntervalSet(intersections, disjoint=True)

    def union(self, other):
        # The IntervalSet constructor re-merges overlaps after the sort.
        return IntervalSet( sorted(self.intervals + other.intervals) )
class Interval:
    """A numeric interval [start, end] with start <= end.

    Endpoints may be +/-infinity, so extent is exposed via the ``size``
    attribute rather than ``len()``.
    """
    __slots__ = ('start', 'end', 'tuple', 'size')

    def __init__(self, start, end):
        if end - start < 0:
            raise ValueError("Invalid interval start=%s end=%s" % (start, end))
        self.start = start
        self.end = end
        self.tuple = (start, end)
        self.size = self.end - self.start

    def __eq__(self, other):
        return self.tuple == other.tuple

    def __hash__(self):
        return hash( self.tuple )

    def __lt__(self, other):
        # Python 3 ordering by start; __cmp__ below only serves Python 2.
        # Without this, sorted() over Intervals raises TypeError on Python 3.
        return self.start < other.start

    def __cmp__(self, other):
        return cmp(self.start, other.start)

    def __len__(self):
        raise TypeError("len() doesn't support infinite values, use the 'size' attribute instead")

    def __nonzero__(self):
        return self.size != 0

    # Python 3 truth protocol; without this alias every Interval would be
    # truthy on Python 3, including empty ones.
    __bool__ = __nonzero__

    def __repr__(self):
        return '<Interval: %s>' % str(self.tuple)

    def intersect(self, other):
        """Return the overlapping Interval, or None when disjoint or empty."""
        start = max(self.start, other.start)
        end = min(self.end, other.end)
        if end > start:
            return Interval(start, end)

    def overlaps(self, other):
        """True when the two intervals touch or overlap."""
        earlier = self if self.start <= other.start else other
        later = self if earlier is other else other
        return earlier.end >= later.start

    def union(self, other):
        """Smallest Interval covering both; raises TypeError when disjoint."""
        if not self.overlaps(other):
            raise TypeError("Union of disjoint intervals is not an interval")
        start = min(self.start, other.start)
        end = max(self.end, other.end)
        return Interval(start, end)
def union_overlapping(intervals):
    """Union any overlapping intervals in the given set.

    Assumes the input is ordered by start; returns a new list in which
    adjacent overlapping intervals have been merged.
    """
    merged = []
    for candidate in intervals:
        if not merged:
            merged.append(candidate)
            continue
        last = merged[-1]
        if last.overlaps(candidate):
            merged[-1] = last.union(candidate)
        else:
            merged.append(candidate)
    return merged
from hashlib import md5
from itertools import chain
import bisect
# Prefer the C-accelerated pyhash implementation of 32-bit FNV-1a when it
# is installed; otherwise fall back to a pure-Python port. Both variants
# expose the same fnv32a(string, seed) signature.
try:
    import pyhash
    hasher = pyhash.fnv1a_32()

    def fnv32a(string, seed=0x811c9dc5):
        # Delegate to pyhash's native FNV-1a-32.
        return hasher(string, seed=seed)
except ImportError:
    def fnv32a(string, seed=0x811c9dc5):
        """
        FNV-1a Hash (http://isthe.com/chongo/tech/comp/fnv/) in Python.
        Taken from https://gist.github.com/vaiorabbit/5670985
        """
        hval = seed
        fnv_32_prime = 0x01000193
        uint32_max = 2 ** 32
        # XOR each character's code point in, then multiply by the FNV
        # prime, keeping the value within 32 bits.
        for s in string:
            hval = hval ^ ord(s)
            hval = (hval * fnv_32_prime) % uint32_max
        return hval
def hashRequest(request):
    """Build a deterministic cache key for a Django render request.

    GET and POST parameters are merged, underscore-prefixed (cache-buster)
    parameters are dropped, and the rest are sorted so that equivalent
    requests hash identically regardless of parameter order.
    """
    # Normalize the request parameters so ensure we're deterministic
    queryParams = ["%s=%s" % (key, '&'.join(values))
                   for (key,values) in chain(request.POST.lists(), request.GET.lists())
                   if not key.startswith('_')]
    normalizedParams = ','.join( sorted(queryParams) )
    return compactHash(normalizedParams)
def hashData(targets, startTime, endTime):
    """Build a cache key for a set of targets over [startTime, endTime].

    Targets are sorted so order does not matter; timestamps are truncated
    to minute resolution, so requests within the same minute share a key.
    """
    target_part = ','.join(sorted(targets))
    start_part = startTime.strftime("%Y%m%d_%H%M")
    end_part = endTime.strftime("%Y%m%d_%H%M")
    return compactHash(target_part + '@' + start_part + ':' + end_part)
def compactHash(string):
    """Return the hex MD5 digest of ``string`` (UTF-8 encoded)."""
    return md5(string.encode('utf-8')).hexdigest()
class ConsistentHashRing:
    """Consistent-hash ring mapping metric keys to backend nodes.

    Each node is inserted at ``replica_count`` virtual positions so that
    adding/removing a node only remaps a small fraction of keys. Two hash
    schemes are supported: carbon's original md5-based one and the
    fnv1a-based 'fnv1a_ch' variant.
    """
    def __init__(self, nodes, replica_count=100, hash_type='carbon_ch'):
        # ring is a sorted list of (position, node) tuples; *_len fields
        # cache lengths to avoid recomputing them on every lookup.
        self.ring = []
        self.ring_len = len(self.ring)
        self.nodes = set()
        self.nodes_len = len(self.nodes)
        self.replica_count = replica_count
        self.hash_type = hash_type
        for node in nodes:
            self.add_node(node)

    def compute_ring_position(self, key):
        """Hash ``key`` to a 16-bit position on the ring."""
        if self.hash_type == 'fnv1a_ch':
            # Fold the 32-bit FNV hash into 16 bits by XOR-ing its halves.
            big_hash = '{:x}'.format(int(fnv32a( str(key) )))
            small_hash = int(big_hash[:4], 16) ^ int(big_hash[4:], 16)
        else:
            # NOTE(review): Python 2 API -- on Python 3, md5() requires
            # bytes, so str(key) would need an .encode() here.
            big_hash = md5(str(key)).hexdigest()
            small_hash = int(big_hash[:4], 16)
        return small_hash

    def add_node(self, key):
        """Insert ``key`` at replica_count virtual positions on the ring."""
        self.nodes.add(key)
        self.nodes_len = len(self.nodes)
        for i in range(self.replica_count):
            if self.hash_type == 'fnv1a_ch':
                # NOTE(review): uses key[1] -- presumably key is a
                # (server, instance) tuple in this mode; verify callers.
                replica_key = "%d-%s" % (i, key[1])
            else:
                replica_key = "%s:%d" % (key, i)
            position = self.compute_ring_position(replica_key)
            entry = (position, key)
            bisect.insort(self.ring, entry)
        self.ring_len = len(self.ring)

    def remove_node(self, key):
        """Remove ``key`` and all of its virtual positions from the ring."""
        self.nodes.discard(key)
        self.nodes_len = len(self.nodes)
        self.ring = [entry for entry in self.ring if entry[1] != key]
        self.ring_len = len(self.ring)

    def get_node(self, key):
        """Return the single node owning ``key`` (first entry at or after
        the key's ring position, wrapping around)."""
        assert self.ring
        position = self.compute_ring_position(key)
        search_entry = (position, None)
        index = bisect.bisect_left(self.ring, search_entry) % self.ring_len
        entry = self.ring[index]
        return entry[1]

    def get_nodes(self, key):
        """Return distinct nodes in preference order for ``key``.

        Walks clockwise from the key's position collecting each distinct
        node once, stopping after a full lap or once every node is listed.
        """
        nodes = []
        position = self.compute_ring_position(key)
        search_entry = (position, None)
        index = bisect.bisect_left(self.ring, search_entry) % self.ring_len
        last_index = (index - 1) % self.ring_len
        nodes_len = len(nodes)
        while nodes_len < self.nodes_len and index != last_index:
            next_entry = self.ring[index]
            (position, next_node) = next_entry
            if next_node not in nodes:
                nodes.append(next_node)
                nodes_len += 1
            index = (index + 1) % self.ring_len
        return nodes
import json
class FloatEncoder(json.JSONEncoder):
    """JSON encoder that serializes NaN as ``nan_str`` (default "null")
    and +/-infinity as the out-of-range literals 1e9999 / -1e9999,
    instead of the invalid-JSON NaN/Infinity tokens the stdlib emits.

    NOTE(review): this overrides iterencode using Python 2 json internals
    (self.encoding, json.encoder.FLOAT_REPR); those attributes do not
    exist on Python 3's JSONEncoder -- verify before porting.
    """
    def __init__(self, nan_str="null", **kwargs):
        super(FloatEncoder, self).__init__(**kwargs)
        self.nan_str = nan_str

    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.
        For example::
            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = json.encoder.encode_basestring_ascii
        else:
            _encoder = json.encoder.encode_basestring
        if self.encoding != 'utf-8':
            # Decode byte strings with the configured encoding before
            # escaping (Python 2 str/unicode handling).
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)

        def floatstr(o, allow_nan=self.allow_nan, _repr=json.encoder.FLOAT_REPR,
                     _inf=json.encoder.INFINITY, _neginf=-json.encoder.INFINITY,
                     nan_str=self.nan_str):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on the
            # internals.
            if o != o:
                # NaN is the only float unequal to itself.
                text = nan_str
            elif o == _inf:
                text = '1e9999'
            elif o == _neginf:
                text = '-1e9999'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))
            return text

        # Rebuild the stdlib's iterator machinery with our floatstr hook.
        _iterencode = json.encoder._make_iterencode(
            markers, self.default, _encoder, self.indent, floatstr,
            self.key_separator, self.item_separator, self.sort_keys,
            self.skipkeys, _one_shot)
        return _iterencode(o, 0)
import csv
import math
import pytz
from datetime import datetime
from time import time
from random import shuffle
from httplib import CannotSendRequest
from urllib import urlencode
from urlparse import urlsplit, urlunsplit
from cgi import parse_qs
from cStringIO import StringIO
try:
import cPickle as pickle
except ImportError:
import pickle
from ..compat import HttpResponse
from ..util import getProfileByUsername, json, unpickle
from ..remote_storage import connector_class_selector
from ..logger import log
from .evaluator import evaluateTarget
from .attime import parseATTime
from .functions import PieFunctions
from .hashing import hashRequest, hashData
from .glyph import GraphTypes
from .float_encoder import FloatEncoder
from django.http import HttpResponseServerError, HttpResponseRedirect
from django.template import Context, loader
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils.cache import add_never_cache_headers, patch_response_headers
def renderView(request):
    """Main /render endpoint.

    Parses the request into graph and request options, evaluates the
    requested targets (with request-level and data-level caching), and
    returns the result either as a data document (csv/json/dygraph/
    rickshaw/raw/pickle) or as a rendered image (png/svg/pdf), possibly
    delegated to a remote rendering host.
    """
    start = time()
    (graphOptions, requestOptions) = parseOptions(request)
    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    requestContext = {
        'startTime' : requestOptions['startTime'],
        'endTime' : requestOptions['endTime'],
        'localOnly' : requestOptions['localOnly'],
        'template' : requestOptions['template'],
        'tzinfo' : requestOptions['tzinfo'],
        'data' : []
    }
    data = requestContext['data']

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        cachedResponse = cache.get(requestKey)
        if cachedResponse:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' % (time() - start))
            return cachedResponse
        else:
            log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        # Pie targets are either literal "name:value" pairs or expressions
        # reduced to a single number via the selected pie function.
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                try:
                    name,value = target.split(':',1)
                    value = float(value)
                except:
                    raise ValueError("Invalid target '%s'" % target)
                data.append( (name,value) )
            else:
                seriesList = evaluateTarget(requestContext, target)
                for series in seriesList:
                    func = PieFunctions[requestOptions['pieMode']]
                    data.append( (series.name, func(requestContext, series) or 0 ))
    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime)
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)
        else:
            cachedData = None
        if cachedData is not None:
            requestContext['data'] = data = cachedData
        else: # Have to actually retrieve the data now
            for target in requestOptions['targets']:
                if not target.strip():
                    continue
                t = time()
                seriesList = evaluateTarget(requestContext, target)
                log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
                data.extend(seriesList)
            if useCache:
                cache.add(dataKey, data, cacheTimeout)

    # If data is all we needed, we're done
    format = requestOptions.get('format')
    if format == 'csv':
        # One row per (series, timestamp, value), timestamps in the
        # requested timezone.
        response = HttpResponse(content_type='text/csv')
        writer = csv.writer(response, dialect='excel')
        for series in data:
            for i, value in enumerate(series):
                timestamp = datetime.fromtimestamp(series.start + (i * series.step), requestOptions['tzinfo'])
                writer.writerow((series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"), value))
        return response

    if format == 'json':
        series_data = []
        if 'maxDataPoints' in requestOptions and any(data):
            # Consolidate each series down to at most maxDataPoints values.
            startTime = min([series.start for series in data])
            endTime = max([series.end for series in data])
            timeRange = endTime - startTime
            maxDataPoints = requestOptions['maxDataPoints']
            for series in data:
                numberOfDataPoints = timeRange/series.step
                if maxDataPoints < numberOfDataPoints:
                    valuesPerPoint = math.ceil(float(numberOfDataPoints) / float(maxDataPoints))
                    secondsPerPoint = int(valuesPerPoint * series.step)
                    # Nudge start over a little bit so that the consolidation bands align with each call
                    # removing 'jitter' seen when refreshing.
                    nudge = secondsPerPoint + (series.start % series.step) - (series.start % secondsPerPoint)
                    series.start = series.start + nudge
                    valuesToLose = int(nudge/series.step)
                    for r in range(1, valuesToLose):
                        del series[0]
                    series.consolidate(valuesPerPoint)
                    timestamps = range(int(series.start), int(series.end) + 1, int(secondsPerPoint))
                else:
                    timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
                datapoints = zip(series, timestamps)
                series_data.append(dict(target=series.name, datapoints=datapoints))
        elif 'noNullPoints' in requestOptions and any(data):
            # Emit only non-null points, as (value, timestamp) pairs.
            for series in data:
                values = []
                for (index,v) in enumerate(series):
                    if v is not None:
                        timestamp = series.start + (index * series.step)
                        values.append((v,timestamp))
                if len(values) > 0:
                    series_data.append(dict(target=series.name, datapoints=values))
        else:
            for series in data:
                timestamps = range(int(series.start), int(series.end) + 1, int(series.step))
                datapoints = zip(series, timestamps)
                series_data.append(dict(target=series.name, datapoints=datapoints))
        # FloatEncoder keeps NaN/inf values JSON-parsable.
        if 'jsonp' in requestOptions:
            response = HttpResponse(
                content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data, cls=FloatEncoder)),
                content_type='text/javascript')
        else:
            response = HttpResponse(content=json.dumps(series_data, cls=FloatEncoder),
                                    content_type='application/json')
        if useCache:
            cache.add(requestKey, response, cacheTimeout)
            patch_response_headers(response, cache_timeout=cacheTimeout)
        else:
            add_never_cache_headers(response)
        log.rendering('Total json rendering time %.6f' % (time() - start))
        return response

    if format == 'dygraph':
        # Dygraph wants one row per timestamp: [ts_ms, series1, series2, ...].
        # Timestamps are taken from the first series only.
        labels = ['Time']
        result = '{}'
        if data:
            datapoints = [[ts] for ts in range(data[0].start, data[0].end, data[0].step)]
            for series in data:
                labels.append(series.name)
                for i, point in enumerate(series):
                    if point is None:
                        point = 'null'
                    elif point == float('inf'):
                        point = 'Infinity'
                    elif point == float('-inf'):
                        point = '-Infinity'
                    elif math.isnan(point):
                        point = 'null'
                    datapoints[i].append(point)
            # '%%s000' leaves a literal '%s000' so the second interpolation
            # can append '000' to the epoch seconds (-> milliseconds).
            line_template = '[%%s000%s]' % ''.join([', %s'] * len(data))
            lines = [line_template % tuple(points) for points in datapoints]
            result = '{"labels" : %s, "data" : [%s]}' % (json.dumps(labels), ', '.join(lines))
        response = HttpResponse(content=result, content_type='application/json')
        if useCache:
            cache.add(requestKey, response, cacheTimeout)
            patch_response_headers(response, cache_timeout=cacheTimeout)
        else:
            add_never_cache_headers(response)
        log.rendering('Total dygraph rendering time %.6f' % (time() - start))
        return response

    if format == 'rickshaw':
        series_data = []
        for series in data:
            timestamps = range(series.start, series.end, series.step)
            datapoints = [{'x' : x, 'y' : y} for x, y in zip(timestamps, series)]
            series_data.append( dict(target=series.name, datapoints=datapoints) )
        if 'jsonp' in requestOptions:
            # NOTE(review): uses the legacy 'mimetype' kwarg here, unlike
            # the 'content_type' used everywhere else -- verify Django version.
            response = HttpResponse(
                content="%s(%s)" % (requestOptions['jsonp'], json.dumps(series_data)),
                mimetype='text/javascript')
        else:
            response = HttpResponse(content=json.dumps(series_data),
                                    content_type='application/json')
        if useCache:
            cache.add(requestKey, response, cacheTimeout)
            patch_response_headers(response, cache_timeout=cacheTimeout)
        else:
            add_never_cache_headers(response)
        log.rendering('Total rickshaw rendering time %.6f' % (time() - start))
        return response

    if format == 'raw':
        # name,start,end,step|v1,v2,...\n per series.
        response = HttpResponse(content_type='text/plain')
        for series in data:
            response.write( "%s,%d,%d,%d|" % (series.name, series.start, series.end, series.step) )
            response.write( ','.join(map(repr,series)) )
            response.write('\n')
        log.rendering('Total rawData rendering time %.6f' % (time() - start))
        return response

    if format == 'svg':
        graphOptions['outputFormat'] = 'svg'
    elif format == 'pdf':
        graphOptions['outputFormat'] = 'pdf'

    if format == 'pickle':
        response = HttpResponse(content_type='application/pickle')
        seriesInfo = [series.getInfo() for series in data]
        pickle.dump(seriesInfo, response, protocol=-1)
        log.rendering('Total pickle rendering time %.6f' % (time() - start))
        return response

    # We've got the data, now to render it
    graphOptions['data'] = data
    if settings.REMOTE_RENDERING: # Rendering on other machines is faster in some situations
        image = delegateRendering(requestOptions['graphType'], graphOptions)
    else:
        image = doImageRender(requestOptions['graphClass'], graphOptions)

    useSVG = graphOptions.get('outputFormat') == 'svg'
    if useSVG and 'jsonp' in requestOptions:
        response = HttpResponse(
            content="%s(%s)" % (requestOptions['jsonp'], json.dumps(image)),
            content_type='text/javascript')
    elif graphOptions.get('outputFormat') == 'pdf':
        response = buildResponse(image, 'application/x-pdf')
    else:
        response = buildResponse(image, 'image/svg+xml' if useSVG else 'image/png')

    if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
    else:
        add_never_cache_headers(response)
    log.rendering('Total rendering time %.6f seconds' % (time() - start))
    return response
def parseOptions(request):
    """Split the request's GET/POST parameters into two dicts:

    - graphOptions: everything the graph class understands (width, height,
      and any parameter listed in graphClass.customizable), with string
      values coerced to int/float/bool where they look like one
    - requestOptions: rendering pipeline options (targets, format, time
      range, caching, timezone, template variables)

    Returns the (graphOptions, requestOptions) tuple.
    """
    queryParams = request.GET.copy()
    queryParams.update(request.POST)

    # Start with some defaults
    graphOptions = {'width' : 330, 'height' : 250}
    requestOptions = {}

    graphType = queryParams.get('graphType','line')
    assert graphType in GraphTypes, "Invalid graphType '%s', must be one of %s" % (graphType,GraphTypes.keys())
    graphClass = GraphTypes[graphType]

    # Fill in the requestOptions
    requestOptions['graphType'] = graphType
    requestOptions['graphClass'] = graphClass
    requestOptions['pieMode'] = queryParams.get('pieMode', 'average')
    cacheTimeout = int( queryParams.get('cacheTimeout', settings.DEFAULT_CACHE_DURATION) )
    requestOptions['targets'] = []

    # Extract the targets out of the queryParams
    mytargets = []
    # Normal format: ?target=path.1&target=path.2
    if len(queryParams.getlist('target')) > 0:
        mytargets = queryParams.getlist('target')
    # Rails/PHP/jQuery common practice format: ?target[]=path.1&target[]=path.2
    elif len(queryParams.getlist('target[]')) > 0:
        mytargets = queryParams.getlist('target[]')

    # Collect the targets
    for target in mytargets:
        requestOptions['targets'].append(target)

    # template[foo]=bar parameters become {'foo': 'bar'} substitutions.
    template = dict()
    for key, val in queryParams.items():
        if key.startswith("template["):
            template[key[9:-1]] = val
    requestOptions['template'] = template

    # Later assignments win: format= overrides rawData/pickle shortcuts.
    if 'pickle' in queryParams:
        requestOptions['format'] = 'pickle'
    if 'rawData' in queryParams:
        requestOptions['format'] = 'raw'
    if 'format' in queryParams:
        requestOptions['format'] = queryParams['format']
        if 'jsonp' in queryParams:
            requestOptions['jsonp'] = queryParams['jsonp']
    if 'noCache' in queryParams:
        requestOptions['noCache'] = True
    if 'maxDataPoints' in queryParams and queryParams['maxDataPoints'].isdigit():
        requestOptions['maxDataPoints'] = int(queryParams['maxDataPoints'])
    if 'noNullPoints' in queryParams:
        requestOptions['noNullPoints'] = True

    requestOptions['localOnly'] = queryParams.get('local') == '1'

    # Fill in the graphOptions
    for opt in graphClass.customizable:
        if opt in queryParams:
            val = queryParams[opt]
            # Coerce look-alike strings; color values stay strings even
            # when they are all digits (e.g. '666666').
            if (val.isdigit() or (val.startswith('-') and val[1:].isdigit())) and 'color' not in opt.lower():
                val = int(val)
            elif '.' in val and (val.replace('.','',1).isdigit() or (val.startswith('-') and val[1:].replace('.','',1).isdigit())):
                val = float(val)
            elif val.lower() in ('true','false'):
                val = val.lower() == 'true'
            elif val.lower() == 'default' or val == '':
                continue
            graphOptions[opt] = val

    # Unknown tz values silently fall back to the server's TIME_ZONE.
    tzinfo = pytz.timezone(settings.TIME_ZONE)
    if 'tz' in queryParams:
        try:
            tzinfo = pytz.timezone(queryParams['tz'])
        except pytz.UnknownTimeZoneError:
            pass
    requestOptions['tzinfo'] = tzinfo

    # Get the time interval for time-oriented graph types
    if graphType == 'line' or graphType == 'pie':
        if 'until' in queryParams:
            untilTime = parseATTime(queryParams['until'], tzinfo)
        else:
            untilTime = parseATTime('now', tzinfo)
        if 'from' in queryParams:
            fromTime = parseATTime(queryParams['from'], tzinfo)
        else:
            fromTime = parseATTime('-1d', tzinfo)
        # Swap if given in reverse order; empty ranges are rejected.
        startTime = min(fromTime, untilTime)
        endTime = max(fromTime, untilTime)
        assert startTime != endTime, "Invalid empty time range"
        requestOptions['startTime'] = startTime
        requestOptions['endTime'] = endTime
        timeRange = endTime - startTime
        queryTime = timeRange.days * 86400 + timeRange.seconds # convert the time delta to seconds

        # Pick the cache timeout from DEFAULT_CACHE_POLICY based on how
        # much history the query spans, unless explicitly overridden.
        if settings.DEFAULT_CACHE_POLICY and not queryParams.get('cacheTimeout'):
            timeouts = [timeout for period,timeout in settings.DEFAULT_CACHE_POLICY if period <= queryTime]
            cacheTimeout = max(timeouts or (0,))

    if cacheTimeout == 0:
        requestOptions['noCache'] = True
    requestOptions['cacheTimeout'] = cacheTimeout

    return (graphOptions, requestOptions)
# Per-server pools of reusable HTTP connections for remote rendering.
connectionPools = {}

def delegateRendering(graphType, graphOptions):
    """Render a graph on one of settings.RENDERING_HOSTS.

    Posts the pickled graph options to each host (in random order) until
    one returns a PNG; returns the image bytes, or None when every host
    fails (each failure is logged and the next host is tried).
    """
    start = time()
    postData = graphType + '\n' + pickle.dumps(graphOptions)
    servers = settings.RENDERING_HOSTS[:] #make a copy so we can shuffle it safely
    shuffle(servers)
    connector_class = connector_class_selector(settings.INTRACLUSTER_HTTPS)
    for server in servers:
        start2 = time()
        try:
            # Get a connection
            try:
                pool = connectionPools[server]
            except KeyError: #happens the first time
                pool = connectionPools[server] = set()
            try:
                connection = pool.pop()
            except KeyError: #No available connections, have to make a new one
                connection = connector_class(server)
                connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
            # Send the request
            try:
                connection.request('POST','/render/local/', postData)
            except CannotSendRequest:
                connection = connector_class(server) #retry once
                connection.timeout = settings.REMOTE_RENDER_CONNECT_TIMEOUT
                connection.request('POST', '/render/local/', postData)
            # Read the response
            try: # Python 2.7+, use buffering of HTTP responses
                response = connection.getresponse(buffering=True)
            except TypeError: # Python 2.6 and older
                response = connection.getresponse()
            assert response.status == 200, "Bad response code %d from %s" % (response.status,server)
            contentType = response.getheader('Content-Type')
            imageData = response.read()
            assert contentType == 'image/png', "Bad content type: \"%s\" from %s" % (contentType,server)
            assert imageData, "Received empty response from %s" % server
            # Wrap things up
            log.rendering('Remotely rendered image on %s in %.6f seconds' % (server,time() - start2))
            log.rendering('Spent a total of %.6f seconds doing remote rendering work' % (time() - start))
            # Return the healthy connection to the pool for reuse.
            pool.add(connection)
            return imageData
        except:
            # Best-effort failover: log and move on to the next server.
            log.exception("Exception while attempting remote rendering request on %s" % server)
            log.rendering('Exception while remotely rendering on %s wasted %.6f' % (server,time() - start2))
            continue
def renderLocalView(request):
    """Server side of delegateRendering: receive a graph type plus pickled
    options in the request body, render the image locally, and return it.

    Any failure is logged and answered with a plain 500.
    """
    try:
        start = time()
        # Body format: first line is the graph type, the rest is the
        # pickled options dict.
        reqParams = StringIO(request.body)
        graphType = reqParams.readline().strip()
        optionsPickle = reqParams.read()
        reqParams.close()
        graphClass = GraphTypes[graphType]
        # unpickle is the project's restricted unpickler for cluster data.
        options = unpickle.loads(optionsPickle)
        image = doImageRender(graphClass, options)
        log.rendering("Delegated rendering request took %.6f seconds" % (time() - start))
        response = buildResponse(image)
        add_never_cache_headers(response)
        return response
    except:
        log.exception("Exception in graphite.render.views.rawrender")
        return HttpResponseServerError()
def renderMyGraphView(request,username,graphName):
    """Redirect to a user's saved graph URL, overlaying any parameters
    from the current request onto the saved URL's query string."""
    profile = getProfileByUsername(username)
    if not profile:
        return errorPage("No such user '%s'" % username)
    try:
        graph = profile.mygraph_set.get(name=graphName)
    except ObjectDoesNotExist:
        return errorPage("User %s doesn't have a MyGraph named '%s'" % (username,graphName))

    request_params = dict(request.REQUEST.items())
    if request_params:
        url_parts = urlsplit(graph.url)
        query_string = url_parts[3]
        if query_string:
            url_params = parse_qs(query_string)
            # Remove lists so that we can do an update() on the dict
            # ('target' keeps its list form so duplicates survive).
            for param, value in url_params.items():
                if isinstance(value, list) and param != 'target':
                    url_params[param] = value[-1]
            url_params.update(request_params)
            # Handle 'target' being a list - we want duplicate &target params out of it
            url_param_pairs = []
            for key,val in url_params.items():
                if isinstance(val, list):
                    for v in val:
                        url_param_pairs.append( (key,v) )
                else:
                    url_param_pairs.append( (key,val) )
            query_string = urlencode(url_param_pairs)
        url = urlunsplit(url_parts[:3] + (query_string,) + url_parts[4:])
    else:
        url = graph.url
    return HttpResponseRedirect(url)
def doImageRender(graphClass, graphOptions):
    """Instantiate ``graphClass`` with ``graphOptions`` (which include the
    series data), render it into an in-memory buffer, and return the
    resulting image bytes."""
    pngData = StringIO()
    t = time()
    img = graphClass(**graphOptions)
    img.output(pngData)
    log.rendering('Rendered PNG in %.6f seconds' % (time() - t))
    imageData = pngData.getvalue()
    pngData.close()
    return imageData
def buildResponse(imageData, content_type="image/png"):
    """Wrap rendered image bytes in an HttpResponse of the given type."""
    return HttpResponse(imageData, content_type=content_type)
def errorPage(message):
    """Render the 500 template with ``message`` and return it as an
    HTTP 500 server-error response."""
    error_template = loader.get_template('500.html')
    error_context = Context(dict(message=message))
    return HttpResponseServerError(error_template.render(error_context))
from pyparsing import (
ParserElement, Forward, Combine, Optional, Word, Literal, CaselessKeyword,
CaselessLiteral, Group, FollowedBy, LineEnd, OneOrMore, ZeroOrMore,
nums, alphas, alphanums, printables, delimitedList, quotedString,
__version__,
)
# Packrat memoization makes this recursive grammar tractable.
ParserElement.enablePackrat()

# Forward declarations: expression is recursive (function arguments may
# themselves be expressions).
grammar = Forward()
expression = Forward()

# Literals
intNumber = Combine(
    Optional('-') + Word(nums)
)('integer')

floatNumber = Combine(
    Optional('-') + Word(nums) + Literal('.') + Word(nums)
)('float')

sciNumber = Combine(
    (floatNumber | intNumber) + CaselessLiteral('e') + intNumber
)('scientific')

aString = quotedString('string')

# Use lookahead to match only numbers in a list (can't remember why this is necessary)
afterNumber = FollowedBy(",") ^ FollowedBy(")") ^ FollowedBy(LineEnd())
number = Group(
    (sciNumber + afterNumber) |
    (floatNumber + afterNumber) |
    (intNumber + afterNumber)
)('number')

boolean = Group(
    CaselessKeyword("true") |
    CaselessKeyword("false")
)('boolean')

argname = Word(alphas + '_', alphanums + '_')('argname')
funcname = Word(alphas + '_', alphanums + '_')('funcname')

## Symbols
leftParen = Literal('(').suppress()
rightParen = Literal(')').suppress()
comma = Literal(',').suppress()
equal = Literal('=').suppress()

# Function calls

## Symbols
# NOTE(review): leftParen/rightParen/comma/equal are redefined below,
# identically to the block above -- redundant but harmless.
leftBrace = Literal('{')
rightBrace = Literal('}')
leftParen = Literal('(').suppress()
rightParen = Literal(')').suppress()
comma = Literal(',').suppress()
equal = Literal('=').suppress()
backslash = Literal('\\').suppress()
# Characters that cannot appear unescaped inside a metric path element.
symbols = '''(){},=.'"\\'''

arg = Group(
    boolean |
    number |
    aString |
    expression
)('args*')
kwarg = Group(argname + equal + arg)('kwargs*')

args = delimitedList(~kwarg + arg)  # lookahead to prevent failing on equals
kwargs = delimitedList(kwarg)

# funcname(positional..., keyword=value...)
call = Group(
    funcname + leftParen +
    Optional(
        args + Optional(
            comma + kwargs
        )
    ) + rightParen
)('call')

# Metric pattern (aka. pathExpression)
validMetricChars = ''.join((set(printables) - set(symbols)))
escapedChar = backslash + Word(symbols, exact=1)
partialPathElem = Combine(
    OneOrMore(
        escapedChar | Word(validMetricChars)
    )
)

# Brace alternation, e.g. {web1,web2} within a path element.
matchEnum = Combine(
    leftBrace +
    delimitedList(partialPathElem, combine=True) +
    rightBrace
)

pathElement = Combine(
    Group(partialPathElem | matchEnum) +
    ZeroOrMore(matchEnum | partialPathElem)
)
pathExpression = delimitedList(pathElement, delim='.', combine=True)('pathExpression')

# template(...) accepts only literal (number/string) arguments.
litarg = Group(
    number | aString
)('args*')
litkwarg = Group(argname + equal + litarg)('kwargs*')
litargs = delimitedList(~litkwarg + litarg)  # lookahead to prevent failing on equals
litkwargs = delimitedList(litkwarg)

template = Group(
    Literal('template') + leftParen +
    (call | pathExpression) +
    Optional(comma + (litargs | litkwargs)) +
    rightParen
)('template')

# pyparsing 2.x renamed the Forward assignment operator from << to <<=.
if __version__.startswith('1.'):
    expression << Group(template | call | pathExpression)('expression')
    grammar << expression
else:
    expression <<= Group(template | call | pathExpression)('expression')
    grammar <<= expression
def enableDebug():
    """Turn on pyparsing debug tracing for every grammar element here.

    Iterates this module's globals and, for each object exposing the
    pyparsing setName/setDebug API, names it after its variable and
    enables debug output. Non-parser globals (modules, functions,
    strings, ...) are skipped.
    """
    for name, obj in globals().items():
        try:
            obj.setName(name)
            obj.setDebug(True)
        except Exception:
            # Not a ParserElement (no setName/setDebug) -- ignore.
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            pass
__all__ = ["GeoJsonMapLayer"]
import json
from kivy.properties import StringProperty, ObjectProperty
from mapview.view import MapLayer
from mapview.downloader import Downloader
def flatten(l):
    """Concatenate the items of every sub-sequence in ``l`` into one list."""
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
class GeoJsonMapLayer(MapLayer):
    """Map layer that renders a GeoJSON document (from a URL, a local
    file, or an assigned dict) onto the map canvas using Kivy graphics.

    Note: this module uses Python 2 print statements.
    """

    # URL or local file path of the GeoJSON document.
    source = StringProperty()
    # The parsed GeoJSON dict; re-rendered whenever it changes.
    geojson = ObjectProperty()
    #features = ListProperty()

    def reposition(self):
        # Called by the map view on pan/zoom: redraw with new projections.
        if self.geojson:
            print "Reload geojson"
            self.on_geojson(self, self.geojson)

    def on_geojson(self, instance, geojson):
        # Property observer: rebuild the canvas from the new document.
        if self.parent is None:
            return
        #self.features = []
        self.canvas.clear()
        self._geojson_part(geojson)

    def on_source(self, instance, value):
        # Property observer: fetch remote sources asynchronously, read
        # local files synchronously; both end up assigning self.geojson.
        if value.startswith("http://") or value.startswith("https://"):
            Downloader.instance().download(value, self._load_geojson_url)
        else:
            with open(value, "rb") as fd:
                geojson = json.load(fd)
                self.geojson = geojson

    def _load_geojson_url(self, url, r):
        # Downloader callback; r is the HTTP response object.
        self.geojson = r.json()

    def _geojson_part(self, part):
        """Dispatch on the GeoJSON object type; only FeatureCollection
        and Feature are handled, anything else is silently ignored."""
        tp = part["type"]
        if tp == "FeatureCollection":
            for feature in part["features"]:
                self._geojson_part_f(feature)
        elif tp == "Feature":
            self._geojson_part_f(part)
        else:
            # unhandled geojson part
            pass

    def _geojson_part_f(self, feature):
        # Render one Feature: build graphics instructions and add them.
        properties = feature["properties"]
        geometry = feature["geometry"]
        graphics = self._geojson_part_geometry(geometry, properties)
        for g in graphics:
            self.canvas.add(g)

    def _geojson_part_geometry(self, geometry, properties):
        """Build Kivy canvas instructions for one geometry.

        Polygons are tesselated into meshes (currently hard-coded
        semi-transparent red); LineStrings use the feature's 'stroke'
        and 'stroke-width' properties. Other geometry types produce
        nothing.
        """
        from kivy.graphics import Mesh, Line, Color
        from kivy.graphics.tesselator import Tesselator, WINDING_ODD, TYPE_POLYGONS
        from kivy.utils import get_color_from_hex
        from kivy.metrics import dp
        tp = geometry["type"]
        graphics = []
        if tp == "Polygon":
            tess = Tesselator()
            for c in geometry["coordinates"]:
                xy = list(self._lonlat_to_xy(c))
                xy = flatten(xy)
                tess.add_contour(xy)
            tess.tesselate(WINDING_ODD, TYPE_POLYGONS)
            graphics.append(Color(1, 0, 0, .5))
            for vertices, indices in tess.meshes:
                graphics.append(Mesh(
                    vertices=vertices, indices=indices,
                    mode="triangle_fan"))
        elif tp == "LineString":
            stroke = get_color_from_hex(properties.get("stroke", "#ffffff"))
            # NOTE(review): no default for 'stroke-width' -- dp(None)
            # would fail if the property is absent; verify inputs.
            stroke_width = dp(properties.get("stroke-width"))
            xy = list(self._lonlat_to_xy(geometry["coordinates"]))
            xy = flatten(xy)
            graphics.append(Color(*stroke))
            graphics.append(Line(points=xy, width=stroke_width))
        return graphics

    def _lonlat_to_xy(self, lonlats):
        # Project (lon, lat) pairs to window coordinates at current zoom.
        view = self.parent
        zoom = view.zoom
        for lon, lat in lonlats:
            yield view.get_window_xy_from(lat, lon, zoom)
(function() {
var B = {
"B33194": {
"description": "[SIM] Space consistency",
"unit": "%"
},
"B33195": {
"description": "[SIM] MeteoDB variable ID",
"unit": "NUMERIC"
},
"B33196": {
"description": "[SIM] Data has been invalidated",
"unit": "CODE TABLE 33196"
},
"B33197": {
"description": "[SIM] Manual replacement in substitution",
"unit": "CODE TABLE 33197"
},
"B33192": {
"description": "[SIM] Climatological and consistency check",
"unit": "%"
},
"B33193": {
"description": "[SIM] Time consistency",
"unit": "%"
},
"B11002": {
"description": "WIND SPEED",
"unit": "M/S"
},
"B11003": {
"description": "U-COMPONENT",
"unit": "M/S"
},
"B11001": {
"description": "WIND DIRECTION",
"unit": "DEGREE TRUE"
},
"B11006": {
"description": "W-COMPONENT",
"unit": "M/S"
},
"B33050": {
"description": "GLOBAL GTSPP QUALITY FLAG",
"unit": "CODE TABLE 33050"
},
"B11004": {
"description": "V-COMPONENT",
"unit": "M/S"
},
"B11005": {
"description": "W-COMPONENT",
"unit": "PA/S"
},
"B14018": {
"description": "INSTANTANEOUS SHORT-WAVE RADIATION (incoming)",
"unit": "W/M**2"
},
"B14019": {
"description": "SURFACE ALBEDO",
"unit": "%"
},
"B14016": {
"description": "NET RADIATION",
"unit": "J/M**2"
},
"B14017": {
"description": "INSTANTANEOUS LONG-WAVE RADIATION (incoming)",
"unit": "W/M**2"
},
"B33198": {
"description": "[SIM] Observation increment",
"unit": "NUMERIC"
},
"B07002": {
"description": "HEIGHT OR ALTITUDE",
"unit": "M"
},
"B07004": {
"description": "PRESSURE",
"unit": "PA"
},
"B07007": {
"description": "HEIGHT",
"unit": "M"
},
"B31000": {
"description": "SHORT DELAYED DESCRIPTOR REPLICATION FACTOR",
"unit": "NUMERIC"
},
"B31001": {
"description": "DELAYED DESCRIPTOR REPLICATION FACTOR",
"unit": "NUMERIC"
},
"B31002": {
"description": "EXTENDED DELAYED DESCRIPTOR REPLICATION FACTOR",
"unit": "NUMERIC"
},
"B48149": {
"description": "[SIM] Conta Chenopodiacee - Amarantacee Indistinte_Amaranto",
"unit": "NUMERIC"
},
"B48148": {
"description": "[SIM] Conta Cupressacee - Taxacee indistinte_Cupressacee - TaxacNUMERIC",
"unit": "NUMERIC"
},
"B48184": {
"description": "[SIM] Conta Ciperacee_Ciperacee indistinte 1",
"unit": "NUMERIC"
},
"B48141": {
"description": "[SIM] Conta Fagacee_Fagacee indistinte",
"unit": "NUMERIC"
},
"B22074": {
"description": "AVERAGE WAVE PERIOD",
"unit": "S"
},
"B48143": {
"description": "[SIM] Conta Oleacee_Frassino",
"unit": "NUMERIC"
},
"B48142": {
"description": "[SIM] Conta Oleacee_Olivo",
"unit": "NUMERIC"
},
"B22071": {
"description": "SPECTRAL PEAK WAVE PERIOD",
"unit": "S"
},
"B22070": {
"description": "SIGNIFICANT WAVE HEIGHT",
"unit": "M"
},
"B48147": {
"description": "[SIM] Conta Cupressacee - Taxacee indistinte_Cipresso comune",
"unit": "NUMERIC"
},
"B48146": {
"description": "[SIM] Conta Urticacee_Urticacee indistinte",
"unit": "NUMERIC"
},
"B15204": {
"description": "[SIM] PM06 Concentration (tot. aerosol < 0.6 ug)",
"unit": "KG/M**3"
},
"B15205": {
"description": "[SIM] PM03 Concentration (tot. aerosol < 0.3 ug)",
"unit": "KG/M**3"
},
"B15206": {
"description": "[SIM] PM015 Concentration (tot. aerosol < 0.15 ug)",
"unit": "KG/M**3"
},
"B15207": {
"description": "[SIM] PM008 Concentration (tot. aerosol < 0.08 ug)",
"unit": "KG/M**3"
},
"B15200": {
"description": "[SIM] HCNM Concentration",
"unit": "KG/M**3"
},
"B15201": {
"description": "[SIM] ALDE Concentration",
"unit": "KG/M**3"
},
"B15202": {
"description": "[SIM] PM5 Concentration (tot. aerosol < 5 ug)",
"unit": "KG/M**3"
},
"B15203": {
"description": "[SIM] PM1 Concentration (tot. aerosol < 1.25 ug)",
"unit": "KG/M**3"
},
"B15208": {
"description": "[SIM] Concentration of primary particulate matter in PM10",
"unit": "KG/M**3"
},
"B15209": {
"description": "[SIM] Concentration of sulfate in PM10",
"unit": "KG/M**3"
},
"B10063": {
"description": "CHARACTERISTIC OF PRESSURE TENDENCY",
"unit": "CODE TABLE 10063"
},
"B10060": {
"description": "PRESSURE CHANGE",
"unit": "PA"
},
"B22192": {
"description": "[SIM] Current X component",
"unit": "M/S"
},
"B22193": {
"description": "[SIM] Current Y component",
"unit": "M/S"
},
"B08208": {
"description": "[SIM] Number of cloud cover mean values present",
"unit": "NUMERIC"
},
"B08209": {
"description": "[SIM] Number of cloud cover maximum values present",
"unit": "NUMERIC"
},
"B08202": {
"description": "[SIM] Number of mean pressure values present",
"unit": "NUMERIC"
},
"B08203": {
"description": "[SIM] Number of minimum pressure values present",
"unit": "NUMERIC"
},
"B08200": {
"description": "[SIM] Number of minimum relative humidity values present",
"unit": "NUMERIC"
},
"B08201": {
"description": "[SIM] Number of maximum relative humidity values present",
"unit": "NUMERIC"
},
"B08206": {
"description": "[SIM] Number of leaf wetness values present",
"unit": "NUMERIC"
},
"B08207": {
"description": "[SIM] Number of scalar wind velocity mean values present",
"unit": "NUMERIC"
},
"B08204": {
"description": "[SIM] Number of maximum pressure values present",
"unit": "NUMERIC"
},
"B08205": {
"description": "[SIM] Number of precipitation values present",
"unit": "NUMERIC"
},
"B04004": {
"description": "HOUR",
"unit": "HOUR"
},
"B04005": {
"description": "MINUTE",
"unit": "MINUTE"
},
"B04006": {
"description": "SECOND",
"unit": "SECOND"
},
"B04001": {
"description": "YEAR",
"unit": "YEAR"
},
"B04002": {
"description": "MONTH",
"unit": "MONTH"
},
"B04003": {
"description": "DAY",
"unit": "DAY"
},
"B15213": {
"description": "[SIM] Concentration of organic carbon in PM10",
"unit": "KG/M**3"
},
"B02004": {
"description": "TYPE OF INSTRUMENTATION FOR EVAPORATION MEASUREMENT OR TYPE OF CCODE TABLE 2004",
"unit": "CODE TABLE 2004"
},
"B02005": {
"description": "PRECISION OF TEMPERATURE OBSERVATION",
"unit": "K"
},
"B02002": {
"description": "TYPE OF INSTRUMENTATION FOR WIND MEASUREMENT",
"unit": "FLAG TABLE 2002"
},
"B02003": {
"description": "TYPE OF MEASURING EQUIPMENT USED",
"unit": "CODE TABLE 2003"
},
"B02001": {
"description": "TYPE OF STATION",
"unit": "CODE TABLE 2001"
},
"B23193": {
"description": "[SIM] Wet deposition of H2SO4",
"unit": "MOL/M**2"
},
"B23192": {
"description": "[SIM] Dry deposition of H2SO4",
"unit": "MOL/M**2"
},
"B13192": {
"description": "[SIM] Cloud liquid water content",
"unit": "KG/KG"
},
"B13193": {
"description": "[SIM] Cloud ice content",
"unit": "KG/KG"
},
"B14199": {
"description": "[SIM] Visible radiation (upward)",
"unit": "W/M**2"
},
"B14198": {
"description": "[SIM] Visible radiation (downward)",
"unit": "W/M**2"
},
"B14193": {
"description": "[SIM] Instantenous latent heat flux",
"unit": "W/m**2"
},
"B14192": {
"description": "[SIM] Instantenous sensible heat flux",
"unit": "W/m**2"
},
"B20044": {
"description": "AVERAGE LIQUID WATER CONTENT",
"unit": "KG/M**3"
},
"B20045": {
"description": "SUPERCOOLED LARGE DROPLET (SLD) CONDITIONS",
"unit": "CODE TABLE 20045"
},
"B20042": {
"description": "AIRFRAME ICING PRESENT",
"unit": "CODE TABLE 20042"
},
"B20043": {
"description": "PEAK LIQUID WATER CONTENT",
"unit": "KG/M**3"
},
"B14195": {
"description": "[SIM] Instantenous diffuse solar radiation",
"unit": "W/M**2"
},
"B14194": {
"description": "[SIM] Instantenous direct solar radiation",
"unit": "W/M**2"
},
"B15219": {
"description": "[SIM] Concentration of water in PM10",
"unit": "KG/M**3"
},
"B15218": {
"description": "[SIM] Concentration of biogenic BmP in PM10",
"unit": "KG/M**3"
},
"B48048": {
"description": "[SIM] Graminacee_Graminacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48049": {
"description": "[SIM] Plantaginacee_Plantaginacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B20038": {
"description": "BEARING OF ICE EDGE (SEE NOTE 3)",
"unit": "DEGREE TRUE"
},
"B20033": {
"description": "CAUSE OF ICE ACCRETION",
"unit": "FLAG TABLE 20033"
},
"B20032": {
"description": "RATE OF ICE ACCRETION",
"unit": "CODE TABLE 20032"
},
"B20031": {
"description": "ICE DEPOSIT (THICKNESS)",
"unit": "M"
},
"B48043": {
"description": "[SIM] Spore fungine_Epicoccum",
"unit": "POLLEN/M**3"
},
"B20037": {
"description": "ICE DEVELOPMENT",
"unit": "CODE TABLE 20037"
},
"B20036": {
"description": "ICE SITUATION",
"unit": "CODE TABLE 20036"
},
"B20035": {
"description": "AMOUNT AND TYPE OF ICE",
"unit": "CODE TABLE 20035"
},
"B20034": {
"description": "SEA ICE CONCENTRATION",
"unit": "CODE TABLE 20034"
},
"B22031": {
"description": "SPEED OF CURRENT",
"unit": "M/S"
},
"B22032": {
"description": "SPEED OF SEA SURFACE CURRENT",
"unit": "M/S"
},
"B22037": {
"description": "Tidal elevation with respect to national land datum",
"unit": "M"
},
"B48185": {
"description": "[SIM] Conta Juglandacee_Juglandacee indistinte 1",
"unit": "NUMERIC"
},
"B22038": {
"description": "Tidal elevation with respect to local chart datum",
"unit": "M"
},
"B48187": {
"description": "[SIM] Conta Oleacee_Ligustro",
"unit": "NUMERIC"
},
"B48186": {
"description": "[SIM] Conta Ippocastanacee_Ippocastanacee indistinte 1",
"unit": "NUMERIC"
},
"B48181": {
"description": "[SIM] Conta Platanacee_Platanacee indistinte 1",
"unit": "NUMERIC"
},
"B48180": {
"description": "[SIM] Conta Mirtacee_Mirtacee indistinte 1",
"unit": "NUMERIC"
},
"B48183": {
"description": "[SIM] Conta Pinacee_Pinacee indistinte 1",
"unit": "NUMERIC"
},
"B48182": {
"description": "[SIM] Conta Aceraceae_Aceracee indistinte 1",
"unit": "NUMERIC"
},
"B05021": {
"description": "BEARING OR AZIMUTH",
"unit": "DEGREE TRUE"
},
"B05022": {
"description": "SOLAR AZIMUTH",
"unit": "DEGREE TRUE"
},
"B33015": {
"description": "DATA QUALITY CHECK INDICATOR",
"unit": "CODE TABLE 33015"
},
"B33201": {
"description": "[SIM] Kalman coefficient, state vector (s.v.) x1",
"unit": "NUMERIC"
},
"B33202": {
"description": "[SIM] Kalman coefficient, state vector (s.v.) x2",
"unit": "NUMERIC"
},
"B33203": {
"description": "[SIM] Kalman coefficient, s.v. error covariance matrix(1,1)",
"unit": "NUMERIC"
},
"B33204": {
"description": "[SIM] Kalman coefficient, s.v. error covariance matrix(1,2)",
"unit": "NUMERIC"
},
"B33205": {
"description": "[SIM] Kalman coefficient, s.v. error covariance matrix(2,1)",
"unit": "NUMERIC"
},
"B33206": {
"description": "[SIM] Kalman coefficient, s.v. error covariance matrix(2,2)",
"unit": "NUMERIC"
},
"B33207": {
"description": "[SIM] Kalman observation sequential counter",
"unit": "NUMERIC"
},
"B33208": {
"description": "[SIM] Kalman osservation missing counter",
"unit": "NUMERIC"
},
"B33209": {
"description": "[SIM] Normalized Density Index",
"unit": "%"
},
"B01194": {
"description": "[SIM] Report mnemonic",
"unit": "CCITTIA5"
},
"B01193": {
"description": "[SIM] Report code",
"unit": "NUMERIC"
},
"B01192": {
"description": "[SIM] MeteoDB station ID",
"unit": "NUMERIC"
},
"B48130": {
"description": "[SIM] Conta Betulacee_Betulla",
"unit": "NUMERIC"
},
"B48131": {
"description": "[SIM] Conta Betulacee_Betulacee indistinte",
"unit": "NUMERIC"
},
"B48132": {
"description": "[SIM] Conta Composite_Ambrosia",
"unit": "NUMERIC"
},
"B48133": {
"description": "[SIM] Conta Composite_Artemisia",
"unit": "NUMERIC"
},
"B48134": {
"description": "[SIM] Conta Composite_Composite indistinte",
"unit": "NUMERIC"
},
"B48135": {
"description": "[SIM] Conta Corilacee_Nocciolo",
"unit": "NUMERIC"
},
"B48136": {
"description": "[SIM] Conta Corilacee_Carpino bianco -Carpino nero",
"unit": "NUMERIC"
},
"B48137": {
"description": "[SIM] Conta Corilacee_Corilacee indistinte",
"unit": "NUMERIC"
},
"B48138": {
"description": "[SIM] Conta Fagacee_Castagno",
"unit": "NUMERIC"
},
"B48139": {
"description": "[SIM] Conta Fagacee_Faggio",
"unit": "NUMERIC"
},
"B12121": {
"description": "GROUND MINIMUM TEMPERATURE",
"unit": "K"
},
"B48026": {
"description": "[SIM] Mirtacee_Mirtacee indistinte",
"unit": "POLLEN/M**3"
},
"B15212": {
"description": "[SIM] Concentration of black carbon in PM10",
"unit": "KG/M**3"
},
"B48027": {
"description": "[SIM] Ulmacee_Bagolaro comune",
"unit": "POLLEN/M**3"
},
"B02048": {
"description": "SATELLITE SENSOR INDICATOR",
"unit": "CODE TABLE 2048"
},
"B10052": {
"description": "ALTIMETER SETTING (QNH)",
"unit": "PA"
},
"B48028": {
"description": "[SIM] Ulmacee_Olmo campestre",
"unit": "POLLEN/M**3"
},
"B10051": {
"description": "PRESSURE REDUCED TO MEAN SEA LEVEL",
"unit": "PA"
},
"B48029": {
"description": "[SIM] Ulmacee_Ulmacee indistinte",
"unit": "POLLEN/M**3"
},
"B04196": {
"description": "[SIM] Relative humidity event - time of occurrence",
"unit": "MINUTE"
},
"B04197": {
"description": "[SIM] Wind velocity event - time of occurrence",
"unit": "MINUTE"
},
"B04194": {
"description": "[SIM] Time range P2",
"unit": "NUMERIC"
},
"B04195": {
"description": "[SIM] Temperature event - time of occurrence",
"unit": "MINUTE"
},
"B04192": {
"description": "[SIM] Time range type",
"unit": "NUMERIC"
},
"B04193": {
"description": "[SIM] Time range P1",
"unit": "NUMERIC"
},
"B04198": {
"description": "[SIM] Pressure event - time of occurrence",
"unit": "MINUTE"
},
"B48008": {
"description": "[SIM] Corilacee_Nocciolo",
"unit": "POLLEN/M**3"
},
"B48009": {
"description": "[SIM] Corilacee_Carpino bianco -Carpino nero",
"unit": "POLLEN/M**3"
},
"B48004": {
"description": "[SIM] Betulacee_Betulacee indistinte",
"unit": "POLLEN/M**3"
},
"B48005": {
"description": "[SIM] Composite_Ambrosia",
"unit": "POLLEN/M**3"
},
"B48006": {
"description": "[SIM] Composite_Artemisia",
"unit": "POLLEN/M**3"
},
"B48007": {
"description": "[SIM] Composite_Composite indistinte",
"unit": "POLLEN/M**3"
},
"B48001": {
"description": "[SIM] Graminacee_Graminacee indistinte",
"unit": "POLLEN/M**3"
},
"B48002": {
"description": "[SIM] Betulacee_Ontano nero",
"unit": "POLLEN/M**3"
},
"B48003": {
"description": "[SIM] Betulacee_Betulla",
"unit": "POLLEN/M**3"
},
"B01023": {
"description": "OBSERVATION SEQUENCE NUMBER",
"unit": "NUMERIC"
},
"B11016": {
"description": "EXTREME COUNTERCLOCKWISE WIND DIRECTION OF A VARIABLE WIND",
"unit": "DEGREE TRUE"
},
"B20003": {
"description": "PRESENT WEATHER (SEE NOTE 1)",
"unit": "CODE TABLE 20003"
},
"B20001": {
"description": "HORIZONTAL VISIBILITY",
"unit": "M"
},
"B20004": {
"description": "PAST WEATHER (1) (SEE NOTE 2)",
"unit": "CODE TABLE 20004"
},
"B20005": {
"description": "PAST WEATHER (2) (SEE NOTE 2)",
"unit": "CODE TABLE 20005"
},
"B20009": {
"description": "GENERAL WEATHER INDICATOR (TAF/METAR)",
"unit": "CODE TABLE 20009"
},
"B13206": {
"description": "[SIM] Soil water content",
"unit": "KG/M**2"
},
"B13204": {
"description": "[SIM] Total convective precipitation (liquid + snow)",
"unit": "KG/M**2"
},
"B13205": {
"description": "[SIM] Snowfall (grid-scale + convective)",
"unit": "KG/M**2"
},
"B13202": {
"description": "[SIM] Convective liquid precipitation",
"unit": "KG/M**2"
},
"B13203": {
"description": "[SIM] Convective snowfall",
"unit": "KG/M**2"
},
"B13200": {
"description": "[SIM] Grid-scale liquid precipitation",
"unit": "KG/M**2"
},
"B13201": {
"description": "[SIM] Grid-scale snowfall",
"unit": "KG/M**2"
},
"B48174": {
"description": "[SIM] Conta Altre Spore / Non identificati_Spore fungine non ideNUMERIC",
"unit": "NUMERIC"
},
"B48175": {
"description": "[SIM] Conta Graminacee_Graminacee indistinte 1",
"unit": "NUMERIC"
},
"B48176": {
"description": "[SIM] Conta Plantaginacee_Plantaginacee indistinte 1",
"unit": "NUMERIC"
},
"B48177": {
"description": "[SIM] Conta Urticacee_Urticacee indistinte 1",
"unit": "NUMERIC"
},
"B48170": {
"description": "[SIM] Conta Spore fungine_Epicoccum",
"unit": "NUMERIC"
},
"B48171": {
"description": "[SIM] Conta Altri Pollini / Non Identificati_Altri pollini identNUMERIC",
"unit": "NUMERIC"
},
"B48172": {
"description": "[SIM] Conta Altri Pollini / Non Identificati_Pollini non identifNUMERIC",
"unit": "NUMERIC"
},
"B48173": {
"description": "[SIM] Conta Altre Spore / Non identificati_Altre spore fungine",
"unit": "NUMERIC"
},
"B31012": {
"description": "EXTENDED DELAYED DESCRIPTOR AND DATA REPETITION FACTOR",
"unit": "NUMERIC"
},
"B31011": {
"description": "DELAYED DESCRIPTOR AND DATA REPETITION FACTOR",
"unit": "NUMERIC"
},
"B07030": {
"description": "HEIGHT OF STATION GROUND ABOVE MEAN SEA LEVEL (SEE NOTE 3)",
"unit": "M"
},
"B07031": {
"description": "HEIGHT OF BAROMETER ABOVE MEAN SEA LEVEL (SEE NOTE 4)",
"unit": "M"
},
"B07032": {
"description": "HEIGHT OF SENSOR ABOVE LOCAL GROUND (OR DECK OF MARINE PLATFORM)M",
"unit": "M"
},
"B22049": {
"description": "SEA-SURFACE TEMPERATURE",
"unit": "K"
},
"B12003": {
"description": "DEW-POINT TEMPERATURE",
"unit": "K"
},
"B12001": {
"description": "TEMPERATURE/AIR TEMPERATURE",
"unit": "K"
},
"B22043": {
"description": "SEA/WATER TEMPERATURE",
"unit": "K"
},
"B07195": {
"description": "[SIM] Second level type",
"unit": "NUMERIC"
},
"B07194": {
"description": "[SIM] Level L2",
"unit": "NUMERIC"
},
"B07193": {
"description": "[SIM] Level L1",
"unit": "NUMERIC"
},
"B07192": {
"description": "[SIM] First level type",
"unit": "NUMERIC"
},
"B15198": {
"description": "[SIM] PM2.5 Concentration",
"unit": "KG/M**3"
},
"B15199": {
"description": "[SIM] NOY Concentration",
"unit": "KG/M**3"
},
"B15211": {
"description": "[SIM] Concentration of ammonium in PM10",
"unit": "KG/M**3"
},
"B15210": {
"description": "[SIM] Concentration of nitrate in PM10",
"unit": "KG/M**3"
},
"B15217": {
"description": "[SIM] Concentration of biogenic A1D in PM10",
"unit": "KG/M**3"
},
"B15216": {
"description": "[SIM] Concentration of anthrop. BmP in PM10",
"unit": "KG/M**3"
},
"B15215": {
"description": "[SIM] Concentration of anthrop. A1D in PM10",
"unit": "KG/M**3"
},
"B15214": {
"description": "[SIM] Concentration of dust in PM10",
"unit": "KG/M**3"
},
"B15192": {
"description": "[SIM] NO Concentration",
"unit": "KG/M**3"
},
"B15193": {
"description": "[SIM] NO2 Concentration",
"unit": "KG/M**3"
},
"B15194": {
"description": "[SIM] O3 Concentration",
"unit": "KG/M**3"
},
"B15195": {
"description": "[SIM] PM10 Concentration",
"unit": "KG/M**3"
},
"B15196": {
"description": "[SIM] CO Concentration",
"unit": "KG/M**3"
},
"B15197": {
"description": "[SIM] SO2 Concentration",
"unit": "KG/M**3"
},
"B48145": {
"description": "[SIM] Conta Plantaginacee_Plantaginacee indistinte",
"unit": "NUMERIC"
},
"B48144": {
"description": "[SIM] Conta Oleacee_Oleacee indistinte",
"unit": "NUMERIC"
},
"B08210": {
"description": "[SIM] Number of cloud cover minimum values present",
"unit": "NUMERIC"
},
"B02014": {
"description": "TRACKING TECHNIQUE/STATUS OF SYSTEM USED",
"unit": "CODE TABLE 2014"
},
"B02011": {
"description": "RADIOSONDE TYPE",
"unit": "CODE TABLE 2011"
},
"B02013": {
"description": "SOLAR AND INFRARED RADIATION CORRECTION",
"unit": "CODE TABLE 2013"
},
"B02012": {
"description": "RADIOSONDE COMPUTATIONAL METHOD",
"unit": "CODE TABLE 2012"
},
"B33006": {
"description": "INTERNAL MEASUREMENT STATUS INFORMATION (AWS)",
"unit": "CODE TABLE 33006"
},
"B04086": {
"description": "LONG TIME PERIOD OR DISPLACEMENT",
"unit": "SECOND"
},
"B01002": {
"description": "WMO STATION NUMBER",
"unit": "NUMERIC"
},
"B20194": {
"description": "[SIM] Presence of shower",
"unit": "BOOLEAN"
},
"B20195": {
"description": "[SIM] Presence of hail",
"unit": "BOOLEAN"
},
"B20196": {
"description": "[SIM] Presence of thunderstorm",
"unit": "BOOLEAN"
},
"B20197": {
"description": "[SIM] Presence of snow",
"unit": "BOOLEAN"
},
"B20192": {
"description": "[SIM] Presence of rain > 1mm",
"unit": "BOOLEAN"
},
"B20193": {
"description": "[SIM] Cloud type (METAR)",
"unit": "CCITTIA5"
},
"B06001": {
"description": "LONGITUDE (HIGH ACCURACY)",
"unit": "DEGREE"
},
"B20198": {
"description": "[SIM] Presence of frost",
"unit": "BOOLEAN"
},
"B20199": {
"description": "[SIM] Presence of dew",
"unit": "BOOLEAN"
},
"B13031": {
"description": "EVAPOTRANSPIRATION",
"unit": "KG/M**2"
},
"B13033": {
"description": "EVAPORATION/EVAPOTRANSPIRATION",
"unit": "KG/M**2"
},
"B48057": {
"description": "[SIM] Ciperacee_Ciperacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48056": {
"description": "[SIM] Pinacee_Pinacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48055": {
"description": "[SIM] Aceraceae_Aceracee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48054": {
"description": "[SIM] Platanacee_Platanacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48053": {
"description": "[SIM] Mirtacee_Mirtacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48052": {
"description": "[SIM] Euphorbiacee_Euforbiacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48051": {
"description": "[SIM] Poligonacee_Poligonacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48050": {
"description": "[SIM] Urticacee_Urticacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48059": {
"description": "[SIM] Ippocastanacee_Ippocastanacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48058": {
"description": "[SIM] Juglandacee_Juglandacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B11208": {
"description": "[SIM] Distance covered by the hourly mean wind",
"unit": "M"
},
"B25076": {
"description": "LOG-10 OF (TEMP-RAD CENTRAL WAVENUMBER) FOR ATOVS",
"unit": "LOGM-1"
},
"B02070": {
"description": "ORIGINAL SPECIFICATION OF LATITUDE/LONGITUDE",
"unit": "CODE TABLE 2070"
},
"B48045": {
"description": "[SIM] Altri Pollini / Non Identificati_Pollini non identificati",
"unit": "POLLEN/M**3"
},
"B11200": {
"description": "[SIM] U-component of momentum flux",
"unit": "N/M**2"
},
"B11201": {
"description": "[SIM] V-component of momentum flux",
"unit": "N/M**2"
},
"B11202": {
"description": "[SIM] Friction velocity (diagmet)",
"unit": "M/S"
},
"B11203": {
"description": "[SIM] Mixing height (diagmet)",
"unit": "M"
},
"B11204": {
"description": "[SIM] Obukov lenght (diagmet)",
"unit": "M"
},
"B11205": {
"description": "[SIM] Convective velocitiy scale (diagmet)",
"unit": "M/S"
},
"B11206": {
"description": "[SIM] Friction velocity (COSMO)",
"unit": "M/S"
},
"B11207": {
"description": "[SIM] Obukov lenght (COSMO)",
"unit": "M"
},
"B11061": {
"description": "ABSOLUTE WIND SHEAR IN 1 KM LAYER BELOW",
"unit": "M/S"
},
"B11062": {
"description": "ABSOLUTE WIND SHEAR IN 1 KM LAYER ABOVE",
"unit": "M/S"
},
"B08009": {
"description": "DETAILED PHASE OF FLIGHT",
"unit": "CODE TABLE 8009"
},
"B08002": {
"description": "VERTICAL SIGNIFICANCE (SURFACE OBSERVATIONS)",
"unit": "CODE TABLE 8002"
},
"B08004": {
"description": "PHASE OF AIRCRAFT FLIGHT",
"unit": "CODE TABLE 8004"
},
"B22001": {
"description": "DIRECTION OF WAVES",
"unit": "DEGREE TRUE"
},
"B22002": {
"description": "DIRECTION OF WIND WAVES",
"unit": "DEGREE TRUE"
},
"B22003": {
"description": "DIRECTION OF SWELL WAVES",
"unit": "DEGREE TRUE"
},
"B01063": {
"description": "ICAO LOCATION INDICATOR",
"unit": "CCITTIA5"
},
"B33024": {
"description": "STATION ELEVATION QUALITY MARK (FOR MOBILE STATIONS)",
"unit": "CODE TABLE 33024"
},
"B33025": {
"description": "ACARS INTERPOLATED VALUES",
"unit": "CODE TABLE 33025"
},
"B33026": {
"description": "MOISTURE QUALITY",
"unit": "CODE TABLE 33026"
},
"B33027": {
"description": "LOCATION QUALITY CLASS (RANGE OF RADIUS OF 66 % CONFIDENCE)",
"unit": "CODE TABLE 33027"
},
"B33020": {
"description": "QUALITY CONTROL INDICATION OF FOLLOWING VALUE",
"unit": "CODE TABLE 33020"
},
"B33021": {
"description": "QUALITY OF FOLLOWING VALUE",
"unit": "CODE TABLE 33021"
},
"B33022": {
"description": "QUALITY OF BUOY SATELLITE TRANSMISSION",
"unit": "CODE TABLE 33022"
},
"B33023": {
"description": "QUALITY OF BUOY LOCATION",
"unit": "CODE TABLE 33023"
},
"B48178": {
"description": "[SIM] Conta Poligonacee_Poligonacee indistinte 1",
"unit": "NUMERIC"
},
"B33031": {
"description": "SCAN LINE QUALITY FLAGS FOR ATOVS",
"unit": "FLAG TABLE 33031"
},
"B48179": {
"description": "[SIM] Conta Euphorbiacee_Euforbiacee indistinte 1",
"unit": "NUMERIC"
},
"B31021": {
"description": "ASSOCIATED FIELD SIGNIFICANCE",
"unit": "CODE TABLE 31021"
},
"B12030": {
"description": "SOIL TEMPERATURE",
"unit": "K"
},
"B33003": {
"description": "QUALITY INFORMATION",
"unit": "CODE TABLE 33003"
},
"B48129": {
"description": "[SIM] Conta Betulacee_Ontano nero",
"unit": "NUMERIC"
},
"B48128": {
"description": "[SIM] Conta Graminacee_Graminacee indistinte",
"unit": "NUMERIC"
},
"B12131": {
"description": "SNOW TEMPERATURE",
"unit": "K"
},
"B15228": {
"description": "[SIM] NH3 Concentration",
"unit": "KG/M**3"
},
"B15229": {
"description": "[SIM] Concentration of primary part. matter in aerosol",
"unit": "KG/M**3"
},
"B15222": {
"description": "[SIM] Total concentration of primary aerosol in PM10",
"unit": "KG/M**3"
},
"B15223": {
"description": "[SIM] Total concentration of secondary aerosol in PM10",
"unit": "KG/M**3"
},
"B15220": {
"description": "[SIM] Concentration of sea salt in PM10",
"unit": "KG/M**3"
},
"B15221": {
"description": "[SIM] Concentration of secondary organic aerosol in PM10",
"unit": "KG/M**3"
},
"B15226": {
"description": "[SIM] Uncertainity in NO2 estimate (Pesco)",
"unit": "KG/M**3"
},
"B15227": {
"description": "[SIM] Uncertainity in PM2.5 estimate (Pesco)",
"unit": "KG/M**3"
},
"B15224": {
"description": "[SIM] Uncertainity in O3 estimate (Pesco)",
"unit": "KG/M**3"
},
"B15225": {
"description": "[SIM] Uncertainity in PM10 estimate (Pesco)",
"unit": "KG/M**3"
},
"B48019": {
"description": "[SIM] Urticacee_Urticacee indistinte",
"unit": "POLLEN/M**3"
},
"B48018": {
"description": "[SIM] Plantaginacee_Plantaginacee indistinte",
"unit": "POLLEN/M**3"
},
"B48013": {
"description": "[SIM] Fagacee_Quercia",
"unit": "POLLEN/M**3"
},
"B48012": {
"description": "[SIM] Fagacee_Faggio",
"unit": "POLLEN/M**3"
},
"B48011": {
"description": "[SIM] Fagacee_Castagno",
"unit": "POLLEN/M**3"
},
"B48010": {
"description": "[SIM] Corilacee_Corilacee indistinte",
"unit": "POLLEN/M**3"
},
"B48017": {
"description": "[SIM] Oleacee_Oleacee indistinte",
"unit": "POLLEN/M**3"
},
"B48016": {
"description": "[SIM] Oleacee_Frassino",
"unit": "POLLEN/M**3"
},
"B48015": {
"description": "[SIM] Oleacee_Olivo",
"unit": "POLLEN/M**3"
},
"B48014": {
"description": "[SIM] Fagacee_Fagacee indistinte",
"unit": "POLLEN/M**3"
},
"B01019": {
"description": "LONG STATION OR SITE NAME",
"unit": "CCITTIA5"
},
"B01011": {
"description": "SHIP OR MOBILE LAND STATION IDENTIFIER",
"unit": "CCITTIA5"
},
"B01012": {
"description": "DIRECTION OF MOTION OF MOVING OBSERVING PLATFORM**",
"unit": "DEGREE TRUE"
},
"B01013": {
"description": "SPEED OF MOTION OF MOVING OBSERVING PLATFORM*",
"unit": "M/S"
},
"B08044": {
"description": "(VAL) CAS REGISTRY NUMBER",
"unit": "CCITTIA5"
},
"B14031": {
"description": "TOTAL SUNSHINE",
"unit": "MINUTE"
},
"B08042": {
"description": "EXTENDED VERTICAL SOUNDING SIGNIFICANCE",
"unit": "FLAG TABLE 8042"
},
"B20011": {
"description": "CLOUD AMOUNT",
"unit": "CODE TABLE 20011"
},
"B20010": {
"description": "CLOUD COVER (TOTAL)",
"unit": "%"
},
"B20013": {
"description": "HEIGHT OF BASE OF CLOUD",
"unit": "M"
},
"B20012": {
"description": "CLOUD TYPE",
"unit": "CODE TABLE 20012"
},
"B20017": {
"description": "CLOUD TOP DESCRIPTION",
"unit": "CODE TABLE 20017"
},
"B20019": {
"description": "SIGNIFICANT PRESENT OR FORECAST WEATHER",
"unit": "CCITTIA5"
},
"B48060": {
"description": "[SIM] Oleacee_Ligustro",
"unit": "POLLEN/M**3"
},
"B20200": {
"description": "[SIM] Presence of fog",
"unit": "BOOLEAN"
},
"B20201": {
"description": "[SIM] Presence of water-spout",
"unit": "BOOLEAN"
},
"B20202": {
"description": "[SIM] State of the ground with snow",
"unit": "CODE TABLE"
},
"B12061": {
"description": "SKIN TEMPERATURE",
"unit": "K"
},
"B12063": {
"description": "BRIGHTNESS TEMPERATURE",
"unit": "K"
},
"B13215": {
"description": "[SIM] River level",
"unit": "M"
},
"B13217": {
"description": "[SIM] 5 minutes precipitation",
"unit": "KG/M**2"
},
"B13216": {
"description": "[SIM] Hourly precipitation",
"unit": "KG/M**2"
},
"B13210": {
"description": "[SIM] Penetration of the probe in the snow",
"unit": "M"
},
"B13212": {
"description": "[SIM] Leaf wetness duration",
"unit": "S"
},
"B13219": {
"description": "[SIM] 15 minutes precipitation",
"unit": "KG/M**2"
},
"B13218": {
"description": "[SIM] 10 minutes precipitation",
"unit": "KG/M**2"
},
"B48163": {
"description": "[SIM] Conta Ciperacee_Ciperacee indistinte",
"unit": "NUMERIC"
},
"B48162": {
"description": "[SIM] Conta Salicacee_Salicacee indistinte",
"unit": "NUMERIC"
},
"B48161": {
"description": "[SIM] Conta Salicacee_Pioppo",
"unit": "NUMERIC"
},
"B48160": {
"description": "[SIM] Conta Salicacee_Salice comune",
"unit": "NUMERIC"
},
"B48167": {
"description": "[SIM] Conta Spore fungine_Botrytis",
"unit": "NUMERIC"
},
"B48166": {
"description": "[SIM] Conta Spore fungine_Alternaria",
"unit": "NUMERIC"
},
"B48165": {
"description": "[SIM] Conta Ippocastanacee_Ippocastanacee indistinte",
"unit": "NUMERIC"
},
"B48164": {
"description": "[SIM] Conta Juglandacee_Juglandacee indistinte",
"unit": "NUMERIC"
},
"B48169": {
"description": "[SIM] Conta Spore fungine_Cladosporium",
"unit": "NUMERIC"
},
"B48168": {
"description": "[SIM] Conta Spore fungine_Stemphylium",
"unit": "NUMERIC"
},
"B07025": {
"description": "SOLAR ZENITH ANGLE",
"unit": "DEGREE"
},
"B07024": {
"description": "SATELLITE ZENITH ANGLE",
"unit": "DEGREE"
},
"B08198": {
"description": "[SIM] Number of maximum temperature values present",
"unit": "NUMERIC"
},
"B08199": {
"description": "[SIM] Number of mean relative humidity values present",
"unit": "NUMERIC"
},
"B05043": {
"description": "FIELD OF VIEW NUMBER",
"unit": "NUMERIC"
},
"B05041": {
"description": "SCAN LINE NUMBER",
"unit": "NUMERIC"
},
"B05040": {
"description": "ORBIT NUMBER",
"unit": "NUMERIC"
},
"B08192": {
"description": "[SIM] Number of wind velocity mean values present",
"unit": "NUMERIC"
},
"B08193": {
"description": "[SIM] Number of wind velocity minimum values present",
"unit": "NUMERIC"
},
"B08196": {
"description": "[SIM] Number of mean temperature values present",
"unit": "NUMERIC"
},
"B08197": {
"description": "[SIM] Number of minimum temperature values present",
"unit": "NUMERIC"
},
"B08194": {
"description": "[SIM] Number of wind velocity maximum values present",
"unit": "NUMERIC"
},
"B08195": {
"description": "[SIM] Number of wind prevalent direction values present",
"unit": "NUMERIC"
},
"B12193": {
"description": "PSEUDO-EQUIVALENT POTENTIAL TEMPERATURE",
"unit": "K"
},
"B12192": {
"description": "POTENTIAL TEMPERATURE",
"unit": "K"
},
"B10004": {
"description": "PRESSURE",
"unit": "PA"
},
"B10007": {
"description": "HEIGHT",
"unit": "M"
},
"B10009": {
"description": "GEOPOTENTIAL HEIGHT",
"unit": "GPM"
},
"B10008": {
"description": "GEOPOTENTIAL",
"unit": "M**2/S**2"
},
"B12102": {
"description": "WET-BULB TEMPERATURE",
"unit": "K"
},
"B12103": {
"description": "DEW-POINT TEMPERATURE",
"unit": "K"
},
"B12101": {
"description": "TEMPERATURE/DRY-BULB TEMPERATURE",
"unit": "K"
},
"B05015": {
"description": "LATITUDE DISPLACEMENT (HIGH ACCURACY)",
"unit": "DEGREE"
},
"B13001": {
"description": "SPECIFIC HUMIDITY",
"unit": "KG/KG"
},
"B13002": {
"description": "MIXING RATIO",
"unit": "KG/KG"
},
"B13003": {
"description": "RELATIVE HUMIDITY",
"unit": "%"
},
"B20062": {
"description": "STATE OF THE GROUND (WITH OR WITHOUT SNOW)",
"unit": "CODE TABLE 20062"
},
"B06015": {
"description": "LONGITUDE DISPLACEMENT (HIGH ACCURACY)",
"unit": "DEGREE"
},
"B48022": {
"description": "[SIM] Chenopodiacee - Amarantacee Indistinte_Amaranto",
"unit": "POLLEN/M**3"
},
"B48023": {
"description": "[SIM] Chenopodiacee - Amarantacee Indistinte_Chenopodiacee - AmaPOLLEN/M**3",
"unit": "POLLEN/M**3"
},
"B48020": {
"description": "[SIM] Cupressacee - Taxacee indistinte_Cipresso comune",
"unit": "POLLEN/M**3"
},
"B48021": {
"description": "[SIM] Cupressacee - Taxacee indistinte_Cupressacee - Taxacee indPOLLEN/M**3",
"unit": "POLLEN/M**3"
},
"B11198": {
"description": "[SIM] SQRT(2*TKE)",
"unit": "M/S"
},
"B11199": {
"description": "[SIM] Surface Roughness",
"unit": "M"
},
"B48024": {
"description": "[SIM] Poligonacee_Poligonacee indistinte",
"unit": "POLLEN/M**3"
},
"B48025": {
"description": "[SIM] Euphorbiacee_Euforbiacee indistinte",
"unit": "POLLEN/M**3"
},
"B11194": {
"description": "[SIM] Friction velocity (calmet)",
"unit": "M/S"
},
"B11195": {
"description": "[SIM] Mixing height (calmet)",
"unit": "M"
},
"B11196": {
"description": "[SIM] Obukov lenght (calmet)",
"unit": "M"
},
"B11197": {
"description": "[SIM] Convective velocitiy scale (calmet)",
"unit": "M/S"
},
"B11192": {
"description": "[SIM] W-component terrain following",
"unit": "M/S"
},
"B11193": {
"description": "[SIM] Stability class",
"unit": "NUMERIC"
},
"B11077": {
"description": "REPORTING INTERVAL OR AVERAGING TIME FOR EDDY DISSIPATION RATE",
"unit": "S"
},
"B11076": {
"description": "PEAK TURBULENCE INTENSITY (EDDY DISSIPATION RATE)",
"unit": "M**(2/3)/S"
},
"B11075": {
"description": "MEAN TURBULENCE INTENSITY (EDDY DISSIPATION RATE)",
"unit": "M**(2/3)/S"
},
"B33040": {
"description": "CONFIDENCE INTERVAL",
"unit": "%"
},
"B33041": {
"description": "ATTRIBUTE OF FOLLOWING VALUE",
"unit": "CODE TABLE 33041"
},
"B11017": {
"description": "EXTREME CLOCKWISE WIND DIRECTION OF A VARIABLE WIND",
"unit": "DEGREE TRUE"
},
"B22013": {
"description": "PERIOD OF SWELL WAVES",
"unit": "S"
},
"B22012": {
"description": "PERIOD OF WIND WAVES",
"unit": "S"
},
"B22011": {
"description": "PERIOD OF WAVES",
"unit": "S"
},
"B33038": {
"description": "QUALITY FLAGS FOR GROUND-BASED GNSS DATA",
"unit": "FLAG TABLE 33038"
},
"B33033": {
"description": "FIELD OF VIEW QUALITY FLAGS FOR ATOVS",
"unit": "FLAG TABLE 33033"
},
"B33032": {
"description": "CHANNEL QUALITY FLAGS FOR ATOVS",
"unit": "FLAG TABLE 33032"
},
"B05001": {
"description": "LATITUDE (HIGH ACCURACY)",
"unit": "DEGREE"
},
"B33030": {
"description": "SCAN LINE STATUS FLAGS FOR ATOVS",
"unit": "FLAG TABLE 33030"
},
"B33037": {
"description": "WIND CORRELATION ERROR",
"unit": "FLAG TABLE 33037"
},
"B33036": {
"description": "NOMINAL CONFIDENCE THRESHOLD",
"unit": "%"
},
"B33035": {
"description": "MANUAL/AUTOMATIC QUALITY CONTROL",
"unit": "CODE TABLE 33035"
},
"B31031": {
"description": "DATA PRESENT INDICATOR",
"unit": "FLAG TABLE 31031"
},
"B07010": {
"description": "FLIGHT LEVEL",
"unit": "M"
},
"B48158": {
"description": "[SIM] Conta Aceraceae_Aceracee indistinte",
"unit": "NUMERIC"
},
"B48159": {
"description": "[SIM] Conta Pinacee_Pinacee indistinte",
"unit": "NUMERIC"
},
"B48152": {
"description": "[SIM] Conta Euphorbiacee_Euforbiacee indistinte",
"unit": "NUMERIC"
},
"B48153": {
"description": "[SIM] Conta Mirtacee_Mirtacee indistinte",
"unit": "NUMERIC"
},
"B48150": {
"description": "[SIM] Conta Chenopodiacee - Amarantacee Indistinte_ChenopodiaceeNUMERIC",
"unit": "NUMERIC"
},
"B48151": {
"description": "[SIM] Conta Poligonacee_Poligonacee indistinte",
"unit": "NUMERIC"
},
"B48156": {
"description": "[SIM] Conta Ulmacee_Ulmacee indistinte",
"unit": "NUMERIC"
},
"B48157": {
"description": "[SIM] Conta Platanacee_Platanacee indistinte",
"unit": "NUMERIC"
},
"B48154": {
"description": "[SIM] Conta Ulmacee_Bagolaro comune",
"unit": "NUMERIC"
},
"B48155": {
"description": "[SIM] Conta Ulmacee_Olmo campestre",
"unit": "NUMERIC"
},
"B14197": {
"description": "[SIM] INSTANTANEOUS NET SHORT-WAVE RADIATION",
"unit": "W/M**2"
},
"B14196": {
"description": "[SIM] INSTANTANEOUS NET LONG-WAVE RADIATION",
"unit": "W/M**2"
},
"B02064": {
"description": "AIRCRAFT ROLL ANGLE QUALITY",
"unit": "CODE TABLE 2064"
},
"B02061": {
"description": "AIRCRAFT NAVIGATIONAL SYSTEM",
"unit": "CODE TABLE 2061"
},
"B02062": {
"description": "TYPE OF AIRCRAFT DATA RELAY SYSTEM",
"unit": "CODE TABLE 2062"
},
"B02063": {
"description": "AIRCRAFT ROLL ANGLE",
"unit": "DEGREE"
},
"B15231": {
"description": "[SIM] Concentration of nitrate in aerosol",
"unit": "KG/M**3"
},
"B15230": {
"description": "[SIM] Concentration of sulfate in aerosol",
"unit": "KG/M**3"
},
"B15233": {
"description": "[SIM] Concentration of anthrop. sec. org. in aerosol",
"unit": "KG/M**3"
},
"B15232": {
"description": "[SIM] Concentration of ammonium in aerosol",
"unit": "KG/M**3"
},
"B15235": {
"description": "[SIM] Concentration of ISOPA1 in PM10",
"unit": "KG/M**3"
},
"B15234": {
"description": "[SIM] Concentration of biogenic sec. org. in aerosol",
"unit": "KG/M**3"
},
"B15236": {
"description": "[SIM] C6H6 Concentration",
"unit": "KG/M**3"
},
"B14021": {
"description": "GLOBAL SOLAR RADIATION, INTEGRATED OVER PERIOD SPECIFIED",
"unit": "J/M**2"
},
"B13196": {
"description": "[SIM] Precipitating ice",
"unit": "KG/KG"
},
"B13197": {
"description": "[SIM] Total precipitating water+ice",
"unit": "KG/KG"
},
"B13194": {
"description": "[SIM] Water table depth",
"unit": "M"
},
"B13195": {
"description": "[SIM] Precipitating liquid water",
"unit": "KG/KG"
},
"B23195": {
"description": "[SIM] Wet deposition of NH4",
"unit": "MOL/M**2"
},
"B23194": {
"description": "[SIM] Dry deposition of NH4",
"unit": "MOL/M**2"
},
"B23197": {
"description": "[SIM] Wet deposition of HNO3",
"unit": "MOL/M**2"
},
"B23196": {
"description": "[SIM] Dry deposition of HNO3",
"unit": "MOL/M**2"
},
"B23198": {
"description": "[SIM] Solid transport by river",
"unit": "KG/S"
},
"B13198": {
"description": "[SIM] Total liquid water (cloud+precipitating)",
"unit": "KG/KG"
},
"B13199": {
"description": "[SIM] Total ice (cloud+precipitating)",
"unit": "KG/KG"
},
"B20021": {
"description": "TYPE OF PRECIPITATION",
"unit": "FLAG TABLE 20021"
},
"B11043": {
"description": "MAXIMUM WIND GUST DIRECTION",
"unit": "DEGREE TRUE"
},
"B11041": {
"description": "MAXIMUM WIND GUST SPEED",
"unit": "M/S"
},
"B13220": {
"description": "[SIM] 20 minutes precipitation",
"unit": "KG/M**2"
},
"B13221": {
"description": "[SIM] 30 minutes precipitation",
"unit": "KG/M**2"
},
"B13222": {
"description": "[SIM] 180 minutes precipitation",
"unit": "KG/M**2"
},
"B13223": {
"description": "[SIM] 360 minutes precipitation",
"unit": "KG/M**2"
},
"B13224": {
"description": "[SIM] 720 minutes precipitation",
"unit": "KG/M**2"
},
"B13225": {
"description": "[SIM] 1440 minutes precipitation",
"unit": "KG/M**2"
},
"B13226": {
"description": "[SIM] River discharge",
"unit": "M**3/S"
},
"B13227": {
"description": "[SIM] Soil volumetric water content",
"unit": "%"
},
"B13228": {
"description": "[SIM] Piezometric level",
"unit": "M"
},
"B13229": {
"description": "[SIM] Density of snow",
"unit": "KG/M**3"
},
"B22022": {
"description": "HEIGHT OF WIND WAVES",
"unit": "M"
},
"B22023": {
"description": "HEIGHT OF SWELL WAVES",
"unit": "M"
},
"B22021": {
"description": "HEIGHT OF WAVES",
"unit": "M"
},
"B08021": {
"description": "TIME SIGNIFICANCE",
"unit": "CODE TABLE 8021"
},
"B01007": {
"description": "SATELLITE IDENTIFIER",
"unit": "CODE TABLE 1007"
},
"B01006": {
"description": "AIRCRAFT FLIGHT NUMBER",
"unit": "CCITTIA5"
},
"B11031": {
"description": "DEGREE OF TURBULENCE",
"unit": "CODE TABLE 11031"
},
"B11037": {
"description": "TURBULENCE INDEX",
"unit": "CODE TABLE 11037"
},
"B33007": {
"description": "PER CENT CONFIDENCE",
"unit": "%"
},
"B01001": {
"description": "WMO BLOCK NUMBER",
"unit": "NUMERIC"
},
"B33005": {
"description": "QUALITY INFORMATION (AWS DATA)",
"unit": "FLAG TABLE 33005"
},
"B11039": {
"description": "EXTENDED TIME OF OCCURRENCE OF PEAK EDDY DISSIPATION RATE",
"unit": "CODE TABLE 11039"
},
"B01008": {
"description": "AIRCRAFT REGISTRATION NUMBER OR OTHER IDENTIFICATION",
"unit": "CCITTIA5"
},
"B01216": {
"description": "AIR QUALITY OBSERVING STATION AREA TYPE",
"unit": "CODE TABLE 001216"
},
"B01217": {
"description": "AIR QUALITY OBSERVING STATION TERRAIN TYPE",
"unit": "CODE TABLE 001217"
},
"B01214": {
"description": "GEMS AIR QUALITY OBSERVING STATION CODE",
"unit": "CCITTIA5"
},
"B01215": {
"description": "AIR QUALITY OBSERVING STATION DOMINANT EMISSION SOURCE",
"unit": "CODE TABLE 001215"
},
"B01212": {
"description": "AIR QUALITY OBSERVING STATION LOCAL CODE",
"unit": "CCITTIA5"
},
"B01213": {
"description": "AIRBASE AIR QUALITY OBSERVING STATION CODE",
"unit": "CCITTIA5"
},
"B48040": {
"description": "[SIM] Spore fungine_Botrytis",
"unit": "POLLEN/M**3"
},
"B48041": {
"description": "[SIM] Spore fungine_Stemphylium",
"unit": "POLLEN/M**3"
},
"B48042": {
"description": "[SIM] Spore fungine_Cladosporium",
"unit": "POLLEN/M**3"
},
"B48044": {
"description": "[SIM] Altri Pollini / Non Identificati_Altri pollini identificatPOLLEN/M**3",
"unit": "POLLEN/M**3"
},
"B14201": {
"description": "[SIM] Infrared radiation (upward)",
"unit": "W/M**2"
},
"B14200": {
"description": "[SIM] Infrared radiation (downward)",
"unit": "W/M**2"
},
"B48046": {
"description": "[SIM] Altre Spore / Non identificati_Altre spore fungine",
"unit": "POLLEN/M**3"
},
"B48047": {
"description": "[SIM] Altre Spore / Non identificati_Spore fungine non identificPOLLEN/M**3",
"unit": "POLLEN/M**3"
},
"B10197": {
"description": "ANEMOMETER HEIGHT",
"unit": "M"
},
"B02039": {
"description": "METHOD OF WET-BULB TEMPERATURE MEASUREMENT",
"unit": "CODE TABLE 2039"
},
"B02038": {
"description": "METHOD OF WATER TEMPERATURE AND/OR SALINITY MEASUREMENT",
"unit": "CODE TABLE 2038"
},
"B48140": {
"description": "[SIM] Conta Fagacee_Quercia",
"unit": "NUMERIC"
},
"B13013": {
"description": "TOTAL SNOW DEPTH",
"unit": "M"
},
"B13012": {
"description": "DEPTH OF FRESH SNOW",
"unit": "M"
},
"B13011": {
"description": "TOTAL PRECIPITATION / TOTAL WATER EQUIVALENT",
"unit": "KG/M**2"
},
"B33002": {
"description": "QUALITY INFORMATION",
"unit": "CODE TABLE 33002"
},
"B48031": {
"description": "[SIM] Aceraceae_Aceracee indistinte",
"unit": "POLLEN/M**3"
},
"B48030": {
"description": "[SIM] Platanacee_Platanacee indistinte",
"unit": "POLLEN/M**3"
},
"B48033": {
"description": "[SIM] Salicacee_Salice comune",
"unit": "POLLEN/M**3"
},
"B48032": {
"description": "[SIM] Pinacee_Pinacee indistinte",
"unit": "POLLEN/M**3"
},
"B48035": {
"description": "[SIM] Salicacee_Salicacee indistinte",
"unit": "POLLEN/M**3"
},
"B48034": {
"description": "[SIM] Salicacee_Pioppo",
"unit": "POLLEN/M**3"
},
"B48037": {
"description": "[SIM] Juglandacee_Juglandacee indistinte",
"unit": "POLLEN/M**3"
},
"B48036": {
"description": "[SIM] Ciperacee_Ciperacee indistinte",
"unit": "POLLEN/M**3"
},
"B48039": {
"description": "[SIM] Spore fungine_Alternaria",
"unit": "POLLEN/M**3"
},
"B48038": {
"description": "[SIM] Ippocastanacee_Ippocastanacee indistinte",
"unit": "POLLEN/M**3"
},
"B29192": {
"description": "[SIM] Land fraction",
"unit": "%"
},
"B13081": {
"description": "WATER CONDUCTIVITY",
"unit": "S/M"
},
"B13082": {
"description": "WATER TEMPERATURE",
"unit": "K"
}
};
this.borinud = this.borinud || {}
$.extend(true, this.borinud, {
config: {
B: B
}
});
}()); | /rmap-7.5.tar.gz/rmap-7.5/showdata/static/showdata/borinud.B.js | 0.503662 | 0.538923 | borinud.B.js | pypi |
from imagekit.models import ImageSpecField
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill, Transpose, SmartResize, ResizeToFit
from djgeojson.fields import PointField
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy
from django import VERSION as djversion
# Django >= 1.3 exposes model signals (post_delete), which are required for
# automatic file cleanup; on older versions fall back to a plain
# ProcessedImageField with no cleanup behaviour.
if ((djversion[0] == 1 and djversion[1] >= 3) or
    djversion[0] > 1):
    from django.db.models import signals
    class DeletingImageField(ProcessedImageField):
        """
        ProcessedImageField subclass that deletes the referenced file when the model object
        itself is deleted.
        WARNING: Be careful using this class - it can cause data loss! This class
        makes an attempt to see if the file's referenced elsewhere, but it can get
        it wrong in any number of cases.
        """
        def contribute_to_class(self, cls, name):
            # Register delete_file() to run after any instance of the owning
            # model class is deleted.
            super(DeletingImageField, self).contribute_to_class(cls, name)
            signals.post_delete.connect(self.delete_file, sender=cls)
        def delete_file(self, instance, sender, **kwargs):
            # post_delete receiver: remove the stored file unless it is still
            # referenced by another row or is the field default.
            file = getattr(instance, self.attname)
            # If no other object of this type references the file,
            # and it's not the default value for future objects,
            # delete it from the backend.
            if file and file.name != self.default and \
                not sender._default_manager.filter(**{self.name: file.name}):
                file.delete(save=False)
            elif file:
                # Otherwise, just close the file, so it doesn't tie up resources.
                file.close()
else:
    # No signal support: image files are left behind when rows are deleted.
    DeletingImageField=ProcessedImageField
# Choices for GeorefencedImage.category: (stored value, human-readable label).
CATEGORY_CHOICES = (
    ('meteo','Meteo phenomena'),
    ('others', 'Others'),
)
class GeorefencedImage(models.Model):
    """A user-submitted, georeferenced photo shown on the map.

    The uploaded image has its EXIF orientation applied (Transpose), is
    bounded to 1024x1024 and re-encoded as JPEG; a 128x128 thumbnail is
    derived from it for map popups.  Where signal support is available the
    stored file is removed when the row is deleted (DeletingImageField).
    """
    # Whether this geoimage is visible/usable.
    active = models.BooleanField(ugettext_lazy("Active"),default=True,null=False,blank=False,help_text=ugettext_lazy("Activate this geoimage"))
    # Point location of the photo.
    geom = PointField()
    # Free-text comment from the submitter.
    comment = models.TextField()
    #image = DeletingImageField()
    # Submitting user.  NOTE(review): no on_delete argument (pre-Django-2
    # style, consistent with the rest of this file).
    ident = models.ForeignKey(User)
    # When the photo was taken/submitted (set by the caller, not auto).
    date=models.DateTimeField(auto_now=False, auto_now_add=False)
    # One of CATEGORY_CHOICES.
    category = models.CharField(max_length=50, blank=False,choices=CATEGORY_CHOICES)
    image = DeletingImageField(processors=[Transpose(),ResizeToFit(1024, 1024)],
                               format='jpeg',
                               options={'quality': 70})
    image_thumbnail = ImageSpecField(
        source='image',
        processors = [Transpose(),SmartResize(128, 128)],
        format = 'JPEG',
        options = {'quality': 60}
    )

    @property
    def popupContent(self):
        """HTML fragment for the map popup: thumbnail link, comment, user, date.

        NOTE(review): the template prefixes image_thumbnail.url with '/';
        verify the URL is relative, otherwise this yields a double slash.
        """
        return \
            u'\
<p>\
<a href="#" onClick="window.open(\'/geoimage/{}/{}\',\'geoimage\', \'width=800, height=620\').focus(); return false;" >\
<img src="/{}" style="float:right;">\
</a>\
{}\
</p>\
<p><a href="/geoimage/{}">{}</a> {}</p>'.format(
            self.ident,
            self.id,
            self.image_thumbnail.url,
            self.comment,
            self.ident,
            self.ident,
            self.date
        )
import json
import dballe
class BaseJSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime objects as ISO 8601 strings."""

    def default(self, o):
        from datetime import datetime
        if not isinstance(o, datetime):
            # Defer to the stock encoder, which raises TypeError for
            # anything it does not know how to serialize.
            return super(BaseJSONEncoder, self).default(o)
        return o.isoformat()
class GeoJSONEncoder(BaseJSONEncoder):
    """GeoJSON encoder for iterables of DB-All.e-style records.

    Encoding an iterable produces a GeoJSON FeatureCollection; any other
    object is encoded with the rules inherited from BaseJSONEncoder.
    """
    def encode(self, o):
        # An iterable is treated as a record cursor and converted to a
        # FeatureCollection; anything else is encoded as-is.
        try:
            iterable = iter(o)
        except TypeError:
            return super(GeoJSONEncoder, self).encode(o)
        else:
            return super(GeoJSONEncoder, self).encode(self.toFeatureCollection(o))
    def default(self, o):
        from datetime import datetime
        # datetime(1000, 1, 1) is used as a "missing date" sentinel and is
        # rendered as JSON null instead of an ISO string.
        if isinstance(o, datetime) and o == datetime(1000, 1, 1, 0, 0, 0):
            return None
        else:
            return super(GeoJSONEncoder, self).default(o)
    def toSkip(self, record):
        """True if the record must be skipped, false otherwise.

        Records with the sentinel date carrying coordinate/ident variables
        (B05001, B06001, B01194) are dropped: the same information already
        appears in the feature geometry and properties.
        """
        from datetime import datetime
        if record.get("date") == datetime(1000, 1, 1, 0, 0, 0) and record.get("var") in ( "B05001", "B06001", "B01194" ):
            return True
        else:
            return False
    def toFeatureCollection(self, cursor):
        """Convert a collection of items to a feature collection."""
        return {
            "type": "FeatureCollection",
            "features": [ self.toFeature(r) for r in cursor if not self.toSkip(r) ]
        }
    def toFeature(self, rec):
        """Convert a record to a GeoJSON Point feature."""
        return {
            "type": "Feature",
            "geometry": {
                "type": "Point",
                "coordinates": [ rec["lon"], rec["lat"] ],
            },
            "properties": self.toProperties(rec)
        }
    def toProperties(self, rec):
        """Get feature properties from a record.

        NOTE(review): lon/lat are read here as raw integers via
        key(...).enqi(), while toFeature uses rec["lon"]/rec["lat"];
        confirm the mixed access is intentional.
        """
        p = {
            "ident": rec.get("ident"),
            "lon": rec.key("lon").enqi(),
            "lat": rec.key("lat").enqi(),
            "network": rec["rep_memo"],
            "level_t1": rec["level"][0],
            "level_v1": rec["level"][1],
            "level_t2": rec["level"][2],
            "level_v2": rec["level"][3],
            "trange_pind": rec["trange"][0],
            "trange_p1": rec["trange"][1],
            "trange_p2": rec["trange"][2],
            "bcode": rec["var"],
            "datetime": None
        }
        # A concrete date means a measured datum: attach date and value.
        if rec.get("date"):
            p["datetime"] = rec.get("date")
            p["value"] = rec.get(rec["var"])
        # Otherwise a summary record may carry a (min, max) date interval.
        elif rec.date_extremes() != (None, None):
            p["datetime"] = rec.date_extremes()
        return p
(function() {
var B = {
"B33194": {
"description": "[SIM] Space consistency",
"unit": "%"
},
"B33195": {
"description": "[SIM] MeteoDB variable ID",
"unit": "NUMERIC"
},
"B33196": {
"description": "[SIM] Data has been invalidated",
"unit": "CODE TABLE 33196"
},
"B33197": {
"description": "[SIM] Manual replacement in substitution",
"unit": "CODE TABLE 33197"
},
"B33192": {
"description": "[SIM] Climatological and consistency check",
"unit": "%"
},
"B33193": {
"description": "[SIM] Time consistency",
"unit": "%"
},
"B11002": {
"description": "WIND SPEED",
"unit": "M/S"
},
"B11003": {
"description": "U-COMPONENT",
"unit": "M/S"
},
"B11001": {
"description": "WIND DIRECTION",
"unit": "DEGREE TRUE"
},
"B11006": {
"description": "W-COMPONENT",
"unit": "M/S"
},
"B33050": {
"description": "GLOBAL GTSPP QUALITY FLAG",
"unit": "CODE TABLE 33050"
},
"B11004": {
"description": "V-COMPONENT",
"unit": "M/S"
},
"B11005": {
"description": "W-COMPONENT",
"unit": "PA/S"
},
"B14018": {
"description": "INSTANTANEOUS SHORT-WAVE RADIATION (incoming)",
"unit": "W/M**2"
},
"B14019": {
"description": "SURFACE ALBEDO",
"unit": "%"
},
"B14016": {
"description": "NET RADIATION",
"unit": "J/M**2"
},
"B14017": {
"description": "INSTANTANEOUS LONG-WAVE RADIATION (incoming)",
"unit": "W/M**2"
},
"B33198": {
"description": "[SIM] Observation increment",
"unit": "NUMERIC"
},
"B07002": {
"description": "HEIGHT OR ALTITUDE",
"unit": "M"
},
"B07004": {
"description": "PRESSURE",
"unit": "PA"
},
"B07007": {
"description": "HEIGHT",
"unit": "M"
},
"B31000": {
"description": "SHORT DELAYED DESCRIPTOR REPLICATION FACTOR",
"unit": "NUMERIC"
},
"B31001": {
"description": "DELAYED DESCRIPTOR REPLICATION FACTOR",
"unit": "NUMERIC"
},
"B31002": {
"description": "EXTENDED DELAYED DESCRIPTOR REPLICATION FACTOR",
"unit": "NUMERIC"
},
"B48149": {
"description": "[SIM] Conta Chenopodiacee - Amarantacee Indistinte_Amaranto",
"unit": "NUMERIC"
},
"B48148": {
"description": "[SIM] Conta Cupressacee - Taxacee indistinte_Cupressacee - TaxacNUMERIC",
"unit": "NUMERIC"
},
"B48184": {
"description": "[SIM] Conta Ciperacee_Ciperacee indistinte 1",
"unit": "NUMERIC"
},
"B48141": {
"description": "[SIM] Conta Fagacee_Fagacee indistinte",
"unit": "NUMERIC"
},
"B22074": {
"description": "AVERAGE WAVE PERIOD",
"unit": "S"
},
"B48143": {
"description": "[SIM] Conta Oleacee_Frassino",
"unit": "NUMERIC"
},
"B48142": {
"description": "[SIM] Conta Oleacee_Olivo",
"unit": "NUMERIC"
},
"B22071": {
"description": "SPECTRAL PEAK WAVE PERIOD",
"unit": "S"
},
"B22070": {
"description": "SIGNIFICANT WAVE HEIGHT",
"unit": "M"
},
"B48147": {
"description": "[SIM] Conta Cupressacee - Taxacee indistinte_Cipresso comune",
"unit": "NUMERIC"
},
"B48146": {
"description": "[SIM] Conta Urticacee_Urticacee indistinte",
"unit": "NUMERIC"
},
"B15204": {
"description": "[SIM] PM06 Concentration (tot. aerosol < 0.6 ug)",
"unit": "KG/M**3"
},
"B15205": {
"description": "[SIM] PM03 Concentration (tot. aerosol < 0.3 ug)",
"unit": "KG/M**3"
},
"B15206": {
"description": "[SIM] PM015 Concentration (tot. aerosol < 0.15 ug)",
"unit": "KG/M**3"
},
"B15207": {
"description": "[SIM] PM008 Concentration (tot. aerosol < 0.08 ug)",
"unit": "KG/M**3"
},
"B15200": {
"description": "[SIM] HCNM Concentration",
"unit": "KG/M**3"
},
"B15201": {
"description": "[SIM] ALDE Concentration",
"unit": "KG/M**3"
},
"B15202": {
"description": "[SIM] PM5 Concentration (tot. aerosol < 5 ug)",
"unit": "KG/M**3"
},
"B15203": {
"description": "[SIM] PM1 Concentration (tot. aerosol < 1.25 ug)",
"unit": "KG/M**3"
},
"B15208": {
"description": "[SIM] Concentration of primary particulate matter in PM10",
"unit": "KG/M**3"
},
"B15209": {
"description": "[SIM] Concentration of sulfate in PM10",
"unit": "KG/M**3"
},
"B10063": {
"description": "CHARACTERISTIC OF PRESSURE TENDENCY",
"unit": "CODE TABLE 10063"
},
"B10060": {
"description": "PRESSURE CHANGE",
"unit": "PA"
},
"B22192": {
"description": "[SIM] Current X component",
"unit": "M/S"
},
"B22193": {
"description": "[SIM] Current Y component",
"unit": "M/S"
},
"B08208": {
"description": "[SIM] Number of cloud cover mean values present",
"unit": "NUMERIC"
},
"B08209": {
"description": "[SIM] Number of cloud cover maximum values present",
"unit": "NUMERIC"
},
"B08202": {
"description": "[SIM] Number of mean pressure values present",
"unit": "NUMERIC"
},
"B08203": {
"description": "[SIM] Number of minimum pressure values present",
"unit": "NUMERIC"
},
"B08200": {
"description": "[SIM] Number of minimum relative humidity values present",
"unit": "NUMERIC"
},
"B08201": {
"description": "[SIM] Number of maximum relative humidity values present",
"unit": "NUMERIC"
},
"B08206": {
"description": "[SIM] Number of leaf wetness values present",
"unit": "NUMERIC"
},
"B08207": {
"description": "[SIM] Number of scalar wind velocity mean values present",
"unit": "NUMERIC"
},
"B08204": {
"description": "[SIM] Number of maximum pressure values present",
"unit": "NUMERIC"
},
"B08205": {
"description": "[SIM] Number of precipitation values present",
"unit": "NUMERIC"
},
"B04004": {
"description": "HOUR",
"unit": "HOUR"
},
"B04005": {
"description": "MINUTE",
"unit": "MINUTE"
},
"B04006": {
"description": "SECOND",
"unit": "SECOND"
},
"B04001": {
"description": "YEAR",
"unit": "YEAR"
},
"B04002": {
"description": "MONTH",
"unit": "MONTH"
},
"B04003": {
"description": "DAY",
"unit": "DAY"
},
"B15213": {
"description": "[SIM] Concentration of organic carbon in PM10",
"unit": "KG/M**3"
},
"B02004": {
"description": "TYPE OF INSTRUMENTATION FOR EVAPORATION MEASUREMENT OR TYPE OF CCODE TABLE 2004",
"unit": "CODE TABLE 2004"
},
"B02005": {
"description": "PRECISION OF TEMPERATURE OBSERVATION",
"unit": "K"
},
"B02002": {
"description": "TYPE OF INSTRUMENTATION FOR WIND MEASUREMENT",
"unit": "FLAG TABLE 2002"
},
"B02003": {
"description": "TYPE OF MEASURING EQUIPMENT USED",
"unit": "CODE TABLE 2003"
},
"B02001": {
"description": "TYPE OF STATION",
"unit": "CODE TABLE 2001"
},
"B23193": {
"description": "[SIM] Wet deposition of H2SO4",
"unit": "MOL/M**2"
},
"B23192": {
"description": "[SIM] Dry deposition of H2SO4",
"unit": "MOL/M**2"
},
"B13192": {
"description": "[SIM] Cloud liquid water content",
"unit": "KG/KG"
},
"B13193": {
"description": "[SIM] Cloud ice content",
"unit": "KG/KG"
},
"B14199": {
"description": "[SIM] Visible radiation (upward)",
"unit": "W/M**2"
},
"B14198": {
"description": "[SIM] Visible radiation (downward)",
"unit": "W/M**2"
},
"B14193": {
"description": "[SIM] Instantenous latent heat flux",
"unit": "W/m**2"
},
"B14192": {
"description": "[SIM] Instantenous sensible heat flux",
"unit": "W/m**2"
},
"B20044": {
"description": "AVERAGE LIQUID WATER CONTENT",
"unit": "KG/M**3"
},
"B20045": {
"description": "SUPERCOOLED LARGE DROPLET (SLD) CONDITIONS",
"unit": "CODE TABLE 20045"
},
"B20042": {
"description": "AIRFRAME ICING PRESENT",
"unit": "CODE TABLE 20042"
},
"B20043": {
"description": "PEAK LIQUID WATER CONTENT",
"unit": "KG/M**3"
},
"B14195": {
"description": "[SIM] Instantenous diffuse solar radiation",
"unit": "W/M**2"
},
"B14194": {
"description": "[SIM] Instantenous direct solar radiation",
"unit": "W/M**2"
},
"B15219": {
"description": "[SIM] Concentration of water in PM10",
"unit": "KG/M**3"
},
"B15218": {
"description": "[SIM] Concentration of biogenic BmP in PM10",
"unit": "KG/M**3"
},
"B48048": {
"description": "[SIM] Graminacee_Graminacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48049": {
"description": "[SIM] Plantaginacee_Plantaginacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B20038": {
"description": "BEARING OF ICE EDGE (SEE NOTE 3)",
"unit": "DEGREE TRUE"
},
"B20033": {
"description": "CAUSE OF ICE ACCRETION",
"unit": "FLAG TABLE 20033"
},
"B20032": {
"description": "RATE OF ICE ACCRETION",
"unit": "CODE TABLE 20032"
},
"B20031": {
"description": "ICE DEPOSIT (THICKNESS)",
"unit": "M"
},
"B48043": {
"description": "[SIM] Spore fungine_Epicoccum",
"unit": "POLLEN/M**3"
},
"B20037": {
"description": "ICE DEVELOPMENT",
"unit": "CODE TABLE 20037"
},
"B20036": {
"description": "ICE SITUATION",
"unit": "CODE TABLE 20036"
},
"B20035": {
"description": "AMOUNT AND TYPE OF ICE",
"unit": "CODE TABLE 20035"
},
"B20034": {
"description": "SEA ICE CONCENTRATION",
"unit": "CODE TABLE 20034"
},
"B22031": {
"description": "SPEED OF CURRENT",
"unit": "M/S"
},
"B22032": {
"description": "SPEED OF SEA SURFACE CURRENT",
"unit": "M/S"
},
"B22037": {
"description": "Tidal elevation with respect to national land datum",
"unit": "M"
},
"B48185": {
"description": "[SIM] Conta Juglandacee_Juglandacee indistinte 1",
"unit": "NUMERIC"
},
"B22038": {
"description": "Tidal elevation with respect to local chart datum",
"unit": "M"
},
"B48187": {
"description": "[SIM] Conta Oleacee_Ligustro",
"unit": "NUMERIC"
},
"B48186": {
"description": "[SIM] Conta Ippocastanacee_Ippocastanacee indistinte 1",
"unit": "NUMERIC"
},
"B48181": {
"description": "[SIM] Conta Platanacee_Platanacee indistinte 1",
"unit": "NUMERIC"
},
"B48180": {
"description": "[SIM] Conta Mirtacee_Mirtacee indistinte 1",
"unit": "NUMERIC"
},
"B48183": {
"description": "[SIM] Conta Pinacee_Pinacee indistinte 1",
"unit": "NUMERIC"
},
"B48182": {
"description": "[SIM] Conta Aceraceae_Aceracee indistinte 1",
"unit": "NUMERIC"
},
"B05021": {
"description": "BEARING OR AZIMUTH",
"unit": "DEGREE TRUE"
},
"B05022": {
"description": "SOLAR AZIMUTH",
"unit": "DEGREE TRUE"
},
"B33015": {
"description": "DATA QUALITY CHECK INDICATOR",
"unit": "CODE TABLE 33015"
},
"B33201": {
"description": "[SIM] Kalman coefficient, state vector (s.v.) x1",
"unit": "NUMERIC"
},
"B33202": {
"description": "[SIM] Kalman coefficient, state vector (s.v.) x2",
"unit": "NUMERIC"
},
"B33203": {
"description": "[SIM] Kalman coefficient, s.v. error covariance matrix(1,1)",
"unit": "NUMERIC"
},
"B33204": {
"description": "[SIM] Kalman coefficient, s.v. error covariance matrix(1,2)",
"unit": "NUMERIC"
},
"B33205": {
"description": "[SIM] Kalman coefficient, s.v. error covariance matrix(2,1)",
"unit": "NUMERIC"
},
"B33206": {
"description": "[SIM] Kalman coefficient, s.v. error covariance matrix(2,2)",
"unit": "NUMERIC"
},
"B33207": {
"description": "[SIM] Kalman observation sequential counter",
"unit": "NUMERIC"
},
"B33208": {
"description": "[SIM] Kalman osservation missing counter",
"unit": "NUMERIC"
},
"B33209": {
"description": "[SIM] Normalized Density Index",
"unit": "%"
},
"B01194": {
"description": "[SIM] Report mnemonic",
"unit": "CCITTIA5"
},
"B01193": {
"description": "[SIM] Report code",
"unit": "NUMERIC"
},
"B01192": {
"description": "[SIM] MeteoDB station ID",
"unit": "NUMERIC"
},
"B48130": {
"description": "[SIM] Conta Betulacee_Betulla",
"unit": "NUMERIC"
},
"B48131": {
"description": "[SIM] Conta Betulacee_Betulacee indistinte",
"unit": "NUMERIC"
},
"B48132": {
"description": "[SIM] Conta Composite_Ambrosia",
"unit": "NUMERIC"
},
"B48133": {
"description": "[SIM] Conta Composite_Artemisia",
"unit": "NUMERIC"
},
"B48134": {
"description": "[SIM] Conta Composite_Composite indistinte",
"unit": "NUMERIC"
},
"B48135": {
"description": "[SIM] Conta Corilacee_Nocciolo",
"unit": "NUMERIC"
},
"B48136": {
"description": "[SIM] Conta Corilacee_Carpino bianco -Carpino nero",
"unit": "NUMERIC"
},
"B48137": {
"description": "[SIM] Conta Corilacee_Corilacee indistinte",
"unit": "NUMERIC"
},
"B48138": {
"description": "[SIM] Conta Fagacee_Castagno",
"unit": "NUMERIC"
},
"B48139": {
"description": "[SIM] Conta Fagacee_Faggio",
"unit": "NUMERIC"
},
"B12121": {
"description": "GROUND MINIMUM TEMPERATURE",
"unit": "K"
},
"B48026": {
"description": "[SIM] Mirtacee_Mirtacee indistinte",
"unit": "POLLEN/M**3"
},
"B15212": {
"description": "[SIM] Concentration of black carbon in PM10",
"unit": "KG/M**3"
},
"B48027": {
"description": "[SIM] Ulmacee_Bagolaro comune",
"unit": "POLLEN/M**3"
},
"B02048": {
"description": "SATELLITE SENSOR INDICATOR",
"unit": "CODE TABLE 2048"
},
"B10052": {
"description": "ALTIMETER SETTING (QNH)",
"unit": "PA"
},
"B48028": {
"description": "[SIM] Ulmacee_Olmo campestre",
"unit": "POLLEN/M**3"
},
"B10051": {
"description": "PRESSURE REDUCED TO MEAN SEA LEVEL",
"unit": "PA"
},
"B48029": {
"description": "[SIM] Ulmacee_Ulmacee indistinte",
"unit": "POLLEN/M**3"
},
"B04196": {
"description": "[SIM] Relative humidity event - time of occurrence",
"unit": "MINUTE"
},
"B04197": {
"description": "[SIM] Wind velocity event - time of occurrence",
"unit": "MINUTE"
},
"B04194": {
"description": "[SIM] Time range P2",
"unit": "NUMERIC"
},
"B04195": {
"description": "[SIM] Temperature event - time of occurrence",
"unit": "MINUTE"
},
"B04192": {
"description": "[SIM] Time range type",
"unit": "NUMERIC"
},
"B04193": {
"description": "[SIM] Time range P1",
"unit": "NUMERIC"
},
"B04198": {
"description": "[SIM] Pressure event - time of occurrence",
"unit": "MINUTE"
},
"B48008": {
"description": "[SIM] Corilacee_Nocciolo",
"unit": "POLLEN/M**3"
},
"B48009": {
"description": "[SIM] Corilacee_Carpino bianco -Carpino nero",
"unit": "POLLEN/M**3"
},
"B48004": {
"description": "[SIM] Betulacee_Betulacee indistinte",
"unit": "POLLEN/M**3"
},
"B48005": {
"description": "[SIM] Composite_Ambrosia",
"unit": "POLLEN/M**3"
},
"B48006": {
"description": "[SIM] Composite_Artemisia",
"unit": "POLLEN/M**3"
},
"B48007": {
"description": "[SIM] Composite_Composite indistinte",
"unit": "POLLEN/M**3"
},
"B48001": {
"description": "[SIM] Graminacee_Graminacee indistinte",
"unit": "POLLEN/M**3"
},
"B48002": {
"description": "[SIM] Betulacee_Ontano nero",
"unit": "POLLEN/M**3"
},
"B48003": {
"description": "[SIM] Betulacee_Betulla",
"unit": "POLLEN/M**3"
},
"B01023": {
"description": "OBSERVATION SEQUENCE NUMBER",
"unit": "NUMERIC"
},
"B11016": {
"description": "EXTREME COUNTERCLOCKWISE WIND DIRECTION OF A VARIABLE WIND",
"unit": "DEGREE TRUE"
},
"B20003": {
"description": "PRESENT WEATHER (SEE NOTE 1)",
"unit": "CODE TABLE 20003"
},
"B20001": {
"description": "HORIZONTAL VISIBILITY",
"unit": "M"
},
"B20004": {
"description": "PAST WEATHER (1) (SEE NOTE 2)",
"unit": "CODE TABLE 20004"
},
"B20005": {
"description": "PAST WEATHER (2) (SEE NOTE 2)",
"unit": "CODE TABLE 20005"
},
"B20009": {
"description": "GENERAL WEATHER INDICATOR (TAF/METAR)",
"unit": "CODE TABLE 20009"
},
"B13206": {
"description": "[SIM] Soil water content",
"unit": "KG/M**2"
},
"B13204": {
"description": "[SIM] Total convective precipitation (liquid + snow)",
"unit": "KG/M**2"
},
"B13205": {
"description": "[SIM] Snowfall (grid-scale + convective)",
"unit": "KG/M**2"
},
"B13202": {
"description": "[SIM] Convective liquid precipitation",
"unit": "KG/M**2"
},
"B13203": {
"description": "[SIM] Convective snowfall",
"unit": "KG/M**2"
},
"B13200": {
"description": "[SIM] Grid-scale liquid precipitation",
"unit": "KG/M**2"
},
"B13201": {
"description": "[SIM] Grid-scale snowfall",
"unit": "KG/M**2"
},
"B48174": {
"description": "[SIM] Conta Altre Spore / Non identificati_Spore fungine non ideNUMERIC",
"unit": "NUMERIC"
},
"B48175": {
"description": "[SIM] Conta Graminacee_Graminacee indistinte 1",
"unit": "NUMERIC"
},
"B48176": {
"description": "[SIM] Conta Plantaginacee_Plantaginacee indistinte 1",
"unit": "NUMERIC"
},
"B48177": {
"description": "[SIM] Conta Urticacee_Urticacee indistinte 1",
"unit": "NUMERIC"
},
"B48170": {
"description": "[SIM] Conta Spore fungine_Epicoccum",
"unit": "NUMERIC"
},
"B48171": {
"description": "[SIM] Conta Altri Pollini / Non Identificati_Altri pollini identNUMERIC",
"unit": "NUMERIC"
},
"B48172": {
"description": "[SIM] Conta Altri Pollini / Non Identificati_Pollini non identifNUMERIC",
"unit": "NUMERIC"
},
"B48173": {
"description": "[SIM] Conta Altre Spore / Non identificati_Altre spore fungine",
"unit": "NUMERIC"
},
"B31012": {
"description": "EXTENDED DELAYED DESCRIPTOR AND DATA REPETITION FACTOR",
"unit": "NUMERIC"
},
"B31011": {
"description": "DELAYED DESCRIPTOR AND DATA REPETITION FACTOR",
"unit": "NUMERIC"
},
"B07030": {
"description": "HEIGHT OF STATION GROUND ABOVE MEAN SEA LEVEL (SEE NOTE 3)",
"unit": "M"
},
"B07031": {
"description": "HEIGHT OF BAROMETER ABOVE MEAN SEA LEVEL (SEE NOTE 4)",
"unit": "M"
},
"B07032": {
"description": "HEIGHT OF SENSOR ABOVE LOCAL GROUND (OR DECK OF MARINE PLATFORM)M",
"unit": "M"
},
"B22049": {
"description": "SEA-SURFACE TEMPERATURE",
"unit": "K"
},
"B12003": {
"description": "DEW-POINT TEMPERATURE",
"unit": "K"
},
"B12001": {
"description": "TEMPERATURE/AIR TEMPERATURE",
"unit": "K"
},
"B22043": {
"description": "SEA/WATER TEMPERATURE",
"unit": "K"
},
"B07195": {
"description": "[SIM] Second level type",
"unit": "NUMERIC"
},
"B07194": {
"description": "[SIM] Level L2",
"unit": "NUMERIC"
},
"B07193": {
"description": "[SIM] Level L1",
"unit": "NUMERIC"
},
"B07192": {
"description": "[SIM] First level type",
"unit": "NUMERIC"
},
"B15198": {
"description": "[SIM] PM2.5 Concentration",
"unit": "KG/M**3"
},
"B15199": {
"description": "[SIM] NOY Concentration",
"unit": "KG/M**3"
},
"B15211": {
"description": "[SIM] Concentration of ammonium in PM10",
"unit": "KG/M**3"
},
"B15210": {
"description": "[SIM] Concentration of nitrate in PM10",
"unit": "KG/M**3"
},
"B15217": {
"description": "[SIM] Concentration of biogenic A1D in PM10",
"unit": "KG/M**3"
},
"B15216": {
"description": "[SIM] Concentration of anthrop. BmP in PM10",
"unit": "KG/M**3"
},
"B15215": {
"description": "[SIM] Concentration of anthrop. A1D in PM10",
"unit": "KG/M**3"
},
"B15214": {
"description": "[SIM] Concentration of dust in PM10",
"unit": "KG/M**3"
},
"B15192": {
"description": "[SIM] NO Concentration",
"unit": "KG/M**3"
},
"B15193": {
"description": "[SIM] NO2 Concentration",
"unit": "KG/M**3"
},
"B15194": {
"description": "[SIM] O3 Concentration",
"unit": "KG/M**3"
},
"B15195": {
"description": "[SIM] PM10 Concentration",
"unit": "KG/M**3"
},
"B15196": {
"description": "[SIM] CO Concentration",
"unit": "KG/M**3"
},
"B15197": {
"description": "[SIM] SO2 Concentration",
"unit": "KG/M**3"
},
"B48145": {
"description": "[SIM] Conta Plantaginacee_Plantaginacee indistinte",
"unit": "NUMERIC"
},
"B48144": {
"description": "[SIM] Conta Oleacee_Oleacee indistinte",
"unit": "NUMERIC"
},
"B08210": {
"description": "[SIM] Number of cloud cover minimum values present",
"unit": "NUMERIC"
},
"B02014": {
"description": "TRACKING TECHNIQUE/STATUS OF SYSTEM USED",
"unit": "CODE TABLE 2014"
},
"B02011": {
"description": "RADIOSONDE TYPE",
"unit": "CODE TABLE 2011"
},
"B02013": {
"description": "SOLAR AND INFRARED RADIATION CORRECTION",
"unit": "CODE TABLE 2013"
},
"B02012": {
"description": "RADIOSONDE COMPUTATIONAL METHOD",
"unit": "CODE TABLE 2012"
},
"B33006": {
"description": "INTERNAL MEASUREMENT STATUS INFORMATION (AWS)",
"unit": "CODE TABLE 33006"
},
"B04086": {
"description": "LONG TIME PERIOD OR DISPLACEMENT",
"unit": "SECOND"
},
"B01002": {
"description": "WMO STATION NUMBER",
"unit": "NUMERIC"
},
"B20194": {
"description": "[SIM] Presence of shower",
"unit": "BOOLEAN"
},
"B20195": {
"description": "[SIM] Presence of hail",
"unit": "BOOLEAN"
},
"B20196": {
"description": "[SIM] Presence of thunderstorm",
"unit": "BOOLEAN"
},
"B20197": {
"description": "[SIM] Presence of snow",
"unit": "BOOLEAN"
},
"B20192": {
"description": "[SIM] Presence of rain > 1mm",
"unit": "BOOLEAN"
},
"B20193": {
"description": "[SIM] Cloud type (METAR)",
"unit": "CCITTIA5"
},
"B06001": {
"description": "LONGITUDE (HIGH ACCURACY)",
"unit": "DEGREE"
},
"B20198": {
"description": "[SIM] Presence of frost",
"unit": "BOOLEAN"
},
"B20199": {
"description": "[SIM] Presence of dew",
"unit": "BOOLEAN"
},
"B13031": {
"description": "EVAPOTRANSPIRATION",
"unit": "KG/M**2"
},
"B13033": {
"description": "EVAPORATION/EVAPOTRANSPIRATION",
"unit": "KG/M**2"
},
"B48057": {
"description": "[SIM] Ciperacee_Ciperacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48056": {
"description": "[SIM] Pinacee_Pinacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48055": {
"description": "[SIM] Aceraceae_Aceracee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48054": {
"description": "[SIM] Platanacee_Platanacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48053": {
"description": "[SIM] Mirtacee_Mirtacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48052": {
"description": "[SIM] Euphorbiacee_Euforbiacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48051": {
"description": "[SIM] Poligonacee_Poligonacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48050": {
"description": "[SIM] Urticacee_Urticacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48059": {
"description": "[SIM] Ippocastanacee_Ippocastanacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B48058": {
"description": "[SIM] Juglandacee_Juglandacee indistinte 1",
"unit": "POLLEN/M**3"
},
"B11208": {
"description": "[SIM] Distance covered by the hourly mean wind",
"unit": "M"
},
"B25076": {
"description": "LOG-10 OF (TEMP-RAD CENTRAL WAVENUMBER) FOR ATOVS",
"unit": "LOGM-1"
},
"B02070": {
"description": "ORIGINAL SPECIFICATION OF LATITUDE/LONGITUDE",
"unit": "CODE TABLE 2070"
},
"B48045": {
"description": "[SIM] Altri Pollini / Non Identificati_Pollini non identificati",
"unit": "POLLEN/M**3"
},
"B11200": {
"description": "[SIM] U-component of momentum flux",
"unit": "N/M**2"
},
"B11201": {
"description": "[SIM] V-component of momentum flux",
"unit": "N/M**2"
},
"B11202": {
"description": "[SIM] Friction velocity (diagmet)",
"unit": "M/S"
},
"B11203": {
"description": "[SIM] Mixing height (diagmet)",
"unit": "M"
},
"B11204": {
"description": "[SIM] Obukov lenght (diagmet)",
"unit": "M"
},
"B11205": {
"description": "[SIM] Convective velocitiy scale (diagmet)",
"unit": "M/S"
},
"B11206": {
"description": "[SIM] Friction velocity (COSMO)",
"unit": "M/S"
},
"B11207": {
"description": "[SIM] Obukov lenght (COSMO)",
"unit": "M"
},
"B11061": {
"description": "ABSOLUTE WIND SHEAR IN 1 KM LAYER BELOW",
"unit": "M/S"
},
"B11062": {
"description": "ABSOLUTE WIND SHEAR IN 1 KM LAYER ABOVE",
"unit": "M/S"
},
"B08009": {
"description": "DETAILED PHASE OF FLIGHT",
"unit": "CODE TABLE 8009"
},
"B08002": {
"description": "VERTICAL SIGNIFICANCE (SURFACE OBSERVATIONS)",
"unit": "CODE TABLE 8002"
},
"B08004": {
"description": "PHASE OF AIRCRAFT FLIGHT",
"unit": "CODE TABLE 8004"
},
"B22001": {
"description": "DIRECTION OF WAVES",
"unit": "DEGREE TRUE"
},
"B22002": {
"description": "DIRECTION OF WIND WAVES",
"unit": "DEGREE TRUE"
},
"B22003": {
"description": "DIRECTION OF SWELL WAVES",
"unit": "DEGREE TRUE"
},
"B01063": {
"description": "ICAO LOCATION INDICATOR",
"unit": "CCITTIA5"
},
"B33024": {
"description": "STATION ELEVATION QUALITY MARK (FOR MOBILE STATIONS)",
"unit": "CODE TABLE 33024"
},
"B33025": {
"description": "ACARS INTERPOLATED VALUES",
"unit": "CODE TABLE 33025"
},
"B33026": {
"description": "MOISTURE QUALITY",
"unit": "CODE TABLE 33026"
},
"B33027": {
"description": "LOCATION QUALITY CLASS (RANGE OF RADIUS OF 66 % CONFIDENCE)",
"unit": "CODE TABLE 33027"
},
"B33020": {
"description": "QUALITY CONTROL INDICATION OF FOLLOWING VALUE",
"unit": "CODE TABLE 33020"
},
"B33021": {
"description": "QUALITY OF FOLLOWING VALUE",
"unit": "CODE TABLE 33021"
},
"B33022": {
"description": "QUALITY OF BUOY SATELLITE TRANSMISSION",
"unit": "CODE TABLE 33022"
},
"B33023": {
"description": "QUALITY OF BUOY LOCATION",
"unit": "CODE TABLE 33023"
},
"B48178": {
"description": "[SIM] Conta Poligonacee_Poligonacee indistinte 1",
"unit": "NUMERIC"
},
"B33031": {
"description": "SCAN LINE QUALITY FLAGS FOR ATOVS",
"unit": "FLAG TABLE 33031"
},
"B48179": {
"description": "[SIM] Conta Euphorbiacee_Euforbiacee indistinte 1",
"unit": "NUMERIC"
},
"B31021": {
"description": "ASSOCIATED FIELD SIGNIFICANCE",
"unit": "CODE TABLE 31021"
},
"B12030": {
"description": "SOIL TEMPERATURE",
"unit": "K"
},
"B33003": {
"description": "QUALITY INFORMATION",
"unit": "CODE TABLE 33003"
},
"B48129": {
"description": "[SIM] Conta Betulacee_Ontano nero",
"unit": "NUMERIC"
},
"B48128": {
"description": "[SIM] Conta Graminacee_Graminacee indistinte",
"unit": "NUMERIC"
},
"B12131": {
"description": "SNOW TEMPERATURE",
"unit": "K"
},
"B15228": {
"description": "[SIM] NH3 Concentration",
"unit": "KG/M**3"
},
"B15229": {
"description": "[SIM] Concentration of primary part. matter in aerosol",
"unit": "KG/M**3"
},
"B15222": {
"description": "[SIM] Total concentration of primary aerosol in PM10",
"unit": "KG/M**3"
},
"B15223": {
"description": "[SIM] Total concentration of secondary aerosol in PM10",
"unit": "KG/M**3"
},
"B15220": {
"description": "[SIM] Concentration of sea salt in PM10",
"unit": "KG/M**3"
},
"B15221": {
"description": "[SIM] Concentration of secondary organic aerosol in PM10",
"unit": "KG/M**3"
},
"B15226": {
"description": "[SIM] Uncertainity in NO2 estimate (Pesco)",
"unit": "KG/M**3"
},
"B15227": {
"description": "[SIM] Uncertainity in PM2.5 estimate (Pesco)",
"unit": "KG/M**3"
},
"B15224": {
"description": "[SIM] Uncertainity in O3 estimate (Pesco)",
"unit": "KG/M**3"
},
"B15225": {
"description": "[SIM] Uncertainity in PM10 estimate (Pesco)",
"unit": "KG/M**3"
},
"B48019": {
"description": "[SIM] Urticacee_Urticacee indistinte",
"unit": "POLLEN/M**3"
},
"B48018": {
"description": "[SIM] Plantaginacee_Plantaginacee indistinte",
"unit": "POLLEN/M**3"
},
"B48013": {
"description": "[SIM] Fagacee_Quercia",
"unit": "POLLEN/M**3"
},
"B48012": {
"description": "[SIM] Fagacee_Faggio",
"unit": "POLLEN/M**3"
},
"B48011": {
"description": "[SIM] Fagacee_Castagno",
"unit": "POLLEN/M**3"
},
"B48010": {
"description": "[SIM] Corilacee_Corilacee indistinte",
"unit": "POLLEN/M**3"
},
"B48017": {
"description": "[SIM] Oleacee_Oleacee indistinte",
"unit": "POLLEN/M**3"
},
"B48016": {
"description": "[SIM] Oleacee_Frassino",
"unit": "POLLEN/M**3"
},
"B48015": {
"description": "[SIM] Oleacee_Olivo",
"unit": "POLLEN/M**3"
},
"B48014": {
"description": "[SIM] Fagacee_Fagacee indistinte",
"unit": "POLLEN/M**3"
},
"B01019": {
"description": "LONG STATION OR SITE NAME",
"unit": "CCITTIA5"
},
"B01011": {
"description": "SHIP OR MOBILE LAND STATION IDENTIFIER",
"unit": "CCITTIA5"
},
"B01012": {
"description": "DIRECTION OF MOTION OF MOVING OBSERVING PLATFORM**",
"unit": "DEGREE TRUE"
},
"B01013": {
"description": "SPEED OF MOTION OF MOVING OBSERVING PLATFORM*",
"unit": "M/S"
},
"B08044": {
"description": "(VAL) CAS REGISTRY NUMBER",
"unit": "CCITTIA5"
},
"B14031": {
"description": "TOTAL SUNSHINE",
"unit": "MINUTE"
},
"B08042": {
"description": "EXTENDED VERTICAL SOUNDING SIGNIFICANCE",
"unit": "FLAG TABLE 8042"
},
"B20011": {
"description": "CLOUD AMOUNT",
"unit": "CODE TABLE 20011"
},
"B20010": {
"description": "CLOUD COVER (TOTAL)",
"unit": "%"
},
"B20013": {
"description": "HEIGHT OF BASE OF CLOUD",
"unit": "M"
},
"B20012": {
"description": "CLOUD TYPE",
"unit": "CODE TABLE 20012"
},
"B20017": {
"description": "CLOUD TOP DESCRIPTION",
"unit": "CODE TABLE 20017"
},
"B20019": {
"description": "SIGNIFICANT PRESENT OR FORECAST WEATHER",
"unit": "CCITTIA5"
},
"B48060": {
"description": "[SIM] Oleacee_Ligustro",
"unit": "POLLEN/M**3"
},
"B20200": {
"description": "[SIM] Presence of fog",
"unit": "BOOLEAN"
},
"B20201": {
"description": "[SIM] Presence of water-spout",
"unit": "BOOLEAN"
},
"B20202": {
"description": "[SIM] State of the ground with snow",
"unit": "CODE TABLE"
},
"B12061": {
"description": "SKIN TEMPERATURE",
"unit": "K"
},
"B12063": {
"description": "BRIGHTNESS TEMPERATURE",
"unit": "K"
},
"B13215": {
"description": "[SIM] River level",
"unit": "M"
},
"B13217": {
"description": "[SIM] 5 minutes precipitation",
"unit": "KG/M**2"
},
"B13216": {
"description": "[SIM] Hourly precipitation",
"unit": "KG/M**2"
},
"B13210": {
"description": "[SIM] Penetration of the probe in the snow",
"unit": "M"
},
"B13212": {
"description": "[SIM] Leaf wetness duration",
"unit": "S"
},
"B13219": {
"description": "[SIM] 15 minutes precipitation",
"unit": "KG/M**2"
},
"B13218": {
"description": "[SIM] 10 minutes precipitation",
"unit": "KG/M**2"
},
"B48163": {
"description": "[SIM] Conta Ciperacee_Ciperacee indistinte",
"unit": "NUMERIC"
},
"B48162": {
"description": "[SIM] Conta Salicacee_Salicacee indistinte",
"unit": "NUMERIC"
},
"B48161": {
"description": "[SIM] Conta Salicacee_Pioppo",
"unit": "NUMERIC"
},
"B48160": {
"description": "[SIM] Conta Salicacee_Salice comune",
"unit": "NUMERIC"
},
"B48167": {
"description": "[SIM] Conta Spore fungine_Botrytis",
"unit": "NUMERIC"
},
"B48166": {
"description": "[SIM] Conta Spore fungine_Alternaria",
"unit": "NUMERIC"
},
"B48165": {
"description": "[SIM] Conta Ippocastanacee_Ippocastanacee indistinte",
"unit": "NUMERIC"
},
"B48164": {
"description": "[SIM] Conta Juglandacee_Juglandacee indistinte",
"unit": "NUMERIC"
},
"B48169": {
"description": "[SIM] Conta Spore fungine_Cladosporium",
"unit": "NUMERIC"
},
"B48168": {
"description": "[SIM] Conta Spore fungine_Stemphylium",
"unit": "NUMERIC"
},
"B07025": {
"description": "SOLAR ZENITH ANGLE",
"unit": "DEGREE"
},
"B07024": {
"description": "SATELLITE ZENITH ANGLE",
"unit": "DEGREE"
},
"B08198": {
"description": "[SIM] Number of maximum temperature values present",
"unit": "NUMERIC"
},
"B08199": {
"description": "[SIM] Number of mean relative humidity values present",
"unit": "NUMERIC"
},
"B05043": {
"description": "FIELD OF VIEW NUMBER",
"unit": "NUMERIC"
},
"B05041": {
"description": "SCAN LINE NUMBER",
"unit": "NUMERIC"
},
"B05040": {
"description": "ORBIT NUMBER",
"unit": "NUMERIC"
},
"B08192": {
"description": "[SIM] Number of wind velocity mean values present",
"unit": "NUMERIC"
},
"B08193": {
"description": "[SIM] Number of wind velocity minimum values present",
"unit": "NUMERIC"
},
"B08196": {
"description": "[SIM] Number of mean temperature values present",
"unit": "NUMERIC"
},
"B08197": {
"description": "[SIM] Number of minimum temperature values present",
"unit": "NUMERIC"
},
"B08194": {
"description": "[SIM] Number of wind velocity maximum values present",
"unit": "NUMERIC"
},
"B08195": {
"description": "[SIM] Number of wind prevalent direction values present",
"unit": "NUMERIC"
},
"B12193": {
"description": "PSEUDO-EQUIVALENT POTENTIAL TEMPERATURE",
"unit": "K"
},
"B12192": {
"description": "POTENTIAL TEMPERATURE",
"unit": "K"
},
"B10004": {
"description": "PRESSURE",
"unit": "PA"
},
"B10007": {
"description": "HEIGHT",
"unit": "M"
},
"B10009": {
"description": "GEOPOTENTIAL HEIGHT",
"unit": "GPM"
},
"B10008": {
"description": "GEOPOTENTIAL",
"unit": "M**2/S**2"
},
"B12102": {
"description": "WET-BULB TEMPERATURE",
"unit": "K"
},
"B12103": {
"description": "DEW-POINT TEMPERATURE",
"unit": "K"
},
"B12101": {
"description": "TEMPERATURE/DRY-BULB TEMPERATURE",
"unit": "K"
},
"B05015": {
"description": "LATITUDE DISPLACEMENT (HIGH ACCURACY)",
"unit": "DEGREE"
},
"B13001": {
"description": "SPECIFIC HUMIDITY",
"unit": "KG/KG"
},
"B13002": {
"description": "MIXING RATIO",
"unit": "KG/KG"
},
"B13003": {
"description": "RELATIVE HUMIDITY",
"unit": "%"
},
"B20062": {
"description": "STATE OF THE GROUND (WITH OR WITHOUT SNOW)",
"unit": "CODE TABLE 20062"
},
"B06015": {
"description": "LONGITUDE DISPLACEMENT (HIGH ACCURACY)",
"unit": "DEGREE"
},
"B48022": {
"description": "[SIM] Chenopodiacee - Amarantacee Indistinte_Amaranto",
"unit": "POLLEN/M**3"
},
"B48023": {
"description": "[SIM] Chenopodiacee - Amarantacee Indistinte_Chenopodiacee - AmaPOLLEN/M**3",
"unit": "POLLEN/M**3"
},
"B48020": {
"description": "[SIM] Cupressacee - Taxacee indistinte_Cipresso comune",
"unit": "POLLEN/M**3"
},
"B48021": {
"description": "[SIM] Cupressacee - Taxacee indistinte_Cupressacee - Taxacee indPOLLEN/M**3",
"unit": "POLLEN/M**3"
},
"B11198": {
"description": "[SIM] SQRT(2*TKE)",
"unit": "M/S"
},
"B11199": {
"description": "[SIM] Surface Roughness",
"unit": "M"
},
"B48024": {
"description": "[SIM] Poligonacee_Poligonacee indistinte",
"unit": "POLLEN/M**3"
},
"B48025": {
"description": "[SIM] Euphorbiacee_Euforbiacee indistinte",
"unit": "POLLEN/M**3"
},
"B11194": {
"description": "[SIM] Friction velocity (calmet)",
"unit": "M/S"
},
"B11195": {
"description": "[SIM] Mixing height (calmet)",
"unit": "M"
},
"B11196": {
"description": "[SIM] Obukov lenght (calmet)",
"unit": "M"
},
"B11197": {
"description": "[SIM] Convective velocitiy scale (calmet)",
"unit": "M/S"
},
"B11192": {
"description": "[SIM] W-component terrain following",
"unit": "M/S"
},
"B11193": {
"description": "[SIM] Stability class",
"unit": "NUMERIC"
},
"B11077": {
"description": "REPORTING INTERVAL OR AVERAGING TIME FOR EDDY DISSIPATION RATE",
"unit": "S"
},
"B11076": {
"description": "PEAK TURBULENCE INTENSITY (EDDY DISSIPATION RATE)",
"unit": "M**(2/3)/S"
},
"B11075": {
"description": "MEAN TURBULENCE INTENSITY (EDDY DISSIPATION RATE)",
"unit": "M**(2/3)/S"
},
"B33040": {
"description": "CONFIDENCE INTERVAL",
"unit": "%"
},
"B33041": {
"description": "ATTRIBUTE OF FOLLOWING VALUE",
"unit": "CODE TABLE 33041"
},
"B11017": {
"description": "EXTREME CLOCKWISE WIND DIRECTION OF A VARIABLE WIND",
"unit": "DEGREE TRUE"
},
"B22013": {
"description": "PERIOD OF SWELL WAVES",
"unit": "S"
},
"B22012": {
"description": "PERIOD OF WIND WAVES",
"unit": "S"
},
"B22011": {
"description": "PERIOD OF WAVES",
"unit": "S"
},
"B33038": {
"description": "QUALITY FLAGS FOR GROUND-BASED GNSS DATA",
"unit": "FLAG TABLE 33038"
},
"B33033": {
"description": "FIELD OF VIEW QUALITY FLAGS FOR ATOVS",
"unit": "FLAG TABLE 33033"
},
"B33032": {
"description": "CHANNEL QUALITY FLAGS FOR ATOVS",
"unit": "FLAG TABLE 33032"
},
"B05001": {
"description": "LATITUDE (HIGH ACCURACY)",
"unit": "DEGREE"
},
"B33030": {
"description": "SCAN LINE STATUS FLAGS FOR ATOVS",
"unit": "FLAG TABLE 33030"
},
"B33037": {
"description": "WIND CORRELATION ERROR",
"unit": "FLAG TABLE 33037"
},
"B33036": {
"description": "NOMINAL CONFIDENCE THRESHOLD",
"unit": "%"
},
"B33035": {
"description": "MANUAL/AUTOMATIC QUALITY CONTROL",
"unit": "CODE TABLE 33035"
},
"B31031": {
"description": "DATA PRESENT INDICATOR",
"unit": "FLAG TABLE 31031"
},
"B07010": {
"description": "FLIGHT LEVEL",
"unit": "M"
},
"B48158": {
"description": "[SIM] Conta Aceraceae_Aceracee indistinte",
"unit": "NUMERIC"
},
"B48159": {
"description": "[SIM] Conta Pinacee_Pinacee indistinte",
"unit": "NUMERIC"
},
"B48152": {
"description": "[SIM] Conta Euphorbiacee_Euforbiacee indistinte",
"unit": "NUMERIC"
},
"B48153": {
"description": "[SIM] Conta Mirtacee_Mirtacee indistinte",
"unit": "NUMERIC"
},
"B48150": {
"description": "[SIM] Conta Chenopodiacee - Amarantacee Indistinte_ChenopodiaceeNUMERIC",
"unit": "NUMERIC"
},
"B48151": {
"description": "[SIM] Conta Poligonacee_Poligonacee indistinte",
"unit": "NUMERIC"
},
"B48156": {
"description": "[SIM] Conta Ulmacee_Ulmacee indistinte",
"unit": "NUMERIC"
},
"B48157": {
"description": "[SIM] Conta Platanacee_Platanacee indistinte",
"unit": "NUMERIC"
},
"B48154": {
"description": "[SIM] Conta Ulmacee_Bagolaro comune",
"unit": "NUMERIC"
},
"B48155": {
"description": "[SIM] Conta Ulmacee_Olmo campestre",
"unit": "NUMERIC"
},
"B14197": {
"description": "[SIM] INSTANTANEOUS NET SHORT-WAVE RADIATION",
"unit": "W/M**2"
},
"B14196": {
"description": "[SIM] INSTANTANEOUS NET LONG-WAVE RADIATION",
"unit": "W/M**2"
},
"B02064": {
"description": "AIRCRAFT ROLL ANGLE QUALITY",
"unit": "CODE TABLE 2064"
},
"B02061": {
"description": "AIRCRAFT NAVIGATIONAL SYSTEM",
"unit": "CODE TABLE 2061"
},
"B02062": {
"description": "TYPE OF AIRCRAFT DATA RELAY SYSTEM",
"unit": "CODE TABLE 2062"
},
"B02063": {
"description": "AIRCRAFT ROLL ANGLE",
"unit": "DEGREE"
},
"B15231": {
"description": "[SIM] Concentration of nitrate in aerosol",
"unit": "KG/M**3"
},
"B15230": {
"description": "[SIM] Concentration of sulfate in aerosol",
"unit": "KG/M**3"
},
"B15233": {
"description": "[SIM] Concentration of anthrop. sec. org. in aerosol",
"unit": "KG/M**3"
},
"B15232": {
"description": "[SIM] Concentration of ammonium in aerosol",
"unit": "KG/M**3"
},
"B15235": {
"description": "[SIM] Concentration of ISOPA1 in PM10",
"unit": "KG/M**3"
},
"B15234": {
"description": "[SIM] Concentration of biogenic sec. org. in aerosol",
"unit": "KG/M**3"
},
"B15236": {
"description": "[SIM] C6H6 Concentration",
"unit": "KG/M**3"
},
"B14021": {
"description": "GLOBAL SOLAR RADIATION, INTEGRATED OVER PERIOD SPECIFIED",
"unit": "J/M**2"
},
"B13196": {
"description": "[SIM] Precipitating ice",
"unit": "KG/KG"
},
"B13197": {
"description": "[SIM] Total precipitating water+ice",
"unit": "KG/KG"
},
"B13194": {
"description": "[SIM] Water table depth",
"unit": "M"
},
"B13195": {
"description": "[SIM] Precipitating liquid water",
"unit": "KG/KG"
},
"B23195": {
"description": "[SIM] Wet deposition of NH4",
"unit": "MOL/M**2"
},
"B23194": {
"description": "[SIM] Dry deposition of NH4",
"unit": "MOL/M**2"
},
"B23197": {
"description": "[SIM] Wet deposition of HNO3",
"unit": "MOL/M**2"
},
"B23196": {
"description": "[SIM] Dry deposition of HNO3",
"unit": "MOL/M**2"
},
"B23198": {
"description": "[SIM] Solid transport by river",
"unit": "KG/S"
},
"B13198": {
"description": "[SIM] Total liquid water (cloud+precipitating)",
"unit": "KG/KG"
},
"B13199": {
"description": "[SIM] Total ice (cloud+precipitating)",
"unit": "KG/KG"
},
"B20021": {
"description": "TYPE OF PRECIPITATION",
"unit": "FLAG TABLE 20021"
},
"B11043": {
"description": "MAXIMUM WIND GUST DIRECTION",
"unit": "DEGREE TRUE"
},
"B11041": {
"description": "MAXIMUM WIND GUST SPEED",
"unit": "M/S"
},
"B13220": {
"description": "[SIM] 20 minutes precipitation",
"unit": "KG/M**2"
},
"B13221": {
"description": "[SIM] 30 minutes precipitation",
"unit": "KG/M**2"
},
"B13222": {
"description": "[SIM] 180 minutes precipitation",
"unit": "KG/M**2"
},
"B13223": {
"description": "[SIM] 360 minutes precipitation",
"unit": "KG/M**2"
},
"B13224": {
"description": "[SIM] 720 minutes precipitation",
"unit": "KG/M**2"
},
"B13225": {
"description": "[SIM] 1440 minutes precipitation",
"unit": "KG/M**2"
},
"B13226": {
"description": "[SIM] River discharge",
"unit": "M**3/S"
},
"B13227": {
"description": "[SIM] Soil volumetric water content",
"unit": "%"
},
"B13228": {
"description": "[SIM] Piezometric level",
"unit": "M"
},
"B13229": {
"description": "[SIM] Density of snow",
"unit": "KG/M**3"
},
"B22022": {
"description": "HEIGHT OF WIND WAVES",
"unit": "M"
},
"B22023": {
"description": "HEIGHT OF SWELL WAVES",
"unit": "M"
},
"B22021": {
"description": "HEIGHT OF WAVES",
"unit": "M"
},
"B08021": {
"description": "TIME SIGNIFICANCE",
"unit": "CODE TABLE 8021"
},
"B01007": {
"description": "SATELLITE IDENTIFIER",
"unit": "CODE TABLE 1007"
},
"B01006": {
"description": "AIRCRAFT FLIGHT NUMBER",
"unit": "CCITTIA5"
},
"B11031": {
"description": "DEGREE OF TURBULENCE",
"unit": "CODE TABLE 11031"
},
"B11037": {
"description": "TURBULENCE INDEX",
"unit": "CODE TABLE 11037"
},
"B33007": {
"description": "PER CENT CONFIDENCE",
"unit": "%"
},
"B01001": {
"description": "WMO BLOCK NUMBER",
"unit": "NUMERIC"
},
"B33005": {
"description": "QUALITY INFORMATION (AWS DATA)",
"unit": "FLAG TABLE 33005"
},
"B11039": {
"description": "EXTENDED TIME OF OCCURRENCE OF PEAK EDDY DISSIPATION RATE",
"unit": "CODE TABLE 11039"
},
"B01008": {
"description": "AIRCRAFT REGISTRATION NUMBER OR OTHER IDENTIFICATION",
"unit": "CCITTIA5"
},
"B01216": {
"description": "AIR QUALITY OBSERVING STATION AREA TYPE",
"unit": "CODE TABLE 001216"
},
"B01217": {
"description": "AIR QUALITY OBSERVING STATION TERRAIN TYPE",
"unit": "CODE TABLE 001217"
},
"B01214": {
"description": "GEMS AIR QUALITY OBSERVING STATION CODE",
"unit": "CCITTIA5"
},
"B01215": {
"description": "AIR QUALITY OBSERVING STATION DOMINANT EMISSION SOURCE",
"unit": "CODE TABLE 001215"
},
"B01212": {
"description": "AIR QUALITY OBSERVING STATION LOCAL CODE",
"unit": "CCITTIA5"
},
"B01213": {
"description": "AIRBASE AIR QUALITY OBSERVING STATION CODE",
"unit": "CCITTIA5"
},
"B48040": {
"description": "[SIM] Spore fungine_Botrytis",
"unit": "POLLEN/M**3"
},
"B48041": {
"description": "[SIM] Spore fungine_Stemphylium",
"unit": "POLLEN/M**3"
},
"B48042": {
"description": "[SIM] Spore fungine_Cladosporium",
"unit": "POLLEN/M**3"
},
"B48044": {
"description": "[SIM] Altri Pollini / Non Identificati_Altri pollini identificatPOLLEN/M**3",
"unit": "POLLEN/M**3"
},
"B14201": {
"description": "[SIM] Infrared radiation (upward)",
"unit": "W/M**2"
},
"B14200": {
"description": "[SIM] Infrared radiation (downward)",
"unit": "W/M**2"
},
"B48046": {
"description": "[SIM] Altre Spore / Non identificati_Altre spore fungine",
"unit": "POLLEN/M**3"
},
"B48047": {
"description": "[SIM] Altre Spore / Non identificati_Spore fungine non identificPOLLEN/M**3",
"unit": "POLLEN/M**3"
},
"B10197": {
"description": "ANEMOMETER HEIGHT",
"unit": "M"
},
"B02039": {
"description": "METHOD OF WET-BULB TEMPERATURE MEASUREMENT",
"unit": "CODE TABLE 2039"
},
"B02038": {
"description": "METHOD OF WATER TEMPERATURE AND/OR SALINITY MEASUREMENT",
"unit": "CODE TABLE 2038"
},
"B48140": {
"description": "[SIM] Conta Fagacee_Quercia",
"unit": "NUMERIC"
},
"B13013": {
"description": "TOTAL SNOW DEPTH",
"unit": "M"
},
"B13012": {
"description": "DEPTH OF FRESH SNOW",
"unit": "M"
},
"B13011": {
"description": "TOTAL PRECIPITATION / TOTAL WATER EQUIVALENT",
"unit": "KG/M**2"
},
"B33002": {
"description": "QUALITY INFORMATION",
"unit": "CODE TABLE 33002"
},
"B48031": {
"description": "[SIM] Aceraceae_Aceracee indistinte",
"unit": "POLLEN/M**3"
},
"B48030": {
"description": "[SIM] Platanacee_Platanacee indistinte",
"unit": "POLLEN/M**3"
},
"B48033": {
"description": "[SIM] Salicacee_Salice comune",
"unit": "POLLEN/M**3"
},
"B48032": {
"description": "[SIM] Pinacee_Pinacee indistinte",
"unit": "POLLEN/M**3"
},
"B48035": {
"description": "[SIM] Salicacee_Salicacee indistinte",
"unit": "POLLEN/M**3"
},
"B48034": {
"description": "[SIM] Salicacee_Pioppo",
"unit": "POLLEN/M**3"
},
"B48037": {
"description": "[SIM] Juglandacee_Juglandacee indistinte",
"unit": "POLLEN/M**3"
},
"B48036": {
"description": "[SIM] Ciperacee_Ciperacee indistinte",
"unit": "POLLEN/M**3"
},
"B48039": {
"description": "[SIM] Spore fungine_Alternaria",
"unit": "POLLEN/M**3"
},
"B48038": {
"description": "[SIM] Ippocastanacee_Ippocastanacee indistinte",
"unit": "POLLEN/M**3"
},
"B29192": {
"description": "[SIM] Land fraction",
"unit": "%"
},
"B13081": {
"description": "WATER CONDUCTIVITY",
"unit": "S/M"
},
"B13082": {
"description": "WATER TEMPERATURE",
"unit": "K"
}
};
this.borinud = this.borinud || {}
$.extend(true, this.borinud, {
config: {
B: B
}
});
}()); | /rmap-7.5.tar.gz/rmap-7.5/borinud/static/borinud/borinud.B.js | 0.503662 | 0.538923 | borinud.B.js | pypi |
from django.conf import settings
from django.contrib.sites.requests import RequestSite
from django.contrib.sites.models import Site
from registration import signals
from registration.models import RegistrationProfile
from registration.views import ActivationView as BaseActivationView
from registration.views import RegistrationView as BaseRegistrationView
from registration.users import UserModel
class RegistrationView(BaseRegistrationView):
    """
    A registration backend with a simple three-step workflow:

    1. User signs up; an inactive account is created.
    2. An email containing an activation link is sent to the user.
    3. The user follows the activation link; the account becomes active.

    Using this backend requires that

    * ``registration`` be listed in the ``INSTALLED_APPS`` setting
      (this backend makes use of models defined in that application).

    * The setting ``ACCOUNT_ACTIVATION_DAYS`` be supplied, specifying
      (as an integer) the number of days from registration during which
      a user may activate their account; after that period expires,
      activation is disallowed.

    * The templates ``registration/activation_email_subject.txt`` and
      ``registration/activation_email.txt`` exist; they are used to
      render the activation email. See the notes on this backend's
      ``register`` method for details regarding these templates.

    When subclassing this view, you can set the ``SEND_ACTIVATION_EMAIL``
    class variable to ``False`` to skip sending new users a confirmation
    email. Doing so implies that you will activate users manually from
    the admin site or send activation by some other method -- for
    example, by listening for the ``user_registered`` signal.

    Additionally, registration can be temporarily closed by adding the
    setting ``REGISTRATION_OPEN`` and setting it to ``False``. Omitting
    this setting, or setting it to ``True``, is interpreted as meaning
    that registration is currently open and permitted.

    Internally, this is accomplished by storing an activation key in an
    instance of ``registration.models.RegistrationProfile``. See that
    model and its custom manager for full documentation of its fields
    and supported operations.
    """
    # Resolved once at class-definition time; subclasses may override.
    SEND_ACTIVATION_EMAIL = getattr(settings, 'SEND_ACTIVATION_EMAIL', True)

    def register(self, request, form):
        """
        Create a new, initially inactive user account from the validated
        registration ``form``.

        Along with the new ``User`` object, a new
        ``registration.models.RegistrationProfile`` is created, tied to
        that ``User``, containing the activation key that will be used
        for this account.

        An email is sent to the supplied email address; it should
        contain an activation link and is rendered from two templates.
        See the documentation for
        ``RegistrationProfile.send_activation_email()`` for information
        about these templates and the contexts provided to them.

        After the ``User`` and ``RegistrationProfile`` are created and
        the activation email is sent, the
        ``registration.signals.user_registered`` signal is sent, with
        the new ``User`` as the keyword argument ``user`` and the class
        of this backend as the sender.
        """
        # Prefer the sites framework when it is installed; otherwise fall
        # back to a site object derived from the request itself.
        # NOTE(review): ``Model._meta.installed`` was removed in modern
        # Django (``django.apps.apps.is_installed`` is the successor) --
        # confirm against the project's Django version.
        if Site._meta.installed:
            current_site = Site.objects.get_current()
        else:
            current_site = RequestSite(request)

        # A form providing ``save`` knows how to build its own user;
        # otherwise construct one directly from the cleaned form data.
        if hasattr(form, 'save'):
            user_instance = form.save()
        else:
            user_instance = UserModel().objects.create_user(**form.cleaned_data)

        registered_user = RegistrationProfile.objects.create_inactive_user(
            new_user=user_instance,
            site=current_site,
            send_email=self.SEND_ACTIVATION_EMAIL,
            request=request,
        )
        signals.user_registered.send(sender=self.__class__,
                                     user=registered_user,
                                     request=request)
        return registered_user

    def registration_allowed(self, request):
        """
        Indicate whether account registration is currently permitted,
        based on the value of the setting ``REGISTRATION_OPEN``:

        * If ``REGISTRATION_OPEN`` is not specified in settings, or is
          set to ``True``, registration is permitted.

        * If ``REGISTRATION_OPEN`` is both specified and set to
          ``False``, registration is not permitted.
        """
        return getattr(settings, 'REGISTRATION_OPEN', True)

    def get_success_url(self, request, user):
        """
        Return the name of the URL to redirect to after successful
        user registration.
        """
        return ('registration_complete', (), {})
class ActivationView(BaseActivationView):

    def activate(self, request, activation_key):
        """
        Given an activation key, look up and activate the user account
        corresponding to that key (if possible), returning the activated
        ``User`` (or a falsy value on failure).

        After successful activation, the signal
        ``registration.signals.user_activated`` is sent, with the newly
        activated ``User`` as the keyword argument ``user`` and the
        class of this backend as the sender.
        """
        user = RegistrationProfile.objects.activate_user(activation_key)
        if user:
            signals.user_activated.send(sender=self.__class__,
                                        user=user,
                                        request=request)
        return user

    def get_success_url(self, request, user):
        """Return the URL name to redirect to after successful activation."""
        return ('registration_activation_complete', (), {})
import numpy as np
import scipy
from scipy import stats
import matplotlib.pylab as plt
class gaussian_kde_set_covariance(stats.gaussian_kde):
    '''
    Gaussian KDE that uses a caller-supplied covariance matrix instead of
    one estimated from the data.

    from Anne Archibald in mailinglist:
    http://www.nabble.com/Width-of-the-gaussian-in-stats.kde.gaussian_kde---td19558924.html#a19558924
    '''
    def __init__(self, dataset, covariance):
        # Must be assigned before the base __init__, which triggers
        # _compute_covariance() and reads self.covariance.
        self.covariance = covariance
        scipy.stats.gaussian_kde.__init__(self, dataset)

    def _compute_covariance(self):
        """Derive the inverse covariance and the normalization factor
        from the fixed, user-supplied covariance matrix.

        Bug fix: ``sqrt`` was an unqualified name (NameError at runtime);
        use ``np.sqrt``.
        """
        self.inv_cov = np.linalg.inv(self.covariance)
        self._norm_factor = np.sqrt(np.linalg.det(2 * np.pi * self.covariance)) * self.n
        # NOTE(review): newer scipy versions expose ``inv_cov`` as a
        # read-only property and use Cholesky-based internals instead of
        # ``_norm_factor``; this override targets the older gaussian_kde
        # internals -- confirm the scipy version in use.
class gaussian_kde_covfact(stats.gaussian_kde):
    """Gaussian KDE with a selectable bandwidth (covariance) factor.

    ``covfact`` may be ``'scotts'``/``'sc'``, ``'silverman'``/``'si'``,
    or any value convertible to ``float`` for a fixed factor.
    """

    def __init__(self, dataset, covfact='scotts'):
        # Must be assigned before the base __init__, which calls
        # covariance_factor() while estimating the bandwidth.
        self.covfact = covfact
        scipy.stats.gaussian_kde.__init__(self, dataset)

    def _compute_covariance_(self):
        '''not used'''
        # NOTE: the trailing underscore means this does NOT override the
        # base class's _compute_covariance.
        self.inv_cov = np.linalg.inv(self.covariance)
        # Bug fix: ``sqrt`` was an unqualified name (NameError at
        # runtime); use ``np.sqrt``.
        self._norm_factor = np.sqrt(np.linalg.det(2 * np.pi * self.covariance)) * self.n

    def covariance_factor(self):
        """Return the bandwidth factor selected by ``self.covfact``.

        Raises:
            ValueError: if ``covfact`` is falsy and not a recognized name
                (note that a numeric 0 is therefore rejected as well).
        """
        if self.covfact in ['sc', 'scotts']:
            return self.scotts_factor()
        if self.covfact in ['si', 'silverman']:
            return self.silverman_factor()
        elif self.covfact:
            return float(self.covfact)
        else:
            raise ValueError('covariance factor has to be scotts, silverman or a number')

    def reset_covfact(self, covfact):
        """Switch to a new bandwidth factor and recompute the covariance."""
        self.covfact = covfact
        self.covariance_factor()
        self._compute_covariance()
def plotkde(covfact):
    """
    Re-estimate the module-level KDE `gkde` with a new bandwidth factor
    and plot it against the sample histogram and the true mixture density.

    NOTE(review): relies on module globals gkde, ind, xn, alpha, mlow,
    mhigh set up in the __main__ section -- confirm before reuse.
    """
    gkde.reset_covfact(covfact)
    kdepdf = gkde.evaluate(ind)
    plt.figure()
    # plot histgram of sample
    # Bug fix: hist()'s `normed` keyword was removed in matplotlib 3.x;
    # `density=True` is the equivalent replacement.
    plt.hist(xn, bins=20, density=True)
    # plot estimated density
    plt.plot(ind, kdepdf, label='kde', color="g")
    # plot data generating density
    plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
                  (1-alpha) * stats.norm.pdf(ind, loc=mhigh),
             color="r", label='DGP: normal mix')
    plt.title('Kernel Density Estimation - ' + str(gkde.covfact))
    plt.legend()
from numpy.testing import assert_array_almost_equal, \
assert_almost_equal, assert_
def test_kde_1d():
    """Sanity-check scipy's gaussian_kde against a standard-normal sample."""
    np.random.seed(8765678)
    sample_size = 500
    sample = np.random.randn(sample_size)
    sample_mean = sample.mean()
    sample_std = sample.std(ddof=1)
    print(sample_mean, sample_std)
    # KDE fitted to the original sample.
    density = stats.gaussian_kde(sample)
    # Evaluate the fitted density and its normal reference on a grid.
    grid = np.linspace(-7, 7, 501)
    kde_vals = density.evaluate(grid)
    ref_vals = stats.norm.pdf(grid, loc=sample_mean, scale=sample_std)
    print('MSE', np.sum((kde_vals - ref_vals)**2))
    print('axabserror', np.max(np.abs(kde_vals - ref_vals)))
    step = grid[1] - grid[0]
    assert_(np.sum((kde_vals - ref_vals)**2)*step < 0.01)
    # Box and Gaussian integrals of the estimated density.
    print(density.integrate_gaussian(0.0, 1.0))
    print(density.integrate_box_1d(-np.inf, 0.0))
    print(density.integrate_box_1d(0.0, np.inf))
    print(density.integrate_box_1d(-np.inf, sample_mean))
    print(density.integrate_box_1d(sample_mean, np.inf))
    # Integrating up to / from the sample mean should give ~0.5 mass.
    assert_almost_equal(density.integrate_box_1d(sample_mean, np.inf), 0.5, decimal=1)
    assert_almost_equal(density.integrate_box_1d(-np.inf, sample_mean), 0.5, decimal=1)
    assert_almost_equal(density.integrate_box(sample_mean, np.inf), 0.5, decimal=1)
    assert_almost_equal(density.integrate_box(-np.inf, sample_mean), 0.5, decimal=1)
    # Cross-checks of the analytic integrals against Riemann sums.
    assert_almost_equal(density.integrate_kde(density),
                        (kde_vals**2).sum()*step, decimal=2)
    assert_almost_equal(density.integrate_gaussian(sample_mean, sample_std**2),
                        (kde_vals*ref_vals).sum()*step, decimal=2)
if __name__ == '__main__':
    # generate a sample from a two-component normal mixture
    n_basesample = 1000
    np.random.seed(8765678)
    alpha = 0.6 #weight for (prob of) lower distribution
    mlow, mhigh = (-3,3) #mean locations for gaussian mixture
    # Bug fix: np.random.randn requires integer sizes; alpha*n_basesample
    # is a float and raises TypeError in modern numpy -- cast to int.
    xn = np.concatenate([mlow + np.random.randn(int(alpha * n_basesample)),
                         mhigh + np.random.randn(int((1 - alpha) * n_basesample))])
    # get kde for original sample
    #gkde = stats.gaussian_kde(xn)
    gkde = gaussian_kde_covfact(xn, 0.1)
    # evaluate the density funtion for the kde for some points
    ind = np.linspace(-7,7,101)
    kdepdf = gkde.evaluate(ind)
    plt.figure()
    # plot histgram of sample
    # Bug fix: hist()'s `normed` keyword was removed in matplotlib 3.x.
    plt.hist(xn, bins=20, density=True)
    # plot estimated density
    plt.plot(ind, kdepdf, label='kde', color="g")
    # plot data generating density
    plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
                  (1-alpha) * stats.norm.pdf(ind, loc=mhigh),
             color="r", label='DGP: normal mix')
    plt.title('Kernel Density Estimation')
    plt.legend()
    gkde = gaussian_kde_covfact(xn, 'scotts')
    kdepdf = gkde.evaluate(ind)
    plt.figure()
    # plot histgram of sample
    plt.hist(xn, bins=20, density=True)
    # plot estimated density
    plt.plot(ind, kdepdf, label='kde', color="g")
    # plot data generating density
    plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
                  (1-alpha) * stats.norm.pdf(ind, loc=mhigh),
             color="r", label='DGP: normal mix')
    plt.title('Kernel Density Estimation')
    plt.legend()
    #plt.show()
    # Compare several bandwidth choices.
    for cv in ['scotts', 'silverman', 0.05, 0.1, 0.5]:
        plotkde(cv)
    test_kde_1d()
    np.random.seed(8765678)
    n_basesample = 1000
    xn = np.random.randn(n_basesample)
    xnmean = xn.mean()
    xnstd = xn.std(ddof=1)
    # get kde for original sample
gkde = stats.gaussian_kde(xn) | /rmats2sashimiplot-2.0.4-py3-none-any.whl/MISO/misopy/kde_subclass.py | 0.675444 | 0.533519 | kde_subclass.py | pypi |
from scipy import *
from numpy import *
def format_credible_intervals(event_name, samples,
                              confidence_level=0.95):
    """
    Build the printable output fields (event name, posterior mean,
    CI lower bounds, CI upper bounds) for an NxM matrix of posterior
    samples. Handles both the two-isoform and multi-isoform cases.
    """
    num_samples, num_isoforms = shape(samples)
    if num_isoforms > 2:
        # Multi-isoform: one interval per isoform, comma-separated fields.
        intervals = compute_multi_iso_credible_intervals(samples,
                                                         confidence_level=confidence_level)
        low_str = ",".join(["%.2f" % (ci[0]) for ci in intervals])
        high_str = ",".join(["%.2f" % (ci[1]) for ci in intervals])
        mean_str = ",".join(["%.2f" % (val) for val in mean(samples, 0)])
        return [event_name,
                "%s" % (mean_str),
                "%s" % (low_str),
                "%s" % (high_str)]
    # Two-isoform case: only the first isoform's Psi is reported.
    interval = compute_credible_intervals(samples)
    return [event_name,
            "%.2f" % (mean(samples, 0)[0]),
            "%.2f" % (interval[0]),
            "%.2f" % (interval[1])]
def compute_credible_intervals(samples, confidence_level=.95):
    """
    Compute Bayesian confidence intervals (credible intervals) for the set
    of samples given, based on the method of Chen and Shao (1998).

    samples: 1-D array of posterior samples, or an Nx2 array (in which
    case only the first column is used).
    Returns [lower_bound, upper_bound]. The input array is not modified.
    """
    if samples.ndim == 2:
        # Two-isoform case: Psi of the first isoform determines the second.
        samples = samples[:, 0]
    num_samples = len(samples)
    # confidence percentage is 100(1-alpha)%
    alpha = 1 - confidence_level
    # the lower bound is the (alpha/2)*n-th smallest sample
    lower_bound_indx = round((alpha/2)*num_samples) - 1
    # the upper bound is the (1-alpha/2)*n-th smallest sample
    upper_bound_indx = round((1-alpha/2)*num_samples) - 1
    # Bug fix: index 0 is perfectly valid; the old `> 0` checks rejected
    # legitimate small-sample cases.
    assert(lower_bound_indx >= 0)
    assert(upper_bound_indx >= 0)
    # Bug fix: sort a *copy* -- the old in-place samples.sort() mutated the
    # caller's data (compute_multi_iso_credible_intervals passes views
    # into its matrix, whose columns were silently reordered).
    sorted_samples = sort(samples)
    return [sorted_samples[lower_bound_indx], sorted_samples[upper_bound_indx]]
def compute_multi_iso_credible_intervals(multi_iso_samples,
                                         confidence_level=0.95):
    """
    Compute per-isoform credible intervals for an NxM samples matrix.
    Returns a list of [lower, upper] pairs, one per isoform (column).
    """
    credible_intervals = []
    num_samples, num_isoforms = shape(multi_iso_samples)
    # One credible interval per isoform column.
    for iso_num in range(num_isoforms):
        ci = compute_credible_intervals(multi_iso_samples[:, iso_num],
                                        confidence_level=confidence_level)
        credible_intervals.append(ci)
    return credible_intervals | /rmats2sashimiplot-2.0.4-py3-none-any.whl/MISO/misopy/credible_intervals.py | 0.840062 | 0.505432 | credible_intervals.py | pypi
from numpy import *
from scipy import *
import time
import csv
def dictlist2csv(filename, dictlist, header_fields, delimiter='\t'):
    """
    Serialize a list of dictionaries to a delimited text file.

    filename: output path.
    dictlist: list of dicts, each containing every key in header_fields.
    header_fields: ordered column names, written as the first line.
    delimiter: column separator. Bug fix: this parameter was previously
    ignored (tabs were hard-coded for both header and rows).
    """
    # Use a context manager so the handle is closed even on error
    # (the original leaked it if a row raised).
    with open(filename, 'w') as output:
        # write header to file
        output.write(delimiter.join(str(f) for f in header_fields) + '\n')
        for entry in dictlist:
            output.write(delimiter.join(str(entry[field])
                                        for field in header_fields) + '\n')
def dictlist2dict(dictlist, header_name):
    """
    Index a list of dictionaries by the value each one holds under
    `header_name`. Assumes those values are unique; on collision the
    later entry overwrites the earlier one.
    """
    return {entry[header_name]: entry for entry in dictlist}
def dictlist2array(dictlist, header_fields):
    """
    Convert a list of dictionaries into a row-major list of lists,
    with columns ordered according to header_fields.
    """
    return [[entry[field] for field in header_fields] for entry in dictlist]
def csv2array(f,
              skiprows=0,
              delimiter='\t',
              raw_header=False,
              missing=None,
              with_header=True,
              comments="#"):
    """
    Parse a delimited file into a dictionary mapping each column name
    (taken from the first non-skipped row) to a 1-D array of its values.

    Returns (data, skipped_rows), where skipped_rows holds the first
    `skiprows` lines either raw (raw_header=True) or parsed through
    parse_header.

    NOTE(review): `missing`, `with_header` and `comments` are accepted
    but currently unused; kept for interface compatibility.
    """
    file_in = f
    if type(f) == str:
        file_in = open(f)
    skipped_rows = []
    for n in range(skiprows):
        header_line = file_in.readline().strip()
        if raw_header:
            skipped_rows.append(header_line)
        else:
            skipped_rows.append(parse_header(header_line))
    try:
        data_array = genfromtxt(file_in,
                                dtype=None,
                                deletechars='',
                                delimiter=delimiter)
    except IOError as io_error:
        raise Exception("IOError: %s, file: %s" %(io_error, file_in))
    cols = data_array[0,:]
    data = {}
    # Bug fix: the original looped over range(data_array.ndim), which is
    # always 2 for a table -- so only the first two columns were ever
    # returned. Iterate over every column instead.
    for n in range(len(cols)):
        data[cols[n]] = data_array[1:, n]
    return (data, skipped_rows)
def tryEval(s):
    """
    Attempt to evaluate `s` as a Python expression (with empty globals
    and locals); return `s` unchanged if evaluation fails.

    SECURITY NOTE(review): eval() on untrusted input can execute
    arbitrary expressions; only use this on trusted files.
    """
    try:
        return eval(s, {}, {})
    # Bug fix: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only ordinary exceptions.
    except Exception:
        return s
def evalDict(d):
    """
    In place, replace every value of `d` with its tryEval'd form
    (numbers/literals parsed, everything else left as-is). Returns `d`.
    """
    for key in d:
        d[key] = tryEval(d[key])
    return d
def get_header_fields(filename, delimiter='\t',
                      excel_tab=False):
    """
    Return the column names from the first line of `filename`.

    Bug fix: mode "rU" was removed in Python 3.11 (open() raised
    ValueError). Universal-newline translation is the default in text
    mode, so plain "r" covers both branches; `excel_tab` is kept for
    interface compatibility.
    Also closes the file handle (the original leaked it).
    """
    with open(filename, "r") as f:
        header_fields = f.readline().strip().split(delimiter)
    return header_fields
def file2dictlist(filename, delimiter='\t',
                  excel_tab=False):
    """
    Open `filename` and return a csv.DictReader over its rows, keyed by
    the header line. The file handle is intentionally left open because
    DictReader consumes the file lazily.

    Bug fix: mode "rU" was removed in Python 3.11; open with "r" and
    newline='' as recommended by the csv module.
    """
    f = open(filename, "r", newline='')
    if excel_tab:
        return csv.DictReader(f, delimiter=delimiter,
                              quoting=csv.QUOTE_NONE,
                              dialect='excel')
    return csv.DictReader(f, delimiter=delimiter,
                          quoting=csv.QUOTE_NONE)
def dictlist2file(dictrows, filename, fieldnames, delimiter='\t',
                  lineterminator='\n', extrasaction='ignore',
                  write_raw=False):
    """
    Write a list of dictionaries to a delimited file.

    fieldnames: ordered column names; if None, the sorted keys of the
    first row are used.
    write_raw: if True, join values manually instead of going through
    csv.DictWriter (no quoting applied).
    """
    # Bug fix: when fieldnames was None the original built a list,
    # passed it straight to write() (TypeError) and still handed
    # fieldnames=None to DictWriter. Derive the field list properly.
    if fieldnames is None:
        fieldnames = sorted(dictrows[0].keys())
    out_f = open(filename, 'w')
    # Write out header
    out_f.write(delimiter.join(fieldnames) + lineterminator)
    if write_raw:
        for row in dictrows:
            out_f.write("%s%s" %(delimiter.join([row[name] for name in fieldnames]),
                                 lineterminator))
    else:
        # Write out dictionary
        data = csv.DictWriter(out_f, fieldnames,
                              delimiter=delimiter,
                              lineterminator=lineterminator,
                              extrasaction=extrasaction)
        for row in dictrows:
            data.writerow(row)
    out_f.close()
def csv2dictlist_raw(filename, delimiter='\t'):
    """
    Parse a delimited file with a header line into a list of dicts (one
    per data row), passing every value through tryEval.
    Returns (dictlist, header_fields).
    """
    # Bug fix: close the file when done (the handle was leaked before).
    with open(filename) as f:
        header_fields = f.readline().strip().split(delimiter)
        dictlist = []
        # convert data to list of dictionaries
        for line in f:
            values = list(map(tryEval, line.strip().split(delimiter)))
            dictlist.append(dict(zip(header_fields, values)))
    return (dictlist, header_fields)
def find(val, values):
    """
    Return the indices of every occurrence of `val` in the iterable
    `values`.
    """
    return [idx for idx, item in enumerate(values) if item == val]
def parse_header(line, numeric_vals=True):
    """
    Parse a line of the form:
    #param=val\tparam=val\tparam=val...
    Return a dictionary of params: vals.
    If numeric_vals is True, values are converted to floats.
    """
    line = line.strip()
    # Drop the leading comment marker if present.
    if line[0] == '#':
        line = line[1:]
    params = {}
    for pair in line.split('\t'):
        k, v = pair.split('=')
        if numeric_vals:
            # Interpret values as floats by default.
            params[k] = float(v)
        else:
            params[k] = v
    return params | /rmats2sashimiplot-2.0.4-py3-none-any.whl/MISO/misopy/parse_csv.py | 0.489259 | 0.375964 | parse_csv.py | pypi
import os
import time
import scipy
import numpy
from scipy import *
from numpy import *
import misopy
import misopy.sam_utils as sam_utils
from misopy.Gene import load_genes_from_gff
from misopy.parse_csv import *
import pysam
def rpkm_per_region(region_lens, region_counts, read_len,
                    num_total_reads):
    """
    RPKM for a set of regions: reads per kilobase of mappable start
    positions per million total mapped reads, assuming a fixed read
    length.
    """
    lens = array(region_lens)
    # Mappable start positions across all regions, in kilobases.
    mappable_kb = sum(lens - read_len + 1) / 1e3
    # Library size in millions of reads.
    millions = num_total_reads / 1e6
    total_counts = sum(region_counts)
    return (total_counts / mappable_kb) / millions
class Counter:
    """
    Callable object counting how many times it has been invoked (used
    as a per-read callback). The running count is kept in `mCounts`.
    """
    # Class-level default kept for backward compatibility.
    mCounts = 0
    def __init__(self):
        # Bug fix: give each instance its own counter. The count
        # previously lived only in a shared class attribute, which is
        # error-prone across multiple Counter instances.
        self.mCounts = 0
    def __call__(self, alignment):
        self.mCounts += 1
def count_total_reads(bam_filename):
    """
    Return the total number of reads in the given BAM file (every
    record yielded by iterating the file).
    """
    bamfile = sam_utils.load_bam_reads(bam_filename)
    return sum(1 for _ in bamfile)
def compute_rpkm(gff_filename, bam_filename, read_len,
                 output_dir):
    """
    Compute RPKMs for genes listed in GFF based on BAM reads.

    gff_filename: GFF annotation of genes.
    bam_filename: sorted/indexed BAM file of aligned reads.
    read_len: read length, used to count mappable positions per exon.
    output_dir: directory where <bam basename>.rpkm is written.

    Only constitutive exonic regions at least read_len long are used.
    Returns the list of per-gene RPKM record dictionaries that was
    written to disk.
    """
    print("Computing RPKMs...")
    print(" - GFF filename: %s" %(gff_filename))
    print(" - BAM filename: %s" %(bam_filename))
    print(" - Output dir: %s" %(output_dir))
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    output_filename = os.path.join(output_dir,
                                   "%s.rpkm" %(os.path.basename(bam_filename)))
    print("Outputting RPKMs to: %s" %(output_filename))
    rpkm_fieldnames = ['gene_id', 'rpkm', 'const_exon_lens',
                       'num_reads']
    # Parse the GFF into genes
    print("Parsing GFF into genes...")
    t1 = time.time()
    gff_genes = load_genes_from_gff(gff_filename)
    t2 = time.time()
    print("Parsing took %.2f seconds" %(t2 - t1))
    # Load the BAM file
    bamfile = sam_utils.load_bam_reads(bam_filename)
    print("Counting all reads...")
    t1 = time.time()
    num_total_reads = count_total_reads(bam_filename)
    t2 = time.time()
    print("Took: %.2f seconds" %(t2 - t1))
    print("Number of total reads in BAM file: %d" %(num_total_reads))
    num_genes = 0
    rpkms_dictlist = []
    # gene_id -> total read count for genes whose exons were all too short.
    exons_too_small = {}
    num_no_const = 0
    for gene_id, gene_info in gff_genes.items():
        # Get the gene object
        gene = gene_info['gene_object']
        # Get constitutive exons
        const_exons = gene.get_const_parts()
        num_reads = []
        exon_lens = []
        regions_counted = {}
        # Normalize chromosome naming to the UCSC "chr" convention.
        if not gene.chrom.startswith("chr"):
            chrom = "chr%s" %(gene.chrom)
        else:
            chrom = gene.chrom
        if "random" in chrom:
            print("Skipping random chromosome gene: %s, %s" \
                  %(gene_id, chrom))
            continue
        if len(const_exons) == 0:
            print("Gene %s has no constitutive regions!" %(gene_id))
            num_no_const += 1
            continue
        total_num_reads = 0
        for exon in const_exons:
            exon_len = exon.end - exon.start + 1
            counts = 0
            try:
                reads = bamfile.fetch(chrom, exon.start, exon.end)
            except ValueError:
                print("Error fetching region: %s:%d-%d" %(chrom,
                                                          exon.start,
                                                          exon.end))
                break
            # Count reads landing in exon
            for r in reads: counts += 1
            total_num_reads += counts
            # Skip exons that we've seen already or exons that are shorter
            # than the read length
            if (exon.start, exon.end) in regions_counted or \
               exon_len < read_len:
                continue
            exon_lens.append(exon_len)
            num_reads.append(counts)
            regions_counted[(exon.start, exon.end)] = True
        if len(regions_counted) == 0:
            # print "Gene %s exons are too small for %d-long reads" \
            #       %(gene_id, read_len)
            exons_too_small[gene_id] = total_num_reads
            continue
        # print "Used total of %d regions" %(len(regions_counted))
        # print "Total of %d regions are too small" %(num_too_small)
        rpkm = rpkm_per_region(exon_lens, num_reads, read_len,
                               num_total_reads)
        # print rpkm, exon_lens, num_reads, read_len
        # Convert region lengths and number of reads to strings
        exon_lens_str = ",".join([str(e) for e in exon_lens])
        num_reads_str = ",".join([str(n) for n in num_reads])
        rpkm_entry = {'gene_id': gene_id,
                      'rpkm': "%.2f" %(rpkm),
                      'const_exon_lens': exon_lens_str,
                      'num_reads': num_reads_str}
        rpkms_dictlist.append(rpkm_entry)
        # print "RPKM: %.2f" %(rpkm)
        # Compute how many reads land in each constitutive exon
        num_genes += 1
    num_too_small = len(list(exons_too_small.keys()))
    print("Computed RPKMs for %d genes." %(num_genes))
    print(" - Total of %d genes cannot be used because they lack const. regions." \
          %(num_no_const))
    print(" - Total of %d genes cannot be used since their exons are too small." \
          %(num_too_small))
    for gene, total_counts in exons_too_small.items():
        print(" gene_id\ttotal_counts")
        print(" * %s\t%d" %(gene, total_counts))
    # Output RPKMs to file
    dictlist2file(rpkms_dictlist, output_filename,
                  rpkm_fieldnames)
    return rpkms_dictlist
def main():
    """Command-line entry point: --compute-rpkm GFF BAM OUTDIR plus --read-len."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--compute-rpkm", dest="compute_rpkm", nargs=3, default=None,
                      help="Compute RPKMs. Takes a GFF file with genes, an indexed/sorted BAM format "
                      "and an output directory.")
    parser.add_option("--read-len", dest="read_len", nargs=1, type="int", default=0,
                      help="Read length to use for RPKM computation.")
    (options, args) = parser.parse_args()
    if options.compute_rpkm != None:
        if options.read_len == 0:
            # --read-len is mandatory for RPKM computation.
            print("Error: Must give --read-len to compute RPKMs.")
            return
        # Normalize user-supplied paths (absolute, ~ expanded).
        gff_filename = os.path.abspath(os.path.expanduser(options.compute_rpkm[0]))
        bam_filename = os.path.abspath(os.path.expanduser(options.compute_rpkm[1]))
        output_dir = os.path.abspath(os.path.expanduser(options.compute_rpkm[2]))
        compute_rpkm(gff_filename, bam_filename, options.read_len, output_dir)
if __name__ == "__main__":
    main() | /rmats2sashimiplot-2.0.4-py3-none-any.whl/MISO/misopy/sam_rpkm.py | 0.517815 | 0.339307 | sam_rpkm.py | pypi
import os
import sys
import glob
import matplotlib
# Add misopy path
miso_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, miso_path)
# Use PDF backend
matplotlib.use("pdf")
from scipy import *
from numpy import *
import pysam
import shelve
import misopy
import misopy.gff_utils as gff_utils
import misopy.pe_utils as pe_utils
from misopy.parse_csv import csv2dictlist_raw
from misopy.samples_utils import load_samples
from misopy.sashimi_plot.Sashimi import Sashimi
from misopy.sashimi_plot.plot_utils.samples_plotter import SamplesPlotter
from misopy.sashimi_plot.plot_utils.plotting import *
from misopy.sashimi_plot.plot_utils.plot_gene import plot_density_from_file
import matplotlib.pyplot as plt
from matplotlib import rc
def plot_bf_dist(bf_filename, settings_filename, output_dir,
                 max_bf=1e12):
    """
    Plot a Bayes factor distribution from a .miso_bf file.

    bf_filename: MISO comparison output (*.miso_bf).
    settings_filename: sashimi_plot settings file (supplies
    bf_thresholds and bar_color).
    output_dir: directory the plot is saved in.
    max_bf: cap on individual Bayes factors. NOTE(review): the cap is
    currently hard-coded as 1e12 in the loop below rather than using
    this parameter -- confirm intent.
    """
    if not bf_filename.endswith(".miso_bf"):
        print("WARNING: %s does not end in .miso_bf, are you sure it is the " \
              "output of a MISO samples comparison?" %(bf_filename))
    # Load BF data
    data, h = csv2dictlist_raw(bf_filename)
    plot_name = os.path.basename(bf_filename)
    sashimi_obj = Sashimi(plot_name, output_dir,
                          settings_filename=settings_filename)
    settings = sashimi_obj.settings
    # Setup the figure
    sashimi_obj.setup_figure()
    # Matrix of bayes factors and delta psi pairs
    bfs_and_deltas = []
    for event in data:
        bf = event['bayes_factor']
        delta_psi = event['diff']
        # Multi-isoform events carry comma-separated BFs; skip them.
        if type(bf) == str and "," in bf:
            print("WARNING: %s is a multi-isoform event, skipping..." \
                  %(event))
            continue
        else:
            # Impose upper limit on Bayes factor
            bf = min(1e12, float(bf))
            delta_psi = float(delta_psi)
            bfs_and_deltas.append([bf, delta_psi])
    bfs_and_deltas = array(bfs_and_deltas)
    num_events = len(bfs_and_deltas)
    print("Loaded %d event comparisons." %(num_events))
    output_filename = sashimi_obj.output_filename
    print("Plotting Bayes factors distribution")
    print("  - Output filename: %s" %(output_filename))
    bf_thresholds = settings["bf_thresholds"]
    bar_color = settings["bar_color"]
    min_bf_thresh = min(bf_thresholds)
    # Events clearing the smallest threshold are the ones actually shown.
    num_events_used = sum(bfs_and_deltas[:, 0] >= min_bf_thresh)
    for thresh in bf_thresholds:
        if type(thresh) != int:
            print("Error: BF thresholds must be integers.")
            sys.exit(1)
    print("Using BF thresholds: ")
    print(bf_thresholds)
    print("Using bar color: %s" %(bar_color))
    # Cumulative bar per threshold, on a log y-scale.
    plot_cumulative_bars(bfs_and_deltas[:, 0],
                         bf_thresholds,
                         bar_color=bar_color,
                         logged=True)
    plt.xticks(bf_thresholds)
    c = 1
    plt.xlim([bf_thresholds[0] - c, bf_thresholds[-1] + c])
    plt.title("Bayes factor distributions\n(using %d/%d events)" \
              %(num_events_used, num_events))
    plt.xlabel("Bayes factor thresh.")
    plt.ylabel("No. events")
    sashimi_obj.save_plot()
def plot_event(event_name, pickle_dir, settings_filename,
               output_dir,
               group_info=None,
               no_posteriors=False,
               plot_title=None,
               plot_label=None):
    """
    Visualize read densities across the exons and junctions
    of a given MISO alternative RNA processing event.
    Also plots MISO estimates and Psi values.

    event_name: the ID= of the event in the indexed GFF annotation.
    pickle_dir: directory produced by index_gff.py.
    settings_filename: sashimi_plot settings file.
    group_info: optional grouping of BAM files.
    no_posteriors: if True, MISO posterior estimates are not plotted.
    """
    if not os.path.isfile(settings_filename):
        print("Error: settings filename %s not found." %(settings_filename))
        sys.exit(1)
    if not os.path.isdir(pickle_dir):
        print("Error: event pickle directory %s not found." %(pickle_dir))
        sys.exit(1)
    # Retrieve the full pickle filename
    genes_filename = os.path.join(pickle_dir,
                                  "genes_to_filenames.shelve")
    # Check that file basename exists (shelve may add its own suffix).
    if len(glob.glob("%s*" %(genes_filename))) == 0:
        raise Exception("Cannot find file %s. Are you sure the events " \
                        "were indexed with the latest version of index_gff.py?" \
                        %(genes_filename))
    event_to_filenames = shelve.open(genes_filename)
    if event_name not in event_to_filenames:
        raise Exception("Event %s not found in pickled directory %s. " \
                        "Are you sure this is the right directory for the event?" \
                        %(event_name, pickle_dir))
    pickle_filename = event_to_filenames[event_name]
    if no_posteriors:
        print("Asked to not plot MISO posteriors.")
    # Delegate the actual drawing to plot_gene.plot_density_from_file.
    plot_density_from_file(settings_filename, pickle_filename, event_name,
                           output_dir,
                           group_info=group_info,
                           no_posteriors=no_posteriors,
                           plot_title=plot_title,
                           plot_label=plot_label)
def plot_insert_len(insert_len_filename,
                    settings_filename,
                    output_dir):
    """
    Plot insert length distribution.

    insert_len_filename: *.insert_len file produced by pe_utils.
    settings_filename: sashimi_plot settings file (supplies
    insert_len_bins).
    output_dir: directory the histogram plot is saved to.
    """
    if not os.path.isfile(settings_filename):
        print("Error: settings filename %s not found." %(settings_filename))
        sys.exit(1)
    plot_name = os.path.basename(insert_len_filename)
    sashimi_obj = Sashimi(plot_name, output_dir,
                          settings_filename=settings_filename)
    settings = sashimi_obj.settings
    num_bins = settings["insert_len_bins"]
    output_filename = sashimi_obj.output_filename
    sashimi_obj.setup_figure()
    s = plt.subplot(1, 1, 1)
    print("Plotting insert length distribution...")
    print("  - Distribution file: %s" %(insert_len_filename))
    print("  - Output plot: %s" %(output_filename))
    insert_dist, params = pe_utils.load_insert_len(insert_len_filename)
    mean, sdev, dispersion, num_pairs \
        = pe_utils.compute_insert_len_stats(insert_dist)
    print("min insert: %.1f" %(min(insert_dist)))
    print("max insert: %.1f" %(max(insert_dist)))
    plt.title("%s (%d read-pairs)" \
              %(plot_name,
                num_pairs),
              fontsize=10)
    plt.hist(insert_dist, bins=num_bins, color='k',
             edgecolor="#ffffff", align='mid')
    axes_square(s)
    ymin, ymax = s.get_ylim()
    # Annotate mean, standard deviation and dispersion in the top-left
    # corner (axes coordinates).
    plt.text(0.05, 0.95, "$\mu$: %.1f\n$\sigma$: %.1f\n$d$: %.1f" \
             %(round(mean, 2),
               round(sdev, 2),
               round(dispersion, 2)),
             horizontalalignment='left',
             verticalalignment='top',
             bbox=dict(edgecolor='k', facecolor="#ffffff",
                       alpha=0.5),
             fontsize=10,
             transform=s.transAxes)
    plt.xlabel("Insert length (nt)")
    plt.ylabel("No. read pairs")
    sashimi_obj.save_plot()
def greeting():
    """Print the sashimi_plot banner, usage hint and manual URL."""
    messages = (
        "Sashimi plot: Visualize spliced RNA-Seq reads along gene models. "
        "Part of the MISO (Mixture of Isoforms model) framework.",
        "See --help for usage.\n",
        "Manual available at: http://genes.mit.edu/burgelab/miso/docs/sashimi.html\n",
    )
    for message in messages:
        print(message)
def main():
    """
    Command-line entry point dispatching to the three plot modes
    (--plot-insert-len, --plot-bf-dist, --plot-event).

    NOTE(review): the code exits with the greeting whenever --plot-event
    is absent, so the other two modes can only run together with
    --plot-event -- confirm this gating is intended.
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--plot-insert-len", dest="plot_insert_len", nargs=2, default=None,
                      help="Plot the insert length distribution from a given insert length (*.insert_len) "
                      "filename. Second argument is a settings file name.")
    parser.add_option("--plot-bf-dist", dest="plot_bf_dist", nargs=2, default=None,
                      help="Plot Bayes factor distributon. Takes the arguments: "
                      "(1) Bayes factor filename (*.miso_bf) filename, "
                      "(2) a settings filename.")
    parser.add_option("--plot-event", dest="plot_event", nargs=3, default=None,
                      help="Plot read densities and MISO inferences for a given alternative event. "
                      "Takes the arguments: (1) event name (i.e. the ID= of the event based on MISO gff3 "
                      "annotation file, (2) directory where indexed GFF annotation is (output of "
                      "index_gff.py), (3) path to plotting settings file.")
    parser.add_option("--no-posteriors", dest="no_posteriors", default=False, action="store_true",
                      help="If given this argument, MISO posterior estimates are not plotted.")
    parser.add_option("--plot-title", dest="plot_title", default=None, nargs=1,
                      help="Title of plot: a string that will be displayed at top of plot. Example: " \
                      "--plot-title \"My favorite gene\".")
    parser.add_option("--plot-label", dest="plot_label", default=None, nargs=1,
                      help="Plot label. If given, plot will be saved in the output directory as " \
                      "the plot label ending in the relevant extension, e.g. <plot_label>.pdf. " \
                      "Example: --plot-label my_gene")
    parser.add_option("--output-dir", dest="output_dir", nargs=1, default=None,
                      help="Output directory.")
    parser.add_option("--group-info", dest="group_info", nargs=1, default= None,
                      help="If there is the need to divide bam files into groups, then provided this parameter with the"
                      " the group files' name. Exemple:"
                      " \'--group-info gf.gf\'") # TODO: modify it when the format is determined
    (options, args) = parser.parse_args()
    if options.plot_event is None:
        greeting()
        sys.exit(1)
    if options.output_dir == None:
        print("Error: need --output-dir")
        sys.exit(1)
    # Normalize the output directory and create it if needed.
    output_dir = os.path.abspath(os.path.expanduser(options.output_dir))
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    no_posteriors = options.no_posteriors
    plot_title = options.plot_title
    plot_label = options.plot_label
    if options.plot_insert_len != None:
        insert_len_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[0]))
        settings_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[1]))
        plot_insert_len(insert_len_filename, settings_filename, output_dir)
    if options.plot_bf_dist != None:
        bf_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[0]))
        settings_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[1]))
        plot_bf_dist(bf_filename, settings_filename, output_dir)
    if options.plot_event != None:
        event_name = options.plot_event[0]
        pickle_dir = os.path.abspath(os.path.expanduser(options.plot_event[1]))
        settings_filename = os.path.abspath(os.path.expanduser(options.plot_event[2]))
        group_info = options.group_info
        plot_event(event_name, pickle_dir, settings_filename, output_dir,
                   group_info=group_info,
                   no_posteriors=no_posteriors,
                   plot_title=plot_title,
                   plot_label=plot_label)
if __name__ == '__main__':
    main() | /rmats2sashimiplot-2.0.4-py3-none-any.whl/MISO/misopy/sashimi_plot/sashimi_plot.py | 0.446253 | 0.235988 | sashimi_plot.py | pypi
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
import misopy.sashimi_plot.plot_utils.plot_settings as plot_settings
import misopy.sashimi_plot.plot_utils.plotting as plotting
class Sashimi:
    """
    Representation of a sashimi_plot figure: resolves output paths,
    loads plot settings, and wraps figure setup/saving.
    """
    def __init__(self, label, output_dir, dimensions=None, png=False,
                 output_filename=None, settings_filename=None,
                 event=None, chrom=None, no_posteriors=False):
        """
        Initialize image settings.

        label: basename used to build the default output filename.
        output_dir: directory the plot is written to.
        dimensions: optional [width, height]; defaults come from settings.
        png: save as PNG instead of PDF.
        output_filename: explicit output path (overrides label/output_dir).
        settings_filename: sashimi_plot settings file; defaults are used
        when None.
        """
        self.output_ext = ".pdf"
        if png:
            self.output_ext = ".png"
        # Plot label, will be used in creating the plot
        # output filename
        self.label = label
        # Set output directory
        self.set_output_dir(output_dir)
        # Plot settings
        self.settings_filename = settings_filename
        if self.settings_filename != None:
            self.settings = plot_settings.parse_plot_settings(settings_filename,
                                                              event=event,
                                                              chrom=chrom,
                                                              no_posteriors=no_posteriors)
        else:
            # Load default settings if no settings filename was given
            self.settings = plot_settings.get_default_settings()
        if output_filename != None:
            # If explicit output filename is given to us, use it
            self.output_filename = output_filename
        else:
            # Otherwise, use the label and the output directory
            self.set_output_filename()
        if dimensions != None:
            self.dimensions = dimensions
        else:
            fig_height = self.settings["fig_height"]
            fig_width = self.settings["fig_width"]
            print("Reading dimensions from settings...")
            print(" - Height: %.2f" %(float(fig_height)))
            print(" - Width: %.2f" %(float(fig_width)))
            self.dimensions = [fig_width, fig_height]
    def set_output_dir(self, output_dir):
        # Normalize to an absolute path with ~ expanded.
        self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
    def set_output_filename(self):
        # <output_dir>/<label><ext>
        plot_basename = "%s%s" %(self.label, self.output_ext)
        self.output_filename = os.path.join(self.output_dir, plot_basename)
    def setup_figure(self):
        print("Setting up plot using dimensions: ", self.dimensions)
        plt.figure(figsize=self.dimensions)
        # If asked, use sans serif fonts
        font_size = self.settings["font_size"]
        if self.settings["sans_serif"]:
            print("Using sans serif fonts.")
            plotting.make_sans_serif(font_size=font_size)
    def save_plot(self, plot_label=None):
        """
        Save plot to the output directory. If plot_label is given, save
        under that name (keeping the default extension) in the same
        directory as the default output filename.
        """
        if self.output_filename == None:
            raise Exception("sashimi_plot does not know where to save the plot.")
        output_fname = None
        if plot_label is not None:
            # Bug fix: rsplit(".") with no maxsplit made [0] the path
            # *stem* rather than the extension, and os.path.dirname()
            # was called with two arguments (TypeError) where
            # os.path.join was intended.
            ext = self.output_filename.rsplit(".", 1)[-1]
            dirname = os.path.dirname(self.output_filename)
            output_fname = os.path.join(dirname, "%s.%s" %(plot_label, ext))
        else:
            output_fname = self.output_filename
        print("Saving plot to: %s" %(output_fname))
        plt.savefig(output_fname)
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.axislines import SubplotZero
import scipy.stats as stats
from scipy import *
from numpy import array
from scipy import linalg
import sys
def plot_cumulative_bars(data, bins,
                         bar_color='k',
                         edgecolor='#ffffff',
                         logged=False):
    """
    Plot a cumulative discrete and bounded CDF.
    For each threshold in `bins`, draws a bar whose height is the number
    of entries in `data` at or above that threshold; logged=True puts
    the y-axis on a log scale.
    """
    data = array(data)
    # Denominator kept at 1 so bar heights are raw counts.
    n = 1
    num_events = [sum(data >= curr_bin).astype('float')/n \
                  for curr_bin in bins]
    # if logged:
    #     num_events = log2(num_events)
    ax = plt.gca()
    if logged:
        ax.set_yscale('log')
    bar_color=str(bar_color)
    plt.bar(bins, num_events,
            align='center',
            color=bar_color,
            edgecolor=edgecolor)
def make_errorbar_plot(labels, bar_locations,
                       bar_values, bar_errors,
                       colors=None, width=0.2):
    """
    Make a bar plot: one labeled bar (with a vertical error bar) per
    entry, drawn at the given x locations.
    """
    assert(len(bar_values) == len(bar_locations))
    assert(len(bar_errors) == len(bar_values))
    if colors == None:
        colors = ['k'] * len(bar_locations)
    for n, val in enumerate(bar_values):
        # Bug fix: the original statement ended with a stray backslash
        # line-continuation, fusing it with the following `def` and
        # producing a SyntaxError.
        plt.bar([bar_locations[n]], [val], width, yerr=[bar_errors[n]],
                color=colors[n], align='center', ecolor='k', label=labels[n])
def make_grouped_bar_plot(ax, x_axis_labels, group_labels, group_values,
                          group_errs, width, group_colors=None, x_offset_val=None,
                          with_legend=True):
    """
    Make grouped bar plot, where group_labels are the labels for each group
    (to appear on x-axis), the group values is a list of N lists, each of length N,
    where N is the number of groups.
    Returns the axes object.
    """
    all_rects = []
    if x_offset_val == None:
        x_offset_val = width
    num_items_on_x_axis = len(x_axis_labels)
    num_groups = len(group_labels)
    ind = arange(num_items_on_x_axis)
    # One bar series per group, each shifted right by x_offset_val.
    for group_num, group_vals in enumerate(group_values):
        group_len = len(group_vals)
        gene_rects = ax.bar(ind, group_vals, width, color=group_colors[group_num],
                            yerr=group_errs[group_num], label=group_labels[group_num],
                            ecolor='k')
        # Advance x-axis offset
        ind = ind + x_offset_val
        all_rects.append(gene_rects)
    if with_legend:
        # x_label_offset = (num_items_on_x_axis * width) / num_items_on_x_axis
        # Center the x tick labels under each bar group.
        x_label_offset = (num_groups / 2.) * width #num_items_on_x_axis
        xticks = arange(num_items_on_x_axis) + x_label_offset
        ax.set_xticks(xticks)
        plt.xlim([0 - width, max(xticks) + (group_num * width)])
        ax.set_xticklabels(x_axis_labels)
        ax.legend(tuple([rect[0] for rect in all_rects]), group_labels,
                  borderpad=0.01, labelspacing=.003, handlelength=1.0, loc='upper left', numpoints=1,
                  handletextpad=0.3)
    return ax
def show_spines(ax,spines):
    """
    Show only the named spines ('left', 'bottom', ...) on ax, hiding the
    rest, and keep tick marks only where a spine is visible.
    """
    for loc, spine in ax.spines.items():
        if loc not in spines:
            spine.set_color('none') # don't draw spine
    # turn off ticks where there is no spine
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    else:
        # no yaxis ticks
        ax.yaxis.set_ticks([])
    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    else:
        # no xaxis ticks
        ax.xaxis.set_ticks([])
def fit_line(x, y, plot_line=False):
    """
    Least-squares line fit of y on x; optionally draws the fitted line.
    Returns (r_squared, slope, intercept).
    """
    A = vstack([x, ones(len(x))]).T
    m, c = linalg.lstsq(A, y)[0]
    if plot_line:
        plt.plot(x, m*x + c, 'r', lw=1.2)
    # Bug fix: pearsonr returns (r, p-value); squaring the whole pair
    # returned an array of both squared values instead of the scalar R^2.
    r = stats.pearsonr(x, y)[0]
    return square(r), m, c
def remove_extra_ticks(ax):
    """Hide every other (odd-indexed) tick line on both axes of ax."""
    for i, line in enumerate(ax.get_xticklines() + ax.get_yticklines()):
        if i%2 == 1: # odd indices
            line.set_visible(False)
def axes_square(plot_handle):
    """Force the axes' display box to be square regardless of data range."""
    plot_handle.axes.set_aspect(1/plot_handle.axes.get_data_ratio())
def setup_two_axes(fig, labelpad=1, invisible=["bottom", "top", "right"]):
    """
    Add a SubplotZero to fig with only the y=0 axis line and the
    non-`invisible` spines shown; tightens tick padding globally.
    Returns the axes object.
    NOTE(review): the mutable default `invisible` is shared across
    calls; safe only while it is never mutated.
    """
    plt.rcParams['xtick.major.pad'] = 0.1
    plt.rcParams['xtick.minor.pad'] = 0.1
    plt.rcParams['ytick.major.pad'] = 2
    plt.rcParams['ytick.minor.pad'] = 2
    ax = SubplotZero(fig, 1, 1, 1)
    ax.yaxis.labelpad = labelpad
    fig.add_subplot(ax)
    # make xzero axis (horizontal axis line through y=0) visible.
    ax.axis["xzero"].set_visible(True)
    ax.xaxis.labelpad = labelpad
    # make other axis (bottom, top, right) invisible.
    for n in invisible:
        ax.axis[n].set_visible(False)
    return ax
def setup_two_axes_subplot(fig, m, n, curr_plot_num, invisible=["bottom", "top", "right"]):
    """
    Add a SubplotZero at position *curr_plot_num* of an *m* x *n* grid on
    *fig*, showing only the horizontal axis line through y=0 ("xzero").
    Sides named in *invisible* are hidden (default list is read-only).
    Returns the created axes.
    """
    ax = SubplotZero(fig, m, n, curr_plot_num)
    fig.add_subplot(ax)
    ax.axis["xzero"].set_visible(True)
    # Loop variable renamed from ``n``, which shadowed the grid-columns
    # parameter ``n`` (harmless only because ``n`` was no longer used).
    for side in invisible:
        ax.axis[side].set_visible(False)
    return ax
def restyle_ticks(c, min_val=0, max_val=1):
    """Pad both axis limits of the current axes by *c* beyond
    [min_val, max_val]."""
    lo, hi = min_val - c, max_val + c
    plt.xlim(lo, hi)
    plt.ylim(lo, hi)
def label_stacked_bars(rects1, rects2, labels, h=1.02):
    """Write ``labels[i]`` centered above each stacked bar pair, at *h*
    times the combined bar height."""
    for i, (lower, upper) in enumerate(zip(rects1, rects2)):
        total = lower.get_height() + upper.get_height()
        # x position comes from the lower (rects1) bar, as before.
        plt.text(lower.get_x() + lower.get_width()/2., h*total,
                 labels[i], ha='center', va='bottom')
import matplotlib.transforms as mtransforms
def make_sans_serif(font_size=10):
    """
    Switch matplotlib to the FreeSans sans-serif font at *font_size*,
    using AFM PostScript fonts and TrueType (Type 42) PDF embedding so
    exported text stays editable.
    """
    from matplotlib import rc
    plt.rcParams['ps.useafm'] = True
    # Type 42 keeps text as real (editable) glyphs in PDF output.
    # The original set this key twice; once is enough.
    plt.rcParams['pdf.fonttype'] = 42
    print("Setting to FreeSans")
    rc('font', **{'family': 'sans-serif', 'sans-serif': ['FreeSans']})
    plt.rcParams['font.size'] = font_size
def expand_subplot(ax, num2):
    """
    Stretch subplot *ax* so that it spans from its own grid slot through
    slot *num2* (1-based), by monkey-patching its ``update_params``.
    NOTE(review): relies on the legacy ``SubplotBase.update_params`` /
    ``figbox`` API, which was removed in newer matplotlib -- confirm the
    pinned matplotlib version supports it.
    """
    # Keep a reference to the bound original so the patch can call it.
    update_params_orig = ax.update_params
    ax._num2 = num2 - 1
    def _f(self=ax):
        # Temporarily pretend to be subplot num2 to get its top-right
        # corner, then restore and take our own bottom-left corner.
        num_orig = self._num
        self._num = self._num2
        update_params_orig()
        right, top = self.figbox.extents[2:]
        self._num = num_orig
        update_params_orig()
        left, bottom = self.figbox.extents[:2]
        # Combined box spans both grid slots.
        self.figbox = mtransforms.Bbox.from_extents(left, bottom,
                                                    right, top)
    ax.update_params = _f
    ax.update_params()
    ax.set_position(ax.figbox)
# Named hex colors shared by the plotting helpers in this module.
# (Extraction residue that had been fused onto the closing line removed.)
colors = {'steelblue': '#63B8FF',
          'lightblue1': '#3399FF',
          'signblue': '#003F87',  # dark blue
          'grey1': '#999999',
          'darkred': '#990000'}
import os
import numpy as np
from disorder.diffuse import scattering, space
from disorder.diffuse import displacive, magnetic
from disorder.material import crystal, symmetry
def factor(u, v, w, atms, occupancy, U11, U22, U33, U23, U13, U12,
           a, b, c, alpha, beta, gamma, symops, dmin=0.3, source='neutron'):
    """
    Structure factor :math:`F(h,k,l)`.
    Parameters
    ----------
    u, v, w : 1d array
        Fractional coordinates for each atom site.
    atms : 1d array, str
        Ion or isotope for each atom site.
    occupancy : 1d array
        Site occupancies for each atom site.
    U11, U22, U33, U23, U13, U12 : 1d array
        Atomic displacement parameters in crystal axis system.
    a, b, c, alpha, beta, gamma : float
        Lattice constants :math:`a`, :math:`b`, :math:`c`, :math:`\\alpha`,
        :math:`\\beta`, and :math:`\\gamma`. Angles are in radians.
    symops : 1d array, str
        Space group symmetry operations.
    dmin : float, optional
        Minimum d-spacing. Default ``0.3``
    source : str, optional
        Radiation source ``'neutron'``, ``'x-ray'``, or ``'electron'``.
        Default ``'neutron'``.
    Returns
    -------
    h, k, l : 1d array, int
        Miller indices.
    d : 1d array
        d-spacing distance between planes of atoms.
    F : 1d array, complex
        Structure factor.
    mult : 1d array, int
        Multiplicity.
    """
    n_atm = atms.shape[0]
    # B matrix: Miller indices -> Cartesian reciprocal-space vectors,
    # built from the reciprocal-lattice constants.
    inv_constants = crystal.reciprocal(a, b, c, alpha, beta, gamma)
    a_, b_, c_, alpha_, beta_, gamma_ = inv_constants
    B = crystal.cartesian(a_, b_, c_, alpha_, beta_, gamma_)
    # Index limits chosen so the full sphere down to dmin is covered.
    hmax, kmax, lmax = np.floor(np.array([a,b,c])/dmin).astype(int)
    h, k, l = np.meshgrid(np.arange(-hmax, hmax+1),
                          np.arange(-kmax, kmax+1),
                          np.arange(-lmax, lmax+1), indexing='ij')
    # Remove the (0,0,0) reflection (flat index of the grid center).
    h = np.delete(h, lmax+(2*lmax+1)*(kmax+(2*kmax+1)*hmax))
    k = np.delete(k, lmax+(2*lmax+1)*(kmax+(2*kmax+1)*hmax))
    l = np.delete(l, lmax+(2*lmax+1)*(kmax+(2*kmax+1)*hmax))
    # Reverse so indices run from positive to negative.
    h, k, l = h[::-1], k[::-1], l[::-1]
    Qh, Qk, Ql = crystal.vector(h, k, l, B)
    Q = np.sqrt(Qh**2+Qk**2+Ql**2)
    d = 2*np.pi/Q
    # Keep reflections above the resolution cutoff.
    ind = d >= dmin
    h, k, l, d, Q = h[ind], k[ind], l[ind], d[ind], Q[ind]
    # Drop systematically absent reflections.
    ind = ~symmetry.absence(symops, h, k, l)
    h, k, l, d, Q = h[ind], k[ind], l[ind], d[ind], Q[ind]
    n_hkl = Q.size
    # exp(2*pi*i*(h*u+k*v+l*w)) per (reflection, atom) pair.
    phase_factor = np.exp(2j*np.pi*(h[:,np.newaxis]*u+
                                    k[:,np.newaxis]*v+
                                    l[:,np.newaxis]*w))
    if (source == 'neutron'):
        # Neutron scattering lengths are Q-independent.
        scattering_power = scattering.length(atms, n_hkl).reshape(n_hkl,n_atm)
    else:
        # X-ray/electron form factors depend on |Q|.
        scattering_power = scattering.form(atms, Q).reshape(n_hkl,n_atm)
    # Anisotropic Debye-Waller factor in the crystal axis system.
    T = np.exp(-2*np.pi**2*(U11*(h*a_)[:,np.newaxis]**2+
                            U22*(k*b_)[:,np.newaxis]**2+
                            U33*(l*c_)[:,np.newaxis]**2+
                            U23*(k*l*b_*c_*2)[:,np.newaxis]+
                            U13*(h*l*a_*c_*2)[:,np.newaxis]+
                            U12*(h*k*a_*b_*2)[:,np.newaxis]))
    factors = scattering_power*occupancy*T*phase_factor
    # Sum over atoms gives F(hkl).
    F = factors.sum(axis=1)
    # Multiplicity: count symmetry-equivalent index triples.
    coordinate = [h,k,l]
    symops = np.unique(symmetry.inverse(symops))
    total = []
    for symop in symops:
        transformed = symmetry.evaluate([symop], coordinate, translate=False)
        total.append(transformed)
    total = np.vstack(total)
    # Canonically sort each reflection's orbit so equivalents compare equal.
    for i in range(n_hkl):
        total[:,:,i] = total[np.lexsort(total[:,:,i].T),:,i]
    total = np.vstack(total)
    total, ind, mult = np.unique(total, axis=1,
                                 return_index=True,
                                 return_counts=True)
    h, k, l, d, F = h[ind], k[ind], l[ind], d[ind], F[ind]
    # Final ordering: descending by (d, l, k, h).
    ind = np.lexsort((h,k,l,d),axis=0)[::-1]
    h, k, l, d, F, mult = h[ind], k[ind], l[ind], d[ind], F[ind], mult[ind]
    return h, k, l, d, F, mult
class UnitCell:
    """
    Unit cell.
    Parameters
    ----------
    filename : str
        Name of CIF file.
    tol : float, optional
        Tolerance of unique atom coordinates.
    Methods
    -------
    get_filepath()
        Path of CIF file.
    get_filename()
        Name of CIF file.
    get_sites()
        Atom sites in the unit cell.
    get_active_sites()
        Active atom sites in the unit cell.
    set_active_sites()
        Update active atom sites in the unit cell.
    get_number_atoms_per_unit_cell()
        Total number of atoms in the unit cell.
    get_fractional_coordinates()
        Fractional coordiantes.
    set_fractional_coordinates()
        Update fractional coordiantes of active atoms.
    get_unit_cell_cartesian_atomic_coordinates()
        Cartesian coordiantes.
    get_unit_cell_atoms()
        Atom symbols of active atoms.
    set_unit_cell_atoms()
        Update atom symbols.
    get_occupancies()
        Occupancies.
    set_occupancies()
        Update occupancies.
    get_anisotropic_displacement_parameters()
        Anisotropic displacement parameters in crystal coordinates.
    set_anisotropic_displacement_parameters()
        Update anisotropic displacement parameters in crystal coordinates.
    get_isotropic_displacement_parameter()
        Isotropic displacement parameters.
    set_isotropic_displacement_parameter()
        Update isotropic displacement parameters.
    get_principal_displacement_parameters()
        Principal displacement parameters in Cartesian coordinates.
    get_cartesian_anistropic_displacement_parameters()
        Anisotropic displacement parameters in Cartesian coordinates.
    get_crystal_axis_magnetic_moments()
        Magnetic moments in crystal coordinates.
    set_crystal_axis_magnetic_moments()
        Update magnetic moments in crystal coordinates.
    get_magnetic_moment_magnitude()
        Magnitude of magnetic moments.
    get_cartesian_magnetic_moments()
        Magnetic moments in Cartesian coordinates.
    get_g_factors()
        g-factors.
    set_g_factors()
        Update g-factors.
    get_lattice_constants()
        Lattice parameters.
    set_lattice_constants()
        Update lattice parameters.
    get_reciprocal_lattice_constants()
        Reciprocal lattice parameters.
    get_symmetry_operators()
        Symmetry operators.
    get_magnetic_symmetry_operators()
        Magnetic symmetry operators.
    get_lattice_system()
        Lattice system of unit cell.
    get_lattice_volume()
        Lattice volume of unit cell.
    get_reciprocal_lattice_volume()
        Reciprocal lattice volume of reciprocal cell.
    get_metric_tensor()
        Unit cell metric tensor.
    get_reciprocal_metric_tensor()
        Reciprocal cell metric tensor.
    get_fractional_cartesian_transform()
        Fractional to Cartesian coordinates transform matrix.
    get_miller_cartesian_transform()
        Miller to Cartesian coordinates transform matrix.
    get_cartesian_rotation()
        Transform matrix between Cartesian axes of real and reciprocal lattice.
    get_moment_cartesian_transform()
        Magnetic moment components crystal to Cartesian transfomrmation matrix.
    get_atomic_displacement_cartesian_transform()
        Atomic displacement parameters crystal to Cartesian transfomrmation
        matrix.
    get_space_group_symbol()
        Space group symbol.
    get_space_group_number()
        Space group number.
    get_laue()
        Laue class.
    get_site_symmetries()
        Site symmetry operators.
    get_wyckoff_special_positions()
        Wyckoff special positions.
    get_site_multiplicities()
        Site multiplicites.
    """
    def __init__(self, filename, tol=1e-2):
        filename = os.path.abspath(filename)
        folder = os.path.dirname(filename)
        filename = os.path.basename(filename)
        self.__folder = folder
        self.__filename = filename
        self.__load_unit_cell(tol)
    def __load_unit_cell(self, tol):
        """Parse the CIF file and populate all per-site arrays."""
        folder = self.get_filepath()
        filename = self.get_filename()
        constants = crystal.parameters(folder=folder, filename=filename)
        a, b, c, alpha, beta, gamma = constants
        self.__a, self.__b, self.__c = a, b, c
        self.__alpha, self.__beta, self.__gamma = alpha, beta, gamma
        uc_dict = crystal.unitcell(folder=folder, filename=filename, tol=tol)
        n_atm = uc_dict['n_atom']
        self.__atm = uc_dict['atom']
        self.__site = uc_dict['site']
        # Active-site bookkeeping:
        #   __act   - one flag per unique site,
        #   __mask  - index of the representative atom of each active site,
        #   __index - per-atom flag (atom belongs to an active site),
        #   __inverse - maps each active atom back to its site slot.
        uni, ind = np.unique(self.__site, return_index=True)
        self.__act = np.full(uni.size, True)
        self.__ind = ind
        self.__mask = self.__ind[self.__act]
        self.__index = self.__act[self.__site]
        self.__inverse = np.arange(self.__act.size)[self.__site][self.__index]
        self.__op = uc_dict['operator']
        self.__mag_op = uc_dict['magnetic_operator']
        self.__u = uc_dict['u']
        self.__v = uc_dict['v']
        self.__w = uc_dict['w']
        self.__occ = uc_dict['occupancy']
        displacement = uc_dict['displacement']
        self.__U11 = np.zeros(n_atm)
        self.__U22 = np.zeros(n_atm)
        self.__U33 = np.zeros(n_atm)
        self.__U23 = np.zeros(n_atm)
        self.__U13 = np.zeros(n_atm)
        self.__U12 = np.zeros(n_atm)
        if (displacement.shape[1] == 1):
            # Only an isotropic parameter is given; expand it to a tensor.
            self.set_isotropic_displacement_parameter(displacement.flatten())
        else:
            self.__U11 = displacement.T[0]
            self.__U22 = displacement.T[1]
            self.__U33 = displacement.T[2]
            self.__U23 = displacement.T[3]
            self.__U13 = displacement.T[4]
            self.__U12 = displacement.T[5]
        self.__mu1, self.__mu2, self.__mu3 = uc_dict['moment'].T
        # Default spin-only Lande g-factor.
        self.__g = np.full(n_atm, 2.0)
        hm, sg = crystal.group(folder=folder, filename=filename)
        self.__hm = hm
        self.__sg = sg
        lat = crystal.lattice(*constants)
        self.__lat = lat
        laue = crystal.laue(folder=folder, filename=filename)
        self.__laue = laue
        # Per-atom site-symmetry information.
        self.__pg = np.empty(n_atm, dtype=object)
        self.__mult = np.empty(n_atm, dtype=int)
        self.__sp_pos = np.empty(n_atm, dtype=object)
        A = self.get_fractional_cartesian_transform()
        for i, (u, v, w) in enumerate(zip(self.__u,self.__v,self.__w)):
            pg, mult, sp_pos = symmetry.site(self.__op, [u,v,w], A, tol=1e-2)
            self.__pg[i], self.__mult[i], self.__sp_pos[i] = pg, mult, sp_pos
    def __get_all_lattice_constants(self):
        """All six direct-lattice constants, regardless of lattice system."""
        constants = self.__a, self.__b, self.__c, \
                    self.__alpha, self.__beta, self.__gamma
        return constants
    def get_filepath(self):
        """
        Path of CIF file.
        Returns
        -------
        str
            Name of path excluding filename.
        """
        return self.__folder
    def get_filename(self):
        """
        Name of CIF file.
        Returns
        -------
        str
            Name of filename excluding path.
        """
        return self.__filename
    def get_sites(self):
        """
        Atom sites in the unit cell.
        Returns
        -------
        1d array, int
            All site numbers.
        """
        return self.__site
    def get_active_sites(self):
        """
        Active atom sites in the unit cell.
        Returns
        -------
        1d array, int
            All active site numbers.
        """
        return self.__act
    def set_active_sites(self, act):
        """
        Update active atom sites in the unit cell.
        Parameters
        ----------
        act : 1d array, int
            All active site numbers.
        """
        self.__act = act
        # Rebuild the derived index arrays (see __load_unit_cell).
        self.__mask = self.__ind[self.__act]
        self.__index = self.__act[self.__site]
        self.__inverse = np.arange(self.__act.size)[self.__site][self.__index]
    def get_number_atoms_per_unit_cell(self):
        """
        Total number of atoms in the unit cell.
        Returns
        -------
        int
            All active atoms.
        """
        return self.__act[self.__site].sum()
    def get_fractional_coordinates(self):
        """
        Fractional coordiantes of active atoms.
        """
        mask = self.__mask
        return self.__u[mask], self.__v[mask], self.__w[mask]
    def set_fractional_coordinates(self, u, v, w):
        """
        Update fractional coordiantes of active atoms.
        """
        mask = self.__mask
        ind = self.__index
        inv = self.__inverse
        # Propagate the representative coordinates to all symmetry-related
        # atoms of each active site, wrapping into the unit cell.
        operators = symmetry.binary(self.__op[ind], self.__op[mask][inv])
        up, vp, wp = u[inv], v[inv], w[inv]
        for i, operator in enumerate(operators):
            uvw = symmetry.evaluate([operator], [up[i], vp[i], wp[i]])
            up[i], vp[i], wp[i] = np.mod(uvw, 1).flatten()
        self.__u[ind], self.__v[ind], self.__w[ind] = up, vp, wp
    def get_unit_cell_cartesian_atomic_coordinates(self):
        """
        Cartesian coordiantes of active atoms.
        """
        A = self.get_fractional_cartesian_transform()
        u, v, w = self.get_fractional_coordinates()
        return crystal.transform(u, v, w, A)
    def get_unit_cell_atoms(self):
        """
        Atom symbols of active atoms.
        """
        mask = self.__mask
        return self.__atm[mask]
    def set_unit_cell_atoms(self, atm):
        """
        Update atom symbols of active atoms.
        """
        ind = self.__index
        inv = self.__inverse
        self.__atm[ind] = atm[inv]
    def get_occupancies(self):
        """
        Occupancies of active atoms.
        """
        mask = self.__mask
        return self.__occ[mask]
    def set_occupancies(self, occ):
        """
        Update occupancies of active atoms.
        """
        ind = self.__index
        inv = self.__inverse
        self.__occ[ind] = occ[inv]
    def get_anisotropic_displacement_parameters(self):
        """
        Anisotropic displacement parameters in crystal coordinates of active
        atoms.
        """
        mask = self.__mask
        U11 = self.__U11[mask]
        U22 = self.__U22[mask]
        U33 = self.__U33[mask]
        U23 = self.__U23[mask]
        U13 = self.__U13[mask]
        U12 = self.__U12[mask]
        return U11, U22, U33, U23, U13, U12
    def set_anisotropic_displacement_parameters(self, U11, U22, U33,
                                                      U23, U13, U12):
        """
        Update anisotropic displacement parameters in crystal coordinates of
        active atoms.
        """
        mask = self.__mask
        ind = self.__index
        inv = self.__inverse
        # Transform each representative tensor onto its symmetry-related
        # atoms before storing.
        operators = symmetry.binary(self.__op[ind], self.__op[mask][inv])
        U11p = U11[inv]
        U22p = U22[inv]
        U33p = U33[inv]
        U23p = U23[inv]
        U13p = U13[inv]
        U12p = U12[inv]
        for i, operator in enumerate(operators):
            disp = [U11p[i], U22p[i], U33p[i], U23p[i], U13p[i], U12p[i]]
            disp = symmetry.evaluate_disp([operator], disp)
            U11p[i], U22p[i], U33p[i], U23p[i], U13p[i], U12p[i] = disp
        self.__U11[ind] = U11p
        self.__U22[ind] = U22p
        self.__U33[ind] = U33p
        self.__U23[ind] = U23p
        self.__U13[ind] = U13p
        self.__U12[ind] = U12p
    def get_isotropic_displacement_parameter(self):
        """
        Isotropic displacement parameters of active atoms.
        """
        D = self.get_atomic_displacement_cartesian_transform()
        adps = self.get_anisotropic_displacement_parameters()
        U11, U22, U33, U23, U13, U12 = adps
        return displacive.isotropic(U11, U22, U33, U23, U13, U12, D)
    def set_isotropic_displacement_parameter(self, Uiso):
        """
        Update isotropic displacement parameters of active atoms.
        """
        ind = self.__index
        inv = self.__inverse
        D = self.get_atomic_displacement_cartesian_transform()
        # Expand the scalar Uiso into an equivalent anisotropic tensor.
        uiso = np.dot(np.linalg.inv(D), np.linalg.inv(D.T))
        U11, U22, U33 = Uiso*uiso[0,0], Uiso*uiso[1,1], Uiso*uiso[2,2]
        U23, U13, U12 = Uiso*uiso[1,2], Uiso*uiso[0,2], Uiso*uiso[0,1]
        self.__U11[ind] = U11[inv]
        self.__U22[ind] = U22[inv]
        self.__U33[ind] = U33[inv]
        self.__U23[ind] = U23[inv]
        self.__U13[ind] = U13[inv]
        self.__U12[ind] = U12[inv]
    def get_principal_displacement_parameters(self):
        """
        Principal displacement parameters in Cartesian coordinates of active
        atoms.
        """
        D = self.get_atomic_displacement_cartesian_transform()
        adps = self.get_anisotropic_displacement_parameters()
        U11, U22, U33, U23, U13, U12 = adps
        return displacive.principal(U11, U22, U33, U23, U13, U12, D)
    def get_cartesian_anistropic_displacement_parameters(self):
        """
        Anisotropic displacement parameters in Cartesian coordinates of active
        atoms.
        """
        D = self.get_atomic_displacement_cartesian_transform()
        adps = self.get_anisotropic_displacement_parameters()
        U11, U22, U33, U23, U13, U12 = adps
        return displacive.cartesian(U11, U22, U33, U23, U13, U12, D)
    def get_crystal_axis_magnetic_moments(self):
        """
        Magnetic moments in crystal coordinates of active atoms.
        """
        mask = self.__mask
        mu1 = self.__mu1[mask]
        mu2 = self.__mu2[mask]
        mu3 = self.__mu3[mask]
        return mu1, mu2, mu3
    def set_crystal_axis_magnetic_moments(self, mu1, mu2, mu3):
        """
        Update magnetic moments in crystal coordinates of active atoms.
        """
        mask = self.__mask
        ind = self.__index
        inv = self.__inverse
        # Magnetic moments transform with the magnetic symmetry operators.
        operators = symmetry.binary_mag(self.__mag_op[ind],
                                        self.__mag_op[mask][inv])
        mu1p = mu1[inv]
        mu2p = mu2[inv]
        mu3p = mu3[inv]
        for i, operator in enumerate(operators):
            mag = [mu1p[i], mu2p[i], mu3p[i]]
            mag = symmetry.evaluate_mag([operator], mag)
            mu1p[i], mu2p[i], mu3p[i] = np.array(mag).flatten()
        self.__mu1[ind] = mu1p
        self.__mu2[ind] = mu2p
        self.__mu3[ind] = mu3p
    def get_magnetic_moment_magnitude(self):
        """
        Magnitude of magnetic moments of active atoms.
        """
        C = self.get_moment_cartesian_transform()
        mu1, mu2, mu3 = self.get_crystal_axis_magnetic_moments()
        return magnetic.magnitude(mu1, mu2, mu3, C)
    def get_cartesian_magnetic_moments(self):
        """
        Magnetic moments in Cartesian coordinates of active atoms.
        """
        C = self.get_moment_cartesian_transform()
        mu1, mu2, mu3 = self.get_crystal_axis_magnetic_moments()
        return magnetic.cartesian(mu1, mu2, mu3, C)
    def get_g_factors(self):
        """
        g-factors of active atoms.
        """
        mask = self.__mask
        return self.__g[mask]
    def set_g_factors(self, g):
        """
        Update g-factors of active atoms.
        """
        ind = self.__index
        inv = self.__inverse
        self.__g[ind] = g[inv]
    def get_lattice_constants(self):
        """
        Lattice parameters (only those free for the lattice system).
        """
        lat = self.get_lattice_system()
        a = self.__a
        b = self.__b
        c = self.__c
        alpha = self.__alpha
        beta = self.__beta
        gamma = self.__gamma
        if (lat == 'Cubic'):
            constants = a
        elif (lat == 'Hexagonal' or lat == 'Tetragonal'):
            constants = a, c
        # NOTE(review): 'Rhobmohedral' spelling must match the value
        # returned by crystal.lattice -- confirm before correcting it.
        elif (lat == 'Rhobmohedral'):
            constants = a, alpha
        elif (lat == 'Orthorhombic'):
            constants = a, b, c
        elif (lat == 'Monoclinic'):
            # Mirrored by set_lattice_constants; the angle pair returned
            # depends on which angle is the non-90-degree one.
            if (not np.isclose(beta, np.pi/2)):
                constants = a, b, c, alpha, gamma
            else:
                constants = a, b, c, alpha, beta
        else:
            constants = a, b, c, alpha, beta, gamma
        return constants
    def set_lattice_constants(self, *constants):
        """
        Update lattice parameters (free parameters of the lattice system,
        in the same order as returned by ``get_lattice_constants``).
        """
        lat = self.get_lattice_system()
        a = self.__a
        b = self.__b
        c = self.__c
        alpha = self.__alpha
        beta = self.__beta
        gamma = self.__gamma
        if (lat == 'Cubic'):
            # Unpack the lone value; ``a = constants`` previously bound the
            # 1-tuple itself instead of the scalar.
            a, = constants
            b = c = a
        elif (lat == 'Hexagonal' or lat == 'Tetragonal'):
            a, c = constants
            b = a
        elif (lat == 'Rhobmohedral'):
            a, alpha = constants
            b = c = a
            beta = gamma = alpha
        elif (lat == 'Orthorhombic'):
            a, b, c = constants
            alpha = beta = gamma = np.pi/2
        elif (lat == 'Monoclinic'):
            if (not np.isclose(beta, np.pi/2)):
                a, b, c, alpha, gamma = constants
            else:
                a, b, c, alpha, beta = constants
        else:
            a, b, c, alpha, beta, gamma = constants
        self.__a = a
        self.__b = b
        self.__c = c
        self.__alpha = alpha
        self.__beta = beta
        self.__gamma = gamma
    def get_reciprocal_lattice_constants(self):
        """
        Reciprocal lattice parameters.
        """
        constants = self.__get_all_lattice_constants()
        return crystal.reciprocal(*constants)
    def get_symmetry_operators(self):
        """
        Symmetry operators of active atoms.
        """
        mask = self.__mask
        return self.__op[mask]
    def get_magnetic_symmetry_operators(self):
        """
        Magnetic symmetry operators of active atoms.
        """
        mask = self.__mask
        return self.__mag_op[mask]
    def get_lattice_system(self):
        """
        Lattice system of unit cell.
        """
        return self.__lat
    def get_lattice_volume(self):
        """
        Lattice volume of unit cell.
        """
        constants = self.__get_all_lattice_constants()
        return crystal.volume(*constants)
    def get_reciprocal_lattice_volume(self):
        """
        Reciprocal lattice volume of reciprocal cell.
        """
        # Fixed: previously computed from the direct-lattice constants,
        # duplicating get_lattice_volume.
        constants = self.get_reciprocal_lattice_constants()
        return crystal.volume(*constants)
    def get_metric_tensor(self):
        """
        Unit cell metric tensor.
        """
        constants = self.__get_all_lattice_constants()
        return crystal.metric(*constants)
    def get_reciprocal_metric_tensor(self):
        """
        Reciprocal cell metric tensor.
        """
        # Fixed: previously computed from the direct-lattice constants,
        # duplicating get_metric_tensor.
        constants = self.get_reciprocal_lattice_constants()
        return crystal.metric(*constants)
    def get_fractional_cartesian_transform(self):
        """
        Trasform matrix from fractional to Cartesian coordinates.
        """
        constants = self.__get_all_lattice_constants()
        return crystal.cartesian(*constants)
    def get_miller_cartesian_transform(self):
        """
        Trasform matrix from Miller to Cartesian coordinates.
        """
        # Fixed: the B matrix is built from the reciprocal-lattice
        # constants (as done in factor()), not the direct constants.
        constants = self.get_reciprocal_lattice_constants()
        return crystal.cartesian(*constants)
    def get_cartesian_rotation(self):
        """
        Transform matrix between Cartesian axes of real and reciprocal lattice.
        """
        constants = self.__get_all_lattice_constants()
        return crystal.cartesian_rotation(*constants)
    def get_moment_cartesian_transform(self):
        """
        Transform matrix between crystal and Cartesian coordinates for \
        magnetic moments.
        """
        constants = self.__get_all_lattice_constants()
        return crystal.cartesian_moment(*constants)
    def get_atomic_displacement_cartesian_transform(self):
        """
        Transform matrix between crystal and Cartesian coordinates for atomic \
        displacement parameters.
        """
        constants = self.__get_all_lattice_constants()
        return crystal.cartesian_displacement(*constants)
    def get_space_group_symbol(self):
        """
        Space group symbol.
        """
        return self.__hm
    def get_space_group_number(self):
        """
        Space group number.
        """
        return self.__sg
    def get_laue(self):
        """
        Laue class.
        """
        return self.__laue
    def get_site_symmetries(self):
        """
        Site symmetry operators.
        """
        mask = self.__mask
        return self.__pg[mask]
    def get_wyckoff_special_positions(self):
        """
        Wyckoff special positions of active atoms.
        """
        mask = self.__mask
        return self.__sp_pos[mask]
    def get_site_multiplicities(self):
        """
        Site multiplicites of active atoms.
        Returns
        -------
        1d array
            Multiplicities
        """
        mask = self.__mask
        return self.__mult[mask]
class SuperCell(UnitCell):
    """
    Supercell of ``nu`` x ``nv`` x ``nw`` unit cells.
    Parameters
    ----------
    filename : str
        Name of CIF file.
    nu, nv, nw : int, optional
        Number of unit cells along each crystal axis. Default ``1``.
    tol : float, optional
        Tolerance of unique atom coordinates.
    """
    def __init__(self, filename, nu=1, nv=1, nw=1, tol=1e-2):
        super(SuperCell, self).__init__(filename, tol)
        self.set_super_cell_dimensions(nu, nv, nw)
    def get_super_cell_dimensions(self):
        """
        Supercell dimensions ``nu``, ``nv``, ``nw``.
        """
        return self.__nu, self.__nv, self.__nw
    def set_super_cell_dimensions(self, nu, nv, nw):
        """
        Update supercell dimensions ``nu``, ``nv``, ``nw``.
        """
        self.__nu = nu
        self.__nv = nv
        self.__nw = nw
    def get_super_cell_size(self):
        """
        Number of unit cells in the supercell.
        """
        nu, nv, nw = self.get_super_cell_dimensions()
        return nu*nv*nw
    def get_number_atoms_per_super_cell(self):
        """
        Total number of active atoms in the supercell.
        """
        n_uvw = self.get_super_cell_size()
        n_atm = self.get_number_atoms_per_unit_cell()
        return n_uvw*n_atm
    def get_cartesian_lattice_points(self):
        """
        Cartesian coordinates of the lattice points of every unit cell.
        """
        A = self.get_fractional_cartesian_transform()
        nu, nv, nw = self.get_super_cell_dimensions()
        return space.cell(nu, nv, nw, A)
    def get_super_cell_cartesian_atomic_coordinates(self):
        """
        Cartesian coordinates of all active atoms in the supercell.
        """
        ux, uy, uz = self.get_unit_cell_cartesian_atomic_coordinates()
        ix, iy, iz = self.get_cartesian_lattice_points()
        atm = self.get_unit_cell_atoms()
        return space.real(ux, uy, uz, ix, iy, iz, atm)
import os
import numpy as np
# Absolute path of this module's directory; the CSV data tables loaded
# by the functions below live alongside this file.
directory = os.path.abspath(os.path.dirname(__file__))
def magnetic_form_factor_coefficients_j0():
    """
    Table of magnetic form factors zeroth-order :math:`j_0` coefficients.
    Returns
    -------
    j0 : dict
        Dictionary of magnetic form factors coefficients with magnetic ion
        keys.
    """
    path = directory+'/j0.csv'
    # Structured dtype: ion label followed by seven float coefficients.
    dtype = {'names': ('Ion', 'A', 'a', 'B', 'b', 'C', 'c', 'D'),
             'formats': ('U15',)+(float,)*7}
    ion, *coeffs = np.loadtxt(path, delimiter=',', dtype=dtype,
                              usecols=range(8), skiprows=1, unpack=True)
    return dict(zip(ion, zip(*coeffs)))
def magnetic_form_factor_coefficients_j2():
    """
    Table of magnetic form factors second-order :math:`j_2` coefficients.
    Returns
    -------
    j2 : dict
        Dictionary of magnetic form factors coefficients with magnetic ion
        keys.
    """
    path = directory+'/j2.csv'
    # Structured dtype: ion label followed by seven float coefficients.
    dtype = {'names': ('Ion', 'A', 'a', 'B', 'b', 'C', 'c', 'D'),
             'formats': ('U15',)+(float,)*7}
    ion, *coeffs = np.loadtxt(path, delimiter=',', dtype=dtype,
                              usecols=range(8), skiprows=1, unpack=True)
    return dict(zip(ion, zip(*coeffs)))
def neutron_scattering_length_b():
    """
    Table of neutron scattering lengths :math:`b`.
    Returns
    -------
    b : dict
        Dictionary of neutron scattering lengths with nuclear isotope keys.
    """
    path = directory+'/b.csv'
    # Scattering lengths may be complex (absorbing isotopes).
    dtype = {'names': ('Isotope', 'b'), 'formats': ('U15', complex)}
    isotope, b = np.loadtxt(path, delimiter=',', dtype=dtype,
                            usecols=(0, 1), skiprows=1, unpack=True)
    return dict(zip(isotope, b))
def xray_form_factor_coefficients():
    """
    Table of X-ray form factor :math:`f` coefficients.
    Returns
    -------
    X : dict
        Dictionary of X-ray form factor coefficients with ion keys.
    """
    filename = directory+'/x.csv'
    # Field name fixed: 'a2' was misspelled 'a2,'.  Names only label the
    # structured dtype, so the parsed values are unchanged.
    names = ('Ion', 'a1', 'b1', 'a2', 'b2', 'a3', 'b3', 'a4', 'b4', 'c')
    formats = ('U15', float, float, float, float,
               float, float, float, float, float)
    columns = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
    data = np.loadtxt(filename,
                      delimiter=',',
                      dtype={'names': names, 'formats': formats},
                      usecols=columns,
                      skiprows=1,
                      unpack=True)
    ion, a1, b1, a2, b2, a3, b3, a4, b4, c = data
    vals = [a1, b1, a2, b2, a3, b3, a4, b4, c]
    return dict(zip(ion, zip(*vals)))
def electron_form_factor_coefficients():
    """
    Table of electron form factor :math:`f` coefficients.
    Returns
    -------
    E : dict
        Dictionary of electron form factor coefficients with ion keys.
    """
    filename = directory+'/e.csv'
    # Field name fixed: 'a2' was misspelled 'a2,'.  Names only label the
    # structured dtype, so the parsed values are unchanged.
    names = ('Ion', 'a1', 'b1', 'a2', 'b2',
             'a3', 'b3', 'a4', 'b4', 'a5', 'b5')
    formats = ('U15', float, float, float, float, float,
               float, float, float, float, float)
    columns = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
    data = np.loadtxt(filename,
                      delimiter=',',
                      dtype={'names': names, 'formats': formats},
                      usecols=columns,
                      skiprows=1,
                      unpack=True)
    ion, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5 = data
    vals = [a1, b1, a2, b2, a3, b3, a4, b4, a5, b5]
    return dict(zip(ion, zip(*vals)))
def atomic_numbers():
    """
    Table of atomic numbers.
    Returns
    -------
    Z : dict
        Dictionary of atomic numbers with atomic symbol keys.
    """
    path = directory+'/z.csv'
    dtype = {'names': ('Ion', 'Z'), 'formats': ('U15', int)}
    ion, z = np.loadtxt(path, delimiter=',', dtype=dtype,
                        usecols=(0, 1), skiprows=1, unpack=True)
    # Values are 1-tuples, mirroring the multi-column coefficient tables.
    return {symbol: (number,) for symbol, number in zip(ion, z)}
def space_groups():
    """
    Table of space group numbers.
    Returns
    -------
    Z : dict
        Dictionary of space group numbers with space group symbol keys.
    """
    path = directory+'/groups.csv'
    dtype = {'names': ('Number', 'Name'), 'formats': (int, 'U15')}
    sg_number, sg_name = np.loadtxt(path, delimiter=',', dtype=dtype,
                                    usecols=(0, 1), skiprows=0, unpack=True)
    # Strip quoting and embedded spaces from the Hermann-Mauguin symbols.
    cleaned = [name.replace('\"', '').replace(' ', '') for name in sg_name]
    return dict(zip(cleaned, sg_number))
def element_radii():
    """
    Table of atomic, ionic and van der Waals radii.
    Returns
    -------
    Z : dict
        Dictionary of radii with atomic symbol keys.
    """
    path = directory+'/radii.csv'
    dtype = {'names': ('Element', 'Atomic', 'Ionic', 'van der Waals'),
             'formats': ('U15', float, float, float)}
    element, atm, ion, vdw = np.loadtxt(path, delimiter=',', dtype=dtype,
                                        usecols=(0, 1, 2, 3), skiprows=1,
                                        unpack=True)
    # Values are (atomic, ionic, van der Waals) triples.
    return {el: radii for el, radii in zip(element, zip(atm, ion, vdw))}
def element_colors():
    """
    Table of element colors in red, green, and blue.
    Returns
    -------
    Z : dict
        Dictionary of element colors with atomic symbol keys.
    """
    path = directory+'/colors.csv'
    dtype = {'names': ('Element', 'Red', 'Green', 'Blue'),
             'formats': ('U15', float, float, float)}
    element, red, green, blue = np.loadtxt(path, delimiter=',', dtype=dtype,
                                           usecols=(0, 1, 2, 3), skiprows=1,
                                           unpack=True)
    # Values are (r, g, b) triples.
    return {el: color for el, color in zip(element, zip(red, green, blue))}
# Module-level singletons: each table is parsed from its CSV once at
# import time and shared by all callers.  (Extraction residue previously
# fused onto the final line removed.)
j0 = magnetic_form_factor_coefficients_j0()
j2 = magnetic_form_factor_coefficients_j2()
bc = neutron_scattering_length_b()
X = xray_form_factor_coefficients()
E = electron_form_factor_coefficients()
Z = atomic_numbers()
sg = space_groups()
r = element_radii()
rgb = element_colors()
import numpy as np
import matplotlib
import matplotlib.style as mplstyle
mplstyle.use('fast')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.transforms as mtransforms
from matplotlib import ticker
from matplotlib.ticker import Locator
from matplotlib.patches import Polygon
from itertools import cycle
# Global matplotlib configuration applied at import time: route every
# mathtext font class through sans-serif/STIX faces and normalize the
# title, label, and legend appearance for the interactive plot widgets.
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['mathtext.it'] = 'STIXGeneral:italic'
matplotlib.rcParams['mathtext.bf'] = 'STIXGeneral:italic:bold'
matplotlib.rcParams['mathtext.cal'] = 'sans'
matplotlib.rcParams['mathtext.rm'] = 'sans'
matplotlib.rcParams['mathtext.sf'] = 'sans'
matplotlib.rcParams['mathtext.tt'] = 'monospace'
matplotlib.rcParams['axes.titlesize'] = 'medium'
matplotlib.rcParams['axes.labelsize'] = 'medium'
matplotlib.rcParams['legend.fancybox'] = True
matplotlib.rcParams['legend.loc'] = 'best'
matplotlib.rcParams['legend.fontsize'] = 'medium'
class MinorSymLogLocator(Locator):
    """
    Minor tick locator for symlog-scaled axes: places ``nints`` evenly
    spaced minor intervals inside the linear region around zero and
    ``nints - 1`` inside each logarithmic decade.
    """
    def __init__(self, linthresh, nints=10):
        # linthresh: half-width of the linear region of the symlog scale.
        self.linthresh = linthresh
        self.nintervals = nints
    def __call__(self):
        majorlocs = self.axis.get_majorticklocs()
        if len(majorlocs) == 1:
            # A single major tick gives no interval to subdivide.
            return self.raise_if_exceeds(np.array([]))
        # Spacing of the outermost major intervals, used to extrapolate
        # one extra virtual major tick on each side so minor ticks also
        # appear beyond the first/last visible major tick.
        dmlower = majorlocs[1]-majorlocs[0]
        dmupper = majorlocs[-1]-majorlocs[-2]
        if (majorlocs[0] != 0. and
            ((majorlocs[0] != self.linthresh and dmlower > self.linthresh) or
             (dmlower == self.linthresh and majorlocs[0] < 0))):
            # Logarithmic region below: extend by one decade.
            majorlocs = np.insert(majorlocs, 0, majorlocs[0]*10.)
        else:
            # Linear region: extend by one linear threshold width.
            majorlocs = np.insert(majorlocs, 0, majorlocs[0]-self.linthresh)
        if (majorlocs[-1] != 0. and
            ((np.abs(majorlocs[-1]) != self.linthresh
              and dmupper > self.linthresh) or
             (dmupper == self.linthresh and majorlocs[-1] > 0))):
            majorlocs = np.append(majorlocs, majorlocs[-1]*10.)
        else:
            majorlocs = np.append(majorlocs, majorlocs[-1]+self.linthresh)
        # Subdivide each (extended) major interval.
        minorlocs = []
        for i in range(1, len(majorlocs)):
            majorstep = majorlocs[i]-majorlocs[i-1]
            if abs(majorlocs[i-1]+majorstep/2) < self.linthresh:
                # Interval midpoint lies in the linear region.
                ndivs = self.nintervals
            else:
                ndivs = self.nintervals-1.
            minorstep = majorstep/ndivs
            # Skip the first point, which coincides with the major tick.
            locs = np.arange(majorlocs[i-1], majorlocs[i], minorstep)[1:]
            minorlocs.extend(locs)
        return self.raise_if_exceeds(np.array(minorlocs))
    def tick_values(self, vmin, vmax):
        # Only axis-driven placement (via __call__) is supported.
        raise NotImplementedError('Cannot get tick locations for a '
                                  '%s type.' % type(self))
class Plot():
    """
    Base wrapper around a matplotlib canvas/figure/axes triple used by
    the interactive plotting widgets.
    """
    def __init__(self, canvas):
        self.canvas = canvas
        self.fig = canvas.figure
        self.ax = canvas.figure.add_subplot(111)
        self.ax.minorticks_on()
    def get_aspect(self):
        """Return the current display-aspect / data-aspect ratio of the
        axes (1.0 means data units render square)."""
        width, height = self.ax.get_figure().get_size_inches()
        _, _, w, h = self.ax.get_position().bounds
        xmin, xmax = self.ax.get_xlim()
        ymin, ymax = self.ax.get_ylim()
        # Physical shape of the axes box vs. shape of the data limits.
        disp_ratio = (height*h)/(width*w)
        data_ratio = (ymax-ymin)/(xmax-xmin)
        return disp_ratio/data_ratio
    def save_figure(self, filename):
        """Save the figure to *filename* with a tight bounding box."""
        self.fig.savefig(filename, bbox_inches='tight', transparent=False)
    def clear_canvas(self):
        """Remove the axes and recreate a fresh empty subplot."""
        self.ax.remove()
        self.fig.clear()
        self.ax = self.fig.add_subplot(111)
        self.ax.minorticks_on()
    def draw_canvas(self):
        """Request an asynchronous redraw of the canvas."""
        self.canvas.draw_idle()
    def tight_layout(self, pad=3.24):
        """Apply tight layout with the given padding."""
        self.fig.tight_layout(pad=pad)
    def set_labels(self, title='', xlabel='', ylabel=''):
        """Set title and axis labels."""
        self.ax.set_title(title)
        self.ax.set_xlabel(xlabel)
        self.ax.set_ylabel(ylabel)
    def set_aspect(self, value):
        """Set the axes aspect ratio."""
        self.ax.set_aspect(value)
class Line(Plot):
    """
    Line/errorbar plot with an optional twin y-axis.
    State: ``self.p`` holds the errorbar containers in plot order,
    ``self.hl`` an optional horizontal reference line, ``self.twin_ax``
    the lazily created twin axis.
    """
    def __init__(self, canvas):
        super(Line, self).__init__(canvas)
        self.p = []
        self.hl = None
        self.twin_ax = None
        self.ax.spines['top'].set_visible(False)
        self.ax.spines['right'].set_visible(False)
        # Cycle through the default color sequence across plot_data calls.
        prop_cycle = matplotlib.rcParams['axes.prop_cycle']
        self.colors = cycle(prop_cycle.by_key()['color'])
    def __get_axis(self, twin=False):
        """Return the primary axes, or the twin axes (created on first
        request) when *twin* is True."""
        if not twin:
            return self.ax
        else:
            if self.twin_ax is None:
                self.twin_ax = self.ax.twinx()
            return self.twin_ax
    def set_labels(self, title='', xlabel='', ylabel='', twin_ylabel=''):
        """Set labels on the primary axes and, if present, the twin."""
        super().set_labels(title=title, xlabel=xlabel, ylabel=ylabel)
        if self.twin_ax:
            self.twin_ax.set_ylabel(twin_ylabel)
    def clear_canvas(self):
        """Reset the whole canvas, then restore the spine style."""
        super().clear_canvas()
        self.clear_lines()
        self.ax.spines['top'].set_visible(False)
        self.ax.spines['right'].set_visible(False)
    def clear_lines(self):
        """Remove all plotted lines, the reference line, the twin axis,
        and the legend, and restart the color cycle."""
        if self.p:
            for p in self.p:
                # Each entry is an ErrorbarContainer; lines[0] is its
                # data line.
                p.lines[0].remove()
            self.p = []
        if self.hl:
            self.hl.remove()
            self.hl = None
        if self.twin_ax:
            self.twin_ax.remove()
            self.twin_ax = None
        if self.ax.get_legend():
            self.ax.get_legend().remove()
        self.set_labels()
        prop_cycle = matplotlib.rcParams['axes.prop_cycle']
        self.colors = cycle(prop_cycle.by_key()['color'])
    def set_normalization(self, norm='linear', twin=False):
        """Set the y-scale: 'linear', 'logarithmic', or symlog otherwise."""
        ax = self.__get_axis(twin)
        if (norm.lower() == 'linear'):
            ax.set_yscale('linear')
            ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
            ax.yaxis.set_minor_locator(ticker.AutoMinorLocator())
        elif (norm.lower() == 'logarithmic'):
            ax.set_yscale('log')
            ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
            # ax.yaxis.set_minor_locator(ticker.LogLocator())
        else:
            # Symmetric log with custom minor ticks inside each region.
            thresh, scale = 0.1, 0.9
            ax.set_yscale('symlog', linthresh=thresh, linscale=scale)
            ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
            ax.yaxis.set_minor_locator(MinorSymLogLocator(0.1))
    def set_limits(self, vmin=None, vmax=None, twin=False):
        """Set y-limits (in data units) with a 5% margin applied in the
        axis' transformed (scale) space."""
        xmin, xmax, ymin, ymax = self.ax.axis()
        if vmin is not None: ymin = vmin
        if vmax is not None: ymax = vmax
        margin = 0.05
        ax = self.__get_axis(twin)
        # Pad in scale space so the margin looks uniform on log axes too.
        transform = ax.yaxis.get_transform()
        inverse_trans = transform.inverted()
        ymint, ymaxt = transform.transform([ymin,ymax])
        delta = (ymaxt-ymint)*margin
        ymin, ymax = inverse_trans.transform([ymint-delta,ymaxt+delta])
        ax.set_xlim([xmin,xmax])
        ax.set_ylim([ymin,ymax])
    def reset_view(self, twin=False):
        """Autoscale the chosen axes to the current data."""
        ax = self.__get_axis(twin)
        ax.relim()
        ax.autoscale_view()
    def update_data(self, x, y, i=0):
        """Replace the data of the i-th plotted line."""
        self.p[i].lines[0].set_data(x, y)
    def get_data(self, i=0):
        """Return (x, y) arrays of the i-th plotted line."""
        return self.p[i].lines[0].get_xydata().T
    def plot_data(self, x, y, yerr=None, marker='o', label='', twin=False):
        """Plot a new errorbar series and remember its container."""
        ax = self.__get_axis(twin)
        color = next(self.colors)
        err = ax.errorbar(x, y, yerr=yerr, fmt=marker, label=label,
                          color=color, ecolor=color, clip_on=False)
        self.p.append(err)
    def show_legend(self):
        """Show a legend for all series that carry a non-empty label."""
        handles = [p for p in self.p if p.get_label() != '']
        labels = [p.get_label() for p in handles]
        self.ax.legend(handles, labels)
    def draw_horizontal(self):
        """Draw a horizontal reference line at y=0."""
        self.hl = self.ax.axhline(y=0, xmin=0, xmax=1, color='k', \
                                  linestyle='-', linewidth=0.8)
    def use_scientific(self, twin=False):
        """Switch both axes of the chosen axes to scientific notation."""
        ax = self.__get_axis(twin)
        ax.ticklabel_format(style='sci', scilimits=(0,0), axis='x')
        ax.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
class HeatMap(Plot):
    """2d intensity map drawn with ``imshow``, supporting a shear
    transform for non-orthogonal axes and a configurable colorbar."""
    def __init__(self, canvas):
        super(HeatMap, self).__init__(canvas)
        self.im = None
        self.norm = None
        self.__color_limits()
    def __matrix_transform(self, matrix):
        # Normalize so matrix[1,1] == 1 and matrix[0,0] == 1, returning
        # the row-0 factor that was scaled out (used as axes aspect).
        # NOTE(review): assumes matrix[0,0] and matrix[1,1] are nonzero.
        matrix /= matrix[1,1]
        scale = 1/matrix[0,0]
        matrix[0,:] *= scale
        return matrix, scale
    def __extents(self, min_x, min_y, max_x, max_y, size_x, size_y):
        # Convert pixel-center limits to pixel-edge extents for imshow.
        dx = 0 if size_x <= 1 else (max_x-min_x)/(size_x-1)
        dy = 0 if size_y <= 1 else (max_y-min_y)/(size_y-1)
        return [min_x-dx/2, max_x+dx/2, min_y-dy/2, max_y+dy/2]
    def __reverse_extents(self):
        # Pad the image extents by half a pixel on each side.
        # NOTE(review): get_size() returns (rows, cols); confirm the
        # size_x, size_y assignment order is intended.
        extents = self.im.get_extent()
        size_x, size_y = self.im.get_size()
        range_x = extents[1]-extents[0]
        range_y = extents[3]-extents[2]
        dx = 0 if size_x <= 1 else range_x/(size_x-1)
        dy = 0 if size_y <= 1 else range_y/(size_y-1)
        min_x = extents[0]-dx/2
        max_x = extents[1]+dx/2
        min_y = extents[2]-dy/2
        max_y = extents[3]+dy/2
        return [min_x, max_x, min_y, max_y]
    def __transform_extents(self, matrix, extents):
        # Map the (min, max) corner vectors through the 2x2 matrix.
        ext_min = np.dot(matrix, extents[0::2])
        ext_max = np.dot(matrix, extents[1::2])
        return ext_min, ext_max
    def __offset(self, matrix, minimum):
        # Horizontal shift keeping the sheared image anchored on screen.
        return -np.dot(matrix, [0,minimum])[0]
    def __color_limits(self, category='sequential'):
        # Choose the colormap family (case-insensitive).
        if (category.lower() == 'sequential'):
            self.cmap = plt.cm.viridis
        elif (category.lower() == 'diverging'):
            self.cmap = plt.cm.bwr
        else:
            self.cmap = plt.cm.binary
    def set_normalization(self, vmin, vmax, norm='linear'):
        """Apply 'linear', 'logarithmic' or symlog colour normalization
        and rebuild any existing colorbar to match."""
        # degenerate limits -> fall back to a wide default range
        if np.isclose(vmin, vmax): vmin, vmax = 1e-3, 1e+3
        if (norm.lower() == 'linear'):
            self.norm = colors.Normalize(vmin=vmin, vmax=vmax)
        elif (norm.lower() == 'logarithmic'):
            self.norm = colors.LogNorm(vmin=vmin, vmax=vmax)
        else:
            self.norm = colors.SymLogNorm(linthresh=0.1, linscale=0.9, base=10,
                                          vmin=vmin, vmax=vmax)
        self.im.set_norm(self.norm)
        if self.im.colorbar is not None:
            orientation = self.im.colorbar.orientation
            self.remove_colorbar()
            self.create_colorbar(orientation, norm)
            if (norm.lower() == 'symlog'):
                self.cb.locator = ticker.SymmetricalLogLocator(linthresh=0.1,
                                                               base=10)
                self.cb.update_ticks()
    def update_normalization(self, norm='linear'):
        """Re-apply the normalization, keeping the current colour limits."""
        if self.im is not None:
            vmin, vmax = self.im.get_clim()
            self.set_normalization(vmin, vmax, norm)
    def reformat_colorbar(self, formatstr='{:.1f}'):
        """Relabel colorbar major ticks in data units and rebuild the
        minor ticks for the current (possibly nonlinear) normalization.

        NOTE(review): duplicated verbatim in Scatter.reformat_colorbar —
        a shared helper would keep the two copies from drifting apart.
        """
        if (self.cb.orientation == 'vertical'):
            ticks = self.cb.ax.get_yticks()
        else:
            ticks = self.cb.ax.get_xticks()
        vmin, vmax = self.cb.vmin, self.cb.vmax
        # map tick positions from normalized [0,1] back to data values
        inv = self.norm.inverse
        tscale = inv((ticks-vmin)/(vmax-vmin))
        labels = [formatstr.format(t) for t in tscale]
        norm = self.norm
        minorticks = []
        # minor ticks below the first major tick
        if (vmin < ticks[0]):
            vn = 11 if vmin >= -0.1 and vmin <= 0.1 else 10
            tmin = inv((np.array([ticks[0]])-vmin)/(vmax-vmin))[0]
            if (vmin >= -0.1 and vmin <= 0.1):
                tn = int(vmin/-0.01)
                nmin = -0.1 if vmin < 0.0 else 0.0
            else:
                tn = int(vmin/tmin)
                nmin = tmin*10
            values = (vmax-vmin)*norm(np.linspace(nmin, tmin, vn))[-tn:-1]+vmin
            minorticks += values.tolist()
        # minor ticks between consecutive major ticks
        for i in range(len(ticks)-1):
            tmin = inv((np.array([ticks[i]])-vmin)/(vmax-vmin))[0]
            tmax = inv((np.array([ticks[i+1]])-vmin)/(vmax-vmin))[0]
            tn = 11 if tmin >= -0.1 and tmax <= 0.1 else 10
            values = (vmax-vmin)*norm(np.linspace(tmin, tmax, tn))[1:-1]+vmin
            minorticks += values.tolist()
        # minor ticks above the last major tick
        if (vmax > ticks[-1]):
            vn = 11 if vmax >= -0.1 and vmax <= 0.1 else 10
            tmax = inv((np.array([ticks[-1]])-vmin)/(vmax-vmin))[0]
            if (vmax >= -0.1 and vmax <= 0.1):
                tn = int(vmax/0.01)
                nmax = 0.1 if vmax > 0.0 else 0.0
            else:
                tn = int(vmax/tmax)
                nmax = tmax*10
            values = (vmax-vmin)*norm(np.linspace(tmax, nmax, vn))[1:tn]+vmin
            minorticks += values.tolist()
        if (self.cb.orientation == 'vertical'):
            self.cb.ax.set_yticklabels(labels)
            self.cb.ax.yaxis.set_ticks(minorticks, minor=True)
        else:
            self.cb.ax.set_xticklabels(labels)
            self.cb.ax.xaxis.set_ticks(minorticks, minor=True)
    def update_colormap(self, category='sequential'):
        """Switch the colormap family on the current image."""
        self.__color_limits(category)
        if self.im is not None:
            self.im.set_cmap(self.cmap)
    def create_colorbar(self, orientation='vertical', norm='linear'):
        """Attach a fresh colorbar to the image."""
        self.remove_colorbar()
        pad = 0.05 if orientation.lower() == 'vertical' else 0.2
        self.cb = self.fig.colorbar(self.im, ax=self.ax,
                                    orientation=orientation, pad=pad)
        self.cb.ax.minorticks_on()
    def remove_colorbar(self):
        """Remove the colorbar if one is attached."""
        if self.im.colorbar is not None:
            self.im.colorbar.remove()
    def set_colorbar_label(self, label):
        """Set the colorbar axis label if a colorbar is attached."""
        if self.im.colorbar is not None:
            self.im.colorbar.set_label(label)
    def reset_color_limits(self):
        """Rescale colour limits to the image data."""
        self.im.autoscale()
    def update_data(self, data, vmin, vmax):
        """Replace the displayed array (transposed) and colour limits."""
        if self.im is not None:
            self.im.set_array(data.T)
            self.im.set_clim(vmin=vmin, vmax=vmax)
    def get_data(self):
        """Return the displayed array, transposed back to input order."""
        if self.im is not None:
            return self.im.get_array().T
    def plot_data(self, data, min_x, min_y, max_x, max_y, matrix=np.eye(2)):
        """Draw ``data`` (first axis along x after transposition) over
        the given pixel-center limits, then shear by ``matrix``."""
        size_x, size_y = data.shape[1], data.shape[0]
        extents = self.__extents(min_x, min_y, max_x, max_y, size_x, size_y)
        self.im = self.ax.imshow(data.T, interpolation='nearest',
                                 origin='lower', extent=extents)
        self.transform_axes(matrix)
        self.ax.minorticks_on()
    def transform_axes(self, matrix):
        """Shear the image by a 2x2 matrix (non-orthogonal cell axes)
        and mask the exposed triangular regions with white polygons."""
        extents = self.__reverse_extents()
        transformation, scale = self.__matrix_transform(matrix)
        ext_min, ext_max = self.__transform_extents(transformation, extents)
        min_y = extents[2]
        offset = self.__offset(transformation, min_y)
        self.transformation = transformation
        self.offset = offset
        trans = mtransforms.Affine2D()
        trans_matrix = np.eye(3)
        trans_matrix[0:2,0:2] = transformation
        trans.set_matrix(trans_matrix)
        shift = mtransforms.Affine2D().translate(offset,0)
        self.ax.set_aspect(scale)
        trans_data = trans+shift+self.ax.transData
        self.im.set_transform(trans_data)
        self.ax.set_xlim(ext_min[0]+offset,ext_max[0]+offset)
        self.ax.set_ylim(ext_min[1],ext_max[1])
        # drop previously added masking patches before re-adding them
        for p in reversed(self.ax.patches):
            p.remove()
        x = [extents[1],ext_max[0]+offset,ext_max[0]+offset]
        y = [ext_min[1],ext_min[1],ext_max[1]]
        p = Polygon(np.column_stack((x,y)), fc='w', ec='w')
        self.ax.add_patch(p)
        x = [extents[0],extents[0],extents[0]+offset*2]
        y = [ext_min[1],ext_max[1],ext_max[1]]
        p = Polygon(np.column_stack((x,y)), fc='w', ec='w')
        self.ax.add_patch(p)
    def draw_line(self, xlim=None, ylim=None):
        """Draw a white line across the current (or given) limits."""
        if xlim is None: xlim = self.ax.get_xlim()
        if ylim is None: ylim = self.ax.get_ylim()
        self.ax.plot(xlim, ylim, color='w')
    def add_text(self, x, y, s, color='w'):
        """Place centred text at data coordinates (x, y)."""
        self.ax.text(x, y, s, color=color, ha='center', va='center')
class Scatter(Plot):
    """2d scatter plot coloured by a third variable, with colorbar
    handling mirroring HeatMap.

    Fixes relative to the previous revision: the colormap category is
    now matched case-insensitively (consistent with
    HeatMap.__color_limits, where e.g. 'Sequential' previously fell
    through to the binary map), ``self.norm`` is initialized so
    reformat_colorbar cannot hit an undefined attribute, and corrupted
    trailing text fused to the final line was removed.
    """
    def __init__(self, canvas):
        super(Scatter, self).__init__(canvas)
        self.s = None
        # parity with HeatMap.__init__: norm exists before it is set
        self.norm = None
        self.__color_limits()
    def __color_limits(self, category='sequential'):
        # Choose the colormap family (case-insensitive).
        if (category.lower() == 'sequential'):
            self.cmap = plt.cm.viridis
        elif (category.lower() == 'diverging'):
            self.cmap = plt.cm.bwr
        else:
            self.cmap = plt.cm.binary
    def set_normalization(self, vmin, vmax, norm='linear'):
        """Apply 'linear', 'logarithmic' or symlog colour normalization
        and rebuild any existing colorbar to match."""
        # degenerate limits -> fall back to a wide default range
        if np.isclose(vmin, vmax): vmin, vmax = 1e-3, 1e+3
        if (norm.lower() == 'linear'):
            self.norm = colors.Normalize(vmin=vmin, vmax=vmax)
        elif (norm.lower() == 'logarithmic'):
            self.norm = colors.LogNorm(vmin=vmin, vmax=vmax)
        else:
            self.norm = colors.SymLogNorm(linthresh=0.1, linscale=0.9, base=10,
                                          vmin=vmin, vmax=vmax)
        self.s.set_norm(self.norm)
        if self.s.colorbar is not None:
            orientation = self.s.colorbar.orientation
            self.remove_colorbar()
            self.create_colorbar(orientation, norm)
            if (norm.lower() == 'symlog'):
                self.cb.locator = ticker.SymmetricalLogLocator(linthresh=0.1,
                                                               base=10)
                self.cb.update_ticks()
    def update_normalization(self, norm='linear'):
        """Re-apply the normalization, keeping the current colour limits."""
        if self.s is not None:
            vmin, vmax = self.s.get_clim()
            self.set_normalization(vmin, vmax, norm)
    def update_colormap(self, category='sequential'):
        """Switch the colormap family on the current scatter collection."""
        self.__color_limits(category)
        if self.s is not None:
            self.s.set_cmap(self.cmap)
    def create_colorbar(self, orientation='vertical', norm='linear'):
        """Attach a fresh colorbar to the scatter collection."""
        if self.s is not None:
            self.remove_colorbar()
            pad = 0.05 if orientation.lower() == 'vertical' else 0.2
            self.cb = self.fig.colorbar(self.s, ax=self.ax,
                                        orientation=orientation, pad=pad)
            self.cb.ax.minorticks_on()
    def remove_colorbar(self):
        """Remove the colorbar if one is attached."""
        if self.s.colorbar is not None:
            self.s.colorbar.remove()
    def set_colorbar_label(self, label):
        """Set the colorbar axis label if a colorbar is attached."""
        if self.s.colorbar is not None:
            self.s.colorbar.set_label(label)
    def reset_colorbar_limits(self):
        """Rescale colour limits to the scatter data."""
        if self.s is not None:
            self.s.autoscale()
    def update_data(self, c, vmin, vmax):
        """Replace the colour values and colour limits."""
        if self.s is not None:
            self.s.set_array(c)
            self.s.set_clim(vmin=vmin, vmax=vmax)
    def get_data(self):
        """Return the current colour values."""
        if self.s is not None:
            return self.s.get_array()
    def plot_data(self, x, y, c):
        """Scatter (x, y) points coloured by c with the current colormap."""
        self.s = self.ax.scatter(x, y, c=c, cmap=self.cmap)
    def reformat_colorbar(self, formatstr='{:.1f}'):
        """Relabel colorbar major ticks in data units and rebuild the
        minor ticks for the current (possibly nonlinear) normalization.

        NOTE(review): duplicated verbatim from HeatMap.reformat_colorbar —
        a shared helper would keep the two copies from drifting apart.
        """
        if (self.cb.orientation == 'vertical'):
            ticks = self.cb.ax.get_yticks()
        else:
            ticks = self.cb.ax.get_xticks()
        vmin, vmax = self.cb.vmin, self.cb.vmax
        # map tick positions from normalized [0,1] back to data values
        inv = self.norm.inverse
        tscale = inv((ticks-vmin)/(vmax-vmin))
        labels = [formatstr.format(t) for t in tscale]
        norm = self.norm
        minorticks = []
        # minor ticks below the first major tick
        if (vmin < ticks[0]):
            vn = 11 if vmin >= -0.1 and vmin <= 0.1 else 10
            tmin = inv((np.array([ticks[0]])-vmin)/(vmax-vmin))[0]
            if (vmin >= -0.1 and vmin <= 0.1):
                tn = int(vmin/-0.01)
                nmin = -0.1 if vmin < 0.0 else 0.0
            else:
                tn = int(vmin/tmin)
                nmin = tmin*10
            values = (vmax-vmin)*norm(np.linspace(nmin, tmin, vn))[-tn:-1]+vmin
            minorticks += values.tolist()
        # minor ticks between consecutive major ticks
        for i in range(len(ticks)-1):
            tmin = inv((np.array([ticks[i]])-vmin)/(vmax-vmin))[0]
            tmax = inv((np.array([ticks[i+1]])-vmin)/(vmax-vmin))[0]
            tn = 11 if tmin >= -0.1 and tmax <= 0.1 else 10
            values = (vmax-vmin)*norm(np.linspace(tmin, tmax, tn))[1:-1]+vmin
            minorticks += values.tolist()
        # minor ticks above the last major tick
        if (vmax > ticks[-1]):
            vn = 11 if vmax >= -0.1 and vmax <= 0.1 else 10
            tmax = inv((np.array([ticks[-1]])-vmin)/(vmax-vmin))[0]
            if (vmax >= -0.1 and vmax <= 0.1):
                tn = int(vmax/0.01)
                nmax = 0.1 if vmax > 0.0 else 0.0
            else:
                tn = int(vmax/tmax)
                nmax = tmax*10
            values = (vmax-vmin)*norm(np.linspace(tmax, nmax, vn))[1:tn]+vmin
            minorticks += values.tolist()
        if (self.cb.orientation == 'vertical'):
            self.cb.ax.set_yticklabels(labels)
            self.cb.ax.yaxis.set_ticks(minorticks, minor=True)
        else:
            self.cb.ax.set_xticklabels(labels)
            self.cb.ax.xaxis.set_ticks(minorticks, minor=True)
import mayavi.mlab as mlab
import numpy as np
from scipy.stats import chi2
from scipy.spatial.transform.rotation import Rotation
from mayavi.sources.api import ParametricSurface
from mayavi.modules.api import Surface
class CrystalStructure:
    """Mayavi scene of a crystal structure: unit-cell edges, basis
    vectors, displacement ellipsoids, atomic radii and magnetic moments.

    Several methods read ``self.A`` / ``self.a`` / ``self.b`` /
    ``self.c`` which are assumed to be assigned externally before use —
    TODO confirm against the callers.
    """
    def __init__(self):
        self.fig = mlab.figure(fgcolor=(0,0,0), bgcolor=(1,1,1))
        self.engine = mlab.get_engine()
        self.engine.start()
        self.fig.scene.parallel_projection = True
    def __probability_ellipsoid(self, Uxx, Uyy, Uzz, Uyz, Uxz, Uxy, p=0.99):
        """Principal radii and ZXY Euler angles (degrees) of the
        probability-p displacement ellipsoid for the ADP matrix U.

        The surface enclosing probability p of a trivariate Gaussian
        satisfies x^T U^-1 x = chi2.ppf(p, 3).  The previous revision
        used chi2.ppf(1-p, 3), which for the default p=0.99 produced
        the 1% quantile (r_eff ~ 0.115 instead of ~ 11.34) and hence
        near-invisible ellipsoids.
        """
        U = np.array([[Uxx,Uxy,Uxz],
                      [Uxy,Uyy,Uyz],
                      [Uxz,Uyz,Uzz]])
        w, v = np.linalg.eig(np.linalg.inv(U))
        r_eff = chi2.ppf(p, 3)
        radii = np.sqrt(r_eff/w)
        rot = Rotation.from_matrix(v)
        euler_angles = rot.as_euler('ZXY', degrees=True)
        return radii, euler_angles
    def save_figure(self, filename):
        """Save the current scene to an image file."""
        mlab.savefig(filename, figure=self.fig)
    def view_direction(self, u, v, w):
        """Orient the camera along the crystallographic direction [uvw]."""
        A = self.A
        x, y, z = np.dot(A, [u,v,w])
        r = np.sqrt(x**2+y**2+z**2)
        theta = np.rad2deg(np.arccos(z/r))
        phi = np.rad2deg(np.arctan2(y,x))
        mlab.view(azimuth=phi, elevation=theta, distance=None, focalpoint=None,
                  roll=None, reset_roll=True, figure=self.fig)
    def draw_basis_vectors(self):
        """Draw labelled a, b, c basis-vector arrows just outside the cell.

        NOTE(review): av/bv use offset/scale*a (resp. b) while cv uses
        offset*scale/c — for non-cubic cells this gives arrows of
        different lengths; confirm whether offset*scale/a was intended.
        """
        A = self.A
        a, b, c = self.a, self.b, self.c
        scale = np.min([a,b,c])
        offset = 0.5
        ar = np.dot(A, [1.0+offset*scale/a,0,0])
        br = np.dot(A, [0,1.0+offset*scale/b,0])
        cr = np.dot(A, [0,0,1.0+offset*scale/c])
        av = np.dot(A, [offset/scale*a,0,0])
        bv = np.dot(A, [0,offset/scale*b,0])
        cv = np.dot(A, [0,0,offset*scale/c])
        ca = mlab.quiver3d(ar[0], ar[1], ar[2], av[0], av[1], av[2],
                           color=(1,0,0), resolution=60, scale_factor=1,
                           mode='arrow', figure=self.fig)
        cb = mlab.quiver3d(br[0], br[1], br[2], bv[0], bv[1], bv[2],
                           color=(0,1,0), resolution=60, scale_factor=1,
                           mode='arrow', figure=self.fig)
        cc = mlab.quiver3d(cr[0], cr[1], cr[2], cv[0], cv[1], cv[2],
                           color=(0,0,1), resolution=60, scale_factor=1,
                           mode='arrow', figure=self.fig)
        ca.glyph.glyph_source.glyph_position = 'tail'
        cb.glyph.glyph_source.glyph_position = 'tail'
        cc.glyph.glyph_source.glyph_position = 'tail'
        mlab.text3d(ar[0], ar[1], ar[2], 'a', figure=self.fig)
        mlab.text3d(br[0], br[1], br[2], 'b', figure=self.fig)
        mlab.text3d(cr[0], cr[1], cr[2], 'c', figure=self.fig)
    def draw_cell_edges(self, ucx, ucy, ucz):
        """Draw the twelve unit-cell edges through the eight corners."""
        connections = ((0,1),(0,2),(0,4),(1,3),(1,5),(2,3),
                       (2,6),(3,7),(4,5),(4,6),(5,7),(6,7))
        pts = mlab.points3d(ucx, ucy, ucz, np.zeros(8), figure=self.fig)
        pts.mlab_source.dataset.lines = np.array(connections)
        tube = mlab.pipeline.tube(pts, tube_radius=0.05)
        tube.filter.radius_factor = 0.
        mlab.pipeline.surface(tube, color=(0.0,0.0,0.0), figure=self.fig)
    def atomic_displacement_ellipsoids(self, ux, uy, uz,
                                       Uxx, Uyy, Uzz, Uyz, Uxz, Uxy,
                                       colors, p=0.99):
        """Render a probability-p ellipsoid at each atomic position."""
        # suspend rendering while many actors are added, for speed
        self.fig.scene.disable_render = True
        n_atm = ux.shape[0]
        for i in range(n_atm):
            s, a = self.__probability_ellipsoid(Uxx[i], Uyy[i], Uzz[i],
                                                Uyz[i], Uxz[i], Uxy[i], p)
            source = ParametricSurface()
            source.function = 'ellipsoid'
            self.engine.add_source(source)
            surface = Surface()
            source.add_module(surface)
            actor = surface.actor
            actor.property.opacity = 1.0
            actor.property.color = colors[i]
            actor.mapper.scalar_visibility = False
            actor.property.backface_culling = True
            actor.property.diffuse = 1.0
            actor.property.specular = 0.0
            actor.actor.origin = np.array([0,0,0])
            actor.actor.position = np.array([ux[i],uy[i],uz[i]])
            actor.actor.scale = np.array([s[0],s[1],s[2]])
            actor.actor.orientation = np.array([a[0],a[1],a[2]]) #ZXY
            actor.enable_texture = True
            actor.property.representation = 'surface'
        self.fig.scene.disable_render = False
    def atomic_radii(self, ux, uy, uz, radii, colors):
        """Render a sphere of the given radius and colour per atom."""
        n_atm = ux.shape[0]
        points = []
        for i in range(n_atm):
            p = mlab.points3d(ux[i], uy[i], uz[i], radii[i], color=colors[i],
                              resolution=60, scale_factor=1, figure=self.fig)
            points.append(p)
        return points
    def magnetic_vectors(self, ux, uy, uz, sx, sy, sz):
        """Render each magnetic moment as a double-headed red arrow
        (the reverse arrow has its tip suppressed)."""
        n_atm = ux.shape[0]
        for i in range(n_atm):
            v = mlab.quiver3d(ux[i], uy[i], uz[i], sx[i], sy[i], sz[i],
                              color=(1,0,0), line_width=60, resolution=60,
                              scale_factor=1, mode='arrow', figure=self.fig)
            v.glyph.glyph_source.glyph_position = 'tail'
            t = mlab.quiver3d(ux[i], uy[i], uz[i], -sx[i], -sy[i], -sz[i],
                              color=(1,0,0), line_width=60, resolution=60,
                              scale_factor=1, mode='arrow', figure=self.fig)
            t.glyph.glyph_source.glyph_position = 'tail'
            t.glyph.glyph_source.glyph_source.tip_radius = 0
import re
import os
import numpy as np
from disorder.diffuse import experimental, space, filters, scattering
from disorder.diffuse import monocrystal, powder
from disorder.diffuse import magnetic, occupational, displacive, refinement
from disorder.material import crystal, symmetry, tables
import disorder.correlation.functions as correlations
from shutil import copyfile
class Model:
    """Model layer of the application: stateless helpers for crystal
    I/O, symmetry, reciprocal-space bookkeeping and refinement
    initialization used by the presenter."""
    def __init__(self):
        pass
def supercell_size(self, n_atm, nu, nv, nw):
return n_atm*nu*nv*nw
def unitcell_size(self, nu, nv, nw):
return nu*nv*nw
def ion_symbols(self, keys):
return np.array([re.sub(r'[\d.+-]+$', '', key) for key in keys])
def iso_symbols(self, keys):
return np.array([re.sub(r'^\d+\s*', '', key) for key in keys])
def remove_symbols(self, keys):
return np.array([re.sub(r'[a-zA-Z]', '', key) for key in keys])
def sort_keys(self, col0, col1, keys):
keys = np.array([key for key in keys])
sort = np.lexsort(np.array((col0, col1)))
return keys[sort]
def get_isotope(self, element, nucleus):
nucleus[nucleus == '-'] = ''
return np.array([pre+atm for atm, pre in zip(element, nucleus)])
def get_ion(self, element, charge):
charge[charge == '-'] = ''
return np.array([atm+app for atm, app in zip(element, charge)])
    def get_neutron_scattering_length_keys(self):
        """Isotope keys from the neutron scattering-length table,
        sorted by element symbol then mass number."""
        bc_keys = tables.bc.keys()
        bc_atm = self.iso_symbols(bc_keys)
        bc_nuc = self.remove_symbols(bc_keys)
        return self.sort_keys(bc_nuc,bc_atm,bc_keys)
    def get_xray_form_factor_keys(self):
        """Ion keys from the x-ray form-factor table, sorted by element
        symbol then charge."""
        X_keys = tables.X.keys()
        X_atm = self.ion_symbols(X_keys)
        X_ion = self.remove_symbols(X_keys)
        return self.sort_keys(X_ion,X_atm,X_keys)
    def get_magnetic_form_factor_keys(self):
        """Ion keys from the magnetic form-factor <j0> table, sorted by
        element symbol then charge."""
        j0_keys = tables.j0.keys()
        j0_atm = self.ion_symbols(j0_keys)
        j0_ion = self.remove_symbols(j0_keys)
        return self.sort_keys(j0_ion,j0_atm,j0_keys)
    def load_unit_cell(self, folder, filename):
        """Load the unit-cell contents from a CIF file."""
        return crystal.unitcell(folder=folder,
                                filename=filename,
                                tol=1e-2)
    def load_space_group(self, folder, filename):
        """Load the space-group information from a CIF file."""
        return crystal.group(folder=folder, filename=filename)
    def load_lattice_parameters(self, folder, filename):
        """Load the lattice parameters from a CIF file."""
        return crystal.parameters(folder=folder, filename=filename)
    def reciprocal_lattice_parameters(self, a, b, c, alpha, beta, gamma):
        """Reciprocal lattice parameters from the direct ones."""
        return crystal.reciprocal(a, b, c, alpha, beta, gamma)
    def find_laue(self, folder, filename):
        """Determine the Laue class of a CIF file."""
        return crystal.laue(folder, filename)
    def find_lattice(self, a, b, c, alpha, beta, gamma):
        """Determine the lattice system from the lattice parameters."""
        return crystal.lattice(a, b, c, alpha, beta, gamma)
    def crystal_matrices(self, a, b, c, alpha, beta, gamma):
        """Direct-space transformation matrices: A (fractional to
        Cartesian), B (reciprocal), R (rotation), C (moment) and
        D (displacement)."""
        constants = a, b, c, alpha, beta, gamma
        inv_constants = crystal.reciprocal(*constants)
        # NOTE(review): the unpacked reciprocal constants are unused;
        # only inv_constants itself is needed below.
        a_, b_, c_, alpha_, beta_, gamma_ = inv_constants
        A = crystal.cartesian(*constants)
        B = crystal.cartesian(*inv_constants)
        R = crystal.cartesian_rotation(*constants)
        C = crystal.cartesian_moment(*constants)
        D = crystal.cartesian_displacement(*constants)
        return A, B, R, C, D
    def crystal_reciprocal_matrices(self, a, b, c, alpha, beta, gamma):
        """Reciprocal-space counterparts of crystal_matrices."""
        constants = a, b, c, alpha, beta, gamma
        inv_constants = crystal.reciprocal(*constants)
        # NOTE(review): unpacked constants unused here as well.
        a_, b_, c_, alpha_, beta_, gamma_ = inv_constants
        A_ = crystal.cartesian(*inv_constants)
        B_ = crystal.cartesian(*constants)
        R_ = crystal.cartesian_rotation(*inv_constants)
        C_ = crystal.cartesian_moment(*inv_constants)
        D_ = crystal.cartesian_displacement(*inv_constants)
        return A_, B_, R_, C_, D_
def anisotropic_parameters(self, displacement, D):
if (len(displacement.shape) != 2):
displacement = displacement.reshape(displacement.size, 1)
if (displacement.shape[1] == 6):
U11, U22, U33, U23, U13, U12 = np.round(displacement.T, 4)
else:
Uiso = np.round(displacement.flatten(), 4)
uiso = np.dot(np.linalg.inv(D), np.linalg.inv(D.T))
U11, U22, U33 = Uiso*uiso[0,0], Uiso*uiso[1,1], Uiso*uiso[2,2]
U23, U13, U12 = Uiso*uiso[1,2], Uiso*uiso[0,2], Uiso*uiso[0,1]
return U11, U22, U33, U23, U13, U12
def atomic_displacement_parameters(self, U11, U22, U33, U23, U13, U12, D):
U = np.array([[U11,U12,U13], [U12,U22,U23], [U13,U23,U33]])
n = np.size(U11)
U = U.reshape(3,3,n)
Uiso, U1, U2, U3 = [], [], [], []
for i in range(n):
Up, _ = np.linalg.eig(np.dot(np.dot(D, U[...,i]), D.T))
Up.sort()
U1.append(Up[0].real)
U2.append(Up[1].real)
U3.append(Up[2].real)
Uiso.append(np.mean(Up).real)
return np.array(Uiso), np.array(U1), np.array(U2), np.array(U3)
    def decompose_adps(self, U11, U22, U33, U23, U13, U12, D):
        """Cholesky factors L of each Cartesian ADP matrix D.U.D^T.

        NOTE(review): atoms whose U matrix is not positive definite are
        silently skipped, so the returned arrays may be shorter than the
        input — confirm callers tolerate this.
        """
        U = np.array([[U11,U12,U13], [U12,U22,U23], [U13,U23,U33]])
        n = np.size(U11)
        U = U.reshape(3,3,n)
        Lxx, Lyy, Lzz, Lyz, Lxz, Lxy = [], [], [], [], [], []
        for i in range(n):
            if np.all(np.linalg.eigvals(U[...,i]) > 0):
                L = np.linalg.cholesky(np.dot(np.dot(D, U[...,i]), D.T))
                Lxx.append(L[0,0])
                Lyy.append(L[1,1])
                Lzz.append(L[2,2])
                Lyz.append(L[1,2])
                Lxz.append(L[0,2])
                Lxy.append(L[0,1])
        return np.array(Lxx), np.array(Lyy), np.array(Lzz), \
               np.array(Lyz), np.array(Lxz), np.array(Lxy)
def transform_adps(self, U11, U22, U33, U23, U13, U12, D):
U = np.array([[U11,U12,U13], [U12,U22,U23], [U13,U23,U33]])
n = np.size(U11)
U = U.reshape(3,3,n)
Uxx, Uyy, Uzz, Uyz, Uxz, Uxy = [], [], [], [], [], []
for i in range(n):
Up = np.dot(np.dot(D, U[...,i]), D.T)
Uxx.append(Up[0,0])
Uyy.append(Up[1,1])
Uzz.append(Up[2,2])
Uyz.append(Up[1,2])
Uxz.append(Up[0,2])
Uxy.append(Up[0,1])
return np.array(Uxx), np.array(Uyy), np.array(Uzz), \
np.array(Uyz), np.array(Uxz), np.array(Uxy)
    def magnetic_moments(self, mu1, mu2, mu3, C):
        """Magnitude of each magnetic moment after transforming to
        Cartesian coordinates with C."""
        M = np.array([mu1,mu2,mu3])
        n = np.size(mu1)
        M = M.reshape(3,n)
        mu = []
        for i in range(n):
            mu.append(np.linalg.norm(np.dot(C, M[:,i])))
        return np.array(mu)
    def transform_moments(self, mu1, mu2, mu3, C):
        """Cartesian components of each magnetic moment via C."""
        M = np.array([mu1,mu2,mu3])
        n = np.size(mu1)
        M = M.reshape(3,n)
        mux, muy, muz = [], [], []
        for i in range(n):
            Mp = np.dot(C, M[:,i])
            mux.append(Mp[0])
            muy.append(Mp[1])
            muz.append(Mp[2])
        return np.array(mux), np.array(muy), np.array(muz)
    def magnetic_symmetry(self, operator, moment):
        """Apply a magnetic symmetry operator to a moment vector."""
        return symmetry.evaluate_mag(operator, moment)
    def symmetry(self, operator, coordinate):
        """Apply a symmetry operator, wrapping coordinates into [0,1]."""
        coord = symmetry.evaluate(operator, coordinate)
        # c+(c<0)-(c>1) shifts out-of-range values back by one period
        return [c+(c < 0)-(c > 1) for c in coord]
    def reverse_symmetry(self, operator, coordinate):
        """Apply the inverse symmetry operator, wrapped into [0,1]."""
        rev_operator = symmetry.reverse(operator)
        coord = symmetry.evaluate(rev_operator, coordinate)
        return [c+(c < 0)-(c > 1) for c in coord]
    def save_crystal(self, filename, fname):
        """Copy the crystal file to fname unless source and target match."""
        if (filename != fname):
            copyfile(filename, fname)
    def save_supercell(self, fname, atm, occ, disp, mom,
                       u, v, w, nu, nv, nw, folder, filename):
        """Write an ideal supercell file via crystal.supercell."""
        crystal.supercell(atm, occ, disp, mom, u, v, w, nu, nv, nw,
                          fname, folder=folder, filename=filename)
    def save_disorder(self, fname, Sx, Sy, Sz, delta, Ux, Uy, Uz, rx, ry, rz,
                      nu, nv, nw, atm, A, folder, filename):
        """Write the full disordered configuration via crystal.disordered."""
        crystal.disordered(delta, Ux, Uy, Uz, Sx, Sy, Sz,
                           rx, ry, rz, nu, nv, nw, atm, A, fname,
                           folder=folder, filename=filename,
                           ulim=[0,nu], vlim=[0,nv], wlim=[0,nw])
    def load_data(self, fname):
        """Load intensity data from a NeXus (.nxs) or NumPy (.npz) file.

        NOTE(review): any other extension falls through both branches
        and raises NameError on the return — confirm callers always
        pass one of the two supported formats.
        """
        if fname.endswith('.nxs'):
            signal, sigma_sq, \
            h_range, k_range, l_range, \
            nh, nk, nl = experimental.data(fname)
        elif fname.endswith('.npz'):
            npzfile = np.load(fname, allow_pickle=True)
            signal = npzfile['signal']
            sigma_sq = npzfile['sigma_sq']
            limits = npzfile['limits']
            min_h, max_h, nh, min_k, max_k, nk, min_l, max_l, nl = limits
            h_range = [min_h, max_h]
            k_range = [min_k, max_k]
            l_range = [min_l, max_l]
        return signal, sigma_sq, h_range, k_range, l_range, nh, nk, nl
    def save_data(self, fname, signal, sigma_sq,
                  h_range, k_range, l_range, nh, nk, nl):
        """Save intensity data and axis limits to '{fname}-intensity.npz'."""
        min_h, max_h = h_range
        min_k, max_k = k_range
        min_l, max_l = l_range
        limits = np.array([min_h, max_h, nh,
                           min_k, max_k, nk,
                           min_l, max_l, nl], dtype=object)
        np.savez('{}-intensity.npz'.format(fname),
                 signal=signal, sigma_sq=sigma_sq, limits=limits)
    def load_region_of_interest(self, fname):
        """Load the cropped signal and error volumes saved previously."""
        signal = np.load('{}-intensity-roi.npy'.format(fname))
        sigma_sq = np.load('{}-error-roi.npy'.format(fname))
        return signal, sigma_sq
    def save_region_of_interest(self, fname, signal, sigma_sq):
        """Save the cropped signal and error volumes."""
        np.save('{}-intensity-roi.npy'.format(fname), signal)
        np.save('{}-error-roi.npy'.format(fname), sigma_sq)
def rebin_parameters(self, size, minimum, maximum, centered=True):
if (size > 0):
step = (maximum-minimum)/(size-1)
if centered:
round_min = round(minimum)
round_max = round(maximum)
offset_min = int(np.round((round_min-minimum)/step, 4))
offset_max = int(np.round((round_max-minimum)/step, 4))
scale = experimental.factors(offset_max-offset_min)
mask = np.isclose(np.mod(1/(step*scale), 1), 0)
scale = scale[mask]
else:
scale = experimental.factors(size-1)
mask = step*scale <= 1
scale = scale[mask]
steps = np.round(step*scale, 4)
sizes = (size-1) // scale+1
return steps, sizes
else:
return np.array([]), np.array([])
def slice_value(self, minimum, maximum, size, index):
if (index > size):
return np.round(maximum, 4)
elif (index < 0 or size <= 1):
return np.round(minimum, 4)
else:
step = (maximum-minimum)/(size-1)
return np.round(minimum+step*index, 4)
def slice_index(self, minimum, maximum, size, value):
if (value > maximum):
return size-1
elif (value < minimum or size <= 1):
return 0
else:
step = (maximum-minimum)/(size-1)
return int(round((value-minimum)/step))
    def step_value(self, minimum, maximum, size):
        """Bin width for `size` points on [minimum, maximum] (0 if < 2)."""
        return (maximum-minimum)/(size-1) if (size > 1) else 0
    def size_value(self, minimum, maximum, step):
        """Number of points spanning [minimum, maximum] at width `step`."""
        return int(round((maximum-minimum)/step))+1 if (step > 0) else 1
    def minimum_value(self, size, step, maximum):
        """Lower limit implied by the size, step and upper limit."""
        return maximum-step*(size-1)
    def maximum_value(self, size, step, minimum):
        """Upper limit implied by the size, step and lower limit."""
        return minimum+step*(size-1)
def matrix_transform(self, B, T=np.eye(3)):
return np.linalg.cholesky(np.dot(T.T,np.dot(np.dot(B.T,B),T))).T
    def mask_array(self, array):
        """Masked view of `array` hiding NaN/inf and values <= 0."""
        return np.ma.masked_less_equal(np.ma.masked_invalid(array, copy=False),
                                       0, copy=False)
    def crop(self, array, h_slice, k_slice, l_slice):
        """Crop a 3d volume to the given index slices."""
        return experimental.crop(array, h_slice, k_slice, l_slice)
    def rebin(self, array, binsize):
        """Rebin a 3d volume by the given bin sizes."""
        return experimental.rebin(array, binsize)
def crop_parameters(self, xmin, xmax, minimum, maximum, size):
binning = np.linspace(minimum, maximum, size)
i_min = np.where(binning <= xmin)[0][-1]
i_max = np.where(binning <= xmax)[0][-1]
return i_min, i_max
    def punch(self, array, radius_h, radius_k, radius_l,
              h_range, k_range, l_range,
              centering, outlier, punch):
        """Remove Bragg-peak regions from the volume (experimental.punch)."""
        return experimental.punch(array, radius_h, radius_k, radius_l,
                                  h_range, k_range, l_range,
                                  centering=centering,
                                  outlier=outlier,
                                  punch=punch)
    def get_mask(self, signal, error_sq):
        """Boolean mask of invalid voxels (experimental.mask)."""
        return experimental.mask(signal, error_sq)
    def get_refinement_data(self, signal, error_sq, mask):
        """Unmasked intensities and their inverse variances."""
        return signal[~mask], 1/error_sq[~mask]
    def reciprocal_space_mapping(self, h_range, k_range, l_range,
                                 nu, nv, nw, mask):
        """Map the reciprocal-space grid onto supercell indices and
        split voxel indices into masked/unmasked sets."""
        nh, nk, nl = mask.shape
        output = space.mapping(h_range, k_range, l_range,
                               nh, nk, nl, nu, nv, nw)
        h, k, l, H, K, L, \
        indices, inverses, operators = output
        i_mask, i_unmask = space.indices(mask)
        return h, k, l, H, K, L, indices, inverses, i_mask, i_unmask
    def reciprocal_space_coordinate_transform(self, h, k, l, B, R):
        """Cartesian scattering vectors, their unit vectors and moduli
        from Miller indices."""
        Qh, Qk, Ql = crystal.vector(h, k, l, B)
        Qx, Qy, Qz = crystal.transform(Qh, Qk, Ql, R)
        Qx_norm, Qy_norm, Qz_norm, Q = space.unit(Qx, Qy, Qz)
        return Qx, Qy, Qz, Qx_norm, Qy_norm, Qz_norm, Q
    def real_space_coordinate_transform(self, u, v, w, atm, A, nu, nv, nw):
        """Cartesian unit-cell and supercell site positions."""
        ux, uy, uz = crystal.transform(u, v, w, A)
        ix, iy, iz = space.cell(nu, nv, nw, A)
        rx, ry, rz, atms = space.real(ux, uy, uz, ix, iy, iz, atm)
        return ux, uy, uz, rx, ry, rz, atms
    def exponential_factors(self, Qx, Qy, Qz, ux, uy, uz, nu, nv, nw):
        """Phase factors for site positions and the supercell lattice."""
        phase_factor = scattering.phase(Qx, Qy, Qz, ux, uy, uz)
        space_factor = space.factor(nu, nv, nw)
        return phase_factor, space_factor
    def neutron_factors(self, Q, atm, ion, occupancy, T, g, phase_factor):
        """Neutron nuclear and magnetic prefactors, Debye-Waller damped."""
        scattering_length = scattering.length(atm, Q.size)
        factors = space.prefactors(scattering_length, phase_factor, occupancy)
        magnetic_form_factor = magnetic.form(Q, ion, g)
        magnetic_factors = space.prefactors(magnetic_form_factor,
                                            phase_factor, occupancy)
        return factors*T, magnetic_factors*T
    def xray_factors(self, Q, ion, occupancy, T, phase_factor):
        """X-ray form-factor prefactors, Debye-Waller damped."""
        form_factor = scattering.form(ion, Q)
        factors = space.prefactors(form_factor, phase_factor, occupancy)
        return factors*T
    def debye_waller_factors(self, h_range, k_range, l_range, nh, nk, nl,
                             U11, U22, U33, U23, U13, U12, a, b, c):
        """Flattened Debye-Waller factor over the reciprocal grid."""
        T = space.debye_waller(h_range, k_range, l_range, nh, nk, nl,
                               U11, U22, U33, U23, U13, U12, a, b, c)
        return T.flatten()
def initialize_intensity(self, mask, Q):
nh, nk, nl = mask.shape
I_obs = np.full((nh, nk, nl), np.nan)
I_ref = I_obs[~mask]
I_calc = np.zeros(Q.size, dtype=float)
I_raw = np.zeros(mask.size, dtype=float)
I_flat = np.zeros(mask.size, dtype=float)
return I_obs, I_ref, I_calc, I_raw, I_flat
def initialize_filter(self, mask):
a_filt = np.zeros(mask.size, dtype=float)
b_filt = np.zeros(mask.size, dtype=float)
c_filt = np.zeros(mask.size, dtype=float)
d_filt = np.zeros(mask.size, dtype=float)
e_filt = np.zeros(mask.size, dtype=float)
f_filt = np.zeros(mask.size, dtype=float)
g_filt = np.zeros(mask.size, dtype=float)
h_filt = np.zeros(mask.size, dtype=float)
i_filt = np.zeros(mask.size, dtype=float)
return a_filt, b_filt, c_filt, \
d_filt, e_filt, f_filt, \
g_filt, h_filt, i_filt
    def blurring(self, intensity, sigma):
        """Gaussian-blur the intensity volume (filters.blurring)."""
        return filters.blurring(intensity, sigma)
    def gaussian(self, mask, sigma):
        """Gaussian filter weights and box-blur sizes for the mask."""
        v_inv = filters.gaussian(mask, sigma)
        boxes = filters.boxblur(sigma, 3)
        return v_inv, boxes
    def random_moments(self, nu, nv, nw, n_atm, moment, fixed):
        """Random initial spin configuration (magnetic.spin)."""
        return magnetic.spin(nu, nv, nw, n_atm, moment, fixed)
    def random_occupancies(self, nu, nv, nw, n_atm, occupancy):
        """Random initial occupancy configuration."""
        return occupational.composition(nu, nv, nw, n_atm, occupancy)
    def random_displacements(self, nu, nv, nw, n_atm, displacement, fixed):
        """Random initial displacement configuration."""
        return displacive.expansion(nu, nv, nw, n_atm, displacement, fixed)
    def initialize_magnetic(self, Sx, Sy, Sz, H, K, L,
                            Qx_norm, Qy_norm, Qz_norm, indices,
                            magnetic_factors, nu, nv, nw, n_atm):
        """Fourier-transform the spin configuration, build the magnetic
        structure factors, and allocate the *_orig/*_cand scratch arrays
        used by the refinement's accept/reject loop."""
        n_uvw = nu*nv*nw
        Sx_k, Sy_k, Sz_k, i_dft = magnetic.transform(Sx, Sy, Sz, H, K, L,
                                                     nu, nv, nw, n_atm)
        Fx, Fy, Fz, \
        prod_x, prod_y, prod_z = magnetic.structure(Qx_norm, Qy_norm, Qz_norm,
                                                    Sx_k, Sy_k, Sz_k, i_dft,
                                                    magnetic_factors)
        # scratch copies of the current state (filled during refinement)
        Fx_orig = np.zeros(indices.size, dtype=complex)
        Fy_orig = np.zeros(indices.size, dtype=complex)
        Fz_orig = np.zeros(indices.size, dtype=complex)
        prod_x_orig = np.zeros(indices.size, dtype=complex)
        prod_y_orig = np.zeros(indices.size, dtype=complex)
        prod_z_orig = np.zeros(indices.size, dtype=complex)
        Sx_k_orig = np.zeros(n_uvw, dtype=complex)
        Sy_k_orig = np.zeros(n_uvw, dtype=complex)
        Sz_k_orig = np.zeros(n_uvw, dtype=complex)
        # scratch copies for the candidate move
        Fx_cand = np.zeros(indices.size, dtype=complex)
        Fy_cand = np.zeros(indices.size, dtype=complex)
        Fz_cand = np.zeros(indices.size, dtype=complex)
        prod_x_cand = np.zeros(indices.size, dtype=complex)
        prod_y_cand = np.zeros(indices.size, dtype=complex)
        prod_z_cand = np.zeros(indices.size, dtype=complex)
        Sx_k_cand = np.zeros(n_uvw, dtype=complex)
        Sy_k_cand = np.zeros(n_uvw, dtype=complex)
        Sz_k_cand = np.zeros(n_uvw, dtype=complex)
        return Sx_k, Sy_k, Sz_k, \
               Sx_k_orig, Sy_k_orig, Sz_k_orig, \
               Sx_k_cand, Sy_k_cand, Sz_k_cand, \
               Fx, Fy, Fz, \
               Fx_orig, Fy_orig, Fz_orig, \
               Fx_cand, Fy_cand, Fz_cand, \
               prod_x, prod_y, prod_z, \
               prod_x_orig, prod_y_orig, prod_z_orig, \
               prod_x_cand, prod_y_cand, prod_z_cand, i_dft
    def initialize_occupational(self, A_r, H, K, L, indices,
                                factors, nu, nv, nw, n_atm):
        """Fourier-transform the occupancy configuration, build its
        structure factors, and allocate original/candidate scratch
        arrays for the refinement loop."""
        n_uvw = nu*nv*nw
        A_k, i_dft = occupational.transform(A_r, H, K, L, nu, nv, nw, n_atm)
        F, prod = occupational.structure(A_k, i_dft, factors)
        F_orig = np.zeros(indices.size, dtype=complex)
        prod_orig = np.zeros(indices.size, dtype=complex)
        A_k_orig = np.zeros(n_uvw, dtype=complex)
        F_cand = np.zeros(indices.size, dtype=complex)
        prod_cand = np.zeros(indices.size, dtype=complex)
        A_k_cand = np.zeros(n_uvw, dtype=complex)
        return A_k, A_k_orig, A_k_cand, F, F_orig, F_cand, \
               prod, prod_orig, prod_cand, i_dft
def initialize_displacive(self, Ux, Uy, Uz, H, K, L, Qx, Qy, Qz,
indices, factors, nu, nv, nw, n_atm,
p, centering):
n_uvw = nu*nv*nw
coeffs = displacive.coefficients(p)
U_r = displacive.products(Ux, Uy, Uz, p)
Q_k = displacive.products(Qx, Qy, Qz, p)
U_k, i_dft = displacive.transform(U_r, H, K, L, nu, nv, nw, n_atm)
H_nuc, K_nuc, L_nuc, \
cond = space.condition(H, K, L, nu, nv, nw, centering)
F, F_nuc, \
prod, prod_nuc, \
V_k, V_k_nuc, \
even, \
bragg = displacive.structure(U_k, Q_k, coeffs, cond, p, i_dft, factors)
F_orig = np.zeros(indices.size, dtype=complex)
F_nuc_orig = np.zeros(bragg.size, dtype=complex)
prod_orig = np.zeros(indices.size, dtype=complex)
prod_nuc_orig = np.zeros(bragg.size, dtype=complex)
V_k_orig = np.zeros(indices.size, dtype=complex)
V_k_nuc_orig = np.zeros(bragg.size, dtype=complex)
U_k_orig = np.zeros(n_uvw*coeffs.size, dtype=complex)
F_cand = np.zeros(indices.shape, dtype=complex)
F_nuc_cand = np.zeros(bragg.shape, dtype=complex)
prod_cand = np.zeros(indices.shape, dtype=complex)
prod_nuc_cand = np.zeros(bragg.shape, dtype=complex)
V_k_cand = np.zeros(indices.size, dtype=complex)
V_k_nuc_cand = np.zeros(bragg.size, dtype=complex)
U_k_cand = np.zeros(n_uvw*coeffs.size, dtype=complex)
U_r_orig = np.zeros(coeffs.size, dtype=float)
U_r_cand = np.zeros(coeffs.size, dtype=float)
return U_r, U_r_orig, U_r_cand, Q_k, \
U_k, U_k_orig, U_k_cand, \
V_k, V_k_orig, V_k_cand, \
V_k_nuc, V_k_nuc_orig, V_k_nuc_cand, \
F, F_orig, F_cand, \
F_nuc, F_nuc_orig, F_nuc_cand, \
prod, prod_orig, prod_cand, \
prod_nuc, prod_nuc_orig, prod_nuc_cand, \
i_dft, coeffs, H_nuc, K_nuc, L_nuc, cond, even, bragg
def reduced_reciprocal_space_symmetry(self, h_range, k_range, l_range,
nh, nk, nl, nu, nv, nw, T, laue):
indices, inverses, operators, \
Nu, Nv, Nw = space.reduced(h_range, k_range, l_range, nh, nk, nl,
nu, nv, nw, T=T, laue=laue)
lauesym = symmetry.operators(invert=True)
symmetries = list(lauesym.keys())
symop = [11,1]
for count, sym in enumerate(symmetries):
if (np.array([operators[p] in lauesym.get(sym) \
for p in range(len(operators))]).all() and \
len(lauesym.get(sym)) == len(operators)):
symop = [count,len(lauesym.get(sym))]
return indices, inverses, operators, Nu, Nv, Nw, symop
def displacive_parameters(self, p, centering):
coeffs = displacive.coefficients(p)
start = (np.cumsum(displacive.number(np.arange(p+1)))
- displacive.number(np.arange(p+1)))[::2]
end = np.cumsum(displacive.number(np.arange(p+1)))[::2]
even = []
for k in range(len(end)):
even += range(start[k], end[k])
even = np.array(even)
nuclear = ['P', 'I', 'F', 'R', 'C', 'A', 'B']
cntr = np.argwhere([x in centering for x in nuclear])[0][0]
cntr += 1
return coeffs, even, cntr
def save_magnetic(self, fname, run, Sx, Sy, Sz):
np.save('{}-calculated-spin-x-{}.npy'.format(fname,run), Sx)
np.save('{}-calculated-spin-y-{}.npy'.format(fname,run), Sy)
np.save('{}-calculated-spin-z-{}.npy'.format(fname,run), Sz)
def save_occupational(self, fname, run, A_r):
np.save('{}-calculated-composition-{}.npy'.format(fname,run), A_r)
def save_displacive(self, fname, run, Ux, Uy, Uz):
np.save('{}-calculated-displacement-x-{}.npy'.format(fname,run), Ux)
np.save('{}-calculated-displacement-y-{}.npy'.format(fname,run), Uy)
np.save('{}-calculated-displacement-z-{}.npy'.format(fname,run), Uz)
def load_magnetic(self, fname, run):
Sx = np.load('{}-calculated-spin-x-{}.npy'.format(fname,run))
Sy = np.load('{}-calculated-spin-y-{}.npy'.format(fname,run))
Sz = np.load('{}-calculated-spin-z-{}.npy'.format(fname,run))
return Sx, Sy, Sz
def load_occupational(self, fname, run):
A_r = np.load('{}-calculated-composition-{}.npy'.format(fname,run))
return A_r
def load_displacive(self, fname, run):
Ux = np.load('{}-calculated-displacement-x-{}.npy'.format(fname,run))
Uy = np.load('{}-calculated-displacement-y-{}.npy'.format(fname,run))
Uz = np.load('{}-calculated-displacement-z-{}.npy'.format(fname,run))
return Ux, Uy, Uz
def save_refinement(self, fname, run, I_obs, chi_sq, energy, temperature,
scale, acc_moves, rej_moves, acc_temps, rej_temps):
np.save('{}-calculated-intensity-{}.npy'.format(fname,run), I_obs)
np.save('{}-goodness-of-fit-{}.npy'.format(fname,run), chi_sq)
np.save('{}-energy-{}.npy'.format(fname,run), energy)
np.save('{}-temperature-{}.npy'.format(fname,run), temperature)
np.save('{}-scale-factor-{}.npy'.format(fname,run), scale)
np.save('{}-accepted-moves-{}.npy'.format(fname,run), acc_moves)
np.save('{}-rejected-moves-{}.npy'.format(fname,run), rej_moves)
np.save('{}-accepted-temperature-{}.npy'.format(fname,run), acc_temps)
np.save('{}-rejected-temperature-{}.npy'.format(fname,run), rej_temps)
def load_refinement(self, fname, run):
I_obs = np.load('{}-calculated-intensity-{}.npy'.format(fname,run))
chi_sq = np.load('{}-goodness-of-fit-{}.npy'.format(fname,run))
energy = np.load('{}-energy-{}.npy'.format(fname,run))
temperature = np.load('{}-temperature-{}.npy'.format(fname,run))
scale = np.load('{}-scale-factor-{}.npy'.format(fname,run))
acc_moves = np.load('{}-accepted-moves-{}.npy'.format(fname,run))
rej_moves = np.load('{}-rejected-moves-{}.npy'.format(fname,run))
acc_temps = np.load('{}-accepted-temperature-{}.npy'.format(fname,run))
rej_temps = np.load('{}-rejected-temperature-{}.npy'.format(fname,run))
return I_obs, chi_sq.tolist(), energy.tolist(), \
temperature.tolist(), scale.tolist(), \
acc_moves.tolist(), rej_moves.tolist(), \
acc_temps.tolist(), rej_temps.tolist()
def save_recalculation_1d(self, fname, I_recalc):
I_total, I_bragg, I_diffuse = I_recalc
np.save('{}-intensity-total-recalc-1d.npy'.format(fname), I_total)
np.save('{}-intensity-bragg-recalc-1d.npy'.format(fname), I_bragg)
np.save('{}-intensity-diffuse-recalc-1d.npy'.format(fname), I_diffuse)
def load_recalculation_1d(self, fname):
if os.path.isfile('{}-intensity-total-recalc-1d.npy'.format(fname)):
I_total = np.load('{}-intensity-total-recalc-1d.npy'.format(fname))
I_bragg = np.load('{}-intensity-bragg-recalc-1d.npy'.format(fname))
I_diffuse = np.load('{}-intensity-diffuse-'\
'recalc-1d.npy'.format(fname))
return I_total, I_bragg, I_diffuse
else:
return None, None, None
def save_recalculation_3d(self, fname, I_recalc):
np.save('{}-intensity-recalc-3d.npy'.format(fname), I_recalc)
def load_recalculation_3d(self, fname):
if os.path.isfile('{}-intensity-recalc-3d.npy'.format(fname)):
I_recalc = np.load('{}-intensity-recalc-3d.npy'.format(fname))
return I_recalc
else:
return None
def save_correlations_1d(self, fname, data, header):
np.savetxt(fname, np.column_stack(data), delimiter=',',
fmt='%s', header=header)
    def save_correlations_3d(self, fname, data, label):
        # Delegate 3d correlation export to the experimental-module writer.
        experimental.correlations(fname, data, label)
def save_intensity_1d(self, fname, Q, data):
np.savetxt(fname, np.column_stack((Q, *data)), delimiter=',', fmt='%s')
    def save_intensity_3d(self, fname, h, k, l, data, B):
        # Delegate 3d intensity export to the experimental-module writer.
        experimental.intensity(fname, h, k, l, data, B)
def magnetic_intensity_1d(self, fname, run, occupancy,
U11, U22, U33, U23, U13, U12, rx, ry, rz, atm,
Q_range, nQ, A, D, nu, nv, nw, g, mask):
Sx, Sy, Sz = self.load_magnetic(fname, run)
n_atm = np.size(Sx) // (nu*nv*nw)
Sx = Sx.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Sy = Sy.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Sz = Sz.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
rx = rx.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
ry = ry.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
rz = rz.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Q = np.linspace(Q_range[0], Q_range[1], nQ)
I_calc = powder.magnetic(Sx, Sy, Sz, occupancy,
U11, U22, U33, U23, U13, U12,
rx, ry, rz, atm, Q, A, D, nu, nv, nw, g)
return I_calc
def occupational_intensity_1d(self, fname, run, occupancy,
U11, U22, U33, U23, U13, U12, rx, ry, rz,
atm, Q_range, nQ, A, D, nu, nv, nw, mask):
A_r = self.load_occupational(fname, run)
n_atm = np.size(A_r) // (nu*nv*nw)
A_r = A_r.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
rx = rx.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
ry = ry.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
rz = rz.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Q = np.linspace(Q_range[0], Q_range[1], nQ)
I_calc = powder.occupational(A_r, occupancy,
U11, U22, U33, U23, U13, U12,
rx, ry, rz, atm, Q, A, D, nu, nv, nw)
return I_calc
def displacive_intensity_1d(self, fname, run, occupancy, rx, ry, rz,
atm, Q_range, nQ, A, D, nu, nv, nw, p, mask):
Ux, Uy, Uz = self.load_displacive(fname, run)
n_atm = np.size(Ux) // (nu*nv*nw)
Ux = Ux.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Uy = Uy.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Uz = Uz.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
rx = rx.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
ry = ry.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
rz = rz.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Q = np.linspace(Q_range[0], Q_range[1], nQ)
I_calc = powder.displacive(Ux, Uy, Uz, occupancy,
rx, ry, rz, atm, Q, A, D, nu, nv, nw, p)
return I_calc
def structural_intensity_1d(self, occupancy,
U11, U22, U33, U23, U13, U12, rx, ry, rz,
atm, Q_range, nQ, A, D, nu, nv, nw, mask):
n_atm = np.size(rx) // (nu*nv*nw)
rx = rx.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
ry = ry.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
rz = rz.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Q = np.linspace(Q_range[0], Q_range[1], nQ)
I_calc = powder.structural(occupancy, U11, U22, U33, U23, U13, U12,
rx, ry, rz, atm, Q, A, D, nu, nv, nw)
return I_calc
def magnetic_intensity_3d(self, fname, run, occupancy,
U11, U22, U33, U23, U13, U12, ux, uy, uz, atm,
h_range, k_range, l_range, indices, symop,
T, B, R, D, twins, variants, nh, nk, nl,
nu, nv, nw, Nu, Nv, Nw, g, mask):
Sx, Sy, Sz = self.load_magnetic(fname, run)
n_atm = np.size(Sx) // (nu*nv*nw)
Sx = Sx.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Sy = Sy.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Sz = Sz.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
I_calc = monocrystal.magnetic(Sx, Sy, Sz, occupancy,
U11, U22, U33, U23, U13, U12,
ux, uy, uz, atm,
h_range, k_range, l_range, indices,
symop, T, B, R, D, twins, variants,
nh, nk, nl, nu, nv, nw, Nu, Nv, Nw, g)
return I_calc
def occupational_intensity_3d(self, fname, run, occupancy,
U11, U22, U33, U23, U13, U12, ux, uy, uz,
atm, h_range, k_range, l_range, indices,
symop, T, B, R, D, twins, variants,
nh, nk, nl, nu, nv, nw, Nu, Nv, Nw, mask):
A_r = self.load_occupational(fname, run)
n_atm = np.size(A_r) // (nu*nv*nw)
A_r = A_r.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
I_calc = monocrystal.occupational(A_r, occupancy,
U11, U22, U33, U23, U13, U12,
ux, uy, uz, atm,
h_range, k_range, l_range, indices,
symop, T, B, R, D, twins, variants,
nh, nk, nl, nu, nv, nw, Nu, Nv, Nw)
return I_calc
def displacive_intensity_3d(self, fname, run, coeffs, occupancy,
ux, uy, uz, atm, h_range, k_range, l_range,
indices, symop, T, B, R, twins, variants,
nh, nk, nl, nu, nv, nw, Nu, Nv, Nw,
p, even, cntr, mask):
Ux, Uy, Uz = self.load_displacive(fname, run)
n_atm = np.size(Ux) // (nu*nv*nw)
Ux = Ux.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Uy = Uy.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
Uz = Uz.reshape(nu,nv,nw,n_atm).T[mask].T.flatten()
U_r = displacive.products(Ux, Uy, Uz, p)
I_calc = monocrystal.displacive(U_r, coeffs, occupancy, ux, uy, uz,
atm, h_range, k_range, l_range,
indices, symop, T, B, R, twins, variants,
nh, nk, nl, nu, nv, nw, Nu, Nv, Nw,
p, even, cntr)
return I_calc
    def structural_intensity_3d(self, occupancy,
                                U11, U22, U33, U23, U13, U12, ux, uy, uz, atm,
                                h_range, k_range, l_range, indices, symop,
                                T, B, R, D, twins, variants, nh, nk, nl,
                                nu, nv, nw, Nu, Nv, Nw, cntr, mask):
        """Calculate the 3d single-crystal intensity of the average structure.

        Pure pass-through to ``monocrystal.structural``; note that ``mask``
        is accepted for signature symmetry with the other ``*_intensity_3d``
        methods but is not used here.
        """
        I_calc = monocrystal.structural(occupancy,
                                        U11, U22, U33, U23, U13, U12,
                                        ux, uy, uz, atm,
                                        h_range, k_range, l_range, indices,
                                        symop, T, B, R, D, twins, variants,
                                        nh, nk, nl, nu, nv, nw, Nu, Nv, Nw,
                                        cntr)
        return I_calc
    def magnetic_refinement(self, Sx, Sy, Sz, Qx_norm, Qy_norm, Qz_norm,
                            Sx_k, Sy_k, Sz_k,
                            Sx_k_orig, Sy_k_orig, Sz_k_orig,
                            Sx_k_cand, Sy_k_cand, Sz_k_cand,
                            Fx, Fy, Fz,
                            Fx_orig, Fy_orig, Fz_orig,
                            Fx_cand, Fy_cand, Fz_cand,
                            prod_x, prod_y, prod_z,
                            prod_x_orig, prod_y_orig, prod_z_orig,
                            prod_x_cand, prod_y_cand, prod_z_cand,
                            space_factor, factors, moment, I_calc, I_expt,
                            inv_sigma_sq, I_raw, I_flat, I_ref, v_inv,
                            a_filt, b_filt, c_filt,
                            d_filt, e_filt, f_filt,
                            g_filt, h_filt, i_filt,
                            boxes, i_dft, inverses, i_mask, i_unmask,
                            acc_moves, acc_temps, rej_moves, rej_temps, chi_sq,
                            energy, temperature, scale, constant, fixed,
                            heisenberg, nh, nk, nl, nu, nv, nw, n_atm, n, N):
        """Run the magnetic Monte Carlo refinement kernel.

        Thin wrapper: every argument is forwarded positionally and unchanged
        to ``refinement.magnetic``, which updates the working arrays
        (spins, structure factors, diagnostics) in place.
        """
        refinement.magnetic(Sx, Sy, Sz, Qx_norm, Qy_norm, Qz_norm,
                            Sx_k, Sy_k, Sz_k,
                            Sx_k_orig, Sy_k_orig, Sz_k_orig,
                            Sx_k_cand, Sy_k_cand, Sz_k_cand,
                            Fx, Fy, Fz,
                            Fx_orig, Fy_orig, Fz_orig,
                            Fx_cand, Fy_cand, Fz_cand,
                            prod_x, prod_y, prod_z,
                            prod_x_orig, prod_y_orig, prod_z_orig,
                            prod_x_cand, prod_y_cand, prod_z_cand,
                            space_factor, factors, moment, I_calc, I_expt,
                            inv_sigma_sq, I_raw, I_flat, I_ref, v_inv,
                            a_filt, b_filt, c_filt,
                            d_filt, e_filt, f_filt,
                            g_filt, h_filt, i_filt,
                            boxes, i_dft, inverses, i_mask, i_unmask,
                            acc_moves, acc_temps, rej_moves, rej_temps, chi_sq,
                            energy, temperature, scale, constant, fixed,
                            heisenberg, nh, nk, nl, nu, nv, nw, n_atm, n, N)
    def occupational_refinement(self, A_r, A_k, A_k_orig, A_k_cand,
                                F, F_orig, F_cand,
                                prod, prod_orig, prod_cand,
                                space_factor, factors, occupancy,
                                I_calc, I_expt, inv_sigma_sq,
                                I_raw, I_flat, I_ref, v_inv,
                                a_filt, b_filt, c_filt,
                                d_filt, e_filt, f_filt,
                                g_filt, h_filt, i_filt,
                                boxes, i_dft, inverses, i_mask, i_unmask,
                                acc_moves, acc_temps, rej_moves, rej_temps,
                                chi_sq, energy, temperature, scale, constant,
                                fixed, nh, nk, nl, nu, nv, nw, n_atm, n, N):
        """Run the occupational Monte Carlo refinement kernel.

        Thin wrapper: every argument is forwarded positionally and unchanged
        to ``refinement.occupational``, which updates the working arrays
        (occupancies, structure factors, diagnostics) in place.
        """
        refinement.occupational(A_r, A_k, A_k_orig, A_k_cand,
                                F, F_orig, F_cand,
                                prod, prod_orig, prod_cand,
                                space_factor, factors, occupancy,
                                I_calc, I_expt, inv_sigma_sq,
                                I_raw, I_flat, I_ref, v_inv,
                                a_filt, b_filt, c_filt,
                                d_filt, e_filt, f_filt,
                                g_filt, h_filt, i_filt,
                                boxes, i_dft, inverses, i_mask, i_unmask,
                                acc_moves, acc_temps, rej_moves, rej_temps,
                                chi_sq, energy, temperature, scale, constant,
                                fixed, nh, nk, nl, nu, nv, nw, n_atm, n, N)
    def displacive_refinement(self, Ux, Uy, Uz,
                              U_r, U_r_orig, U_r_cand,
                              U_k, U_k_orig, U_k_cand,
                              V_k, V_k_nuc, V_k_orig,
                              V_k_nuc_orig, V_k_cand, V_k_nuc_cand,
                              F, F_nuc, F_orig, F_nuc_orig, F_cand, F_nuc_cand,
                              prod, prod_nuc, prod_orig, prod_nuc_orig,
                              prod_cand, prod_nuc_cand, space_factor, factors,
                              coeffs, Q_k, Lxx, Lyy, Lzz, Lyz, Lxz, Lxy,
                              I_calc, I_expt, inv_sigma_sq,
                              I_raw, I_flat, I_ref, v_inv,
                              a_filt, b_filt, c_filt,
                              d_filt, e_filt, f_filt,
                              g_filt, h_filt, i_filt,
                              bragg, even, boxes, i_dft, inverses, i_mask,
                              i_unmask, acc_moves, acc_temps, rej_moves,
                              rej_temps, chi_sq, energy, temperature, scale,
                              constant, fixed, isotropic, p, nh, nk, nl,
                              nu, nv, nw, n_atm, n, N):
        """Run the displacive Monte Carlo refinement kernel.

        Thin wrapper: every argument is forwarded positionally and unchanged
        to ``refinement.displacive``, which updates the working arrays
        (displacements, structure factors, diagnostics) in place.  The
        interleaved ordering of the V_k current/nuclear/orig/cand arguments
        is deliberate and matches the kernel's signature.
        """
        refinement.displacive(Ux, Uy, Uz,
                              U_r, U_r_orig, U_r_cand,
                              U_k, U_k_orig, U_k_cand,
                              V_k, V_k_nuc, V_k_orig,
                              V_k_nuc_orig, V_k_cand, V_k_nuc_cand,
                              F, F_nuc, F_orig, F_nuc_orig, F_cand, F_nuc_cand,
                              prod, prod_nuc, prod_orig, prod_nuc_orig,
                              prod_cand, prod_nuc_cand, space_factor, factors,
                              coeffs, Q_k, Lxx, Lyy, Lzz, Lyz, Lxz, Lxy,
                              I_calc, I_expt, inv_sigma_sq,
                              I_raw, I_flat, I_ref, v_inv,
                              a_filt, b_filt, c_filt,
                              d_filt, e_filt, f_filt,
                              g_filt, h_filt, i_filt,
                              bragg, even, boxes, i_dft, inverses, i_mask,
                              i_unmask, acc_moves, acc_temps, rej_moves,
                              rej_temps, chi_sq, energy, temperature, scale,
                              constant, fixed, isotropic, p, nh, nk, nl,
                              nu, nv, nw, n_atm, n, N)
def correlation_statistics(self, corr):
runs = np.shape(corr)[0]
return np.mean(corr, axis=0), np.std(corr, axis=0)**2/runs,
def vector_correlations_1d(self, Vx, Vy, Vz, rx, ry, rz, atms,
nu, nv, nw, A, fract, tol):
data = correlations.vector1d(Vx, Vy, Vz, rx, ry, rz, atms,
nu, nv, nw, A, fract, tol)
corr1d, coll1d, corr1d_, coll1d_, d, atm_pair1d = data
return corr1d, coll1d, d, atm_pair1d
def scalar_correlations_1d(self, V_r, rx, ry, rz, atms,
nu, nv, nw, A, fract, tol):
data = correlations.scalar1d(V_r, rx, ry, rz, atms,
nu, nv, nw, A, fract, tol)
corr1d, corr1d_, d, atm_pair1d = data
return corr1d, d, atm_pair1d
def vector_average_1d(self, corr1d, coll1d, sigma_sq_corr1d,
sigma_sq_coll1d, d, atm_pair1d, tol):
arrays = (corr1d, coll1d, sigma_sq_corr1d, sigma_sq_coll1d)
return correlations.average1d(arrays, d, atm_pair1d, tol)
def scalar_average_1d(self, corr1d, sigma_sq_corr1d, d, atm_pair1d, tol):
arrays = (corr1d, sigma_sq_corr1d)
return correlations.average1d(arrays, d, atm_pair1d, tol)
def vector_correlations_3d(self, Ux, Uy, Uz, rx, ry, rz, atms,
nu, nv, nw, A, fract, tol):
data = correlations.vector3d(Ux, Uy, Uz, rx, ry, rz, atms,
nu, nv, nw, A, fract, tol)
corr3d, coll3d, corr3d_, coll3d_, dx, dy, dz, atm_pair3d = data
return corr3d, coll3d, dx, dy, dz, atm_pair3d
def scalar_correlations_3d(self, V_r, rx, ry, rz, atms,
nu, nv, nw, A, fract, tol):
data = correlations.scalar3d(V_r, rx, ry, rz, atms,
nu, nv, nw, A, fract, tol)
corr3d, corr3d_, dx, dy, dz, atm_pair3d = data
return corr3d, dx, dy, dz, atm_pair3d
def vector_symmetrize_3d(self, corr3d, coll3d,
sigma_sq_corr3d, sigma_sq_coll3d,
dx, dy, dz, atm_pair3d, A, laue, tol):
arrays = (corr3d, coll3d, sigma_sq_corr3d, sigma_sq_coll3d)
return correlations.symmetrize(arrays, dx, dy, dz,
atm_pair3d, A, laue, tol)
def scalar_symmetrizes_3d(self, corr3d, sigma_sq_corr3d, dx, dy, dz,
atm_pair3d, A, laue, tol):
arrays = (corr3d, sigma_sq_corr3d)
return correlations.symmetrize(arrays, dx, dy, dz,
atm_pair3d, A, laue, tol)
def vector_average_3d(self, corr3d, coll3d,
sigma_sq_corr3d, sigma_sq_coll3d,
dx, dy, dz, atm_pair3d, tol):
arrays = (corr3d, coll3d, sigma_sq_corr3d, sigma_sq_coll3d)
return correlations.average3d(arrays, dx, dy, dz, atm_pair3d, tol=tol)
def scalar_average_3d(self, corr3d, sigma_sq_corr3d,
dx, dy, dz, atm_pair3d, tol):
arrays = (corr3d, sigma_sq_corr3d)
return correlations.average3d(arrays, dx, dy, dz, atm_pair3d, tol=tol)
def save_scalar_1d(self, fname, corr, sigma_sq_corr, d, atm_pair):
np.save('{}-correlations-1d.npy'.format(fname), corr)
np.save('{}-correlations-1d-error.npy'.format(fname), sigma_sq_corr)
np.save('{}-correlations-1d-d.npy'.format(fname), d)
np.save('{}-correlations-1d-pair.npy'.format(fname), atm_pair)
def save_vector_1d(self, fname, corr, coll, sigma_sq_corr, sigma_sq_coll,
d, atm_pair):
np.save('{}-correlations-1d.npy'.format(fname), corr)
np.save('{}-collinearity-1d.npy'.format(fname), coll)
np.save('{}-correlations-1d-error.npy'.format(fname), sigma_sq_corr)
np.save('{}-collinearity-1d-error.npy'.format(fname), sigma_sq_coll)
np.save('{}-correlations-1d-d.npy'.format(fname), d)
np.save('{}-correlations-1d-pair.npy'.format(fname), atm_pair)
def save_scalar_3d(self, fname, corr, sigma_sq_corr, dx, dy, dz, atm_pair):
np.save('{}-correlations-3d.npy'.format(fname), corr)
np.save('{}-correlations-3d-error.npy'.format(fname), sigma_sq_corr)
np.save('{}-correlations-3d-dx.npy'.format(fname), dx)
np.save('{}-correlations-3d-dy.npy'.format(fname), dy)
np.save('{}-correlations-3d-dz.npy'.format(fname), dz)
np.save('{}-correlations-3d-pair.npy'.format(fname), atm_pair)
def save_vector_3d(self, fname, corr, coll, sigma_sq_corr, sigma_sq_coll,
dx, dy, dz, atm_pair):
np.save('{}-correlations-3d.npy'.format(fname), corr)
np.save('{}-collinearity-3d.npy'.format(fname), coll)
np.save('{}-correlations-3d-error.npy'.format(fname), sigma_sq_corr)
np.save('{}-collinearity-3d-error.npy'.format(fname), sigma_sq_coll)
np.save('{}-correlations-3d-dx.npy'.format(fname), dx)
np.save('{}-correlations-3d-dy.npy'.format(fname), dy)
np.save('{}-correlations-3d-dz.npy'.format(fname), dz)
np.save('{}-correlations-3d-pair.npy'.format(fname), atm_pair)
def load_scalar_1d(self, fname):
corr = np.load('{}-correlations-1d.npy'.format(fname))
sigma_sq_corr = np.load('{}-correlations-1d-error.npy'.format(fname))
d = np.load('{}-correlations-1d-d.npy'.format(fname))
atm_pair = np.load('{}-correlations-1d-pair.npy'.format(fname))
return corr, sigma_sq_corr, d, atm_pair
def load_vector_1d(self, fname):
corr = np.load('{}-correlations-1d.npy'.format(fname))
coll = np.load('{}-collinearity-1d.npy'.format(fname))
sigma_sq_corr = np.load('{}-correlations-1d-error.npy'.format(fname))
sigma_sq_coll = np.load('{}-collinearity-1d-error.npy'.format(fname))
d = np.load('{}-correlations-1d-d.npy'.format(fname))
atm_pair = np.load('{}-correlations-1d-pair.npy'.format(fname))
return corr, coll, sigma_sq_corr, sigma_sq_coll, d, atm_pair
def load_scalar_3d(self, fname):
corr = np.load('{}-correlations-3d.npy'.format(fname))
sigma_sq_corr = np.load('{}-correlations-3d-error.npy'.format(fname))
dx = np.load('{}-correlations-3d-dx.npy'.format(fname))
dy = np.load('{}-correlations-3d-dy.npy'.format(fname))
dz = np.load('{}-correlations-3d-dz.npy'.format(fname))
atm_pair = np.load('{}-correlations-3d-pair.npy'.format(fname))
return corr, sigma_sq_corr, dx, dy, dz, atm_pair
def load_vector_3d(self, fname):
corr = np.load('{}-correlations-3d.npy'.format(fname))
coll = np.load('{}-collinearity-3d.npy'.format(fname))
sigma_sq_corr = np.load('{}-correlations-3d-error.npy'.format(fname))
sigma_sq_coll = np.load('{}-collinearity-3d-error.npy'.format(fname))
dx = np.load('{}-correlations-3d-dx.npy'.format(fname))
dy = np.load('{}-correlations-3d-dy.npy'.format(fname))
dz = np.load('{}-correlations-3d-dz.npy'.format(fname))
atm_pair = np.load('{}-correlations-3d-pair.npy'.format(fname))
return corr, coll, sigma_sq_corr, sigma_sq_coll, dx, dy, dz, atm_pair
def mask_plane(self, dx, dy, dz, h, k, l, d, A, B, tol):
hx, hy, hz = np.dot(B, [h,k,l])
if (not np.isclose(hx**2+hy**2+hz**2,0)):
nx, ny, nz = [hx,hy,hz]/np.linalg.norm([hx,hy,hz])
Px, Py, Pz = np.cross([0,0,1], [nx,ny,nz])
P = np.linalg.norm([Px,Py,Pz])
if (np.isclose(P,0)):
Px, Py, Pz = np.cross([0,1,0], [nx,ny,nz])
P = np.linalg.norm([Px,Py,Pz])
elif (np.isclose(np.max([Px,Py,Pz]),0)):
Px, Py, Pz = np.cross([1,0,0], [nx,ny,nz])
P = np.linalg.norm([Px,Py,Pz])
px, py, pz = Px/P, Py/P, Pz/P
Qx, Qy, Qz = np.cross([nx,ny,nz], [px,py,pz])
Q = np.linalg.norm([Qx,Qy,Qz])
qx, qy, qz = Qx/Q, Qy/Q, Qz/Q
plane = np.isclose(hx*dx+hy*dy+hz*dz, d, rtol=tol)
A_inv = np.linalg.inv(A)
pu, pv, pw = np.dot(A_inv, [px,py,pz])
qu, qv, qw = np.dot(A_inv, [qx,qy,qz])
projx = np.array([pu,pv,pw])
projy = np.array([qu,qv,qw])
scale_dx = projx.max()
scale_dy = projy.max()
projx = projx/scale_dx
projy = projy/scale_dy
cor_aspect = scale_dx/scale_dy
dx, dy, dz = dx[plane], dy[plane], dz[plane]
Dx, Dy = px*dx+py*dy+pz*dz, qx*dx+qy*dy+qz*dz
Dx, Dy = Dx*scale_dx, Dy*scale_dy
return cor_aspect, projx, projy, Dx, Dy, plane | /rmc_discord-0.0.4-cp36-cp36m-win_amd64.whl/disorder/graphical/model.py | 0.602062 | 0.358044 | model.py | pypi |
import numpy as np
from disorder.material import crystal
from disorder.material import symmetry
def reciprocal(h_range, k_range, l_range, mask, B, T=np.eye(3)):
    """Cartesian reciprocal-space coordinates of the unmasked grid points."""
    nh, nk, nl = mask.shape
    axes = [np.linspace(r[0], r[1], n)
            for r, n in zip((h_range, k_range, l_range), (nh, nk, nl))]
    h_, k_, l_ = np.meshgrid(*axes, indexing='ij')
    h, k, l = crystal.transform(h_, k_, l_, T)
    Qh, Qk, Ql = crystal.vector(h, k, l, B)
    keep = ~mask
    return Qh[keep], Qk[keep], Ql[keep]
def cell(nu, nv, nw, A):
    """Cartesian origins of every unit cell in the supercell, flattened."""
    axes = (np.arange(nu), np.arange(nv), np.arange(nw))
    i, j, k = np.meshgrid(*axes, indexing='ij')
    ix, iy, iz = crystal.transform(i, j, k, A)
    return ix.flatten(), iy.flatten(), iz.flatten()
def real(ux, uy, uz, ix, iy, iz, atm):
    """Atom positions and labels for the whole supercell.

    Each cell origin (ix,iy,iz) is combined with every unit-cell atom
    offset (ux,uy,uz), cell-major order; labels are tiled to match.
    """
    def expand(cells, frac):
        # every cell origin plus every atom offset
        return (cells[:,np.newaxis]+frac).flatten()
    rx, ry, rz = expand(ix, ux), expand(iy, uy), expand(iz, uz)
    ion = np.tile(atm, ix.shape[0])
    return rx, ry, rz, ion
def factor(nu, nv, nw):
    """Complex phase factors exp(i k.r) on the supercell FFT grid, flattened."""
    freqs = [2*np.pi*np.fft.fftfreq(n) for n in (nu, nv, nw)]
    sites = [np.arange(n) for n in (nu, nv, nw)]
    k_dot_r = np.kron(freqs[0], sites[0])[:,np.newaxis,np.newaxis] \
            + np.kron(freqs[1], sites[1])[:,np.newaxis] \
            + np.kron(freqs[2], sites[2])
    return np.exp(1j*k_dot_r).flatten()
def unit(vx, vy, vz):
    """Normalize a vector field; zero-length vectors stay zero.

    Returns the unit components and the original magnitudes.
    """
    norm = np.sqrt(vx**2+vy**2+vz**2)
    degenerate = np.isclose(norm, 0, rtol=1e-4)
    if degenerate.sum() > 0:
        rows = np.argwhere(degenerate)
        norm[rows] = 1                     # avoid division by zero
        ux, uy, uz = vx/norm, vy/norm, vz/norm
        ux[rows], uy[rows], uz[rows] = 0, 0, 0
        norm[rows] = 0                     # restore the true magnitude
        return ux, uy, uz, norm
    ux, uy, uz = vx/norm, vy/norm, vz/norm
    return ux, uy, uz, norm
def indices(mask):
    """Flat indices of the masked and unmasked points of a boolean mask."""
    flat = mask.flatten()
    i_mask = np.flatnonzero(flat)
    i_unmask = np.flatnonzero(~flat)
    return i_mask, i_unmask
def prefactors(scattering_length, phase_factor, occupancy, primitive=None):
    """Scattering prefactors per reflection and atom.

    Multiplies scattering length, phase factor and site occupancy; when
    ``primitive`` site indices are given, the contributions of the
    symmetry-equivalent sites are summed.  (Fixed the ``not ... is None``
    idiom to ``is not None``.)
    """
    n_atm = occupancy.shape[0]
    n_hkl = scattering_length.shape[0] // n_atm
    scattering_length = scattering_length.reshape(n_hkl,n_atm)
    phase_factor = phase_factor.reshape(n_hkl,n_atm)
    factors = scattering_length*phase_factor*occupancy
    if primitive is not None:
        return np.sum(factors[:,primitive],axis=2).flatten()
    else:
        return factors.flatten()
def transform(delta_r, H, K, L, nu, nv, nw, n_atm):
    """Inverse FFT of a real-space field plus DFT lookup indices for (H,K,L)."""
    delta_k = np.fft.ifftn(delta_r.reshape(nu,nv,nw,n_atm),
                           axes=(0,1,2))*(nu*nv*nw)
    # fold the integer reflections back onto the supercell grid
    Ku, Kv, Kw = (np.mod(idx, n).astype(int)
                  for idx, n in ((H, nu), (K, nv), (L, nw)))
    i_dft = Kw+nw*(Kv+nv*Ku)
    return delta_k.flatten(), i_dft
def intensity(delta_k, i_dft, factors):
    """Diffuse intensity |F|^2 per reflection, normalised by site count."""
    n_hkl = i_dft.shape[0]
    n_atm = factors.shape[0] // n_hkl
    n_uvw = delta_k.shape[0] // n_atm
    terms = factors.reshape(n_hkl,n_atm) \
          * delta_k.reshape(n_uvw,n_atm)[i_dft,:]
    F = terms.sum(axis=1)
    return (np.real(F)**2+np.imag(F)**2)/(n_uvw*n_atm)
def structure(delta_k, i_dft, factors):
    """Structure factors and the per-atom products they are summed from."""
    n_hkl = i_dft.shape[0]
    n_atm = factors.shape[0] // n_hkl
    n_uvw = delta_k.shape[0] // n_atm
    terms = factors.reshape(n_hkl,n_atm) \
          * delta_k.reshape(n_uvw,n_atm)[i_dft,:]
    return terms.sum(axis=1), terms.flatten()
def bragg(Qx, Qy, Qz, rx, ry, rz, factors, cond):
    """Bragg structure factors at the reflections selected by ``cond``."""
    n_hkl = cond.sum()
    n_xyz = factors.size // Qx.shape[0]
    selected = factors.reshape(factors.size // n_xyz,n_xyz)[cond,:]
    phases = np.exp(1j*(np.kron(Qx[cond],rx)
                        +np.kron(Qy[cond],ry)
                        +np.kron(Qz[cond],rz))).reshape(n_hkl,n_xyz)
    return (selected*phases).sum(axis=1)
def debye_waller(h_range, k_range, l_range, nh, nk, nl,
                 U11, U22, U33, U23, U13, U12,
                 a_, b_, c_, T=np.eye(3)):
    """Anisotropic Debye-Waller factors on the (h,k,l) grid.

    Returns exp(-2*pi**2 * h.U.h) per grid point and atom, flattened
    hkl-major.  (Fixed: the output array no longer shadows the orientation
    matrix parameter ``T``.)
    """
    h_, k_, l_ = np.meshgrid(np.linspace(h_range[0],h_range[1],nh),
                             np.linspace(k_range[0],k_range[1],nk),
                             np.linspace(l_range[0],l_range[1],nl),
                             indexing='ij')
    h, k, l = crystal.transform(h_, k_, l_, T)
    h = h.flatten()
    k = k.flatten()
    l = l.flatten()
    n_hkl = nh*nk*nl
    n_atm = U11.shape[0]
    dw_factors = np.zeros((n_hkl,n_atm))
    for i in range(n_atm):
        dw_factors[:,i] = np.exp(-2*np.pi**2*(U11[i]*(h*a_)**2+
                                              U22[i]*(k*b_)**2+
                                              U33[i]*(l*c_)**2+
                                              U23[i]*k*l*b_*c_*2+
                                              U13[i]*h*l*a_*c_*2+
                                              U12[i]*h*k*a_*b_*2))
    return dw_factors.flatten()
def condition(H, K, L, nu=1, nv=1, nw=1, centering=None):
    """Select (H,K,L) on supercell DFT points satisfying the centering rule.

    Returns the selected H, K, L and the boolean selection mask.
    Raises ValueError for an unrecognized centering symbol (previously an
    unknown symbol fell through to a NameError on ``cond``).
    """
    iH = np.mod(H, nu)
    iK = np.mod(K, nv)
    iL = np.mod(L, nw)
    h = H // nu
    k = K // nv
    l = L // nw
    # points coinciding with the supercell discrete-Fourier grid
    dft_cond = (iH == 0) & (iK == 0) & (iL == 0)
    if (centering is None):
        cond = dft_cond
    elif (centering == 'P'):
        cond = (h % 1 == 0) & (k % 1 == 0) & (l % 1 == 0) & (dft_cond)
    elif (centering == 'I'):
        cond = ((h+k+l) % 2 == 0) & (dft_cond)
    elif (centering == 'F'):
        cond = ((h+k) % 2 == 0) \
             & ((k+l) % 2 == 0) \
             & ((l+h) % 2 == 0) & (dft_cond)
    elif (centering == 'R(obv)'):
        cond = ((-h+k+l) % 3 == 0) & (dft_cond)
    elif (centering == 'R(rev)'):
        cond = ((h-k+l) % 3 == 0) & (dft_cond)
    elif (centering == 'C'):
        cond = ((h+k) % 2 == 0) & (dft_cond)
    elif (centering == 'A'):
        cond = ((k+l) % 2 == 0) & (dft_cond)
    elif (centering == 'B'):
        cond = ((l+h) % 2 == 0) & (dft_cond)
    elif (centering == 'H'):
        cond = ((h-k) % 3 == 0) & (dft_cond)
    elif (centering == 'D'):
        cond = ((h+k+l) % 3 == 0) & (dft_cond)
    else:
        raise ValueError('unknown centering {}'.format(centering))
    return H[cond], K[cond], L[cond], cond
def mapping(h_range, k_range, l_range, nh, nk, nl,
            nu, nv, nw, T=np.eye(3), laue=None):
    """Map the (h,k,l) grid onto symmetry-unique reflections.

    Rounds the transformed grid to supercell integers, nudges points that
    accidentally land on DFT nodes, and (when a Laue class is given)
    reduces the set to unique representatives under its operators.
    Returns h, k, l, H, K, L, the unique indices, the inverse mapping and
    the operator list.  (Fixed ``laue == None`` to ``laue is None``.)
    """
    h_, k_, l_ = np.meshgrid(np.linspace(h_range[0],h_range[1],nh),
                             np.linspace(k_range[0],k_range[1],nk),
                             np.linspace(l_range[0],l_range[1],nl),
                             indexing='ij')
    h_ = h_.flatten()
    k_ = k_.flatten()
    l_ = l_.flatten()
    h, k, l = crystal.transform(h_, k_, l_, T)
    H = np.round(h*nu).astype(int)
    K = np.round(k*nv).astype(int)
    L = np.round(l*nw).astype(int)
    # nudge points that rounded exactly onto a DFT node they do not occupy
    iH = np.mod(H, nu)
    iK = np.mod(K, nv)
    iL = np.mod(L, nw)
    mask = (iH == 0) & (~np.isclose(np.mod(h*nu,nu),0))
    H[mask] += 1
    mask = (iK == 0) & (~np.isclose(np.mod(k*nv,nv),0))
    K[mask] += 1
    mask = (iL == 0) & (~np.isclose(np.mod(l*nw,nw),0))
    L[mask] += 1
    if (laue is None or laue == 'None'):
        index = np.arange(nh*nk*nl)
        return h, k, l, H, K, L, index, index, np.array([u'x,y,z'])
    symops = symmetry.inverse(symmetry.laue(laue))
    total = []
    coordinate = np.stack((H,K,L))
    cosymmetries, coindices, coinverses = np.unique(coordinate,
                                                    axis=1,
                                                    return_index=True,
                                                    return_inverse=True)
    for op in symops:
        transformed = symmetry.evaluate([op], cosymmetries, translate=False)
        total.append(transformed)
    index = np.arange(coordinate.shape[1])
    total = np.vstack(total)
    # canonicalize the symmetry orbit of every reflection before deduplication
    for i in range(cosymmetries.shape[1]):
        total[:,:,i] = total[np.lexsort(total[:,:,i].T),:,i]
    total = np.vstack(total)
    _, indices, inverses = np.unique(total,
                                     axis=1,
                                     return_index=True,
                                     return_inverse=True)
    reverses = np.arange(indices.shape[0])
    h = h[coindices][indices]
    k = k[coindices][indices]
    l = l[coindices][indices]
    H = H[coindices][indices]
    K = K[coindices][indices]
    L = L[coindices][indices]
    index = index[coindices][indices]
    reverses = reverses[inverses][coinverses]
    return h, k, l, H, K, L, index, reverses, symops
def reduced(h_range, k_range, l_range, nh, nk, nl,
            nu, nv, nw, T=np.eye(3), laue=None):
    """Reduce the (h,k,l) grid to symmetry-unique reflections.

    Chooses enlarged grid factors (Nu,Nv,Nw) fine enough for the grid
    resolution, folds Friedel pairs, and (when a Laue class is given)
    deduplicates under its operators.  Returns the unique indices, the
    inverse mapping, the operators and Nu, Nv, Nw.  (Fixed
    ``laue == None`` to ``laue is None``; stripped non-code text fused
    onto the final return line.)
    """
    h_, k_, l_ = np.meshgrid(np.linspace(h_range[0],h_range[1],nh),
                             np.linspace(k_range[0],k_range[1],nk),
                             np.linspace(l_range[0],l_range[1],nl),
                             indexing='ij')
    h, k, l = crystal.transform(h_, k_, l_, T)
    h = h.flatten()
    k = k.flatten()
    l = l.flatten()
    del h_, k_, l_
    # grid step along each axis (zero for a singleton dimension)
    if (nh > 1):
        h_max_res = (h_range[1]-h_range[0])/(nh-1)
    else:
        h_max_res = 0
    if (nk > 1):
        k_max_res = (k_range[1]-k_range[0])/(nk-1)
    else:
        k_max_res = 0
    if (nl > 1):
        l_max_res = (l_range[1]-l_range[0])/(nl-1)
    else:
        l_max_res = 0
    hkl_max_res = np.array([[h_max_res,0,0],[0,k_max_res,0],[0,0,l_max_res]])
    hkl_res = np.abs(np.dot(T, hkl_max_res))
    h_res, k_res, l_res = np.max(hkl_res, axis=0)
    # enlarge the supercell grid so it resolves the requested step
    if (h_res > 0 and h_res < 1/nu):
        Nu = int(1/h_res // nu)*nu
    else:
        Nu = nu
    if (k_res > 0 and k_res < 1/nv):
        Nv = int(1/k_res // nv)*nv
    else:
        Nv = nv
    if (l_res > 0 and l_res < 1/nw):
        Nw = int(1/l_res // nw)*nw
    else:
        Nw = nw
    H = np.round(h*Nu).astype(np.int16)
    iH = np.mod(H, Nu)
    del iH, h
    K = np.round(k*Nv).astype(np.int16)
    iK = np.mod(K, Nv)
    del iK, k
    L = np.round(l*Nw).astype(np.int16)
    iL = np.mod(L, Nw)
    del iL, l
    if (laue is None or laue == 'None'):
        index = np.arange(nh*nk*nl)
        return index, index, np.array([u'x,y,z']), Nu, Nv, Nw
    symops = np.array(symmetry.laue(laue))
    symops = symmetry.inverse(symops)
    coordinate = np.stack((H,K,L)).T
    n = coordinate.shape[0]
    del H, K, L
    # fold Friedel pairs: keep the lexicographically smaller of +/-(H,K,L)
    coordinate = np.stack((coordinate,-coordinate)).T
    sort = np.lexsort(coordinate, axis=1)[:,0]
    pair = coordinate.reshape(3,2*n)[:,sort+2*np.arange(n)].T
    index = np.arange(n)
    del coordinate, sort
    cosymmetries, coindices, coinverses = symmetry.unique(pair)
    h_, k_, l_ = cosymmetries.T
    n = cosymmetries.shape[0]
    del cosymmetries
    sym, n_symops = symmetry.laue_id(symops)
    ops = np.zeros((3,n,n_symops), dtype=np.int16)
    for i in range(n_symops):
        h, k, l = symmetry.bragg(h_, k_, l_, sym, i)
        ops[0,:,i], ops[1,:,i], ops[2,:,i] = h, k, l
    # canonicalize every symmetry orbit before deduplicating
    sort = np.lexsort(ops, axis=1)[:,0]
    total = ops.reshape(3,n_symops*n)[:,sort+n_symops*np.arange(n)].T
    del ops, h, k, l
    _, indices, inverses = symmetry.unique(total)
    reverses = np.arange(indices.shape[0])
    index = index[coindices][indices]
    reverses = reverses[inverses][coinverses]
    return index, reverses, symops, Nu, Nv, Nw
import numpy as np
from disorder.diffuse.displacive import number
def transform(U_r, A_r, H, K, L, nu, nv, nw, n_atm):
    """
    Discrete Fourier transform of Taylor expansion displacement products and
    relative occupancy parameter.

    Parameters
    ----------
    U_r : 1d array
        Displacement parameter :math:`U` (in Cartesian coordinates).
    A_r : 1d array
        Relative occupancy parameter :math:`A`.
    H, K, L : 1d array, int
        Supercell index along the :math:`a^*`, :math:`b^*`, and
        :math:`c^*`-axis in reciprocal space.
    nu, nv, nw : int
        Number of grid points :math:`N_1`, :math:`N_2`, :math:`N_3` along the
        :math:`a`, :math:`b`, and :math:`c`-axis of the supercell.
    n_atm : int
        Number of atoms in the unit cell.

    Returns
    -------
    U_k : 1d array
        Transform of the displacement products, flattened.
    A_k : 1d array
        Transform of the occupancy-weighted displacement products, flattened.
    i_dft : 1d array, int
        Flat grid indices of the requested (H, K, L) points.
    """
    n_uvw = nu*nv*nw
    n_prod = U_r.shape[0] // (n_uvw*n_atm)

    # Lay the flattened products out on the supercell grid.
    U_grid = U_r.reshape(n_prod, nu, nv, nw, n_atm)
    A_grid = np.tile(A_r, n_prod).reshape(n_prod, nu, nv, nw, n_atm)

    # Inverse FFT over the three cell axes, rescaled by the grid size.
    U_k = n_uvw*np.fft.ifftn(U_grid, axes=(1, 2, 3))
    A_k = n_uvw*np.fft.ifftn(A_grid*U_grid, axes=(1, 2, 3))

    # Fold integer reciprocal indices back onto the periodic grid.
    iu = np.mod(H, nu).astype(int)
    iv = np.mod(K, nv).astype(int)
    iw = np.mod(L, nw).astype(int)
    i_dft = iw+nw*(iv+nv*iu)

    return U_k.flatten(), A_k.flatten(), i_dft
def intensity(U_k, A_k, Q_k, coeffs, cond, p, i_dft, factors, subtract=True):
    """
    Chemical scattering intensity.

    Parameters
    ----------
    U_k : 1d array
        Fourier transform of Taylor expansion displacement products.
    A_k : 1d array
        Fourier transform of relative site occupancies times Taylor expansion
        displacement products.
    Q_k : 1d array
        Fourier transform of Taylor expansion wavevector products.
    coeffs : 1d array
        Taylor expansion coefficients.
    cond : 1d array
        Array indices corresponding to nuclear Bragg peaks.
    p : int
        Order of Taylor expansion.
    i_dft: 1d array, int
        Array indices of Fourier transform corresponding to reciprocal space.
    factors: 1d array
        Prefactors of form factors, phase factors, and composition factors.
    subtract : bool, optional
        If ``True`` (default), subtract the nuclear Bragg contribution;
        otherwise return it separately as a second value.

    Returns
    -------
    I : 1d array
        Array has a flattened shape of size ``i_dft.shape[0]``.
    F_bragg : 1d array, complex
        Nuclear Bragg structure factor; only returned if ``subtract=False``.
    """
    # Recover array dimensions from the flattened inputs.
    n_prod = coeffs.shape[0]
    n_peaks = i_dft.shape[0]
    n_atm = factors.shape[0] // n_peaks
    factors = factors.reshape(n_peaks,n_atm)
    n_uvw = U_k.shape[0] // n_prod // n_atm
    U_k = U_k.reshape(n_prod,n_uvw,n_atm)
    A_k = A_k.reshape(n_prod,n_uvw,n_atm)
    Q_k = Q_k.reshape(n_prod,n_peaks)
    # Build the indices of every other expansion order's terms ("even"
    # terms), as [start, end) ranges over the cumulative term counts.
    start = (np.cumsum(number(np.arange(p+1)))-number(np.arange(p+1)))[::2]
    end = np.cumsum(number(np.arange(p+1)))[::2]
    even = []
    for k in range(len(end)):
        even += range(start[k], end[k])
    even = np.array(even)
    # Contract displacement/occupancy products against wavevector products
    # over the expansion terms; coeffs broadcasts along the trailing
    # (n_prod) axis of the transposed array. Result is (n_peaks, n_atm).
    V_k = np.einsum('ijk,kj->ji', coeffs*(U_k[:,i_dft,:]+\
                    A_k[:,i_dft,:]).T, Q_k)
    # Same contraction restricted to the even terms and Bragg peaks.
    V_k_nuc = np.einsum('ijk,kj->ji',
                        (coeffs[even]*(U_k[:,i_dft,:][even,:]+\
                         A_k[:,i_dft,:][even,:]).T),
                        Q_k[even,:])[cond]
    # Per-atom contributions and their sums: the structure factor and its
    # nuclear (Bragg) part.
    prod = factors*V_k
    prod_nuc = factors[cond,:]*V_k_nuc
    F = np.sum(prod, axis=1)
    F_nuc = np.sum(prod_nuc, axis=1)
    if subtract:
        F[cond] -= F_nuc
        I = np.real(F)**2+np.imag(F)**2
        return I/(n_uvw*n_atm)
    else:
        F_bragg = np.zeros(F.shape, dtype=complex)
        F_bragg[cond] = F_nuc
        I = np.real(F)**2+np.imag(F)**2
        return I/(n_uvw*n_atm), F_bragg
def structure(U_k, A_k, Q_k, coeffs, cond, p, i_dft, factors):
    """
    Partial displacive structure factor.

    Parameters
    ----------
    U_k : 1d array
        Fourier transform of Taylor expansion displacement products.
    A_k : 1d array
        Fourier transform of relative site occupancies times Taylor expansion
        displacement products.
    Q_k : 1d array
        Fourier transform of Taylor expansion wavevector products.
    coeffs : 1d array
        Taylor expansion coefficients.
    cond : 1d array
        Array indices corresponding to nuclear Bragg peaks.
    p : int
        Order of Taylor expansion.
    i_dft: 1d array, int
        Array indices of Fourier transform corresponding to reciprocal space.
    factors: 1d array
        Prefactors of scattering lengths, phase factors, and occupancies.

    Returns
    -------
    F : 1d array
        Array has a flattened shape of size ``coeffs.shape[0]*i_dft.shape[0]``.
    F_nuc : 1d array
        Array has a flattened shape of size ``cond.sum()*i_dft.shape[0]``.
    prod : 1d array
        Array has a flattened shape of size
        ``coeffs.shape[0]*i_dft.shape[0]*n_atm``.
    prod_nuc : 1d array
        Array has a flattened shape of size
        ``coeffs.sum()*i_dft.shape[0]*n_atm``.
    V_k : 1d array
        Array has a flattened shape of size
        ``coeffs.shape[0]*i_dft.shape[0]*n_atm``.
    V_k_nuc : 1d array
        Array has a flattened shape of size
        ``coeffs.sum()*i_dft.shape[0]*n_atm``.
    even : 1d array, int
        Array indices of the even Taylor expansion coefficients.
    bragg : 1d array, int
        Array has a flattened shape of size ``coeffs.sum()``.
    """
    # Recover array dimensions from the flattened inputs.
    n_prod = coeffs.shape[0]
    n_peaks = i_dft.shape[0]
    n_atm = factors.shape[0] // n_peaks
    factors = factors.reshape(n_peaks,n_atm)
    n_uvw = U_k.shape[0] // n_prod // n_atm
    U_k = U_k.reshape(n_prod,n_uvw,n_atm)
    A_k = A_k.reshape(n_prod,n_uvw,n_atm)
    Q_k = Q_k.reshape(n_prod,n_peaks)
    # Indices of every other expansion order's terms ("even" terms).
    start = (np.cumsum(number(np.arange(p+1)))-number(np.arange(p+1)))[::2]
    end = np.cumsum(number(np.arange(p+1)))[::2]
    even = []
    for k in range(len(end)):
        even += range(start[k], end[k])
    even = np.array(even)
    # Contraction over expansion terms; result is (n_peaks, n_atm).
    V_k = np.einsum('ijk,kj->ji', coeffs*(U_k[:,i_dft,:]+\
                    A_k[:,i_dft,:]).T, Q_k)
    # Same contraction restricted to even terms and Bragg peaks.
    V_k_nuc = np.einsum('ijk,kj->ji',
                        (coeffs[even]*(U_k[:,i_dft,:][even,:]+\
                         A_k[:,i_dft,:][even,:]).T),
                        Q_k[even,:])[cond]
    # Per-atom contributions, summed into the structure factors.
    prod = factors*V_k
    prod_nuc = factors[cond,:]*V_k_nuc
    F = np.sum(prod, axis=1)
    F_nuc = np.sum(prod_nuc, axis=1)
    # Peak indices of the nuclear Bragg reflections.
    bragg = np.arange(n_peaks)[cond]
    return F, \
           F_nuc, \
           prod.flatten(), \
           prod_nuc.flatten(), \
           V_k.flatten(), \
           V_k_nuc.flatten(), \
           even, \
           bragg | /rmc_discord-0.0.4-cp36-cp36m-win_amd64.whl/disorder/diffuse/nonmagnetic.py | 0.919326 | 0.823186 | nonmagnetic.py | pypi
import numpy as np
def composition(nu, nv, nw, n_atm, value=0.5):
    """
    Generate random relative site occupancies.

    Parameters
    ----------
    nu, nv, nw : int
        Number of grid points :math:`N_1`, :math:`N_2`, :math:`N_3` along the
        :math:`a`, :math:`b`, and :math:`c`-axis of the supercell.
    n_atm : int
        Number of atoms in the unit cell.
    value : float, 1d array, optional
        Average of site occupancies, default ``value=0.5``.

    Returns
    -------
    A : 1d array
        Array has a flattened shape of size ``nu*nw*nv*n_atm``.
    """
    # Bernoulli draw with success probability `value`, mapped to the
    # centered variable (1-value)/value for occupied sites and -1 for
    # vacant ones.
    occupied = np.random.random((nu, nv, nw, n_atm)) <= value
    return (occupied/value-1).flatten()
def transform(A_r, H, K, L, nu, nv, nw, n_atm):
    """
    Discrete Fourier transform of relative occupancy parameter.

    Parameters
    ----------
    A_r : 1d array
        Relative occupancy parameter :math:`A`.
    H, K, L : 1d array, int
        Supercell index along the :math:`a^*`, :math:`b^*`, and
        :math:`c^*`-axis in reciprocal space.
    nu, nv, nw : int
        Number of grid points :math:`N_1`, :math:`N_2`, :math:`N_3` along the
        :math:`a`, :math:`b`, and :math:`c`-axis of the supercell.
    n_atm : int
        Number of atoms in the unit cell.

    Returns
    -------
    A_k : 1d array
        Transform of the occupancy parameter, flattened.
    i_dft : 1d array, int
        Flat grid indices of the requested (H, K, L) points.
    """
    # Inverse FFT over the three cell axes, rescaled by the grid size.
    A_k = nu*nv*nw*np.fft.ifftn(A_r.reshape(nu, nv, nw, n_atm), axes=(0, 1, 2))

    # Fold integer reciprocal indices back onto the periodic grid.
    iu = np.mod(H, nu).astype(int)
    iv = np.mod(K, nv).astype(int)
    iw = np.mod(L, nw).astype(int)
    i_dft = iw+nw*(iv+nv*iu)

    return A_k.flatten(), i_dft
def intensity(A_k, i_dft, factors):
    """
    Chemical scattering intensity.

    Parameters
    ----------
    A_k : 1d array
        Fourier transform of relative site occupancies.
    i_dft: 1d array, int
        Array indices of Fourier transform corresponding to reciprocal space.
    factors: 1d array
        Prefactors of form factors, phase factors, and composition factors.

    Returns
    -------
    I : 1d array
        Array has a flattened shape of size ``i_dft.shape[0]``.
    """
    n_peaks = i_dft.shape[0]
    n_atm = factors.shape[0] // n_peaks
    n_uvw = A_k.shape[0] // n_atm

    # Structure factor: per-atom contributions summed at each peak.
    contrib = factors.reshape(n_peaks, n_atm)*A_k.reshape(n_uvw, n_atm)[i_dft,:]
    F = contrib.sum(axis=1)

    # Intensity |F|^2, normalized per site.
    return (F.real**2+F.imag**2)/(n_uvw*n_atm)
def structure(A_k, i_dft, factors):
    """
    Partial chemical structure factor.

    Parameters
    ----------
    A_k : 1d array
        Fourier transform of relative site occupancies.
    i_dft: 1d array, int
        Array indices of Fourier transform corresponding to reciprocal space.
    factors: 1d array
        Prefactors of form factors, phase factors, and composition factors.

    Returns
    -------
    F : 1d array
        Array has a flattened shape of size ``i_dft.shape[0]``
    prod : 1d array
        Array has a flattened shape of size
        ``i_dft.shape[0]*n_atm``.
    """
    # Recover array dimensions from the flattened inputs.
    n_peaks = i_dft.shape[0]
    n_atm = factors.shape[0] // n_peaks
    factors = factors.reshape(n_peaks,n_atm)
    n_uvw = A_k.shape[0] // n_atm
    A_k = A_k.reshape(n_uvw,n_atm)
    # Per-atom contributions at each peak and their sum (structure factor).
    prod = factors*A_k[i_dft,:]
    F = np.sum(prod, axis=1)
    return F, prod.flatten() | /rmc_discord-0.0.4-cp36-cp36m-win_amd64.whl/disorder/diffuse/occupational.py | 0.928506 | 0.835919 | occupational.py | pypi
import numpy as np
def expansion(nu, nv, nw, n_atm, value=1, fixed=True):
    """
    Generate random displacement vectors.

    Parameters
    ----------
    nu, nv, nw : int
        Number of grid points :math:`N_1`, :math:`N_2`, :math:`N_3` along the
        :math:`a`, :math:`b`, and :math:`c`-axis of the supercell.
    n_atm : int
        Number of atoms in the unit cell.
    value : float, 1d array, or 2d array, optional
        Isotropic, diagonal, or full components of the displacement
        parameters, default ``value=1``.
    fixed : bool, optional
        If ``True`` (default), draw displacements of fixed magnitude along
        random directions; otherwise draw from a trivariate normal
        distribution via a Cholesky factorization.

    Returns
    -------
    Ux, Uy, Uz : 1d array
        Each array has a flattened shape of size ``nu*nv*nw*n_atm``.
    """
    # Unpack the displacement (co)variance components by input rank.
    rank = len(np.shape(value))
    if rank == 0:
        Vxx = Vyy = Vzz = np.full(n_atm, value)
        Vyz = Vxz = Vxy = np.full(n_atm, 0)
    elif rank == 1:
        Vxx = Vyy = Vzz = value
        Vyz = Vxz = Vxy = np.full(n_atm, 0)
    else:
        Vxx, Vyy, Vzz = value[0], value[1], value[2]
        Vyz, Vxz, Vxy = value[3], value[4], value[5]
    if fixed:
        # Uniformly distributed directions on the unit sphere.
        theta = 2*np.pi*np.random.rand(nu, nv, nw, n_atm)
        phi = np.arccos(1-2*np.random.rand(nu, nv, nw, n_atm))
        nx = np.sin(phi)*np.cos(theta)
        ny = np.sin(phi)*np.sin(theta)
        nz = np.cos(phi)
        # Magnitude from the quadratic form n.V.n along each direction.
        mag = np.sqrt(Vxx*nx*nx+Vyy*ny*ny+Vzz*nz*nz
                      + 2*(Vxz*nx*nz+Vyz*ny*nz+Vxy*nx*ny))
        Ux, Uy, Uz = mag*nx, mag*ny, mag*nz
    else:
        # Cholesky factor per atom; left as zero if the covariance is not
        # positive definite.
        cov = np.zeros((3, 3, n_atm))
        chol = np.zeros((3, 3, n_atm))
        cov[0,0,:], cov[1,1,:], cov[2,2,:] = Vxx, Vyy, Vzz
        cov[1,2,:] = cov[2,1,:] = Vyz
        cov[0,2,:] = cov[2,0,:] = Vxz
        cov[0,1,:] = cov[1,0,:] = Vxy
        for i in range(n_atm):
            if np.all(np.linalg.eigvals(cov[...,i]) > 0):
                chol[...,i] = np.linalg.cholesky(cov[...,i])
        # Correlated normal displacements U = L.z with z ~ N(0, I).
        z = np.random.normal(loc=0,
                             scale=1,
                             size=3*nu*nv*nw*n_atm).reshape(3, nu, nv, nw,
                                                            n_atm)
        Ux = z[0,...]*chol[0,0,:]
        Uy = z[0,...]*chol[1,0,:]+z[1,...]*chol[1,1,:]
        Uz = z[0,...]*chol[2,0,:]+z[1,...]*chol[2,1,:]+z[2,...]*chol[2,2,:]
    return Ux.flatten(), Uy.flatten(), Uz.flatten()
def number(n):
    """
    :math:`n`-th triangular number :math:`(n+1)(n+2)/2`.

    Parameters
    ----------
    n : int or 1d array, int
        Number.

    Returns
    -------
    int or 1d array, int
        Triangular number.
    """
    m = n+1
    return m*(m+1) // 2
def numbers(n):
    """
    Cumulative sum of the :math:`0\\dots n` triangular numbers,
    :math:`(n+1)(n+2)(n+3)/6`.

    Parameters
    ----------
    n : int or 1d array, int
        Number.

    Returns
    -------
    int or 1d array, int
        Cumulative sum.
    """
    m = n+1
    return m*(m+1)*(m+2) // 6
def indices(p):
    """
    Even and odd term indices for the Taylor expansion.

    Parameters
    ----------
    p : int
        Order of the Taylor expansion.

    Returns
    -------
    even, odd : 1d array, int
        Indices of the terms belonging to even and odd expansion orders.
    """
    orders = np.arange(p+1)
    counts = number(orders)     # number of terms per expansion order
    stops = numbers(orders)     # cumulative terms through each order
    starts = stops-counts
    # One contiguous index block per expansion order.
    blocks = [np.arange(a, b) for a, b in zip(starts, stops)]
    return np.concatenate(blocks[0::2]), np.concatenate(blocks[1::2])
def factorial(n):
    """
    Factorial :math:`n!`.

    Parameters
    ----------
    n : int
        Number. Must be non-negative.

    Returns
    -------
    int
        Factorial of the number.

    Raises
    ------
    ValueError
        If ``n`` is negative.
    """
    # Iterative product instead of recursion: the recursive version hit the
    # recursion limit for large n and descended without end for negative n.
    if n < 0:
        raise ValueError('factorial is undefined for negative n')
    result = 1
    for i in range(2, n+1):
        result *= i
    return result
def coefficients(p):
    """
    Coefficients for the Taylor expansion product, ``1j**n/(u! v! w!)`` for
    each exponent triple with ``u+v+w == n`` up to order ``p``.

    Parameters
    ----------
    p : int
        Order of the Taylor expansion.

    Returns
    -------
    coeffs : 1d array, complex
        Array of coefficients.
    """
    coeffs = np.zeros(numbers(p), dtype=complex)
    j = 0
    for order in range(p+1):
        phase = 1j**order
        # Enumerate exponent triples in (w, v, u) nested order so the term
        # ordering matches the companion expansion routines.
        for w in range(order+1):
            for v in range(order+1):
                for u in range(order+1):
                    if u+v+w != order:
                        continue
                    coeffs[j] = phase/(factorial(u)*factorial(v)*factorial(w))
                    j += 1
    return coeffs
def products(Vx, Vy, Vz, p):
    """
    Taylor expansion products ``Vx**u * Vy**v * Vz**w`` for every exponent
    triple with ``u+v+w == i`` over all orders ``i = 0..p``.

    Parameters
    ----------
    Vx, Vy, Vz : float or 1d array
        Vector components (scalars are treated as length-1 arrays).
    p : int
        Order of the Taylor expansion.

    Returns
    -------
    1d array
        Flattened array of shape ``numbers(p)*n`` where ``n`` is the length
        of the input arrays.
    """
    if (type(Vx) is np.ndarray):
        n = Vx.shape[0]
    else:
        n = 1
    V = np.ones((numbers(p),n))
    j = 0
    # Same (order, w, v, u) enumeration as in coefficients(), so the two
    # arrays align term by term.
    for i in range(p+1):
        for w in range(i+1):
            for v in range(i+1):
                for u in range(i+1):
                    if (u+v+w == i):
                        V[j,:] = Vx**u*Vy**v*Vz**w
                        j += 1
    return V.flatten()
def transform(U_r, H, K, L, nu, nv, nw, n_atm):
    """
    Discrete Fourier transform of Taylor expansion displacement products.

    Parameters
    ----------
    U_r : 1d array
        Displacement parameter :math:`U` (in Cartesian coordinates).
    H, K, L : 1d array, int
        Supercell index along the :math:`a^*`, :math:`b^*`, and
        :math:`c^*`-axis in reciprocal space.
    nu, nv, nw : int
        Number of grid points :math:`N_1`, :math:`N_2`, :math:`N_3` along the
        :math:`a`, :math:`b`, and :math:`c`-axis of the supercell.
    n_atm : int
        Number of atoms in the unit cell.

    Returns
    -------
    U_k : 1d array
        Transform of the displacement products, flattened.
    i_dft : 1d array, int
        Flat grid indices of the requested (H, K, L) points.
    """
    n_uvw = nu*nv*nw
    n_prod = U_r.shape[0] // (n_uvw*n_atm)

    # Inverse FFT over the three cell axes, rescaled by the grid size.
    U_grid = U_r.reshape(n_prod, nu, nv, nw, n_atm)
    U_k = n_uvw*np.fft.ifftn(U_grid, axes=(1, 2, 3))

    # Fold integer reciprocal indices back onto the periodic grid.
    iu = np.mod(H, nu).astype(int)
    iv = np.mod(K, nv).astype(int)
    iw = np.mod(L, nw).astype(int)
    i_dft = iw+nw*(iv+nv*iu)

    return U_k.flatten(), i_dft
def intensity(U_k, Q_k, coeffs, cond, p, i_dft, factors, subtract=True):
    """
    Displacive scattering intensity.

    Parameters
    ----------
    U_k : 1d array
        Fourier transform of Taylor expansion displacement products.
    Q_k : 1d array
        Fourier transform of Taylor expansion wavevector products.
    coeffs : 1d array
        Taylor expansion coefficients.
    cond : 1d array
        Array indices corresponding to nuclear Bragg peaks.
    p : int
        Order of Taylor expansion.
    i_dft : 1d array, int
        Array indices of Fourier transform corresponding to reciprocal space.
    factors : 1d array
        Prefactors of form factors, phase factors, and composition factors.
    subtract : boolean, optional
        Optionally subtract the Bragg intensity or return the Bragg structure
        factor.

    Returns
    -------
    I : 1d array
        Array has a flattened shape of size ``coeffs.shape[0]*i_dft.shape[0]``.
    F_bragg : 1d array
        Nuclear Bragg structure factor; only returned if ``subtract=False``.
    """
    # Recover array dimensions from the flattened inputs.
    n_prod = coeffs.shape[0]
    n_hkl = i_dft.shape[0]
    n_atm = factors.shape[0] // n_hkl
    factors = factors.reshape(n_hkl,n_atm)
    n_uvw = U_k.shape[0] // n_prod // n_atm
    U_k = U_k.reshape(n_prod,n_uvw,n_atm)
    Q_k = Q_k.reshape(n_prod,n_hkl)
    # Indices of the even- and odd-order Taylor terms.
    even, odd = indices(p)
    # Contract displacement products with wavevector products over the
    # expansion terms; coeffs broadcasts along the trailing (n_prod) axis
    # of the transposed array. Result is (n_hkl, n_atm).
    V_k = np.einsum('ijk,kj->ji', coeffs*U_k[:,i_dft,:].T, Q_k)
    # Same contraction restricted to even terms, evaluated at Bragg peaks.
    V_k_nuc = np.einsum('ijk,kj->ji', (coeffs[even]*U_k[:,i_dft,:][even,:].T),
                        Q_k[even,:])[cond]
    # Per-atom contributions, summed into the structure factor and its
    # nuclear (Bragg) part.
    prod = factors*V_k
    prod_nuc = factors[cond,:]*V_k_nuc
    F = np.sum(prod, axis=1)
    F_nuc = np.sum(prod_nuc, axis=1)
    if subtract:
        F[cond] -= F_nuc
        I = np.real(F)**2+np.imag(F)**2
        return I/(n_uvw*n_atm)
    else:
        F_bragg = np.zeros(F.shape, dtype=complex)
        F_bragg[cond] = F_nuc
        I = np.real(F)**2+np.imag(F)**2
        return I/(n_uvw*n_atm), F_bragg
def structure(U_k, Q_k, coeffs, cond, p, i_dft, factors):
    """
    Partial displacive structure factor.

    Parameters
    ----------
    U_k : 1d array
        Fourier transform of Taylor expansion displacement products.
    Q_k : 1d array
        Fourier transform of Taylor expansion wavevector products.
    coeffs : 1d array
        Taylor expansion coefficients.
    cond : 1d array
        Array indices corresponding to nuclear Bragg peaks.
    p : int
        Order of Taylor expansion.
    i_dft : 1d array, int
        Array indices of Fourier transform corresponding to reciprocal space.
    factors : 1d array
        Prefactors of scattering lengths, phase factors, and occupancies.

    Returns
    -------
    F : 1d array
        Array has a flattened shape of size ``coeffs.shape[0]*i_dft.shape[0]``.
    F_nuc : 1d array
        Array has a flattened shape of size ``cond.sum()*i_dft.shape[0]``.
    prod : 1d array
        Array has a flattened shape of size
        ``coeffs.shape[0]*i_dft.shape[0]*n_atm``.
    prod_nuc : 1d array
        Array has a flattened shape of size
        ``coeffs.sum()*i_dft.shape[0]*n_atm``.
    V_k : 1d array
        Array has a flattened shape of size
        ``coeffs.shape[0]*i_dft.shape[0]*n_atm``.
    V_k_nuc : 1d array
        Array has a flattened shape of size
        ``coeffs.sum()*i_dft.shape[0]*n_atm``.
    even : 1d array, int
        Array indices of the even Taylor expansion coefficients.
    bragg : 1d array, int
        Array has a flattened shape of size ``coeffs.sum()``.
    """
    # Recover array dimensions from the flattened inputs.
    n_prod = coeffs.shape[0]
    n_hkl = i_dft.shape[0]
    n_atm = factors.shape[0] // n_hkl
    factors = factors.reshape(n_hkl,n_atm)
    n_uvw = U_k.shape[0] // n_prod // n_atm
    U_k = U_k.reshape(n_prod,n_uvw,n_atm)
    Q_k = Q_k.reshape(n_prod,n_hkl)
    # Indices of the even- and odd-order Taylor terms.
    even, odd = indices(p)
    # Contraction over expansion terms; result is (n_hkl, n_atm).
    V_k = np.einsum('ijk,kj->ji', coeffs*U_k[:,i_dft,:].T, Q_k)
    # Same contraction restricted to even terms and Bragg peaks.
    V_k_nuc = np.einsum('ijk,kj->ji', (coeffs[even]*U_k[:,i_dft,:][even,:].T),
                        Q_k[even,:])[cond]
    # Per-atom contributions, summed into the structure factors.
    prod = factors*V_k
    prod_nuc = factors[cond,:]*V_k_nuc
    F = np.sum(prod, axis=1)
    F_nuc = np.sum(prod_nuc, axis=1)
    # Peak indices of the nuclear Bragg reflections.
    bragg = np.arange(n_hkl)[cond]
    return F, F_nuc, prod.flatten(), prod_nuc.flatten(), \
           V_k.flatten(), V_k_nuc.flatten(), even, bragg
def parameters(Ux, Uy, Uz, D, n_atm):
    """
    Mean-square atomic displacement parameters in crystal coordinates from
    Cartesian displacement samples.

    Parameters
    ----------
    Ux, Uy, Uz : 1d array
        Cartesian displacement components; each of size ``n_samples*n_atm``.
    D : 2d array, 3x3
        Transform matrix from crystal axis to Cartesian coordinate system.
    n_atm : int
        Number of atoms in the unit cell.

    Returns
    -------
    U11, U22, U33, U23, U13, U12 : 1d array
        Components of the displacement parameters, each of size ``n_atm``.
    """
    # All three arrays have the same length; previously the reshape sizes
    # were inconsistently taken from Ux/Uy/Uz interchangeably.
    n_samples = Ux.size // n_atm

    def _avg(a):
        # Per-atom average over the supercell samples.
        return np.mean(a.reshape(n_samples, n_atm), axis=0)

    # Second moments of the displacements in Cartesian coordinates.
    Uxx, Uyy, Uzz = _avg(Ux**2), _avg(Uy**2), _avg(Uz**2)
    Uyz, Uxz, Uxy = _avg(Uy*Uz), _avg(Ux*Uz), _avg(Ux*Uy)

    U11 = np.zeros(n_atm)
    U22 = np.zeros(n_atm)
    U33 = np.zeros(n_atm)
    U23 = np.zeros(n_atm)
    U13 = np.zeros(n_atm)
    U12 = np.zeros(n_atm)

    # Transform each atom's Cartesian tensor back to crystal coordinates.
    D_inv = np.linalg.inv(D)
    for i in range(n_atm):
        Up = np.array([[Uxx[i], Uxy[i], Uxz[i]],
                       [Uxy[i], Uyy[i], Uyz[i]],
                       [Uxz[i], Uyz[i], Uzz[i]]])
        U = np.dot(np.dot(D_inv, Up), D_inv.T)
        U11[i] = U[0,0]
        U22[i] = U[1,1]
        U33[i] = U[2,2]
        U23[i] = U[1,2]
        U13[i] = U[0,2]
        U12[i] = U[0,1]
    return U11, U22, U33, U23, U13, U12
def equivalent(Uiso, D):
    """
    Components of atomic displacement parameters in crystal coordinates
    :math:`U_{11}`, :math:`U_{22}`, :math:`U_{33}`, :math:`U_{23}`,
    :math:`U_{13}`, and :math:`U_{12}` equivalent to an isotropic value.

    Parameters
    ----------
    Uiso : 1d array
        Isotropic atomic displacement parameters :math:`U_\\mathrm{iso}`.
    D : 2d array, 3x3
        Transform matrix from crystal axis to Cartesian coordinate system.

    Returns
    -------
    U11, U22, U33, U23, U13, U12 : float or 1d array
        Has same size as input isotropic atomic displacement parameters.
    """
    # Inverse metric of the crystal-to-Cartesian transform scales the
    # isotropic value onto the crystal axes.
    g = np.dot(np.linalg.inv(D), np.linalg.inv(D.T))
    return (Uiso*g[0,0], Uiso*g[1,1], Uiso*g[2,2],
            Uiso*g[1,2], Uiso*g[0,2], Uiso*g[0,1])
def isotropic(U11, U22, U33, U23, U13, U12, D):
    """
    Equivalent isotropic displacement parameters :math:`U_\\mathrm{iso}`.

    Parameters
    ----------
    U11, U22, U33, U23, U13, U12 : float or 1d array
        Components of atomic displacement parameters.
    D : 2d array, 3x3
        Transform matrix from crystal axis to Cartesian coordinate system.

    Returns
    -------
    Uiso : 1d array
        Has same size as input atomic displacement parameter components.
    """
    n = np.size(U11)
    U = np.array([[U11,U12,U13], [U12,U22,U23], [U13,U23,U33]]).reshape(3,3,n)
    Uiso = np.empty(n)
    for i in range(n):
        # Mean eigenvalue of the tensor expressed in Cartesian coordinates.
        evals, _ = np.linalg.eig(np.dot(np.dot(D, U[...,i]), D.T))
        Uiso[i] = np.mean(evals).real
    return Uiso
def principal(U11, U22, U33, U23, U13, U12, D):
    """
    Principal atomic displacement parameters :math:`U_1`, :math:`U_2`,
    and :math:`U_3`.

    Parameters
    ----------
    U11, U22, U33, U23, U13, U12 : float or 1d array
        Components of atomic displacement parameters.
    D : 2d array, 3x3
        Transform matrix from crystal axis to Cartesian coordinate system.

    Returns
    -------
    U1, U2, U3 : 1d array
        Sorted principal components; same size as the input components.
    """
    n = np.size(U11)
    U = np.array([[U11,U12,U13], [U12,U22,U23], [U13,U23,U33]]).reshape(3,3,n)
    U1, U2, U3 = np.empty(n), np.empty(n), np.empty(n)
    for i in range(n):
        # Sorted eigenvalues of the tensor in Cartesian coordinates.
        evals, _ = np.linalg.eig(np.dot(np.dot(D, U[...,i]), D.T))
        evals.sort()
        U1[i], U2[i], U3[i] = evals[0].real, evals[1].real, evals[2].real
    return U1, U2, U3
def cartesian(U11, U22, U33, U23, U13, U12, D):
    """
    Components of atomic displacement parameters in Cartesian coordinates
    :math:`U_{xx}`, :math:`U_{yy}`, :math:`U_{zz}`, :math:`U_{yz}`,
    :math:`U_{xz}`, and :math:`U_{xy}`.

    Parameters
    ----------
    U11, U22, U33, U23, U13, U12 : float or 1d array
        Components of atomic displacement parameters :math:`U_{11}`,
        :math:`U_{22}`, :math:`U_{33}`, :math:`U_{23}`, :math:`U_{13}`,
        and :math:`U_{12}`.
    D : 2d array, 3x3
        Transform matrix from crystal axis to Cartesian coordinate system.

    Returns
    -------
    Uxx, Uyy, Uzz, Uyz, Uxz, Uxy : 1d array
        Has same size as input atomic displacement parameter components.
    """
    # Stack the symmetric tensors, one 3x3 slice per atom.
    U = np.array([[U11,U12,U13], [U12,U22,U23], [U13,U23,U33]])
    n = np.size(U11)
    U = U.reshape(3,3,n)
    Uxx, Uyy, Uzz, Uyz, Uxz, Uxy = [], [], [], [], [], []
    for i in range(n):
        # Similarity transform into Cartesian coordinates.
        Up = np.dot(np.dot(D, U[...,i]), D.T)
        Uxx.append(Up[0,0])
        Uyy.append(Up[1,1])
        Uzz.append(Up[2,2])
        Uyz.append(Up[1,2])
        Uxz.append(Up[0,2])
        Uxy.append(Up[0,1])
    return np.array(Uxx), np.array(Uyy), np.array(Uzz), \
           np.array(Uyz), np.array(Uxz), np.array(Uxy) | /rmc_discord-0.0.4-cp36-cp36m-win_amd64.whl/disorder/diffuse/displacive.py | 0.926116 | 0.631225 | displacive.py | pypi
import numpy as np
from scipy.special import erfc
from disorder.material import crystal
def __A(alpha,r):
    # Real-space Ewald kernel used by the charge-dipole terms: screened
    # derivative-type combination of erfc(alpha*r)/r and its Gaussian part.
    gauss = 2*alpha/np.sqrt(np.pi)*np.exp(-alpha**2*r**2)
    return (gauss-erfc(alpha*r)/r)/r**2
def __B(alpha,r):
    # Real-space Ewald kernel used by the dipole-dipole terms.
    gauss = 2*alpha/np.sqrt(np.pi)*np.exp(-alpha**2*r**2)
    return (erfc(alpha*r)/r+gauss)/r**2
def __C(alpha,r):
    # Higher-order real-space Ewald kernel used by the dipole-dipole terms.
    gauss = 2*alpha*(3+2*alpha**2*r**2)/np.sqrt(np.pi)*np.exp(-alpha**2*r**2)
    return (3*erfc(alpha*r)/r+gauss)/r**4
def atom_pairs_distance(rx, ry, rz, nu, nv, nw, n_atm, A, tol=1e-3):
    """
    Unique atom-pair separation vectors and pair indices over the supercell.

    Enumerates symmetry-reduced lattice-site pairs and atom pairs, folds the
    separations back into the supercell by minimum image, and returns the
    mapping from every stored pair slot back to its unique separation.

    Parameters
    ----------
    rx, ry, rz : 1d array
        Cartesian atom positions, flattened over ``nu*nv*nw*n_atm``.
    nu, nv, nw : int
        Supercell dimensions.
    n_atm : int
        Number of atoms in the unit cell.
    A : 2d array, 3x3
        Crystal-to-Cartesian transform matrix.
    tol : float, optional
        Tolerance used to quantize intra-cell separations when deduplicating.

    Returns
    -------
    dx, dy, dz : 1d array
        Cartesian components of the pair separations.
    i, j : 1d array, int
        Flat site indices of the pair endpoints.
    inverse : 1d array, int
        Map from pair slots to the unique-separation entries.
    """
    A_inv = np.linalg.inv(A)
    # Half-grid extents used for the minimum-image convention.
    mu = (nu+1)//2
    mv = (nv+1)//2
    mw = (nw+1)//2
    m_uvw = mu*mv*mw
    n_uvw = nu*nv*nw
    c_uvw = np.arange(n_uvw, dtype=int)
    cu, cv, cw = np.unravel_index(c_uvw, (nu,nv,nw))
    # Distinct lattice-site pairs within the half grid, translated over all
    # cells with periodic wrap-around.
    i_lat, j_lat = np.triu_indices(m_uvw, k=1)
    iu, iv, iw = np.unravel_index(i_lat, (mu,mv,mw))
    ju, jv, jw = np.unravel_index(j_lat, (mu,mv,mw))
    iu = np.mod(iu+cu[:,None], nu).flatten()
    iv = np.mod(iv+cv[:,None], nv).flatten()
    iw = np.mod(iw+cw[:,None], nw).flatten()
    ju = np.mod(ju+cu[:,None], nu).flatten()
    jv = np.mod(jv+cv[:,None], nv).flatten()
    jw = np.mod(jw+cw[:,None], nw).flatten()
    i_lat = np.ravel_multi_index((iu,iv,iw), (nu,nv,nw))
    j_lat = np.ravel_multi_index((ju,jv,jw), (nu,nv,nw))
    # Deduplicate unordered site pairs.
    pairs = np.stack((i_lat,j_lat)).reshape(2,n_uvw*m_uvw*(m_uvw-1)//2)
    i_lat, j_lat = np.unique(np.sort(pairs, axis=0), axis=1)
    # Canonicalize each lattice displacement against its negative (pick the
    # lexicographically smaller of +/- d) before deduplicating.
    iu, iv, iw = np.unravel_index(i_lat, (nu,nv,nw))
    ju, jv, jw = np.unravel_index(j_lat, (nu,nv,nw))
    du, dv, dw = ju-iu, jv-iv, jw-iw
    distance = np.stack((du,dv,dw))
    distance = np.stack((distance.T,-distance.T)).T
    sort = np.lexsort(distance, axis=1)[:,0]
    n_pairs = sort.size
    distance = distance.reshape(3,2*n_pairs)[:,sort+2*np.arange(n_pairs)]
    metric = np.vstack(distance).T
    _, index, inverse = np.unique(metric, return_index=True,
                                  return_inverse=True, axis=0)
    i_lat = np.ravel_multi_index((iu[index],iv[index],iw[index]), (nu,nv,nw))
    j_lat = np.ravel_multi_index((ju[index],jv[index],jw[index]), (nu,nv,nw))
    # Canonicalize and deduplicate the intra-cell atom-pair separations,
    # quantized by `tol` to merge numerically equal vectors.
    i_atm, j_atm = np.triu_indices(n_atm, k=1)
    ux = rx.reshape(nu,nv,nw,n_atm)[0,0,0,:]
    uy = ry.reshape(nu,nv,nw,n_atm)[0,0,0,:]
    uz = rz.reshape(nu,nv,nw,n_atm)[0,0,0,:]
    dx = ux[j_atm]-ux[i_atm]
    dy = uy[j_atm]-uy[i_atm]
    dz = uz[j_atm]-uz[i_atm]
    distance = np.stack((dx,dy,dz))
    distance = np.stack((distance.T,-distance.T)).T
    sort = np.lexsort(distance, axis=1)[:,0]
    n_pairs = sort.size
    distance = distance.reshape(3,2*n_pairs)[:,sort+2*np.arange(n_pairs)]
    metric = np.vstack(np.round(distance/tol,0)).astype(int).T
    _, ind, inv = np.unique(metric, return_index=True,
                            return_inverse=True, axis=0)
    i_atm, j_atm = i_atm[ind], j_atm[ind]
    # Both orientations of each atom pair plus the self pairs.
    i_atms = np.concatenate((i_atm,j_atm))
    j_atms = np.concatenate((j_atm,i_atm))
    i_atms = np.concatenate((i_atms,np.arange(n_atm)))
    j_atms = np.concatenate((j_atms,np.arange(n_atm)))
    # Flat site indices for the representative pairs.
    i = np.ravel_multi_index((i_lat,i_atms[:,None]), (n_uvw,n_atm)).flatten()
    j = np.ravel_multi_index((j_lat,j_atms[:,None]), (n_uvw,n_atm)).flatten()
    ic = np.ravel_multi_index((0,i_atm[:,None]), (n_uvw,n_atm)).flatten()
    jc = np.ravel_multi_index((0,j_atm[:,None]), (n_uvw,n_atm)).flatten()
    i, j = np.concatenate((ic,i)), np.concatenate((jc,j))
    # Minimum-image fold of the separations in fractional coordinates, then
    # back to Cartesian.
    dx, dy, dz = rx[j]-rx[i], ry[j]-ry[i], rz[j]-rz[i]
    du, dv, dw = crystal.transform(dx, dy, dz, A_inv)
    du[du < -mu] += nu
    dv[dv < -mv] += nv
    dw[dw < -mw] += nw
    du[du > mu] -= nu
    dv[dv > mv] -= nv
    dw[dw > mw] -= nw
    dx, dy, dz = crystal.transform(du, dv, dw, A)
    # Rebuild the full index lists (all atom pairs over all lattice pairs,
    # plus intra-cell pairs over all cells).
    i_atm, j_atm = np.triu_indices(n_atm, k=1)
    i_atms = np.concatenate((i_atm,j_atm))
    j_atms = np.concatenate((j_atm,i_atm))
    i_atms = np.concatenate((i_atms,np.arange(n_atm)))
    j_atms = np.concatenate((j_atms,np.arange(n_atm)))
    i_lat = np.ravel_multi_index((iu,iv,iw), (nu,nv,nw))
    j_lat = np.ravel_multi_index((ju,jv,jw), (nu,nv,nw))
    i = np.ravel_multi_index((i_lat,i_atms[:,None]), (n_uvw,n_atm)).flatten()
    j = np.ravel_multi_index((j_lat,j_atms[:,None]), (n_uvw,n_atm)).flatten()
    ic = np.ravel_multi_index((c_uvw,i_atm[:,None]), (n_uvw,n_atm)).flatten()
    jc = np.ravel_multi_index((c_uvw,j_atm[:,None]), (n_uvw,n_atm)).flatten()
    i, j = np.concatenate((ic,i)), np.concatenate((jc,j))
    # Compose the atom-pair and lattice-pair inverses into a single map from
    # every pair slot to its unique separation entry.
    l, m = ind.size, index.size
    k = np.concatenate((inv,l+inv,2*l+np.arange(n_atm)))
    p = (np.arange(n_uvw)*0+inv[:,None]).flatten()
    q = l+(inverse+m*k[:,None]).flatten()
    inverse = np.concatenate((p,q))
    return dx, dy, dz, i, j, inverse
def spatial_wavevector(nu, nv, nw, n_atm, B, R):
    """
    Cartesian wavevectors of the supercell reciprocal grid, excluding the
    zero vector.

    NOTE(review): ``n_atm`` is accepted for signature symmetry with the
    companion routines but is not used here.
    """
    mu, mv, mw = (nu+1)//2, (nv+1)//2, (nw+1)//2
    # Half grid along u; positive then negative frequencies along v and w.
    ku = 2*np.pi*np.arange(mu)/nu
    kv = 2*np.pi*np.concatenate((np.arange(mv), np.arange(-mv+1, 0)))/nv
    kw = 2*np.pi*np.concatenate((np.arange(mw), np.arange(-mw+1, 0)))/nw
    ku, kv, kw = np.meshgrid(ku, kv, kw, indexing='ij')
    # Flatten and drop the k = 0 entry.
    ku = np.delete(ku.flatten(), 0)
    kv = np.delete(kv.flatten(), 0)
    kw = np.delete(kw.flatten(), 0)
    # Fractional -> Cartesian, then rotate into the global frame.
    Gx, Gy, Gz = crystal.transform(ku, kv, kw, B)
    return crystal.transform(Gx, Gy, Gz, R)
def charge_charge_matrix(rx, ry, rz, nu, nv, nw, n_atm, A, B, R, tol=1e-3):
    """
    Ewald-summed charge-charge interaction matrix in packed upper-triangular
    form.

    Parameters
    ----------
    rx, ry, rz : 1d array
        Cartesian atom positions, flattened over ``nu*nv*nw*n_atm``.
    nu, nv, nw : int
        Supercell dimensions.
    n_atm : int
        Number of atoms in the unit cell.
    A, B : 2d array, 3x3
        Crystal-to-Cartesian transforms for direct and reciprocal space.
    R : 2d array, 3x3
        Rotation into the global frame.
    tol : float, optional
        Pair-deduplication tolerance passed to ``atom_pairs_distance``.

    Returns
    -------
    Qij : 1d array
        Packed array of size ``n*(n+1)//2`` with ``n = nu*nv*nw*n_atm``.
    """
    n = nu*nv*nw*n_atm
    Qij = np.zeros(n*(n+1)//2)
    # Unique pair separations and the map from pair slots onto them.
    dx, dy, dz, i, j, inverse = atom_pairs_distance(rx, ry, rz,
                                                    nu, nv, nw,
                                                    n_atm, A, tol=tol)
    # Packed upper-triangular indices: off-diagonal slots k, diagonal slots l.
    k = j+n*i-(i+1)*i//2
    l = np.arange(n)
    l = l+n*l-(l+1)*l//2
    d = np.sqrt(dx**2+dy**2+dz**2)
    Gx, Gy, Gz = spatial_wavevector(nu, nv, nw, n_atm, B, R)
    G_sq = Gx**2+Gy**2+Gz**2
    # Supercell edge vectors and volume.
    u, v, w = np.dot(A, [nu,0,0]), np.dot(A, [0,nv,0]), np.dot(A, [0,0,nw])
    V = np.dot(u, np.cross(v, w))
    # Ewald splitting parameter chosen from the shortest cell extent.
    alpha = np.sqrt(2*np.pi*np.min([nu/np.linalg.norm(u)**2,
                                    nv/np.linalg.norm(v)**2,
                                    nw/np.linalg.norm(w)**2]))
    # Real-space (screened) part and the diagonal self term.
    Qij[k] = (erfc(alpha*d)/d)[inverse]
    Qij[l] = -2*alpha/np.sqrt(np.pi)
    # Reciprocal-space part: cos(d.G) for every (pair, G) combination.
    cos_d_dot_G = np.cos(np.kron(dx,Gx)+
                         np.kron(dy,Gy)+
                         np.kron(dz,Gz))
    cos_d_dot_G = cos_d_dot_G.reshape(d.size, G_sq.size)
    factors = 4*np.pi/V*np.exp(-np.pi**2*G_sq/alpha**2)/G_sq
    Qij[k] += (factors*cos_d_dot_G).sum(axis=1)[inverse]
    return Qij
def charge_dipole_matrix(rx, ry, rz, nu, nv, nw, n_atm, A, B, R, tol=1e-3):
    """
    Ewald-summed charge-dipole interaction matrix in packed upper-triangular
    form, one 3-vector per pair.

    Parameters and conventions follow ``charge_charge_matrix``.

    Returns
    -------
    Qijk : 2d array
        Packed array of shape ``(n*(n+1)//2, 3)`` with ``n = nu*nv*nw*n_atm``.
    """
    n = nu*nv*nw*n_atm
    Qijk = np.zeros((n*(n+1)//2,3))
    # Unique pair separations and the map from pair slots onto them.
    dx, dy, dz, i, j, inverse = atom_pairs_distance(rx, ry, rz,
                                                    nu, nv, nw,
                                                    n_atm, A, tol=tol)
    # Packed upper-triangular indices of the off-diagonal slots.
    k = j+n*i-(i+1)*i//2
    d = np.sqrt(dx**2+dy**2+dz**2)
    Gx, Gy, Gz = spatial_wavevector(nu, nv, nw, n_atm, B, R)
    G_sq = Gx**2+Gy**2+Gz**2
    # Supercell edge vectors, volume, and Ewald splitting parameter.
    u, v, w = np.dot(A, [nu,0,0]), np.dot(A, [0,nv,0]), np.dot(A, [0,0,nw])
    V = np.dot(u, np.cross(v, w))
    alpha = np.sqrt(2*np.pi*np.min([nu/np.linalg.norm(u)**2,
                                    nv/np.linalg.norm(v)**2,
                                    nw/np.linalg.norm(w)**2]))
    # Real-space part: kernel __A times the separation components.
    a = __A(alpha, d)
    Qijk[k,0] = (a*dx)[inverse]
    Qijk[k,1] = (a*dy)[inverse]
    Qijk[k,2] = (a*dz)[inverse]
    # Reciprocal-space part: sin(d.G) weighted by the Gaussian factors.
    sin_d_dot_G = np.sin(np.kron(dx,Gx)+
                         np.kron(dy,Gy)+
                         np.kron(dz,Gz))
    sin_d_dot_G = sin_d_dot_G.reshape(d.size, G_sq.size)
    factors = 4*np.pi/V*np.exp(-np.pi**2*G_sq/alpha**2)/G_sq
    g = factors*sin_d_dot_G
    Qijk[k,0] += np.sum(g*Gx, axis=1)[inverse]
    Qijk[k,1] += np.sum(g*Gy, axis=1)[inverse]
    Qijk[k,2] += np.sum(g*Gz, axis=1)[inverse]
    return Qijk
def dipole_dipole_matrix(rx, ry, rz, nu, nv, nw, n_atm, A, B, R, tol=1e-3):
    """
    Ewald-summed dipole-dipole interaction matrix in packed upper-triangular
    form, one symmetric-tensor 6-vector (xx, yy, zz, yz, xz, xy) per pair.

    Parameters and conventions follow ``charge_charge_matrix``.

    Returns
    -------
    Qijkl : 2d array
        Packed array of shape ``(n*(n+1)//2, 6)`` with ``n = nu*nv*nw*n_atm``.
    """
    n = nu*nv*nw*n_atm
    Qijkl = np.zeros((n*(n+1)//2,6))
    # Unique pair separations and the map from pair slots onto them.
    dx, dy, dz, i, j, inverse = atom_pairs_distance(rx, ry, rz,
                                                    nu, nv, nw,
                                                    n_atm, A, tol=tol)
    # Packed upper-triangular indices: off-diagonal slots k, diagonal slots l.
    k = j+n*i-(i+1)*i//2
    l = np.arange(n)
    l = l+n*l-(l+1)*l//2
    d = np.sqrt(dx**2+dy**2+dz**2)
    Gx, Gy, Gz = spatial_wavevector(nu, nv, nw, n_atm, B, R)
    G_sq = Gx**2+Gy**2+Gz**2
    # Supercell edge vectors, volume, and Ewald splitting parameter.
    u, v, w = np.dot(A, [nu,0,0]), np.dot(A, [0,nv,0]), np.dot(A, [0,0,nw])
    V = np.dot(u, np.cross(v, w))
    alpha = np.sqrt(2*np.pi*np.min([nu/np.linalg.norm(u)**2,
                                    nv/np.linalg.norm(v)**2,
                                    nw/np.linalg.norm(w)**2]))
    # Real-space part from the kernels __B and __C, plus the diagonal
    # self term.
    b, c = __B(alpha, d), __C(alpha, d)
    Qijkl[k,0] = (b-dx*dx*c)[inverse]
    Qijkl[k,1] = (b-dy*dy*c)[inverse]
    Qijkl[k,2] = (b-dz*dz*c)[inverse]
    Qijkl[k,3] = (-dy*dz*c)[inverse]
    Qijkl[k,4] = (-dx*dz*c)[inverse]
    Qijkl[k,5] = (-dx*dy*c)[inverse]
    Qijkl[l,0] = Qijkl[l,1] = Qijkl[l,2] = -4*alpha**3/(3*np.sqrt(np.pi))
    # Reciprocal-space part: cos(d.G) weighted by the Gaussian factors,
    # contracted with the G outer products.
    cos_d_dot_G = np.cos(np.kron(dx,Gx)+
                         np.kron(dy,Gy)+
                         np.kron(dz,Gz))
    cos_d_dot_G = cos_d_dot_G.reshape(d.size, G_sq.size)
    factors = 4*np.pi/V*np.exp(-np.pi**2*G_sq/alpha**2)/G_sq
    g = factors*cos_d_dot_G
    Gxx, Gyy, Gzz = Gx*Gx, Gy*Gy, Gz*Gz
    Gxz, Gyz, Gxy = Gx*Gz, Gy*Gz, Gx*Gy
    Qijkl[k,0] += np.sum(g*Gxx, axis=1)[inverse]
    Qijkl[k,1] += np.sum(g*Gyy, axis=1)[inverse]
    Qijkl[k,2] += np.sum(g*Gzz, axis=1)[inverse]
    Qijkl[k,3] += np.sum(g*Gyz, axis=1)[inverse]
    Qijkl[k,4] += np.sum(g*Gxz, axis=1)[inverse]
    Qijkl[k,5] += np.sum(g*Gxy, axis=1)[inverse]
    return Qijkl | /rmc_discord-0.0.4-cp36-cp36m-win_amd64.whl/disorder/diffuse/interaction.py | 0.519521 | 0.568655 | interaction.py | pypi
import numpy as np
from nexusformat.nexus import nxload
import pyvista as pv
from functools import reduce
from disorder.diffuse import filters
def data(filename):
    """
    Load a histogrammed reciprocal-space dataset from a NeXus file.

    Parameters
    ----------
    filename : str
        Path to a Mantid ``MDHistoWorkspace`` NeXus file.

    Returns
    -------
    signal : 3d array
        Intensity values.
    error_sq : 3d array
        Squared errors of the intensities.
    h_range, k_range, l_range : list, float
        Bin-center limits along each reciprocal axis.
    nh, nk, nl : int
        Number of bins along each reciprocal axis.

    Raises
    ------
    KeyError
        If the axis datasets use an unrecognized naming scheme.
    """
    data = nxload(filename)
    signal = np.array(data.MDHistoWorkspace.data.signal.nxdata.T)
    error_sq = np.array(data.MDHistoWorkspace.data.errors_squared.nxdata.T)
    # Axis dataset naming differs between file producers.
    if 'Q1' in data.MDHistoWorkspace.data.keys():
        Qh = data.MDHistoWorkspace.data['Q1']
        Qk = data.MDHistoWorkspace.data['Q2']
        Ql = data.MDHistoWorkspace.data['Q3']
    elif '[H,0,0]' in data.MDHistoWorkspace.data.keys():
        Qh = data.MDHistoWorkspace.data['[H,0,0]']
        Qk = data.MDHistoWorkspace.data['[0,K,0]']
        Ql = data.MDHistoWorkspace.data['[0,0,L]']
    else:
        # Previously Qh/Qk/Ql were left unbound here, producing a confusing
        # NameError further down instead of a clear failure.
        raise KeyError('unrecognized axis names in MDHistoWorkspace data')
    Qh_min, Qk_min, Ql_min = Qh.min(), Qk.min(), Ql.min()
    Qh_max, Qk_max, Ql_max = Qh.max(), Qk.max(), Ql.max()
    # One fewer bin than axis points — the axes presumably hold bin edges
    # (TODO confirm against the file producer).
    mh, mk, ml = Qh.size, Qk.size, Ql.size
    nh = mh-1
    nk = mk-1
    nl = ml-1
    step_h = (Qh_max-Qh_min)/nh
    step_k = (Qk_max-Qk_min)/nk
    step_l = (Ql_max-Ql_min)/nl
    # Convert the edge limits to bin-center limits, rounded for display.
    min_h = np.round(Qh_min+step_h/2, 4)
    min_k = np.round(Qk_min+step_k/2, 4)
    min_l = np.round(Ql_min+step_l/2, 4)
    max_h = np.round(Qh_max-step_h/2, 4)
    max_k = np.round(Qk_max-step_k/2, 4)
    max_l = np.round(Ql_max-step_l/2, 4)
    h_range, k_range, l_range = [min_h, max_h], [min_k, max_k], [min_l, max_l]
    return signal, error_sq, h_range, k_range, l_range, nh, nk, nl
def mask(signal, error_sq):
    """
    Boolean mask of invalid voxels: NaN, infinite, or non-positive entries in
    either the signal or its squared error.

    Parameters
    ----------
    signal, error_sq : array
        Intensity values and their squared errors.

    Returns
    -------
    mask : bool array
        ``True`` where the data should be excluded.
    """
    def _invalid(a):
        # Non-finite entries are invalid outright; the <= comparison is only
        # evaluated on finite values. The previous code used `where=` with no
        # `out=`, which leaves unspecified values at masked positions and was
        # only correct by accident (isnan happened to cover those slots).
        bad = ~np.isfinite(a)
        nonpositive = np.less_equal(a, 0, where=~bad, out=np.zeros_like(bad))
        return bad | nonpositive
    return _invalid(signal) | _invalid(error_sq)
def rebin(a, binsize):
    """
    Rebin a 3d array to ``binsize`` along each axis using fractional bin
    weights. Axes whose size already matches are left untouched.
    """
    changed = [binsize[axis] != a.shape[axis] for axis in range(3)]
    # Weight matrices are built from the original shape; rebinning one axis
    # leaves the others' lengths unchanged, so this is safe.
    comps = [weights(a.shape[axis], binsize[axis]) if changed[axis] else None
             for axis in range(3)]
    rebinners = (filters.rebin0, filters.rebin1, filters.rebin2)
    # Preserve the historical call order: axis 2 before axis 0 when exactly
    # those two change; ascending axis order otherwise.
    if changed[2] and changed[0] and not changed[1]:
        order = (2, 0)
    else:
        order = (0, 1, 2)
    out = a
    for axis in order:
        if changed[axis]:
            out = rebinners[axis](out, comps[axis])
    return out
def weights(old, new):
    """Build the (new, old) weight matrix that rebins a length-``old``
    axis onto ``new`` bins, conserving total weight (rows of the raw
    matrix sum to ``old/new`` before normalization).

    Parameters
    ----------
    old : int
        Current number of bins.
    new : int
        Target number of bins.

    Returns
    -------
    2d array
        Normalized rebinning weights of shape ``(new, old)``.
    """
    W = np.zeros((new, old))
    ratio = old/new
    boundary = ratio  # right edge (in old-bin units) of the current new bin
    row = col = 0
    while row < new and col < old:
        remaining = boundary - col
        if np.round(remaining, 1) >= 1:
            # Old bin lies entirely inside the current new bin.
            W[row, col] = 1
            col += 1
        elif boundary == col:
            # Bin edges coincide exactly; advance to the next new bin.
            row += 1
            boundary += ratio
        else:
            # Old bin straddles the boundary: split it between two rows.
            W[row, col] = remaining
            row += 1
            W[row, col] = 1 - remaining
            col += 1
            boundary += ratio
    return W/ratio
def crop(x, h_slice, k_slice, l_slice):
    """Return a C-contiguous crop of a 3d array.

    Parameters
    ----------
    x : 3d array
        Input volume.
    h_slice, k_slice, l_slice : 2-sequence of int
        (start, stop) bounds along each axis.

    Returns
    -------
    3d array
        ``x`` itself (made contiguous) when the bounds cover the whole
        array, otherwise a contiguous copy of the selected sub-volume.
    """
    if (h_slice[0] == 0 and h_slice[1] == x.shape[0] and
            k_slice[0] == 0 and k_slice[1] == x.shape[1] and
            l_slice[0] == 0 and l_slice[1] == x.shape[2]):
        return np.ascontiguousarray(x)
    # np.ascontiguousarray already copies a non-contiguous slice; the
    # previous explicit .copy(order='C') produced a second, redundant copy.
    return np.ascontiguousarray(x[h_slice[0]:h_slice[1],
                                  k_slice[0]:k_slice[1],
                                  l_slice[0]:l_slice[1]])
def factors(n):
    """Return the sorted, unique positive divisors of an integer.

    Parameters
    ----------
    n : int
        Positive integer to factorize.

    Returns
    -------
    1d array, int
        All divisors of ``n`` in ascending order.

    Notes
    -----
    Uses integer division ``n//i`` — the previous float division ``n/i``
    loses precision for n above 2**53 — and a flat comprehension instead
    of ``functools.reduce``.
    """
    divisors = [d for i in range(1, int(n**0.5) + 1) if n % i == 0
                for d in (i, n//i)]
    return np.unique(divisors).astype(int)
def punch(data, radius_h, radius_k, radius_l, h_range, k_range, l_range,
          centering='P', outlier=1.5, punch='Box'):
    """Punch out Bragg-peak regions from a 3d intensity volume in place.

    For every reflection (h, k, l) allowed by the lattice ``centering``,
    voxels within a box (or ellipsoid) of half-widths ``radius_h``,
    ``radius_k``, ``radius_l`` (in voxels) around the peak are screened
    with a Tukey-fence test and outliers are replaced with NaN.

    Parameters
    ----------
    data : 3d array
        Intensity volume; modified in place.
    radius_h, radius_k, radius_l : float
        Punch half-widths along each axis, in voxels (rounded to int).
    h_range, k_range, l_range : 2-sequence
        (min, max) Miller-index bounds covered by ``data`` on each axis.
    centering : str, optional
        Lattice centering passed to ``reflections`` (default 'P').
    outlier : float, optional
        Tukey-fence multiplier on the interquartile range (default 1.5).
    punch : str, optional
        'Box' (default) or 'Ellipsoid' punch shape.

    Returns
    -------
    data : 3d array
        The same array that was passed in, modified in place.
    """
    # Voxel size along each axis, in Miller-index units.
    step_h = (h_range[1]-h_range[0])/data.shape[0]
    step_k = (k_range[1]-k_range[0])/data.shape[1]
    step_l = (l_range[1]-l_range[0])/data.shape[2]
    # Half-widths of the punch window, in voxels.
    box = [int(round(radius_h)), int(round(radius_k)), int(round(radius_l))]
    min_h, max_h = h_range
    min_k, max_k = k_range
    min_l, max_l = l_range
    # Re-bind the ranges as integer Miller-index bounds for the peak loop.
    h_range = [int(round(min_h)), int(round(max_h))]
    k_range = [int(round(min_k)), int(round(max_k))]
    l_range = [int(round(min_l)), int(round(max_l))]
    for h in range(h_range[0], h_range[1]+1):
        for k in range(k_range[0], k_range[1]+1):
            for l in range(l_range[0], l_range[1]+1):
                if reflections(h, k, l, centering=centering):
                    # Voxel index of the peak center.
                    i_hkl = [int(np.round((h-h_range[0])/step_h,4)),\
                             int(np.round((k-k_range[0])/step_k,4)),\
                             int(np.round((l-l_range[0])/step_l,4))]
                    h0, h1 = i_hkl[0]-box[0], i_hkl[0]+box[0]+1
                    k0, k1 = i_hkl[1]-box[1], i_hkl[1]+box[1]+1
                    l0, l1 = i_hkl[2]-box[2], i_hkl[2]+box[2]+1
                    # Clip the punch window to the array bounds.
                    if (h0 < 0): h0 = 0
                    if (k0 < 0): k0 = 0
                    if (l0 < 0): l0 = 0
                    if (h1 >= data.shape[0]): h1 = data.shape[0]
                    if (k1 >= data.shape[1]): k1 = data.shape[1]
                    if (l1 >= data.shape[2]): l1 = data.shape[2]
                    values = data[h0:h1,k0:k1,l0:l1].copy()
                    if (punch == 'Ellipsoid'):
                        # Remember the corner voxels outside the ellipsoid
                        # so they can be restored after the outlier test.
                        values_outside = values.copy()
                        x, y, z = np.meshgrid(np.arange(h0,h1)-i_hkl[0],
                                              np.arange(k0,k1)-i_hkl[1],
                                              np.arange(l0,l1)-i_hkl[2],
                                              indexing='ij')
                        mask = (x/box[0])**2+(y/box[1])**2+(z/box[2])**2 > 1
                        values[mask] = np.nan
                    # Tukey fences from the local quartiles (NaNs ignored).
                    # NOTE(review): `values.data` passes the raw buffer;
                    # presumably equivalent to passing `values` — confirm.
                    Q3 = np.nanpercentile(values.data,75)
                    Q1 = np.nanpercentile(values.data,25)
                    interquartile = Q3-Q1
                    # NOTE(review): asymmetric bounds (>= upper, < lower)
                    # as written in the original — confirm intent.
                    reject = (values >= Q3+outlier*interquartile) | \
                             (values < Q1-outlier*interquartile)
                    values[reject] = np.nan
                    if (punch == 'Ellipsoid'):
                        # Restore the untouched voxels outside the ellipsoid.
                        values[mask] = values_outside[mask].copy()
                    data[h0:h1,k0:k1,l0:l1] = values.copy()
    return data
def outlier(signal, size):
    """Mask local outliers in ``signal`` with NaN (in place).

    A voxel is rejected when it deviates from the local median by more
    than roughly three robust standard deviations (median absolute
    deviation scaled by 1.4826).

    Parameters
    ----------
    signal : array
        Data to screen; modified in place.
    size : int
        Size of the median-filter window.

    Returns
    -------
    signal : array
        The same array, with outliers set to NaN.
    """
    center = filters.median(signal, size)
    deviation = filters.median(np.abs(signal-center), size=size)
    fence = np.abs(deviation*3*1.4826)
    flagged = (signal < center-fence) | (signal > center+fence)
    signal[flagged] = np.nan
    return signal
def reflections(h, k, l, centering='P'):
# centering == 'P', 'R (rhombohedral axes, primitive cell')
allow = 1
if (centering == 'I'):
if ((h+k+l) % 2 != 0):
allow = 0
elif (centering == 'F'):
if ((h+k) % 2 != 0 or (k+l) % 2 != 0 or (l+h) % 2 != 0):
allow = 0
elif (centering == 'A'):
if ((k+l) % 2 != 0):
allow = 0
elif (centering == 'B'):
if ((l+h) % 2 != 0):
allow = 0
elif (centering == 'C'):
if ((h+k) % 2 != 0):
allow = 0
elif (centering == 'R(obv)'): # (hexagonal axes, triple obverse cell)
if ((-h+k+l) % 3 != 0):
allow = 0
elif (centering == 'R(rev)'): # (hexagonal axes, triple reverse cell)
if ((h-k+l) % 3 != 0):
allow = 0
elif (centering == 'H'): # (hexagonal axes, triple hexagonal cell)
if ((h-k) % 3 != 0):
allow = 0
elif (centering == 'D'): # (rhombohedral axes, triple rhombohedral cell)
if ((h+k+l) % 3 != 0):
allow = 0
return allow
def correlations(fname, data, label):
    """Write correlation point data to a multi-block VTK file.

    Parameters
    ----------
    fname : str
        Output file name.
    data : sequence
        ``(x, y, z, dataset[, dataset2][, pairs])`` — point coordinates
        followed by one ('scalar*') or two ('vector*') correlation arrays
        and, for the '*-pair' labels, a per-point pair label array.
    label : str
        One of 'vector-pair', 'scalar-pair', 'vector', 'scalar'.

    Raises
    ------
    ValueError
        If ``label`` is not one of the recognized values (previously this
        fell through to a NameError on ``datasets``).
    """
    blocks = pv.MultiBlock()
    points = np.column_stack((data[0], data[1], data[2]))
    vectors = ['Correlation', 'Collinearity']
    scalars = ['Correlation']
    pairs = None
    if label == 'vector-pair':
        datasets = [data[3], data[4]]
        pairs = data[5]
    elif label == 'scalar-pair':
        datasets = [data[3]]
        pairs = data[4]
    elif label == 'vector':
        datasets = [data[3], data[4]]
    elif label == 'scalar':
        datasets = [data[3]]
    else:
        raise ValueError('unknown label: {}'.format(label))
    form = vectors if label.startswith('vector') else scalars
    if label.endswith('pair'):
        pair_labels = np.unique(pairs)
        for name, array in zip(form, datasets):
            # Loop variable renamed so it no longer shadows the `label`
            # argument (the original reused `label` here).
            for pair in pair_labels:
                selection = pairs == pair
                key = name + '-' + pair
                blocks[key] = pv.PolyData(points[selection])
                blocks[key].point_data[name] = array[selection]
    else:
        for name, array in zip(form, datasets):
            blocks[name] = pv.PolyData(points)
            blocks[name].point_data[name] = array
    blocks.save(fname, binary=False)
def intensity(fname, h, k, l, intensity, B=None):
    """Save an intensity volume on an (h, k, l) structured grid to a VTK file.

    Parameters
    ----------
    fname : str
        Output file name.
    h, k, l : 3d array
        Grid point coordinates.
    intensity : 3d array
        Intensity values on the grid.
    B : 2d array, 3x3, optional
        Transform applied to the grid coordinates; identity if omitted.
        (Previously the default was a single module-level ``np.eye(3)``
        instance shared across all calls — a mutable default argument.)
    """
    if B is None:
        B = np.eye(3)
    # Embed B in a 4x4 homogeneous transform for the VTK grid.
    T = np.eye(4)
    T[:3,:3] = B
    grid = pv.StructuredGrid(h, k, l)
    grid.point_data['intensity'] = intensity.flatten(order='F')
    grid.transform(T)
    grid.save(fname, binary=True)
import numpy as np
from disorder.material import tables
def j0(Q, A, a, B, b, C, c, D):
    """Approximation of the zeroth-order spherical Bessel average <j0(Q)>.

    Evaluates the standard three-Gaussian-plus-constant analytical fit
    with s = Q/(4*pi):

        A*exp(-a*s^2) + B*exp(-b*s^2) + C*exp(-c*s^2) + D

    Parameters
    ----------
    Q : 1d array
        Magnitude of wavevector :math:`Q`.
    A, a, B, b, C, c, D : float
        Tabulated <j0> fit constants.

    Returns
    -------
    j0 : 1d array
        Has the same shape as the input wavevector.
    """
    s2 = (Q/4/np.pi)**2
    return A*np.exp(-a*s2) + B*np.exp(-b*s2) + C*np.exp(-c*s2) + D
def j2(Q, A, a, B, b, C, c, D):
    """Approximation of the second-order spherical Bessel average <j2(Q)>.

    Evaluates the analytical fit with s = Q/(4*pi):

        (A*exp(-a*s^2) + B*exp(-b*s^2) + C*exp(-c*s^2) + D) * s^2

    Parameters
    ----------
    Q : 1d array
        Magnitude of wavevector :math:`Q`.
    A, a, B, b, C, c, D : float
        Tabulated <j2> fit constants.

    Returns
    -------
    j2 : 1d array
        Has the same shape as the input wavevector.
    """
    s2 = (Q/4/np.pi)**2

    def damped(amp, width):
        # One Gaussian-damped s^2 term of the fit.
        return amp*s2*np.exp(-width*s2)

    return damped(A, a) + damped(B, b) + damped(C, c) + D*s2
def f(Q, j0, j2=0, K2=0):
    """Magnetic form factor ``f(Q) = <j0> + K2*<j2>``.

    Parameters
    ----------
    Q : 1d array
        Magnitude of the wavevector (unused by the formula; kept for
        interface symmetry with the other form-factor helpers).
    j0, j2 : 1d array
        Precomputed <j0> and <j2> values with the same shape as ``Q``.
    K2 : 1d array or float, optional
        Orbital coupling constant, default ``K2=0``.

    Returns
    -------
    f : 1d array
        Has the same shape as the input wavevector.
    """
    orbital_term = K2*j2
    return j0 + orbital_term
def form(Q, ions, g=2):
    """Magnetic form factors f(Q) for a set of ions.

    Parameters
    ----------
    Q : 1d array
        Magnitude of the wavevector.
    ions : 1d array
        Magnetic ion labels used to look up tabulated <j0>/<j2> constants.
    g : float or 1d array, optional
        g-factor of the spins (per ion when an array), default ``g=2``.

    Returns
    -------
    factor : 1d array
        Form factors interleaved per ion (size ``Q.shape[0]*len(ions)``);
        negative values are clipped to zero.
    """
    # Orbital coupling K2 = 2/g - 1 (zero for spin-only g = 2).
    k = 2/g-1
    n_hkl = Q.shape[0]
    n_atm = len(ions)
    factor = np.zeros(n_hkl*n_atm)
    for i, ion in enumerate(ions):
        # Look each ion up once (the previous code queried tables.j0
        # twice per ion: once for the None test and once to unpack).
        coeffs_j0 = tables.j0.get(ion)
        if coeffs_j0 is None:
            A0, a0, B0, b0, C0, c0, D0 = (0,)*7
            A2, a2, B2, b2, C2, c2, D2 = (0,)*7
        else:
            A0, a0, B0, b0, C0, c0, D0 = coeffs_j0
            A2, a2, B2, b2, C2, c2, D2 = tables.j2.get(ion)
        K = k[i] if np.size(k) > 1 else k
        factor[i::n_atm] = f(Q, j0(Q, A0, a0, B0, b0, C0, c0, D0),
                             j2(Q, A2, a2, B2, b2, C2, c2, D2), K)
    # Clip unphysical negative values.
    factor[factor < 0] = 0
    return factor
def spin(nu, nv, nw, n_atm, value=1, fixed=True):
    """Generate random spin vectors on a supercell grid.

    Parameters
    ----------
    nu, nv, nw : int
        Number of grid points :math:`N_1`, :math:`N_2`, :math:`N_3` along
        the :math:`a`, :math:`b`, and :math:`c`-axis of the supercell.
    n_atm : int
        Number of atoms in the unit cell.
    value : scalar, 1d array, or higher-dimensional array, optional
        Moment magnitude(s) for random directions (scalar or per-atom 1d
        array), or — when more than one-dimensional — the three moment
        components used for random +/- (Ising-like) orientations.
    fixed : bool, optional
        When False, each moment is additionally scaled by a uniform
        random factor in [0, 1).

    Returns
    -------
    Sx, Sy, Sz : 1d array
        Each array has a flattened shape of size ``nu*nv*nw*n_atm``.
    """
    shape = (nu, nv, nw, n_atm)
    if np.ndim(value) <= 1:
        # Isotropic random directions, uniform on the sphere.
        mags = value if np.ndim(value) == 1 else np.full(n_atm, value)
        azimuth = 2*np.pi*np.random.rand(*shape)
        polar = np.arccos(1-2*np.random.rand(*shape))
        Sx = mags*np.sin(polar)*np.cos(azimuth)
        Sy = mags*np.sin(polar)*np.sin(azimuth)
        Sz = mags*np.cos(polar)
    else:
        # Ising-like: random +/- sign on the supplied moment components.
        sign = 2*(np.random.rand(*shape) < 0.5)-1
        Sx, Sy, Sz = sign*value[0], sign*value[1], sign*value[2]
    if not fixed:
        scale = np.random.rand(*shape)
        Sx = Sx*scale
        Sy = Sy*scale
        Sz = Sz*scale
    return Sx.flatten(), Sy.flatten(), Sz.flatten()
def transform(Sx, Sy, Sz, H, K, L, nu, nv, nw, n_atm):
    """Discrete Fourier transform of spin vectors.

    Parameters
    ----------
    Sx, Sy, Sz : 1d array
        Cartesian spin components, flattened over the supercell grid.
    H, K, L : 1d array, int
        Supercell indices along the reciprocal axes.
    nu, nv, nw : int
        Supercell dimensions along :math:`a`, :math:`b`, :math:`c`.
    n_atm : int
        Number of atoms in the unit cell.

    Returns
    -------
    Sx_k, Sy_k, Sz_k : 1d array
        Scaled inverse FFTs of the spin components, flattened to size
        ``nu*nv*nw*n_atm``.
    i_dft : 1d array, int
        Flat indices into the transform corresponding to (H, K, L).
    """
    shape = (nu, nv, nw, n_atm)
    # Inverse FFT over the cell axes only, rescaled by the cell count.
    Sx_k, Sy_k, Sz_k = (np.fft.ifftn(comp.reshape(shape), axes=(0, 1, 2))*nu*nv*nw
                        for comp in (Sx, Sy, Sz))
    # Wrap (H, K, L) into the periodic supercell and flatten the index.
    Ku, Kv, Kw = (np.mod(index, dim).astype(int)
                  for index, dim in ((H, nu), (K, nv), (L, nw)))
    i_dft = Kw+nw*(Kv+nv*Ku)
    return Sx_k.flatten(), Sy_k.flatten(), Sz_k.flatten(), i_dft
def intensity(Qx_norm, Qy_norm, Qz_norm, Sx_k, Sy_k, Sz_k, i_dft, factors):
    """Magnetic scattering intensity.

    Parameters
    ----------
    Qx_norm, Qy_norm, Qz_norm : 1d array
        Normalized wavevector components.
    Sx_k, Sy_k, Sz_k : 1d array
        Fourier transforms of the spin vector components.
    i_dft : 1d array, int
        Indices of the transform corresponding to reciprocal-space points.
    factors : 1d array
        Prefactors combining form factors and phase factors.

    Returns
    -------
    I : 1d array
        Intensities, flattened shape of size ``i_dft.shape[0]``.
    """
    n_peaks = i_dft.shape[0]
    n_atm = factors.shape[0] // n_peaks
    per_atom = factors.reshape(n_peaks, n_atm)
    n_uvw = Sx_k.shape[0] // n_atm
    # Per-atom structure-factor contributions at each requested peak.
    prod_x = per_atom*Sx_k.reshape(n_uvw, n_atm)[i_dft,:]
    prod_y = per_atom*Sy_k.reshape(n_uvw, n_atm)[i_dft,:]
    prod_z = per_atom*Sz_k.reshape(n_uvw, n_atm)[i_dft,:]
    Fx = prod_x.sum(axis=1)
    Fy = prod_y.sum(axis=1)
    Fz = prod_z.sum(axis=1)
    # Only the component of F perpendicular to Q scatters: subtract the
    # projection of F onto the unit wavevector.
    parallel = Qx_norm*Fx+Qy_norm*Fy+Qz_norm*Fz
    Fx_perp = Fx-parallel*Qx_norm
    Fy_perp = Fy-parallel*Qy_norm
    Fz_perp = Fz-parallel*Qz_norm
    I = np.real(Fx_perp)**2+np.imag(Fx_perp)**2\
      + np.real(Fy_perp)**2+np.imag(Fy_perp)**2\
      + np.real(Fz_perp)**2+np.imag(Fz_perp)**2
    return I/(n_uvw*n_atm)
def structure(Qx_norm, Qy_norm, Qz_norm, Sx_k, Sy_k, Sz_k, i_dft, factors):
    """Partial magnetic structure factor.

    Parameters
    ----------
    Qx_norm, Qy_norm, Qz_norm : 1d array
        Normalized wavevector components (not used by the computation;
        kept for interface symmetry with :func:`intensity`).
    Sx_k, Sy_k, Sz_k : 1d array
        Fourier transforms of the spin vector components.
    i_dft : 1d array, int
        Indices of the transform corresponding to reciprocal-space points.
    factors : 1d array
        Prefactors combining form factors and phase factors.

    Returns
    -------
    Fx, Fy, Fz : 1d array
        Structure factors, flattened shape of size ``i_dft.shape[0]``.
    prod_x, prod_y, prod_z : 1d array
        Per-atom contributions, size ``i_dft.shape[0]*n_atm``.
    """
    n_peaks = i_dft.shape[0]
    n_atm = factors.shape[0] // n_peaks
    per_atom = factors.reshape(n_peaks, n_atm)
    n_uvw = Sx_k.shape[0] // n_atm
    # Per-atom structure-factor contributions at each requested peak.
    prod_x = per_atom*Sx_k.reshape(n_uvw, n_atm)[i_dft,:]
    prod_y = per_atom*Sy_k.reshape(n_uvw, n_atm)[i_dft,:]
    prod_z = per_atom*Sz_k.reshape(n_uvw, n_atm)[i_dft,:]
    Fx = prod_x.sum(axis=1)
    Fy = prod_y.sum(axis=1)
    Fz = prod_z.sum(axis=1)
    return Fx, Fy, Fz, prod_x.flatten(), prod_y.flatten(), prod_z.flatten()
def magnitude(mu1, mu2, mu3, C):
    """Magnitude of magnetic moments :math:`\\mu`.

    Transforms crystal-axis moment components to Cartesian coordinates
    and returns their Euclidean norms.  Vectorized: one matrix product
    and a column-wise ``np.linalg.norm`` replace the previous per-column
    Python loop.

    Parameters
    ----------
    mu1, mu2, mu3 : float or 1d array
        Moment components along the crystal axes.
    C : 2d array, 3x3
        Transform matrix from crystal axes to the Cartesian system.

    Returns
    -------
    mu : 1d array
        Same size as the input moment components.
    """
    n = np.size(mu1)
    M = np.array([mu1, mu2, mu3]).reshape(3, n)
    return np.linalg.norm(np.dot(C, M), axis=0)
def cartesian(mu1, mu2, mu3, C):
    """Cartesian components of magnetic moments :math:`\\mu_x`,
    :math:`\\mu_y`, :math:`\\mu_z`.

    Transforms crystal-axis moment components to Cartesian coordinates.
    Vectorized: one matrix product replaces the previous per-column
    Python loop.

    Parameters
    ----------
    mu1, mu2, mu3 : float or 1d array
        Moment components along the crystal axes.
    C : 2d array, 3x3
        Transform matrix from crystal axes to the Cartesian system.

    Returns
    -------
    mu_x, mu_y, mu_z : 1d array
        Same size as the input moment components.
    """
    n = np.size(mu1)
    M = np.array([mu1, mu2, mu3]).reshape(3, n)
    mu_x, mu_y, mu_z = np.dot(C, M)
    return mu_x, mu_y, mu_z
class Preprocessor:
    """Pre-processing class: load numeric data from a text file and
    compute basic statistics.

    Attributes
    ----------
    fileName : str
        Path of the file under study.
    data : list
        Parsed values (set by ``load_file``).
    len : int
        Number of parsed values (set by ``load_file``).
    """

    def __init__(self, fileName):
        self.fileName = fileName

    def __repr__(self):
        # Side-effect free: the previous version also printed self.data,
        # which crashed when called before load_file().
        return "Nome do Arquivo em estudo: {}".format(self.fileName)

    def load_file(self, type='txt'):
        """Read one number (float) per line from ``fileName`` into the
        ``data`` attribute.

        Lines that cannot be parsed as floats are kept verbatim and a
        "MissingValue" notice is printed.

        Args:
            type (string): file type (currently informational only)

        Returns:
            None
        """
        data_list = []
        # `with` closes the file; the old explicit close() was redundant.
        with open(self.fileName, 'r') as file:
            for line in file:
                try:
                    data_list.append(float(line))
                except ValueError:  # was a bare `except`
                    data_list.append(line)
                    print("MissingValue")
        self.data = data_list
        self.len = len(data_list)
        print("Arquivo com : {} dados".format(self.len))

    def get_mean(self, digits=6):
        """Return the mean of the loaded data, rounded to `digits`.

        Raises:
            ValueError: if no data has been loaded.
        """
        data = getattr(self, 'data', None)
        if data is None:
            # `raise "No Data"` (a bare string) is a TypeError in Python 3.
            raise ValueError("No Data")
        self.mean = round(sum(data)/len(data), digits)
        return self.mean

    def get_stdev(self, digits=6, tipo='sample'):
        """Return the standard deviation rounded to `digits`.

        ``tipo='sample'`` uses the (n-1) denominator; any other value
        uses the population denominator n.

        Raises:
            ValueError: if fewer than 2 values are loaded.
        """
        d = 1 if tipo == 'sample' else 0
        if self.len < 2:
            raise ValueError('Necessita pelo menos 2 dados')
        self.get_mean()
        self.variance = sum((x - self.mean)**2 for x in self.data)/(self.len - d)
        # Honor the `digits` argument (it was previously hard-coded to 6).
        self.stdev = round(self.variance**0.5, digits)
        return self.stdev

    def save_file(self, fileType='txt'):
        """Write the loaded data, one value per line, to a
        ``<name>_outPut.<fileType>`` file next to the input file.

        Args:
            fileType (string): extension of the output file

        Returns:
            None
        """
        outPutFile = self.fileName[:-4] + "_outPut." + fileType
        with open(outPutFile, 'w') as file:
            file.writelines("%s\n" % value for value in self.data)
        print("Arquivo {} gravado com : {} dados".format(outPutFile, self.len))
Rmdawn: a Python package for programmatic R markdown workflows
==============================================================
|Chat| |Build| |License| |PyPI| |Status| |Updates| |Versions|
Introduction
------------
The ``rmdawn`` Python package allows you to (de)construct, convert, and render `R Markdown <https://rmarkdown.rstudio.com/authoring_quick_tour.html>`__ (Rmd) files in
- your terminal (e.g. ``bash``, ``zsh``, ``fish``, etc.) or
- your favorite Python environment (e.g. `PyCharm <https://www.jetbrains.com/pycharm/>`__ or `Visual Studio Code <https://code.visualstudio.com/docs/python/python-tutorial>`__).
The ``rmdawn`` Python package consists of 6 shell commands and functions:
- ``rmdawn``, which concatenates input files to generate an Rmd file.
- ``rmdusk``, which extracts 1) a YAML file, 2) Python or R scripts and 3) `Markdown <https://www.markdownguide.org/>`__ (md) files from Rmd files.
- ``rmdtor``, which converts Rmd files into R scripts using `knitr::purl <https://www.rdocumentation.org/packages/knitr/versions/1.20/topics/knit>`__.
- ``rtormd``, which converts R scripts into Rmd files using `knitr::spin <https://yihui.name/knitr/demo/stitch/#spin-comment-out-texts>`__.
- ``render``, which creates rendered versions of R scripts or Rmd files into HTML, PDF, Word, and `other output file formats <https://rmarkdown.rstudio.com/lesson-9.html>`__.
- ``catren``, which combines the functionality of ``rmdawn`` and ``render`` to generate an Rmd file from source files and then create an output file.
All ``rmdawn`` functions and commands, except for ``rmdawn`` and ``rmdusk``, rely on the `rpy2 <https://rpy2.readthedocs.io/>`__ Python library.
The command line interface relies on the `click <https://click.palletsprojects.com/>`__ Python library.
For a related package that provides programmatic tools for working with `Jupyter
Notebooks <http://jupyter-notebook.readthedocs.io/en/latest/examples/Notebook/What%20is%20the%20Jupyter%20Notebook.html>`__,
check out the `Nbless Python package <https://py4ds.github.io/nbless/>`__.
Documentation and Code
----------------------
The documentation is hosted at https://py4ds.github.io/rmdawn/.
The code is hosted at https://github.com/py4ds/rmdawn.
Installation
------------
.. code:: sh
pip install rmdawn
or clone the `repo <https://github.com/py4ds/rmdawn>`__, e.g. ``git clone https://github.com/py4ds/rmdawn`` and install locally using setup.py (``python setup.py install``) or ``pip`` (``pip install .``).
Creating an R markdown file with the ``rmdawn`` shell command
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code:: sh
rmdawn header.yml intro.md scrape.py plot.R notes.txt > example.Rmd
Instead of redirecting to a file (``>``), you can use the ``--out_file`` or ``-o`` flag:
.. code:: sh
rmdawn header.yml intro.md scrape.py plot.R notes.txt -o example.Rmd
The easiest way to handle large numbers of files is to use the ``*`` wildcard in the shell.
.. code:: sh
rmdawn source_file* -o example.Rmd
Extract YAML, markdown, and code files from R markdown files with the ``rmdusk`` shell command
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code:: sh
rmdusk example.Rmd
Convert between R markdown and R code files with the ``rmdtor`` and ``rtormd`` shell commands
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code:: sh
rmdtor example.Rmd
rtormd example.R
You can also specify an new filename with ``--out_file`` or ``-o`` flag.
.. code:: sh
rmdtor example.Rmd -o new.R
rtormd example.R -o new.Rmd
Render R markdown and R code files with the ``render`` shell command
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The default output format is HTML.
.. code:: sh
render example.Rmd
render example.R
You can specify output format with the ``--format`` or ``-f`` flag.
.. code:: sh
render example.Rmd -f word_document
render example.R -f word_document
If you only specify output filename with the ``--out_file`` or ``-o`` flag,
``render`` will try to infer the output format from the file extension.
This will not work for slides or R markdown notebooks.
.. code:: sh
render example.Rmd -o example.pdf
render example.R -o example.pdf
Create an R markdown file from source files with the ``catren`` shell command
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can pass ``--rmd_file`` (``-r``), ``--out_file`` (``-o``), and ``--format`` (``-f``) arguments to ``catren``.
The default output format is HTML.
.. code:: sh
catren header.yml intro.md scrape.py plot.R notes.txt -r example.Rmd
If you only specify an output filename with the ``--out_file`` or ``-o`` flag,
``catren`` will try to infer the R markdown file name and output format from the file extension.
.. code:: sh
catren header.yml intro.md scrape.py plot.R notes.txt -o example.pdf
If you only specify an output format with the ``--format`` or ``-f`` flag or do not provide any optional arguments,
``catren`` will create a temporary file in a temporary location.
.. code:: sh
catren header.yml intro.md scrape.py plot.R notes.txt -f word_document
catren header.yml intro.md scrape.py plot.R notes.txt
Basic usage: Python environment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code:: python
from pathlib import Path
from rmdawn import rmdawn
from rmdawn import rmdusk
from rmdawn import rtormd
from rmdawn import rmdtor
from rmdawn import render
from rmdawn import catren
# Create an R markdown file from source files
file_list = ["header.yml", "intro.md", "scrape.py", "plot.R", "notes.txt"]
Path("example.Rmd").write_text(rmdawn(file_list))
# Extract source files from an R markdown file
rmdusk("example.Rmd")
# Convert R markdown files into R scripts
rmdtor("example.Rmd")
# Convert R scripts into R markdown files
rtormd("example.R")
# Generate output files from R scripts or R markdown files
render("example.Rmd") # The default format is HTML
render("example.R") # The default format is HTML
render("example.Rmd", out_format="pdf_document")
render("example.R", out_format="word_document")
# Create an R markdown file from source files output files and render it
file_list = ["header.yml", "intro.md", "scrape.py", "plot.R", "notes.txt"]
catren(file_list, rmd_file="example.Rmd") # The default format is HTML
catren(file_list, rmd_file="example.Rmd", out_format="pdf_document")
catren(file_list, out_file="example.html")
# Another alternative is to import the package and use it as a namespace.
import rmdawn
rmdawn.rmdawn(["header.yml", "intro.md", "scrape.py", "plot.R", "notes.txt"])
rmdawn.rmdusk("example.Rmd")
rmdawn.rtormd("example.R")
rmdawn.rmdtor("example.Rmd")
rmdawn.render("example.Rmd") # The default format is HTML
Next Steps
----------
Currently, `xaringan <https://bookdown.org/yihui/rmarkdown/xaringan.html>`__ slides require a special format.
- Write ``remark``/``demark`` functions and commands to add/remove slide delimiters ``---`` before headers ``#``.
.. |Chat| image:: https://badges.gitter.im/py4ds/rmdawn.svg
:alt: Join the chat at https://gitter.im/py4ds/rmdawn
:target: https://gitter.im/py4ds/rmdawn
.. |Build| image:: https://travis-ci.org/py4ds/rmdawn.svg?branch=master
:target: https://travis-ci.org/py4ds/rmdawn
.. |License| image:: https://img.shields.io/badge/License-MIT-purple.svg
:target: https://opensource.org/licenses/MIT
.. |PyPI| image:: https://img.shields.io/pypi/v/rmdawn.svg
:target: https://pypi.python.org/pypi/rmdawn
.. |Status| image:: https://www.repostatus.org/badges/latest/active.svg
:alt: Project Status: Active – The project has reached a stable, usable state and is being actively developed.
:target: https://www.repostatus.org/#active
.. |Updates| image:: https://pyup.io/repos/github/py4ds/rmdawn/shield.svg
:target: https://pyup.io/repos/github/py4ds/rmdawn/
.. |Versions| image:: https://img.shields.io/pypi/pyversions/rmdawn.svg
:alt: PyPI - Python Version
:target: https://www.python.org/downloads/
| /rmdawn-0.1.2.tar.gz/rmdawn-0.1.2/README.rst | 0.898907 | 0.732296 | README.rst | pypi |
import matplotlib.pyplot as plt
import numpy as np
from shapely.geometry import Polygon
def trapezoidal_rule(f, a: float, b: float,
                     n: int) -> float:
    """Approximate the definite integral of f over [a, b] with the
    composite trapezoidal rule.

    Parameters
    ----------
    f : function
        Function to be integrated.
    a, b : float
        Lower and upper integration bounds.
    n : int
        Number of trapezoids.

    Returns
    -------
    float
        The trapezoidal approximation
        ``step*((f(a) + f(b))/2 + sum of interior values)``.

    Notes
    -----
    Each interior point is evaluated once (n+1 evaluations in total);
    the previous version called f twice per point (2n evaluations) and
    accumulated the abscissa with repeated additions, compounding
    floating-point drift.  Points are computed directly as ``a + i*step``.
    """
    step = (b - a)/n
    interior = sum(f(a + i*step) for i in range(1, n))
    return step * ((f(a) + f(b))/2 + interior)
def plot_trapezoidal_rule(f, a: float, b: float,
                          n: int) -> None:
    """Plot f on [a, b] together with the n trapezoids of the
    trapezoidal-rule approximation; the title shows the computed sum.

    Parameters
    ----------
    f : function
        Function to be integrated.
    a, b : float
        Lower and upper integration bounds.
    n : int
        Number of trapezoids.

    Returns
    -------
    None
    """
    xs = np.linspace(a, b, 100)
    ys = f(xs)
    plt.figure(figsize=(15, 6))
    approx = trapezoidal_rule(f, a, b, n)
    width = (b - a)/n
    # Corner lists for each trapezoid: base corners first, then the two
    # corners on the graph of f.
    corner_sets = []
    left = a
    for _ in range(n):
        right = left + width
        corner_sets.append([(left, 0), (right, 0),
                            (right, f(right)), (left, f(left))])
        left = right
    for corners in corner_sets:
        outline = Polygon(corners)
        px, py = outline.exterior.xy
        plt.plot(px, py, c="red")
        plt.fill(px, py, "y")
    plt.plot(xs, ys, 'g')
    plt.title(f'n={n}, aprox_sum={approx}')
import numpy as np
import matplotlib.pyplot as plt
def simpson_rule(f, a: float, b: float,
                 n: int) -> float:
    """Approximate the definite integral of f over [a, b] with the
    composite Simpson rule.

    Parameters
    ----------
    f : function
        Function to be integrated.
    a, b : float
        Lower and upper integration bounds.
    n : int
        Number of sub-intervals; must be even.

    Returns
    -------
    float
        The Simpson-rule approximation of the integral.
    """
    assert n % 2 == 0  # Simpson's rule needs an even number of intervals
    width = (b - a)/n
    # Endpoints enter with coefficient 1.
    total = f(a) + f(b)
    # Interior points alternate coefficients 4, 2, 4, ...
    x = a + width
    for index in range(n-1):
        coefficient = 4 if index % 2 == 0 else 2
        total += coefficient * f(x)
        x += width
    return (width/3) * total
def plot_simpson_rule(f, a: float,
                      b: float, n: int) -> None:
    """
    Plots a numerical approximation of the definite integral of f
    between a and b by Simpson's rule with n iterations.
    Parameters:
        f(function): function to be integrated
        a(float): low bound
        b(float): upper bound
        n(int): number of iterations of the numerical approximation
    Returns:
        result(None): None
    """
    def parabola_from_3(x1, y1, x2, y2, x3, y3):
        """
        Get a, b, c coefficients of a parabola from 3 points (x,y)
        """
        # Interpolation coefficients of the quadratic through the three
        # points; the shared denominator vanishes only when two x-values
        # coincide, which the assert guards against.
        denominator = ((x1-x2) * (x1-x3) * (x2-x3))
        assert denominator != 0
        a = (x3 * (y2-y1) + x2 * (y1-y3) + x1 * (y3-y2))
        b = (x3*x3 * (y1-y2) + x2*x2 * (y3-y1) + x1*x1 * (y2-y3))
        c = (x2*x3 * (x2-x3) * y1 + x3*x1 * (x3-x1) * y2+x1 * x2 * (x1-x2)*y3)
        a, b, c = a/denominator, b/denominator, c/denominator
        return a, b, c
    def f_parabola(x, a_parab, b_parab, c_parab):
        """
        Get the parabola function from a, b, c coefficients
        """
        return a_parab*x**2 + b_parab*x + c_parab
    # Define the X and Y of f
    X = np.linspace(a, b, 100)
    Y = f(X)
    # Plot Size
    plt.figure(figsize=(15, 6))
    # Calculate the approximate sum by using Simpson's rule
    aprox_sum = simpson_rule(f, a, b, n)
    step = (b-a)/n
    # Initial Values
    i = a
    parabola_list = []
    # Create the points of parabolas to approximate the area
    # (each Simpson sub-interval spans two steps and is defined by its
    # two endpoints plus the midpoint on the graph of f).
    for _ in range(n//2):
        P1 = (i, f(i))
        P2 = (i + 2*step, f(i + 2*step))
        P_mid = (i + step, f(i + step))
        parabola_list.append([[P1, P2, P_mid]])
        i += 2 * step
    # Plot fixed parabolas (separated by "red" bar plot)
    for simpson in parabola_list:
        # Fit the interpolating parabola through the three stored points.
        a_parab, b_parab, c_parab = parabola_from_3(
            simpson[0][0][0], simpson[0][0][1],
            simpson[0][1][0], simpson[0][1][1],
            simpson[0][2][0], simpson[0][2][1])
        x_test = list(np.linspace(simpson[0][0][0], simpson[0][1][0], 100))
        y_test = list()
        for element in x_test:
            y_test.append(f_parabola(element, a_parab, b_parab, c_parab))
        plt.plot(x_test, y_test, c="red")
        # Thin vertical bars mark the sub-interval boundaries.
        plt.bar([simpson[0][0][0], simpson[0][1][0]],
                [simpson[0][0][1], simpson[0][1][1]],
                width=0.01, color="red")
        plt.fill_between(x_test, y_test, color="yellow")
    # Plot function f
    plt.plot(X, Y, 'g')
    plt.title(f'n={n}, aprox_sum={aprox_sum}')
from scipy.special.orthogonal import p_roots
import numpy as np
import matplotlib.pyplot as plt
def gauss_rule(f, n: int, a: float, b: float) -> float:
    """Approximate the definite integral of f over [a, b] with n-point
    Gauss-Legendre quadrature.

    Parameters
    ----------
    f : function
        Function to be integrated.
    n : int
        Number of quadrature nodes.
    a, b : float
        Lower and upper integration bounds.

    Returns
    -------
    float
        The Gauss-Legendre approximation of the integral.
    """
    # Legendre nodes (on [-1, 1]) and their quadrature weights.
    nodes, coeffs = p_roots(n)
    # Affine map of the nodes from [-1, 1] onto [a, b].
    mapped = 0.5*(b - a)*nodes + 0.5*(b + a)
    total = sum(w * f(x) for w, x in zip(coeffs, mapped))
    return 0.5*(b - a)*total
def plot_gauss_quadrature(f, n: int, a: float,
                          b: float) -> None:
    """Plot f on [a, b] with bars sketching the n-point Gauss-Legendre
    approximation; the title shows the computed value.

    Parameters
    ----------
    f : function
        Function to be integrated.
    n : int
        Number of quadrature nodes.
    a, b : float
        Lower and upper integration bounds.

    Returns
    -------
    None
    """
    # Define the X and Y of f
    x = np.linspace(a, b, 100)
    y = f(x)
    # Fix: gauss_rule's signature is (f, n, a, b); the arguments were
    # previously passed as (f, a, b, n), scrambling n with the bounds.
    aprox_sum = gauss_rule(f, n, a, b)
    # Initial Values
    points, weights = p_roots(n)
    xn = a
    # NOTE(review): the bars evaluate f at the raw Legendre nodes (on
    # [-1, 1]) and use the quadrature weights as widths, so this is a
    # schematic sketch rather than an exact area decomposition — confirm
    # intent before changing.
    for i in range(n):
        plt.bar(xn, f(points[i]), width=weights[i], alpha=0.25,
                align='edge', edgecolor='r')
        xn += weights[i]
    # Plot function f
    plt.axhline(0, color='black')  # X-axis
    plt.axvline(0, color='black')  # Y-axis
    plt.plot(x, y)
    plt.title(f'n={n}, aprox_sum={aprox_sum}')
import matplotlib.pyplot as plt
import numpy as np
from shapely.geometry import Polygon
def midpoint_rule(f, a: float, b: float,
                  n: int) -> float:
    """Approximate the definite integral of f over [a, b] with the
    composite midpoint rule.

    Parameters
    ----------
    f : function
        Function to be integrated.
    a, b : float
        Lower and upper integration bounds.
    n : int
        Number of sub-intervals.

    Returns
    -------
    float
        ``step`` times the sum of f at the sub-interval midpoints.

    Notes
    -----
    Midpoints are computed directly as ``a + (i + 0.5)*step`` instead of
    maintaining two accumulated abscissas, which avoids compounding
    floating-point drift over many sub-intervals.
    """
    step = (b - a)/n
    return step * sum(f(a + (i + 0.5)*step) for i in range(n))
def plot_midpoint_rule(f, a: float, b: float,
                       n: int) -> None:
    """Plot f on [a, b] together with the n midpoint rectangles; the
    title shows the computed approximation.

    Parameters
    ----------
    f : function
        Function to be integrated.
    a, b : float
        Lower and upper integration bounds.
    n : int
        Number of rectangles.

    Returns
    -------
    None
    """
    xs = np.linspace(a, b, 100)
    ys = f(xs)
    plt.figure(figsize=(15, 6))
    approx = midpoint_rule(f, a, b, n)
    width = (b - a)/n
    # Corner lists for each rectangle, with height f(midpoint).
    rectangles = []
    left = a
    for _ in range(n):
        right = left + width
        height = f((2*left + width)/2)
        rectangles.append([(left, 0), (right, 0),
                           (right, height), (left, height)])
        left = right
    for corners in rectangles:
        outline = Polygon(corners)
        px, py = outline.exterior.xy
        plt.plot(px, py, c="red")
        plt.fill(px, py, "y")
    plt.plot(xs, ys, 'g')
    plt.title(f'n={n}, aprox_sum={approx}')
import torch
import torch.nn as nn
import torch.nn.functional as F
# Default magnitude of the diagonal jitter added to covariance matrices for
# numerical stability (see the `epsilon` parameter of RMILoss).
EPSILON = 0.0005
class RMILoss(nn.Module):
    """
    PyTorch Module which calculates the Region Mutual Information loss (https://arxiv.org/abs/1910.12037).

    The loss is a weighted sum of a (possibly zero-weighted) binary cross
    entropy term and a lower bound of the region mutual information between
    prediction and target.
    """

    def __init__(self,
                 with_logits,
                 radius=3,
                 bce_weight=0.5,
                 pool='max',
                 stride=3,
                 use_log_trace=True,
                 use_double_precision=True,
                 epsilon=EPSILON):
        """
        :param with_logits: If True, apply the sigmoid function to the prediction before calculating loss.
        :param radius: RMI radius.
        :param bce_weight: Weight of the binary cross entropy. Must be between 0 and 1.
        :param pool: Pooling method used before calculating RMI. Must be one of ['avg', 'max'].
        :param stride: Stride used in the pooling layer.
        :param use_log_trace: Whether to calculate the log of the trace, instead of the log of the determinant. See equation (15).
        :param use_double_precision: Calculate the RMI using doubles in order to fix potential numerical issues.
        :param epsilon: Magnitude of the entries added to the diagonal of M in order to fix potential numerical issues.
        """
        super().__init__()
        self.use_double_precision = use_double_precision
        self.with_logits = with_logits
        self.bce_weight = bce_weight
        self.stride = stride
        self.pool = pool
        self.radius = radius
        self.use_log_trace = use_log_trace
        self.epsilon = epsilon

    def forward(self, input, target):
        """Return bce_weight * BCE + (1 - bce_weight) * RMI for (b, c, h, w) tensors."""
        # Calculate BCE if needed
        if self.bce_weight != 0:
            if self.with_logits:
                bce = F.binary_cross_entropy_with_logits(input, target=target)
            else:
                bce = F.binary_cross_entropy(input, target=target)
            bce = bce.mean() * self.bce_weight
        else:
            bce = 0.0
        # Apply sigmoid to get probabilities. See final paragraph of section 4.
        if self.with_logits:
            input = torch.sigmoid(input)
        # Downscale tensors before RMI
        input = self.downscale(input)
        target = self.downscale(target)
        # Calculate RMI loss
        rmi = self.rmi_loss(input=input, target=target)
        rmi = rmi.mean() * (1.0 - self.bce_weight)
        return rmi + bce

    def downscale(self, x):
        """Spatially pool x with self.pool ('max' or 'avg') at self.stride; no-op for stride 1."""
        if self.stride == 1:
            return x
        padding = self.stride // 2
        if self.pool == 'max':
            return F.max_pool2d(x, kernel_size=self.stride, stride=self.stride, padding=padding)
        if self.pool == 'avg':
            return F.avg_pool2d(x, kernel_size=self.stride, stride=self.stride, padding=padding)
        raise ValueError(self.pool)

    def rmi_loss(self, input, target):
        """
        Calculates the RMI loss between the prediction and target.
        :return: RMI loss
        """
        assert input.shape == target.shape
        vector_size = self.radius * self.radius
        # Convert to doubles for better precision
        if self.use_double_precision:
            input = input.double()
            target = target.double()
        # Small diagonal matrix to fix numerical issues
        eps = torch.eye(vector_size, dtype=input.dtype, device=input.device) * self.epsilon
        # Broadcast eps over the leading (batch, class) dimensions.
        eps = eps.unsqueeze(dim=0).unsqueeze(dim=0)
        # Get region vectors, shape (b, c, radius*radius, n_regions)
        y, p = extract_region_vectors(input, target, radius=self.radius)
        # Subtract mean (centre each region vector over the region samples)
        y = y - y.mean(dim=3, keepdim=True)
        p = p - p.mean(dim=3, keepdim=True)
        # Covariances
        y_cov = y @ transpose(y)
        p_cov = p @ transpose(p)
        y_p_cov = y @ transpose(p)
        # Approximated posterior covariance matrix of Y given P
        # (p_cov is symmetric, so the transpose of its inverse equals the inverse)
        m = y_cov - y_p_cov @ transpose(inverse(p_cov + eps)) @ transpose(y_p_cov)
        # Lower bound of RMI
        if self.use_log_trace:
            rmi = 0.5 * log_trace(m + eps)
        else:
            rmi = 0.5 * log_det(m + eps)
        # Normalize
        rmi = rmi / float(vector_size)
        # Sum over classes, mean over samples.
        return rmi.sum(dim=1).mean(dim=0)
def extract_region_vectors(input, target, radius):
    """
    Extracts square regions from the pred and target tensors.
    Returns the flattened vectors of length radius*radius.
    :param input: Input Tensor with shape (b, c, h, w).
    :param target: Target Tensor with shape (b, c, h, w).
    :param radius: RMI radius.
    :return: Pair of flattened extracted regions for the prediction and target both with shape (b, c, radius * radius, n), where n is the number of regions.
    """
    height, width = target.shape[2], target.shape[3]
    region_h, region_w = height - (radius - 1), width - (radius - 1)
    # Collect one shifted window per (row, col) offset within the radius,
    # row-major, so offset index = row * radius + col.
    target_slices = []
    input_slices = []
    for row in range(radius):
        for col in range(radius):
            target_slices.append(target[:, :, row:row + region_h, col:col + region_w])
            input_slices.append(input[:, :, row:row + region_h, col:col + region_w])
    y = torch.stack(target_slices, dim=2)
    p = torch.stack(input_slices, dim=2)
    # Collapse the two spatial dimensions into a single sample dimension.
    y = y.view((*y.shape[:-2], -1))
    p = p.view((*p.shape[:-2], -1))
    return y, p
def transpose(x):
    """Swap the last two dimensions of x (batched matrix transpose)."""
    return torch.transpose(x, -2, -1)
def inverse(x):
    """Matrix inverse of x."""
    return x.inverse()
def log_trace(x):
    """Return 2 * sum(log(diag(chol(x)))) over the last dimension.

    NOTE(review): despite the name, this quantity equals log(det(x)) computed
    via the Cholesky factor (with a small epsilon inside the log for
    stability); the name appears to follow the reference implementation's
    `use_log_trace` option — confirm intent against the paper's eq. (15).

    :param x: Symmetric positive-definite matrix (optionally batched).
    :raises RuntimeError: If x is not positive definite (Cholesky fails).
    """
    # torch.cholesky is deprecated; torch.linalg.cholesky returns the same
    # lower-triangular factor.
    x = torch.linalg.cholesky(x)
    diag = torch.diagonal(x, dim1=-2, dim2=-1)
    # 1e-8 guards against log(0) for near-singular factors.
    return 2 * torch.sum(torch.log(diag + 1e-8), dim=-1)
def log_det(x):
    """Log-determinant of x."""
    return x.logdet()
# Quick Base API error codes.
#
# `codes` maps each numeric Quick Base API error code to a human-readable
# message and the HTTP status code it is reported as. The table below holds
# (code, message, response_code) triples and is expanded into the public
# `codes` dict, which keeps the original {"message", "response_code"} shape.
_ERROR_DEFINITIONS = [
    (0, "No error", 200),
    (1, "Unknown error", 500),
    (2, "Invalid input", 400),
    (3, "Insufficient permissions", 401),
    (4, "Bad ticket", 401),
    (5, "Unimplemented operation", 501),
    (6, "Syntax error", 400),
    (7, "API not allowed on this application table", 400),
    (8, "SSL required for this application table", 401),
    (9, "Invalid choice", 400),
    (10, "Invalid field type", 400),
    (11, "Could not parse XML input", 400),
    (12, "Invalid source DBID", 400),
    (13, "Invalid account ID", 400),
    (14, "Missing DBID or DBID of wrong type", 400),
    (15, "Invalid hostname", 400),
    (19, "Unauthorized IP address", 401),
    (20, "Unknown username/password", 401),
    (21, "Unknown user", 401),
    (22, "Sign-in required", 401),
    (23, "Feature not supported", 501),
    (24, "Invalid application token", 401),
    (25, "Duplicate application token", 401),
    (26, "Max count", 400),
    (27, "Registration required", 403),
    (28, "Managed by LDAP", 400),
    (29, "User on Deny list", 403),
    (30, "No such record", 400),
    (31, "No such field", 400),
    (32, "The application does not exist or was deleted", 410),
    (33, "No such query", 400),
    (34, "You cannot change the value of this field", 403),
    (35, "No data returned", 400),
    (36, "Cloning error", 500),
    (37, "No such report", 400),
    (38, "Periodic report contains a restricted field", 400),
    (50, "Missing required field", 400),
    (51, "Attempting to add a non-unique value to a field marked unique", 400),
    (52, "Duplicate field", 400),
    (53, "Fields missing from your import data", 400),
    (54, "Cached list of records not found", 400),
    (60, "Update conflict detected", 409),
    (61, "Schema is locked", 409),
    (70, "Account size limit exceeded", 403),
    (71, "Database size limit exceeded", 403),
    (73, "Your account has been suspended", 403),
    (74, "You are not allowed to create applications", 403),
    (75, "View too large", 400),
    (76, "Too many criteria", 400),
    (77, "API request limit exceeded", 400),
    (78, "Data limit exceeded", 403),
    (80, "Overflow", 403),
    (81, "Item not found", 400),
    (82, "Operation took too long", 408),
    (83, "Access denied", 403),
    (84, "Database error", 500),
    (85, "Schema update error", 500),
    (87, "Invalid group", 400),
    (100, "Technical Difficulties -- try again later", 500),
    (101, "Quick Base is temporarily unavailable due to technical difficulties", 500),
    (102, "Invalid request - we cannot understand the URL you specified", 400),
    (103, "The Quick Base URL you specified contained an invalid srvr parameter", 400),
    (104, "Your Quick Base app is experiencing unusually heavy traffic. Please wait a few minutes and re-try this command.", 408),
    (105, "Quick Base is experiencing technical difficulties", 500),
    (110, "Invalid role", 400),
    (111, "User exists", 400),
    (112, "No user in role", 400),
    (113, "User already in role", 400),
    (114, "Must be admin user", 401),
    (150, "Upgrade plan", 403),
    (151, "Expired plan", 403),
    (152, "App suspended", 403),
]

codes = {
    code: {"message": message, "response_code": response_code}
    for code, message, response_code in _ERROR_DEFINITIONS
}
[](https://travis-ci.org/wouterboomsma/eigency)
# Eigency
Eigency is a Cython interface between Numpy arrays and Matrix/Array
objects from the Eigen C++ library. It is intended to simplify the
process of writing C++ extensions using the Eigen library. Eigency is
designed to reuse the underlying storage of the arrays when passing
data back and forth, and will thus avoid making unnecessary copies
whenever possible. Only in cases where copies are explicitly requested
by your C++ code will they be made.
Below is a description of a range of common usage scenarios. A full working
example of both setup and these different use cases is available in the
`test` directory distributed with this package.
## Setup
To import eigency functionality, add the following to your `.pyx` file:
```
from eigency.core cimport *
```
In addition, in the `setup.py` file, the include directories must be
set up to include the eigency includes. This can be done by calling
the `get_includes` function in the `eigency` module:
```
import eigency
...
extensions = [
Extension("module-dir-name/module-name", ["module-dir-name/module-name.pyx"],
include_dirs = [".", "module-dir-name"] + eigency.get_includes()
),
]
```
Eigency includes a version of the Eigen library, and the `get_includes` function will include the path to this directory. If you
have your own version of Eigen, just set the `include_eigen` option to False, and add your own path instead:
```
include_dirs = [".", "module-dir-name", 'path-to-own-eigen'] + eigency.get_includes(include_eigen=False)
```
## From Numpy to Eigen
Assume we are writing a Cython interface to the following C++ function:
```c++
void function_w_mat_arg(const Eigen::Map<Eigen::MatrixXd> &mat) {
std::cout << mat << "\n";
}
```
Note that we use `Eigen::Map` to ensure that we can reuse the storage
of the numpy array, thus avoiding making a copy. Assuming the C++ code
is in a file called `functions.h`, the corresponding `.pyx` entry could look like this:
```
cdef extern from "functions.h":
cdef void _function_w_mat_arg "function_w_mat_arg"(Map[MatrixXd] &)
# This will be exposed to Python
def function_w_mat_arg(np.ndarray array):
return _function_w_mat_arg(Map[MatrixXd](array))
```
The last line contains the actual conversion. `Map` is an Eigency
type that derives from the real Eigen map, and will take care of
the conversion from the numpy array to the corresponding Eigen type.
We can now call the C++ function directly from Python:
```python
>>> import numpy as np
>>> import eigency_tests
>>> x = np.array([[1.1, 2.2], [3.3, 4.4]])
>>> eigency_tests.function_w_mat_arg(x)
1.1 3.3
2.2 4.4
```
(if you are wondering about why the matrix is transposed, please
see the Storage layout section below).
## Types matter
The basic idea behind eigency is to share the underlying representation of a
numpy array between Python and C++. This means that somewhere in the process,
we need to make explicit which numerical types we are dealing with. In the
function above, we specify that we expect an Eigen MatrixXd, which means
that the numpy array must also contain double (i.e. float64) values. If we instead provide
a numpy array of ints, we will get strange results.
```python
>>> import numpy as np
>>> import eigency_tests
>>> x = np.array([[1, 2], [3, 4]])
>>> eigency_tests.function_w_mat_arg(x)
4.94066e-324 1.4822e-323
9.88131e-324 1.97626e-323
```
This is because we are explicitly asking C++ to interpret out python integer
values as floats.
To avoid this type of error, you can force your cython function to
accept only numpy arrays of a specific type:
```
cdef extern from "functions.h":
cdef void _function_w_mat_arg "function_w_mat_arg"(Map[MatrixXd] &)
# This will be exposed to Python
def function_w_mat_arg(np.ndarray[np.float64_t, ndim=2] array):
return _function_w_mat_arg(Map[MatrixXd](array))
```
(Note that when using this technique to select the type, you also need to specify
the dimensions of the array (this will default to 1)). Using this new definition,
users will get an error when passing arrays of the wrong type:
```python
>>> import numpy as np
>>> import eigency_tests
>>> x = np.array([[1, 2], [3, 4]])
>>> eigency_tests.function_w_mat_arg(x)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "eigency_tests/eigency_tests.pyx", line 87, in eigency_tests.eigency_tests.function_w_mat_arg
ValueError: Buffer dtype mismatch, expected 'float64_t' but got 'long'
```
Since it avoids many surprises, it is strongly recommended to use this technique
to specify the full types of numpy arrays in your cython code whenever
possible.
## Writing Eigen Map types in Cython
Since Cython does not support nested fused types, you cannot write types like `Map[Matrix[double, 2, 2]]`. In most cases, you won't need to, since you can just use Eigens convenience typedefs, such as `Map[VectorXd]`. If you need the additional flexibility of the full specification, you can use the `FlattenedMap` type, where all type arguments can be specified at top level, for instance `FlattenedMap[Matrix, double, _2, _3]` or `FlattenedMap[Matrix, double, _2, Dynamic]`. Note that dimensions must be prefixed with an underscore.
Using full specifications of the Eigen types, the previous example would look like this:
```
cdef extern from "functions.h":
cdef void _function_w_mat_arg "function_w_mat_arg" (FlattenedMap[Matrix, double, Dynamic, Dynamic] &)
# This will be exposed to Python
def function_w_mat_arg(np.ndarray[np.float64_t, ndim=2] array):
return _function_w_mat_arg(FlattenedMap[Matrix, double, Dynamic, Dynamic](array))
```
`FlattenedType` takes four template parameters: arraytype, scalartype,
rows and cols. Eigen supports a few other template arguments for
setting the storage layout and Map strides. Since cython does not
support default template arguments for fused types, we have instead
defined separate types for this purpose. These are called
`FlattenedMapWithOrder` and `FlattenedMapWithStride` with five and eight
template arguments, respectively. For details on their use, see the section
about storage layout below.
## From Numpy to Eigen (insisting on a copy)
Eigency will not complain if the C++ function you interface with does
not take a Eigen Map object, but instead a regular Eigen Matrix or
Array. However, in such cases, a copy will be made. Actually, the
procedure is exactly the same as above. In the `.pyx` file, you still
define everything exactly the same way as for the Map case described above.
For instance, given the following C++ function:
```c++
void function_w_vec_arg_no_map(const Eigen::VectorXd &vec);
```
The Cython definitions would still look like this:
```
cdef extern from "functions.h":
cdef void _function_w_vec_arg_no_map "function_w_vec_arg_no_map"(Map[VectorXd] &)
# This will be exposed to Python
def function_w_vec_arg_no_map(np.ndarray[np.float64_t] array):
return _function_w_vec_arg_no_map(Map[VectorXd](array))
```
Cython will not mind the fact that the argument type in the extern
declaration (a Map type) differs from the actual one in the `.h` file,
as long as one can be assigned to the other. Since Map objects can be
assigned to their corresponding Matrix/Array types this works
seamlessly. But keep in mind that this assignment will make a copy of
the underlying data.
## Eigen to Numpy
C++ functions returning a reference to an Eigen Matrix/Array can also
be transferred to numpy arrays without copying their content. Assume
we have a class with a single getter function that returns an Eigen
matrix member:
```c++
class MyClass {
public:
MyClass():
matrix(Eigen::Matrix3d::Constant(3.)) {
}
Eigen::MatrixXd &get_matrix() {
return this->matrix;
}
private:
Eigen::Matrix3d matrix;
};
```
The Cython C++ class interface is specified as usual:
```
cdef cppclass _MyClass "MyClass":
_MyClass "MyClass"() except +
Matrix3d &get_matrix()
```
And the corresponding Python wrapper:
```python
cdef class MyClass:
cdef _MyClass *thisptr;
def __cinit__(self):
self.thisptr = new _MyClass()
def __dealloc__(self):
del self.thisptr
def get_matrix(self):
return ndarray(self.thisptr.get_matrix())
```
This last line contains the actual conversion. Again, eigency has its
own version of `ndarray`, that will take care of the conversion for
you.
Due to limitations in Cython, Eigency cannot deal with full
Matrix/Array template specifications as return types
(e.g. `Matrix[double, 4, 2]`). However, as a workaround, you can use
`PlainObjectBase` as a return type in such cases (or in all cases if
you prefer):
```
PlainObjectBase &get_matrix()
```
## Overriding default behavior
The `ndarray` conversion type specifier will attempt to guess whether you want a copy
or a view, depending on the return type. Most of the time, this is
probably what you want. However, there might be cases where you want
to override this behavior. For instance, functions returning const
references will result in a copy of the array, since the const-ness
cannot be enforced in Python. However, you can always override the
default behavior by using the `ndarray_copy` or `ndarray_view`
functions.
Expanding the `MyClass` example from before:
```c++
class MyClass {
public:
...
const Eigen::MatrixXd &get_const_matrix() {
return this->matrix;
}
...
};
```
With the corresponding cython interface specification
The Cython C++ class interface is specified as usual:
```
cdef cppclass _MyClass "MyClass":
...
const Matrix3d &get_const_matrix()
```
The following would return a copy
```python
cdef class MyClass:
...
def get_const_matrix(self):
return ndarray(self.thisptr.get_const_matrix())
```
while the following would force it to return a view
```python
cdef class MyClass:
...
def get_const_matrix(self):
return ndarray_view(self.thisptr.get_const_matrix())
```
## Eigen to Numpy (non-reference return values)
Functions returning an Eigen object (not a reference), are specified
in a similar way. For instance, given the following C++ function:
```c++
Eigen::Matrix3d function_w_mat_retval();
```
The Cython code could be written as:
```
cdef extern from "functions.h":
cdef Matrix3d _function_w_mat_retval "function_w_mat_retval" ()
# This will be exposed to Python
def function_w_mat_retval():
return ndarray_copy(_function_w_mat_retval())
```
As mentioned above, you can replace `Matrix3d` (or any other Eigen return type) with
`PlainObjectBase`, which is especially relevant when working with
Eigen object that do not have an associated convenience typedef.
Note that we use `ndarray_copy` instead of `ndarray` to explicitly
state that a copy should be made. In c++11 compliant compilers, it
will detect the rvalue reference and automatically make a copy even if
you just use `ndarray` (see next section), but to ensure that it works
also with older compilers it is recommended to always use
`ndarray_copy` when returning newly constructed eigen values.
## Corrupt data when returning non-map types
The tendency of Eigency to avoid copies whenever possible can lead
to corrupted data when returning non-map Eigen arrays. For instance,
in the `function_w_mat_retval` from the previous section, a temporary
value will be returned from C++, and we have to take care to make
a copy of this data instead of letting the resulting numpy array
refer directly to this memory. In C++11, this situation can be
detected directly using rvalue references, and it will therefore
automatically make a copy:
```
def function_w_mat_retval():
# This works in C++11, because it detects the rvalue reference
return ndarray(_function_w_mat_retval())
```
However, to make sure it works with older compilers,
it is recommended to use the `ndarray_copy` conversion:
```
def function_w_mat_retval():
# Explicit request for copy - this always works
return ndarray_copy(_function_w_mat_retval())
```
## Storage layout - why arrays are sometimes transposed
The default storage layout used in numpy and Eigen differ. Numpy uses
a row-major layout (C-style) per default while Eigen uses a
column-major layout (Fortran style) by default. In Eigency, we prioritize to
avoid copying of data whenever possible, which can have unexpected
consequences in some cases: There is no problem when passing values
from C++ to Python - we just adjust the storage layout of the returned
numpy array to match that of Eigen. However, since the storage layout
is encoded into the _type_ of the Eigen array (or the type of the
Map), we cannot automatically change the layout in the Python to C++ direction. In
Eigency, we have therefore opted to return the transposed array/matrix
in such cases. This provides the user with the flexibility to deal
with the problem either in Python (use order="F" when constructing
your numpy array), or on the C++ side: (1) explicitly define your
argument to have the row-major storage layout, (2) manually set the Map
stride, or (3) just call `.transpose()` on the received
array/matrix.
As an example, consider the case of a C++ function that both receives
and returns a Eigen Map type, thus acting as a filter:
```c++
Eigen::Map<Eigen::ArrayXXd> function_filter(Eigen::Map<Eigen::ArrayXXd> &mat) {
return mat;
}
```
The Cython code could be:
```
cdef extern from "functions.h":
...
cdef Map[ArrayXXd] &_function_filter1 "function_filter1" (Map[ArrayXXd] &)
def function_filter1(np.ndarray[np.float64_t, ndim=2] array):
return ndarray(_function_filter1(Map[ArrayXXd](array)))
```
If we call this function from Python in the standard way, we will
see that the array is transposed on the way from Python to C++, and
remains that way when it is again returned to Python:
```python
>>> x = np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])
>>> y = function_filter1(x)
>>> print x
[[ 1. 2. 3. 4.]
[ 5. 6. 7. 8.]]
>>> print y
[[ 1. 5.]
[ 2. 6.]
[ 3. 7.]
[ 4. 8.]]
```
The simplest way to avoid this is to tell numpy to use a
column-major array layout instead of the default row-major
layout. This can be done using the order='F' option:
```python
>>> x = np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]], order='F')
>>> y = function_filter1(x)
>>> print x
[[ 1. 2. 3. 4.]
[ 5. 6. 7. 8.]]
>>> print y
[[ 1. 2. 3. 4.]
[ 5. 6. 7. 8.]]
```
The other alternative is to tell Eigen to use RowMajor layout. This
requires changing the C++ function definition:
```c++
typedef Eigen::Map<Eigen::Array<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> > RowMajorArrayMap;
RowMajorArrayMap &function_filter2(RowMajorArrayMap &mat) {
return mat;
}
```
To write the corresponding Cython definition, we need the expanded version of
`FlattenedMap` called `FlattenedMapWithOrder`, which allows us to specify
the storage order:
```
cdef extern from "functions.h":
...
cdef PlainObjectBase _function_filter2 "function_filter2" (FlattenedMapWithOrder[Array, double, Dynamic, Dynamic, RowMajor])
def function_filter2(np.ndarray[np.float64_t, ndim=2] array):
return ndarray(_function_filter2(FlattenedMapWithOrder[Array, double, Dynamic, Dynamic, RowMajor](array)))
```
Another alternative is to keep the array itself in RowMajor format,
but use different stride values for the Map type:
```c++
typedef Eigen::Map<Eigen::ArrayXXd, Eigen::Unaligned, Eigen::Stride<1, Eigen::Dynamic> > CustomStrideMap;
CustomStrideMap &function_filter3(CustomStrideMap &);
```
In this case, in Cython, we need to use the even more extended
`FlattenedMap` type called `FlattenedMapWithStride`, taking eight
arguments:
```
cdef extern from "functions.h":
...
cdef PlainObjectBase _function_filter3 "function_filter3" (FlattenedMapWithStride[Array, double, Dynamic, Dynamic, ColMajor, Unaligned, _1, Dynamic])
def function_filter3(np.ndarray[np.float64_t, ndim=2] array):
return ndarray(_function_filter3(FlattenedMapWithStride[Array, double, Dynamic, Dynamic, ColMajor, Unaligned, _1, Dynamic](array)))
```
In all three cases, the returned array will now be of the same shape
as the original.
| /rmjarvis.eigency-1.77.1.tar.gz/rmjarvis.eigency-1.77.1/README.md | 0.790732 | 0.973418 | README.md | pypi |
import argparse
import dataclasses
import itertools
import os
from copy import deepcopy
from enum import Enum
from typing import Any, Dict, Optional, Sequence, Tuple, Union
class ConfigField(Enum):
    # ConfigMapping field names that a common prefix can be applied to
    # (see BaseConfig._set_prefix).
    ARGUMENT = "argument"
    ATTRIBUTE = "attribute"
    VARIABLE = "variable"
class PrefixSeparator(Enum):
    # Separator used when joining prefix parts for each field kind:
    # CLI arguments use "-", attribute paths ".", environment variables "_".
    ARGUMENT = "-"
    ATTRIBUTE = "."
    VARIABLE = "_"
@dataclasses.dataclass
class ConfigMapping:
    """Ties one config attribute to a CLI argument and an environment variable.

    `argument` defaults to the lowercased attribute name and `variable` to the
    uppercased attribute name. `group` is normalised to a list of group names:
    None becomes [], a string becomes a one-element list, and any other
    sequence (list, tuple, ...) is converted to a list.
    """

    attribute: str
    argument: Optional[str] = None
    variable: Optional[str] = None
    default: Any = None
    description: Optional[str] = None
    group: Optional[Union[str, Sequence[str]]] = None

    def __post_init__(self):
        self.argument = (self.argument or self.attribute).lower()
        self.variable = (self.variable or self.attribute).upper()
        # Normalise group: previously only str/list were handled and any other
        # sequence (e.g. a tuple) was silently replaced with [] — accept any
        # sequence now, which is backward compatible for None/str/list inputs.
        if self.group is None:
            self.group = []
        elif isinstance(self.group, str):
            self.group = [self.group]
        else:
            self.group = list(self.group)

    def as_dict(self) -> Dict[Any, Any]:
        """Return class attributes as dictionary"""
        return dataclasses.asdict(self)

    def as_tuple(self) -> Sequence[Tuple[Any, Any]]:
        """Return class attributes as list of tuples"""
        return list(dataclasses.asdict(self).items())
class BaseConfig:
    """Container for configuration values driven by ConfigMapping definitions.

    Each ConfigMapping in ``mappings`` ties an attribute on this object to a
    CLI argument name and an environment variable name; attribute values can
    then be populated from argparse results (update_from_args) or from the
    process environment (update_from_env).
    """

    # Class-level default. merge() always rebinds self.mappings to a fresh
    # list, so instances never mutate this shared empty list in place.
    mappings: Sequence[ConfigMapping] = []

    def __init__(self, mappings: Optional[Sequence[ConfigMapping]] = None):
        # Deep-copy caller-supplied mappings so later prefix/group operations
        # cannot mutate the caller's objects.
        mappings = deepcopy(mappings) if mappings is not None else []
        self.merge(mappings=mappings)
        self.update()

    def generate_mappings(self) -> None:
        """Generate mappings for attributes that were set directly"""
        attributes = [(k, v) for k, v in vars(self).items() if k != "mappings"]
        _mapped = set([x.attribute for x in self.mappings])
        # Only attributes without an existing mapping get a generated one;
        # the current attribute value becomes the mapping default.
        _unmapped = [(k, v) for k, v in attributes if k not in _mapped]
        mappings = [ConfigMapping(attribute=k, default=v) for k, v in _unmapped]
        self.merge(mappings=mappings)

    def merge(self, mappings: Optional[Sequence[ConfigMapping]]) -> None:
        """Merge multiple ConfigMapping definitions"""
        assert type(mappings) in (tuple, list), f"Not a valid sequence: {mappings=}"
        mappings = deepcopy(mappings)
        # Later mappings win: keyed by attribute, incoming entries overwrite
        # existing ones with the same attribute name.
        _mappings_merged = {x.attribute: x for x in [*self.mappings, *mappings]}
        self.mappings = list(_mappings_merged.values())

    def remove_unmapped_attributes(self):
        """Remove attributes without valid attribute mappings, retain groups"""
        attributes = set([k for k, v in vars(self).items() if k != "mappings"])
        _mapped = set([x.attribute for x in self.mappings])
        for attribute in set(attributes).difference(_mapped):
            # Nested BaseConfig instances represent groups and are kept.
            if isinstance(getattr(self, attribute), BaseConfig):
                continue
            delattr(self, attribute)

    def _set_prefix(
        self,
        field: ConfigField,
        prefix: str,
        group: Optional[Union[Sequence[str], str]] = None,
        merge_group: bool = False,
    ):
        """Set a common prefix for arguments or variables, optionally group-only"""
        # Per-field (field name, join separator) pairs; only ARGUMENT and
        # VARIABLE support prefixing.
        fields = {
            ConfigField.ARGUMENT: (
                ConfigField.ARGUMENT.value,
                PrefixSeparator.ARGUMENT.value,
            ),
            ConfigField.VARIABLE: (
                ConfigField.VARIABLE.value,
                PrefixSeparator.VARIABLE.value,
            ),
        }
        # Arguments are lowercased, environment variables uppercased.
        conversions = {
            ConfigField.ARGUMENT: lambda x: x.lower(),
            ConfigField.VARIABLE: lambda x: x.upper(),
        }
        _field, _separator = fields[field]
        _prefix = conversions[field](prefix)
        # Normalise group to a list of case-converted names.
        _group = group or []
        _group = [group] if isinstance(group, str) else _group
        _group = [conversions[field](g) for g in _group]
        # NOTE(review): _group is case-converted before comparison, while
        # mapping.group is stored as given — groups stored in a different case
        # will not match here; confirm this is intended.
        mappings = [x for x in self.mappings if x.group == _group] if group is not None else self.mappings
        _mappings = []
        for mapping in mappings:
            _mapping = mapping.as_dict()
            # With merge_group=True the group names become part of the prefix,
            # e.g. prefix-group-argument instead of prefix-argument.
            if isinstance(_group, list) and merge_group is True:
                _members = [_prefix, *_group, _mapping[_field]]
            else:
                _members = [_prefix, _mapping[_field]]
            _prefixed = _separator.join(_members)
            _mappings.append(ConfigMapping(**{**_mapping, _field: _prefixed}))
        self.merge(mappings=_mappings)

    def set_argument_prefix(self, prefix: str, group: str = None):
        """Set a common prefix for arguments, optionally group-only"""
        self._set_prefix(field=ConfigField.ARGUMENT, prefix=prefix, group=group)

    def set_variable_prefix(self, prefix: str, group: str = None):
        """Set a common prefix for arguments, optionally group-only"""
        self._set_prefix(field=ConfigField.VARIABLE, prefix=prefix, group=group)

    def update(self, reset: bool = False) -> None:
        """Update attributes self.mappings"""
        for mapping in self.mappings:
            # Without reset, attributes that were changed away from their
            # mapping default are left untouched.
            if (
                hasattr(self, mapping.attribute)
                and getattr(self, mapping.attribute) != mapping.default
                and reset is False
            ):
                continue
            setattr(self, mapping.attribute, mapping.default)

    def update_from_args(self, args: argparse.Namespace) -> None:
        """Update attribute values from argparse arguments"""
        # Transform Namespace into dict, remove None values to honour defaults
        _args = {k: v for k, v in vars(args).items() if v is not None}
        for m in self.mappings:
            setattr(
                self, m.attribute, _args.get(m.argument, getattr(self, m.attribute))
            )

    def update_from_env(self) -> None:
        """Update attribute values from environment variables"""
        for m in self.mappings:
            _variable = m.variable.upper()
            setattr(self, m.attribute, os.getenv(_variable, getattr(self, m.attribute)))

    def update_groups(self) -> None:
        """Recursively create groups from mappings, resulting in a nested class tree"""
        def __grouper(_mapping: ConfigMapping) -> str:
            # Group by the first (outermost) group name.
            return _mapping.group[0]
        _groups = [x for x in self.mappings if x.group not in ([], None)]
        # groupby requires its input sorted by the same key.
        _sorted = sorted(_groups, key=__grouper)
        _grouped = [(k, list(g)) for k, g in itertools.groupby(_sorted, __grouper)]
        for group, mappings in _grouped:
            # Remove mappings from parent
            self.mappings = [x for x in self.mappings if x not in mappings]
            _mappings = deepcopy(mappings)
            # Push mapping groups down one level
            for mapping in _mappings:
                mapping.group = mapping.group[1:]
            if not hasattr(self, group):
                setattr(self, group, BaseConfig())
            _group = getattr(self, group)
            _group.merge(mappings=_mappings)
            # Recurse into subgroups
            if any([x.group is not None for x in _group.mappings]):
                _group.update_groups()
            # Update ungrouped mappings
            _group.update()
        # Clean up any attributes which have been pushed into a group
        self.remove_unmapped_attributes()

    def validate(self):
        """Validate that mappings are correct and mapped attributes exist"""
        assert type(self.mappings) in (
            list,
            tuple,
        ), f"Invalid sequence: {self.mappings=}"
        for mapping in self.mappings:
            assert isinstance(
                mapping, ConfigMapping
            ), f"Invalid mapping type: {mapping=}"
            assert hasattr(
                self, mapping.attribute
            ), f"Missing attribute: {mapping.attribute}"
import datetime
import json
import logging
import os
from enum import Enum
from typing import Iterator, Union, Any
Expected = Union[bool, str, int, float, datetime.date, datetime.datetime, None]
Jsonified = Union[bool, str, int, float, None]
class WriteMode(Enum):
    """File-open modes accepted by write_jsonl; values are ``open()`` mode flags."""
    APPEND = "a"    # append to an existing file
    CREATE = "x"    # create new file, fail if it already exists
    TRUNCATE = "w"  # overwrite any existing content
def _jsonify_types(row: dict[str, Expected]) -> dict[str, Jsonified]:
"""Cast types that are not supported by JSON to more compatible types"""
_castable_types = {
datetime.date: lambda x: x.isoformat(),
datetime.datetime: lambda x: x.isoformat(timespec="microseconds"),
datetime.timedelta: lambda x: str(x),
}
return {k: _castable_types.get(type(v), lambda x: x)(v) for k, v in row.items()}
def write_jsonl(
    data: Union[Iterator, list[Union[list[tuple[str, Any]], dict[str, Any]]]],
    prefix: str,
    filename: str,
    mode: WriteMode = WriteMode.CREATE,
) -> None:
    """Write serialised data to a given path/file as JSON Lines.

    Args:
        data: Iterable of rows; each row is a dict or a list of key/value tuples.
        prefix: Directory part of the target path.
        filename: File name part of the target path.
        mode: Open mode; defaults to CREATE (fails if the file exists).

    Raises:
        AssertionError: If ``mode`` is not a ``WriteMode`` member.
        FileExistsError: In CREATE mode when the file already exists.
        ValueError: If a row is neither a dict nor a list of tuples.
    """
    path = os.path.join(prefix, filename)
    try:
        assert isinstance(
            mode, WriteMode
        ), f"Mode needs to be one of {[x.name for x in WriteMode]}"
        with open(path, mode=mode.value, encoding="utf-8") as outfile:
            logging.debug(f"Writing serialised data, {path=}")
            for line in data:
                if isinstance(line, dict):
                    _line = line
                elif isinstance(line, list) and (
                    not line or isinstance(line[0], tuple)
                ):
                    # An empty list of pairs is a valid (empty) row; the
                    # previous unguarded line[0] raised IndexError for [].
                    _line = dict(line)
                else:
                    raise ValueError("Data must be a list of key/value pairs")
                # _jsonify_types already returns a dict; no extra dict() wrapper.
                outfile.write(json.dumps(_jsonify_types(_line)))
                # Write "\n": the file is opened in text mode, which already
                # translates it to the platform line ending. Writing os.linesep
                # here would produce "\r\r\n" on Windows.
                outfile.write("\n")
    except (AssertionError, FileExistsError) as e:
        logging.error(str(e))
        raise e
def read_jsonl(
    prefix: str, filename: str
) -> Union[Iterator, list[list[tuple[str, Jsonified]]]]:
    """Read JSONL serialised data from a given path/file.

    Returns a generator: the file is only opened — and FileNotFoundError
    only raised/logged — once iteration starts, not when this function
    is called.
    """
    path = os.path.join(prefix, filename)
    try:
        with open(path, mode="r", encoding="utf-8") as infile:
            logging.debug(f"Reading serialised data, {path=}")
            # Decode one JSON object per line, lazily
            yield from iter(json.loads(line) for line in infile)
    except FileNotFoundError as e:
        logging.error(str(e))
        raise e
def delete_file(prefix: str, filename: str) -> None:
    """Delete a given data file.

    Raises:
        FileNotFoundError: If the file does not exist (logged, then re-raised).
    """
    path = os.path.join(prefix, filename)
    logging.debug(f"Deleting serialised data, {path=}")
    try:
        os.remove(path)
    except FileNotFoundError as e:
        logging.error(str(e))
        raise e
def count_file(prefix: str, filename: str) -> int:
    """Count number of lines in a given path/file (0 for an empty file)."""
    # Binary mode avoids any text decoding; sum() counts at C speed.
    with open(os.path.join(prefix, filename), mode="rb") as infile:
        return sum(1 for _ in infile)
import time
import os
import struct
import stat
import logging
logging.basicConfig(format='%(message)s')
log = logging.getLogger('resim')
def affine_map(x, a0, a1, b0, b1):
    """Map x in range (a0, a1) to (b0, b1)

    Args:
        x (float): input
        a0 (float): input range start
        a1 (float): input range end
        b0 (float): output range start
        b1 (float): output range end

    Returns:
        int: mapped coordinate (truncated toward zero)
    """
    # Normalise over the input span (a1 - a0), not a1 alone: the previous
    # expression only mapped correctly when a0 == 0 (which is how the evdev
    # coordinate ranges in this module happen to start, hiding the bug).
    return int(((x - a0) / (a1 - a0)) * (b1 - b0) + b0)
def makefifo(path):
    """Create a named pipe at ``path`` and open it read/write.

    Any existing fifo at the same path is removed first.

    Args:
        path (str): path to new fifo

    Returns:
        int: file descriptor opened with O_RDWR
    """
    exists = os.path.exists(path)
    if exists and stat.S_ISFIFO(os.stat(path).st_mode):
        os.remove(path)
    os.mkfifo(path)
    # O_RDWR keeps the fifo open without blocking on a peer
    return os.open(path, os.O_RDWR)
# write evdev events to fifos
def write_evdev(f, e_type, e_code, e_value):
    """Write a single evdev input event to a fifo.

    Args:
        f (int): fd of fifo
        e_type (int): evdev event type
        e_code (int): evdev event code
        e_value (int): evdev event value
    """
    log.debug("{} {} {} {}".format(f, e_type, e_code, e_value))
    now_ns = time.time_ns()
    seconds = int(now_ns / 1e9)
    microseconds = int(now_ns / 1e3 % 1e6)
    # Layout mirrors struct input_event: timeval (sec, usec), type, code, value
    packed = struct.pack('ILHHi', seconds, microseconds, e_type, e_code, e_value)
    os.write(f, packed)
# ----- evdev codes -----
# see evdev_notes.md
# tuples containing (type, code)

# Synchronisation event; this constant also carries its value: (type, code, value)
code_sync = (0, 0, 0)

# Stylus events: EV_KEY tool/contact codes and EV_ABS position/pressure axes
codes_stylus = {
    'toolpen': (1, 320),
    'toolrubber': (1, 321),
    'touch': (1, 330),
    'stylus': (1, 331),
    'stylus2': (1, 332),
    'abs_x': (3, 0),
    'abs_y': (3, 1),
    'abs_pressure': (3, 24),
    'abs_distance': (3, 25),
    'abs_tilt_x': (3, 26),
    'abs_tilt_y': (3, 27)
}

# Multitouch (finger) events: EV_ABS ABS_MT_* axes
codes_touch = {
    'abs_mt_distance': (3, 25),
    'abs_mt_slot': (3, 47),
    'abs_mt_touch_major': (3, 48),
    'abs_mt_touch_minor': (3, 49),
    'abs_mt_orientation': (3, 52),
    'abs_mt_position_x': (3, 53),
    'abs_mt_position_y': (3, 54),
    'abs_mt_tracking_id': (3, 57),
    'abs_mt_pressure': (3, 58)
}

# Hardware buttons: EV_KEY codes
codes_button = {
    'home': (1, 102),
    'left': (1, 105),
    'right': (1, 106),
    'power': (1, 116),
    'wakeup': (1, 143)
}

# Raw coordinate maxima reported by the devices (inclusive)
stylus_max_x = 20967
stylus_max_y = 15725
touch_max_x = 767
touch_max_y = 1023
import array
import operator
from base64 import b64decode
import qrcode
from reportlab.lib.units import toLength
# Defaults applied to every QR code unless overridden by the caller
DEFAULT_PARAMS = {
    'size': '5cm',
    'padding': '2.5',
    'fg': '#000000',
    'bg': None,
    'version': None,
    'error_correction': 'L',
}
# Keys consumed by the reportlab image factory vs. forwarded to qrcode.make()
GENERATOR_PARAMS = {'size', 'padding', 'fg', 'bg', 'x', 'y'}
QR_PARAMS = {'version', 'error_correction'}
# Single-letter level -> qrcode error-correction constant
QR_ERROR_CORRECTIONS = {
    'L': qrcode.ERROR_CORRECT_L,
    'M': qrcode.ERROR_CORRECT_M,
    'Q': qrcode.ERROR_CORRECT_Q,
    'H': qrcode.ERROR_CORRECT_H,
}
# Unit steps used when walking a shape outline in the bitmap
DIRECTION = (
    ( 1,  0),  # right
    ( 0,  1),  # down
    (-1,  0),  # left
    ( 0, -1),  # up
)
# left, direct, right
# Neighbour offsets probed (relative to the cursor) for each heading
DIRECTION_TURNS_CHECKS = (
    (( 0, -1), ( 0,  0), (-1,  0)),  # right
    (( 0,  0), (-1,  0), (-1, -1)),  # down
    ((-1,  0), (-1, -1), ( 0, -1)),  # left
    ((-1, -1), ( 0, -1), ( 0,  0)),  # up
)
class Vector(tuple):
    """Immutable coordinate tuple supporting element-wise addition."""

    def __add__(self, other):
        # Element-wise sum; truncates to the shorter operand, like zip()
        return self.__class__(a + b for a, b in zip(self, other))
class ReportlabImageBase(qrcode.image.base.BaseImage):
    """qrcode image backend that renders the module matrix as vector paths
    directly onto a reportlab canvas (instead of producing a raster image)."""

    size = None      # total edge length; parsed to points in __init__
    padding = None   # quiet-zone width; parsed to points in __init__
    bg = None        # background colour, or None for transparent
    fg = None        # foreground (module) colour
    bitmap = None    # flat width*width module matrix, one byte per module
    x = 0            # canvas offset
    y = 0

    def __init__(self, *args, **kwargs):
        """Allocate the module bitmap and normalise size/padding to points."""
        super().__init__(*args, **kwargs)
        # self.width (modules per side) is set by the qrcode BaseImage constructor
        self.bitmap = array.array('B', [0] * self.width * self.width)
        self.size = toLength(self.size) if isinstance(self.size, str) else float(self.size)
        # padding accepts "N%" of size, a bare number of module widths,
        # or a reportlab length string such as "2mm"
        if isinstance(self.padding, str) and '%' in self.padding:
            self.padding = float(self.padding[:-1]) * self.size / 100
        else:
            try:
                # numeric padding means "this many module widths"
                self.padding = float(self.padding)
                self.padding = (self.size / (self.width + self.padding * 2)) * self.padding
            except ValueError:
                self.padding = toLength(self.padding) if isinstance(self.padding, str) else float(self.padding)

    def drawrect(self, row, col):
        """qrcode callback: mark one dark module in the bitmap."""
        self.bitmap_set((col, row), 1)

    def save(self, stream, kind=None):
        """Draw the accumulated bitmap as filled vector paths on ``stream``
        (a reportlab canvas); ``kind`` is accepted for API compatibility."""
        stream.saveState()
        try:
            # Move to start
            stream.translate(self.x, self.y)
            # Draw background
            if self.bg is not None:
                stream.setFillColor(self.bg)
                stream.rect(0, 0, self.size, self.size, fill=1, stroke=0)
            # Set foreground
            stream.setFillColor(self.fg)
            # Set transform matrix
            stream.translate(self.padding, self.padding)
            scale = (self.size - (self.padding * 2)) / self.width
            stream.scale(scale, scale)
            # Draw code; y is flipped because the bitmap grows downward
            # while the canvas grows upward
            p = stream.beginPath()
            for segment in self.get_segments():
                p.moveTo(segment[0][0], self.width - segment[0][1])
                for coords in segment[1:-1]:
                    p.lineTo(coords[0], self.width - coords[1])
                p.close()
            stream.drawPath(p, stroke=0, fill=1)
        finally:
            stream.restoreState()

    def addr(self, coords):
        """
        Get index to bitmap (None when coords fall outside the matrix)
        """
        col, row = coords
        if row < 0 or col < 0 or row >= self.width or col >= self.width:
            return None
        return row * self.width + col

    def coord(self, addr):
        """
        Returns bitmap coordinates from address
        """
        return Vector((addr % self.width, addr // self.width))

    def bitmap_get(self, coords):
        """
        Returns pixel value of bitmap (0 for out-of-bounds coordinates)
        """
        addr = self.addr(coords)
        return 0 if addr is None else self.bitmap[addr]

    def bitmap_set(self, coords, value):
        """
        Set pixel value of bitmap
        """
        addr = self.addr(coords)
        self.bitmap[addr] = value

    def bitmap_invert(self, coords):
        """
        Invert value of pixel
        """
        addr = self.addr(coords)
        self.bitmap[addr] = 0 if self.bitmap[addr] else 1

    def get_segments(self):
        """
        Return list of segments (vector shapes); consumes the bitmap.
        """
        segments = []
        segment = self.__consume_segment()
        while segment:
            segments.append(segment)
            segment = self.__consume_segment()
        return segments

    def __consume_segment(self):
        """
        Returns segment of qr image as path (pairs of x, y coordinates)

        Traces the outline of one connected shape by wall-following
        (prefer turning left, then straight, then right), then erases the
        traced shape from the bitmap via per-row scanline inversion.
        """
        # For each bitmap row, the x positions where the outline crosses it
        line_intersections = [[] for __ in range(self.width)]
        try:
            # Find first pixel
            coords = self.coord(self.bitmap.index(1))
        except ValueError:
            # Or no pixels left
            return
        # Accumulated path
        path = []
        # Begin of line
        path.append(tuple(coords))
        # Default direction to right
        direction = 0

        def move():
            nonlocal coords
            step = DIRECTION[direction]
            # Record intersection
            if step[1]:  # Vertical move
                line = coords[1]
                if step[1] == -1:
                    line -= 1
                line_intersections[line].append(coords[0])
            # Step
            coords += step

        # Move to right
        move()
        # From shape begin to end
        while coords != path[0]:
            # Turn left
            val = self.bitmap_get(coords + DIRECTION_TURNS_CHECKS[direction][0])
            if val:
                path.append(tuple(coords))
                direction = (direction - 1) % 4
                move()
                continue
            # Straight
            val = self.bitmap_get(coords + DIRECTION_TURNS_CHECKS[direction][1])
            if val:
                move()
                continue
            # Turn right
            path.append(tuple(coords))
            direction = (direction + 1) % 4
            move()
        path.append(tuple(coords))
        # Remove shape from bitmap: invert between pairs of crossings per row
        for row, line in enumerate(line_intersections):
            line = sorted(line)
            for start, end in zip(line[::2], line[1::2]):
                for col in range(start, end):
                    self.bitmap_invert((col, row))
        return path
def reportlab_image_factory(**kwargs):
    """
    Returns ReportlabImage class for qrcode image_factory, with the given
    keyword arguments baked in as class attributes.
    """
    attributes = dict(kwargs)
    return type('ReportlabImage', (ReportlabImageBase,), attributes)
def clean_params(params):
    """
    Validate and clean parameters in-place.

    Raises:
        ValueError: On unknown keys, a non-numeric ``version``, or an
            unknown ``error_correction`` level.
    """
    # Iterate keys directly; the values are not needed for validation
    for key in params:
        if key not in GENERATOR_PARAMS and key not in QR_PARAMS:
            raise ValueError("Unknown attribute '%s'" % key)
    if params['version'] is not None:
        try:
            params['version'] = int(params['version'])
        except ValueError:
            raise ValueError("Version '%s' is not a number" % params['version'])
    if params['error_correction'] in QR_ERROR_CORRECTIONS:
        params['error_correction'] = QR_ERROR_CORRECTIONS[params['error_correction']]
    else:
        raise ValueError("Unknown error correction '%s', expected one of %s" % (params['error_correction'], ', '.join(QR_ERROR_CORRECTIONS.keys())))
def parse_graphic_params(params):
    """
    Parses params string in form:
    key=value,key2=value2;(text|base64);content
    For example:
    size=5cm,fg=#ff0000,bg=#ffffff,version=1,error_correction=M,padding=5%;text;text to encode

    Returns:
        tuple: (cleaned params dict, content as bytes)
    """
    try:
        parsed_params, fmt, text = params.split(';', 2)
    except ValueError:
        # Message typo fixed ("parametrs" -> "parameters")
        raise ValueError("Wrong format, expected parameters;format;content")
    if fmt not in ('text', 'base64'):
        # Message typo fixed ("supprted" -> "supported")
        raise ValueError("Unknown format '%s', supported are text or base64" % fmt)
    params = DEFAULT_PARAMS.copy()
    if parsed_params:
        try:
            params.update(dict(item.split("=") for item in parsed_params.split(",")))
        except ValueError:
            raise ValueError("Wrong format of parameters '%s', expected key=value pairs delimited by ',' character" % parsed_params)
    clean_params(params)
    text = text.encode('utf-8')
    if fmt == 'base64':
        try:
            text = b64decode(text)
        except Exception as e:
            raise ValueError("Wrong base64 '%s': %s" % (text.decode('utf-8'), e))
    return params, text
def build_qrcode(params, text):
    """Create a QR image object from cleaned params and encoded content."""
    factory_kwargs = {}
    qr_kwargs = {}
    # Split the parameters between the image factory and qrcode.make()
    for key, value in params.items():
        if key in GENERATOR_PARAMS:
            factory_kwargs[key] = value
        elif key in QR_PARAMS:
            qr_kwargs[key] = value
    factory = reportlab_image_factory(**factory_kwargs)
    return qrcode.make(text, image_factory=factory, border=0, **qr_kwargs)
def qr_factory(params):
    """Parse a parameter string and return the corresponding QR image object."""
    parsed, content = parse_graphic_params(params)
    return build_qrcode(parsed, content)
def qr_draw(canvas, text, x=0, y=0, **kwargs):
    """Draw a QR code for ``text`` on ``canvas`` at position (x, y).

    ``x``/``y`` accept either numbers (points) or reportlab length strings;
    remaining keyword arguments override DEFAULT_PARAMS.
    """
    params = dict(DEFAULT_PARAMS)
    params.update(**kwargs)
    clean_params(params)
    params['x'] = toLength(x) if isinstance(x, str) else float(x)
    params['y'] = toLength(y) if isinstance(y, str) else float(y)
    if isinstance(text, str):
        text = text.encode('utf-8')
    build_qrcode(params, text).save(canvas)
def qr(canvas, params=None):
    """
    Generate QR code using plugInGraphic or plugInFlowable

    Args:
        canvas: reportlab canvas to draw onto
        params: parameter string, see parse_graphic_params for the format

    Example RML code:
    <illustration height="5cm" width="5cm" align="center">
    <plugInGraphic module="reportlab_qrcode" function="qr">size=5cm;text;Simple text</plugInGraphic>
    </illustration>
    """
    qr_factory(params).save(canvas)
import logging
import traceback
from typing import List, Mapping
_Logger = logging.getLogger(__name__)
# ---- HTTP-related
# Client-side (4xx) errors
class ClientError(Exception):
    """Client request is incorrect."""
    pass
class AuthenticationError(Exception):
    """Failed to authenticate user."""
    pass
class ForbiddenError(Exception):
    """Forbidden access to resource."""
    pass
class NotFoundError(Exception):
    """Resource not found."""
    pass
class MethodNotAllowed(Exception):
    """Method not allowed for resource."""
    pass
class ExpiredSessionError(Exception):
    """Current user session has expired. Login required."""
    pass
class ExpiredTokenError(Exception):
    """Access token has expired, refresh required."""
    pass

# Server-side (5xx) errors
class InternalServerError(Exception):
    """Server reported internal error."""
    pass
class BadGatewayError(Exception):
    """Upstream server reported internal error."""
    pass
class ServiceUnavailableError(Exception):
    """Service is not available."""
    pass
class GatewayTimeoutError(Exception):
    """Upstream server did not respond in time."""
    pass

# Generic / internal errors
class UnknownError(Exception):
    """Unknown error."""
    pass
class UnreachableError(Exception):
    """Code is unreachable."""
    pass
class LogicError(Exception):
    """Logical inconsistency."""
    pass
class CriticalError(Exception):
    """Critical error."""
    pass
class GCPError(Exception):
    """GCP reported error."""
    pass
class HTTPRequestError(Exception):
    """General error in the HTTP request."""
    pass

# NOTE: the following deliberately shadow the builtins of the same name so
# that this module's error_handler can map them to HTTP codes by class name.
class RuntimeError(RuntimeError):
    """Error depending on run-time conditions."""
    pass
class ValueError(ValueError):
    """Incorrect value."""
    pass
class NotImplementedError(NotImplementedError):
    """Feature not implemented."""
    pass
class UnhandledError(Exception):
    """Error not contemplated."""
    pass
class TimeoutError(TimeoutError):
    """Time taken was more than expected."""
    pass
class AcquireResourceError(Exception):
    """Failure in resource acquisition."""
    pass
class VersionError(Exception):
    """Incompatibility between client and server API versions"""
    pass

class MultipleError(Exception):
    """Container for multiple errors."""
    def __init__(self, *errors):
        # Keep a concrete list so callers can inspect or extend it
        self.errors: List[Exception] = [e for e in errors]
# HTTP status code -> exception class raised for that code
_HTTPCodeToException: Mapping[int, type] = {
    400: ClientError,
    401: AuthenticationError,
    403: ForbiddenError,
    404: NotFoundError,
    405: MethodNotAllowed,
    410: VersionError,
    440: ExpiredSessionError,
    498: ExpiredTokenError,
    500: InternalServerError,
    501: NotImplementedError,
    502: BadGatewayError,
    503: ServiceUnavailableError,
    504: GatewayTimeoutError,
}
# Reverse map: exception class *name* -> HTTP status code
_ExceptionToHTTPCode: Mapping[str, int] = {
    t.__name__: c for c, t in _HTTPCodeToException.items()
}
def http_code_to_exception(code: int, text: str, default: type) -> Exception:
    """Returns an exception object from integer code and text content

    Args:
        code (int): HTTP code
        text (str): Error text
        default (type): Default error type in case ``code`` does not match any error.

    Returns:
        Exception: Error associated to ``code``
    """
    exc_type = _HTTPCodeToException.get(code, default)
    return exc_type(text)
def exception_type_to_http_code(exc_type: str) -> int:
    """Returns an error code for any exception type

    Args:
        exc_type (str): A type of exception

    Returns:
        int: Error code associated to type, or 500 if type is not associated to a HTTP error.
    """
    return _ExceptionToHTTPCode.get(exc_type, 500)
def exception_to_http_code(exc: Exception) -> int:
    """Returns a HTTP error code from an exception object.

    Args:
        exc (Exception): Exception object

    Returns:
        int: Error code
    """
    type_name = type(exc).__name__
    return exception_type_to_http_code(type_name)
def _single_handler(exc: Exception, is_debug: bool) -> Mapping[str, str]:
    """Creates a dict representation of an exception object, possibly appending traceback info.

    Args:
        exc (Exception): Exception object (must not be a MultipleError).
        is_debug (bool): Whether to append traceback info.

    Returns:
        Mapping[str, str]: Dict representation of exception.
    """
    assert not isinstance(exc, MultipleError)
    return {
        "type": type(exc).__name__,
        # Avoid dumping whole HTML error pages into responses/logs
        "content": str(exc) if "html" not in str(exc) else "Omitted HTML content",
        "code": exception_to_http_code(exc),
        # join() already yields a str; the previous f-string wrapper was redundant
        "traceback": "".join(traceback.format_tb(exc.__traceback__))
        if is_debug and isinstance(exc, Exception)
        else "hidden",
    }
def error_handler(*excs) -> Mapping[str, Mapping[str, str]]:
    """Creates a dict representation of multiple exception objects.

    Returns:
        Mapping[str,Mapping[str,str]]: Dict representation of exceptions

    Notes:
        Error handler returning a json with formatted errors. Input errors may be reordered if some of them are ``MultipleError``.
    """
    # Tracebacks are only included when the root logger is at DEBUG or lower
    is_debug = logging.root.level <= logging.DEBUG
    ret = {"errors": []}
    # Create descriptions for each of the single errors
    ret["errors"] += [
        _single_handler(exc, is_debug)
        for exc in excs
        if not isinstance(exc, MultipleError)
    ]
    # Log single errors: critical > server-side error (>= 500) > warning
    for single_err in ret["errors"]:
        if single_err["type"] == CriticalError.__name__:
            _Logger.critical(single_err)
        elif exception_type_to_http_code(single_err["type"]) >= 500:
            _Logger.error(single_err)
        else:
            _Logger.warning(single_err)
        if is_debug:
            for k in ["type", "content", "traceback"]:
                _Logger.debug(single_err[k])
    # Unfold single errors within multiple errors (recursively)
    ret["errors"] += [
        exc_desc
        for multiple in excs
        if isinstance(multiple, MultipleError)
        for exc_desc in error_handler(*multiple.errors)["errors"]
    ]
    return ret
def make_errors_from_json(*errs_json) -> Exception:
    """Creates an exception object from multiple dict representation of errors.

    Each input dict must carry at least "type" and "content" keys; an
    optional "traceback" is appended to the message.

    Returns:
        Exception: Exception object encapsulating inputs (a MultipleError
        when more than one input is given).
    """
    err_objs = []
    for err_json in errs_json:
        assert all([rk in err_json for rk in ["type", "content"]])
        err_msg = err_json["content"]
        if "traceback" in err_json:
            err_msg += "\nOriginal traceback: " + err_json["traceback"]
        if err_json["type"] in globals():
            # Interpret error as one of our defined classes in this file
            err_objs.append(globals()[err_json["type"]](err_msg))
        else:
            # Unknown type name: wrap in UnhandledError, preserving the original info
            err_objs.append(
                UnhandledError(
                    "Original type: "
                    + err_json["type"]
                    + "\nOriginal content: "
                    + err_msg
                )
            )
    if len(err_objs) == 1:
        return err_objs[0]
    else:
        return MultipleError(*err_objs)
def raise_from_list(exceptions: List[Exception]):
    """Collapse all exceptions in list into a single exception.

    Args:
        exceptions (List[Exception]): List of exceptions.

    Raises:
        Exception: Single exception in list
        MultipleError: Capturing all exceptions in list
    """
    if not exceptions:
        return  # nothing to raise
    if len(exceptions) == 1:
        raise exceptions[0]
    raise MultipleError(*exceptions)
import os, io
from dataclasses import dataclass
from typing import Callable, List, Optional
from inspect import signature, Parameter
from typing import Any, List, Mapping
from rmlab_errors import ValueError
from enum import Enum
import aiohttp
class EnumStrings(Enum):
    """Enum base whose members can be looked up from a case-insensitive string value."""

    @classmethod
    def str_to_enum_value(cls, v: str):
        """Return the member whose value equals ``v.lower()`` (KeyError if absent)."""
        value_map = cls.__dict__["_value2member_map_"]
        return value_map[v.lower()]
class MethodType(EnumStrings):
    """All accepted HTTP request methods."""
    GET = "get"
    POST = "post"

class FileExtensionType(EnumStrings):
    """All accepted HTTP file extensions."""
    JSON = "json"
    CSV = "csv"

class PayloadType(EnumStrings):
    """All payload types."""
    NONE = "none"          # no arguments at all
    MATCH = "match"        # arguments interpolated into the URL path
    JSON = "json"          # JSON request body
    MULTIPART = "multipart"  # multipart/form-data (at least one file)

class AuthType(EnumStrings):
    """All auth types."""
    PUBLIC = "public"
    BASIC = "basic"
    APIKEY = "api-key"
    JWT = "jwt"
    JWT_EXPIRABLE = "jwt-expirable"
    WS = "ws"

class ResponseType(EnumStrings):
    """All HTTP response types."""
    NONE = "none"
    JSON = "json"

class CommunicationType(EnumStrings):
    """All Communication types."""
    SYNC = "sync"
    ASYNC = "async"
    WS = "ws"
class FileType:
    """Type to mark file arguments in endpoints."""
    pass

# Name -> class lookup for all types that may appear in endpoint signatures
JSONTypes = {cls.__name__: cls for cls in [bool, str, int, float, list, dict, FileType]}
# Type-name subsets whose arguments may carry a `limit`
_LimitableTypesString = {cls.__name__: cls for cls in [int, float, list, dict]}
_LimitableNumericTypesString = {cls.__name__: cls for cls in [int, float]}  # limit = max value
_LimitableContainerTypesString = {cls.__name__: cls for cls in [list, dict]}  # limit = max length
@dataclass
class Argument:
    """An endpoint argument."""

    arg_type: type              # accepted type, or its JSON name (resolved in __post_init__)
    default_value: Any = None   # PayloadArguments.REQUIRED_STR marks mandatory arguments
    limit: int = None           # max value (numbers) / max length (containers), if limitable

    def __post_init__(self):
        """Resolve string type names and reject limits on non-limitable types."""
        # Allow the type to be given by name, e.g. when loaded from JSON
        if isinstance(self.arg_type, str):
            self.arg_type = JSONTypes[self.arg_type]
        if (
            self.limit is not None
            and self.arg_type.__name__ not in _LimitableTypesString
        ):
            raise ValueError(
                f"Argument cannot be limited with `{self.limit}` if `{self.arg_type}` is not limitable"
            )
class PayloadArguments:
    """All argument properties (name/type/default value) of an endpoint."""

    # Sentinel default marking a mandatory argument
    REQUIRED_STR = "REQUIRED"
    # Reserved key carrying the id of an asynchronous operation
    ASYNC_ID_KEY = "operation_id"

    def __init__(self, **kwargs):
        """Store the given name -> Argument mapping."""
        assert all([isinstance(v, Argument) for v in kwargs.values()])
        self.args: Mapping[str, Argument] = {k: v for k, v in kwargs.items()}

    @classmethod
    def make_from_function(
        cls, fn: Optional[Callable] = None, limits: Optional[Mapping[str, int]] = None
    ):
        """Build arguments by introspecting the signature of ``fn``.

        Parameters without a default become REQUIRED; ``limits`` maps
        parameter names to their optional limit values.
        """
        args: Mapping[str, Argument] = dict()
        if fn is not None:
            sig = signature(fn)
            if limits is not None:
                assert all([lk in sig.parameters for lk in limits.keys()])
            for k, val in sig.parameters.items():
                default = (
                    PayloadArguments.REQUIRED_STR
                    if val.default == Parameter.empty
                    else val.default
                )
                limit = limits[k] if limits and k in limits else None
                args[k] = Argument(
                    arg_type=val.annotation, default_value=default, limit=limit
                )
        return cls(**args)

    @classmethod
    def make_from_json(cls, args: Mapping[str, Mapping[str, Any]]):
        """Build arguments from the JSON shape produced by :meth:`json`."""
        return cls(
            **{
                k: Argument(arg["type"], arg["default"], arg["limit"])
                for k, arg in args.items()
            }
        )

    @property
    def json(self) -> Mapping[str, Mapping[str, Any]]:
        """JSON-compatible representation (inverse of make_from_json)."""
        return {
            k: {
                "type": arg.arg_type.__name__,
                "default": arg.default_value,
                "limit": arg.limit,
            }
            for k, arg in self.args.items()
        }

    @property
    def keys(self) -> List[str]:
        """Argument names, in declaration order."""
        return list(self.args.keys())

    @property
    def types_str(self) -> List[str]:
        """Argument type names, aligned with :attr:`keys`."""
        return [arg.arg_type.__name__ for arg in self.args.values()]

    @property
    def defaults(self) -> List[Any]:
        """Argument default values, aligned with :attr:`keys`."""
        return [arg.default_value for arg in self.args.values()]

    @property
    def limits(self) -> List[int]:
        """Argument limits (None when unlimited), aligned with :attr:`keys`."""
        return [arg.limit for arg in self.args.values()]

    def limit_of(self, arg_name: str) -> int:
        """Return the limit of ``arg_name``, or None if unknown/unlimited."""
        if arg_name in self.args:
            return self.args[arg_name].limit
        else:
            return None
# All values below are in seconds.

# Maximum time to await for the completion of a sync operation
# Operations taking longer should be asynchronous
SyncResponseDefaultTimeout = 10

# Maximum time to await for the completion of a long-running async op
AsyncOperationDefaultTimeout = 3600

# Short sleep before poll, to return much sooner in case this op:
# * lasts much less than self._poll_seconds seconds
# * lasts more than the time to make the first poll call without sleep
# * less than _prepoll_wait_seconds
AsyncOperationDefaultPrePollWait = 1

# Poll status of poll operation each seconds
AsyncOperationDefaultPollInterval = 10

# Timeout to await first response of async op
# Should be small, as creation of async op id is fast
AsyncResponseDefaultTimeout = 5
class Endpoint:
    """Properties of an endpoint."""

    def __init__(
        self,
        *,
        resource: List[str],
        method: MethodType,
        payload: PayloadType,
        auth: AuthType,
        response: ResponseType,
        arguments: PayloadArguments,
        communication: CommunicationType = CommunicationType.SYNC,
        timeout: int = SyncResponseDefaultTimeout,
        id: str = "default-id",
    ):
        """Initializes an endpoint.

        Args:
            resource (List[str]): Resource endpoint, translate into '/'-separated resources.
            method (MethodType): Method type
            payload (PayloadType): Payload type
            auth (AuthType): Auth type
            response (ResponseType): Response type
            arguments (PayloadArguments): Payload arguments
            communication (CommunicationType, optional): Communication type. Defaults to CommunicationType.SYNC.
            timeout (int, optional): Timeout in seconds. Defaults to SyncResponseDefaultTimeout.
            id (str): Endpoint identifier

        Note:
            Each enum field accepts either the enum member or its string
            value (the latter when built from a JSON description).
        """
        assert isinstance(arguments, PayloadArguments)
        self.id = id
        self.resource: List[str] = resource
        self.method: MethodType = (
            method
            if not isinstance(method, str)
            else MethodType.str_to_enum_value(method)
        )
        self.payload: PayloadType = (
            payload
            if not isinstance(payload, str)
            else PayloadType.str_to_enum_value(payload)
        )
        self.auth: AuthType = (
            auth if not isinstance(auth, str) else AuthType.str_to_enum_value(auth)
        )
        self.communication: CommunicationType = (
            communication
            if not isinstance(communication, str)
            else CommunicationType.str_to_enum_value(communication)
        )
        self.response: ResponseType = (
            response
            if not isinstance(response, str)
            else ResponseType.str_to_enum_value(response)
        )
        self.arguments: PayloadArguments = arguments
        self.timeout: int = timeout
        # Arguments must be consistent with the declared payload type
        if self.payload == PayloadType.NONE:
            assert len(arguments.keys) == 0
        else:
            assert len(arguments.keys) > 0

    @classmethod
    def make_from_json(cls, ep_json: dict):
        """Creates an endpoint from a JSON-compatible dictionary.

        Args:
            ep_json (dict): JSON-compatible dictionary

        Returns:
            Endpoint: Instance
        """
        args_json = ep_json.pop("arguments")
        args = PayloadArguments.make_from_json(args_json)
        return cls(arguments=args, **ep_json)

    @property
    def address(self) -> str:
        """Returns endpoint address; MATCH payloads append `{arg}` path placeholders."""
        endpoint_addr = self.resource + (
            ["{" + k + "}" for k in self.arguments.keys]
            if self.payload == PayloadType.MATCH
            else []
        )
        return "/" + "/".join(endpoint_addr)

    @property
    def json(self) -> Mapping[str, Any]:
        """Returns a JSON-compatible dictionary of the endpoint."""
        return {
            "id": self.id,
            "resource": self.resource,
            "method": self.method.value,
            "payload": self.payload.value,
            "auth": self.auth.value,
            "communication": self.communication.value,
            "response": self.response.value,
            "arguments": self.arguments.json,
            "timeout": self.timeout,
        }
class AsyncEndpoint(Endpoint):
    """Properties of an asynchronous endpoint."""

    # JSON key under which the async-specific fields are nested
    ASYNC_INFO_KEY = "async_info"

    def __init__(
        self,
        *,
        prepoll_wait: int = AsyncOperationDefaultPrePollWait,
        poll_interval: int = AsyncOperationDefaultPollInterval,
        poll_endpoint_id: str,
        result_endpoint_id: str,
        **base_kwargs,
    ):
        """Initializes an asynchronous endpoint instance.

        Args:
            poll_endpoint_id (str): ID of endpoint where status is polled from
            result_endpoint_id (str): ID of endpoint where result is fetched from
            prepoll_wait (int, optional): Await seconds before entering the polling loop. Defaults to AsyncOperationDefaultPrePollWait.
            poll_interval (int, optional): Poll interval in seconds. Defaults to AsyncOperationDefaultPollInterval.
        """
        self.async_prepoll_wait: int = prepoll_wait
        self.async_poll_interval: int = poll_interval
        self.async_poll_endpoint_id: str = poll_endpoint_id
        self.async_result_endpoint_id: str = result_endpoint_id
        # Communication is forced to ASYNC for this subclass
        super(AsyncEndpoint, self).__init__(
            communication=CommunicationType.ASYNC, **base_kwargs
        )

    @classmethod
    def make_from_json(cls, aep_json: dict):
        """Creates an asynchronous endpoint from a JSON-compatible dictionary.

        Args:
            aep_json (dict): JSON-compatible dictionary

        Returns:
            AsyncEndpoint: Instance
        """
        async_info = aep_json.pop(AsyncEndpoint.ASYNC_INFO_KEY)
        # The JSON may redundantly carry communication="async"; drop it since
        # __init__ sets it unconditionally
        if "communication" in aep_json:
            assert aep_json["communication"] == "async"
            aep_json.pop("communication")
        args = aep_json.pop("arguments")
        pargs = PayloadArguments.make_from_json(args)
        return cls(arguments=pargs, **async_info, **aep_json)

    @property
    def json(self) -> Mapping[str, Any]:
        """Returns a JSON-compatible dictionary of the asynchronous endpoint."""
        return {
            **super(AsyncEndpoint, self).json,
            AsyncEndpoint.ASYNC_INFO_KEY: {
                "prepoll_wait": self.async_prepoll_wait,
                "poll_interval": self.async_poll_interval,
                "poll_endpoint_id": self.async_poll_endpoint_id,
                "result_endpoint_id": self.async_result_endpoint_id,
            },
        }
class DataRequestContextMultipart:
    """Context manager to parse a set of endpoint arguments,
    with at least one being a file, to be sent in the HTTP request
    as multipart data."""

    def __init__(self, endpoint: Endpoint, **kwargs):
        """Initializes context manager

        Args:
            endpoint (Endpoint): Endpoint instance.
            kwargs (Mapping[str,Any]): Endpoint arguments.
        """
        assert endpoint.payload == PayloadType.MULTIPART
        self._arguments: PayloadArguments = endpoint.arguments
        self._kwargs = kwargs
        self._opened_files: List[io.BufferedReader] = list()
        self.data: aiohttp.FormData = None

    def __enter__(self):
        """Parses endpoint arguments according to properties of endpoint

        Raises:
            ValueError: If mandatory argument has not been provided.
            FileNotFoundError: If a file in arguments does not exist.

        Returns:
            DataRequestContextMultipart: Instance.
        """
        self.data = aiohttp.FormData()
        for key, default, type_str in zip(
            self._arguments.keys, self._arguments.defaults, self._arguments.types_str
        ):
            if type_str == "FileType":
                # File arguments are always mandatory
                assert default == PayloadArguments.REQUIRED_STR
                if key not in self._kwargs:
                    raise ValueError(f"File argument `{key}` is required")
                filename = self._kwargs[key]
                if not os.path.exists(filename):
                    # Fixed: the f-string had lost its placeholder, so the
                    # error never said WHICH file was missing.
                    raise FileNotFoundError(f"File `{filename}` does not exist")
                file = open(filename, "rb")
                # Keep the handle so __exit__ can close it
                self._opened_files.append(file)
                self.data.add_field(name=key, value=file, filename=filename)
            else:
                type_class = JSONTypes[type_str]
                if type_class == int:
                    # ints are sent as strings — presumably because form-data
                    # field values must be str/bytes-like; confirm against aiohttp
                    type_class = str
                if default != PayloadArguments.REQUIRED_STR and key not in self._kwargs:
                    v = default
                    self.data.add_field(name=key, value=v)
                elif key in self._kwargs:
                    v = type_class(self._kwargs[key])
                    self.data.add_field(name=key, value=v)
                else:
                    raise ValueError(f"Argument `{key}` is required")
        return self

    def __exit__(self, exc_ty, exc_val, tb):
        """Closes file instances opened during enter."""
        for f in self._opened_files:
            f.close()
class DataRequestContext:
    """Context manager to parse a set of endpoint arguments,
    to be sent in the HTTP request as address- or json-arguments."""

    def __init__(self, endpoint: Endpoint, **kwargs):
        """Initializes context manager

        Args:
            endpoint (Endpoint): Endpoint instance.
            kwargs (Mapping[str,Any]): Endpoint arguments.
        """
        assert endpoint.payload != PayloadType.MULTIPART
        self._arguments: PayloadArguments = endpoint.arguments
        self._kwargs = kwargs
        self._opened_files: List[io.BufferedReader] = list()
        self.data: Mapping[str, Any] = dict()

    def __enter__(self):
        """Parses endpoint arguments according to properties of endpoint

        Raises:
            ValueError: If mandatory argument has not been provided, or if a
                provided value exceeds its declared limit.

        Returns:
            DataRequestContext: Instance.
        """
        for key, default, type_str, limit in zip(
            self._arguments.keys,
            self._arguments.defaults,
            self._arguments.types_str,
            self._arguments.limits,
        ):
            type_class = JSONTypes[type_str]
            if default != PayloadArguments.REQUIRED_STR:
                # Optional argument: fall back to default, and skip the type
                # cast when the caller passed the default value verbatim
                if key not in self._kwargs:
                    self.data[key] = default
                elif self._kwargs[key] == default:
                    self.data[key] = self._kwargs[key]
                else:
                    self.data[key] = type_class(self._kwargs[key])
            elif key in self._kwargs:
                self.data[key] = type_class(self._kwargs[key])
            else:
                raise ValueError(f"Argument `{key}` is required")
            # Check arguments is within limits, if any: limits cap the value
            # for numeric types and the element count for containers
            if limit is not None and self.data[key] is not None:
                if type_str in _LimitableNumericTypesString and self.data[key] > limit:
                    raise ValueError(f"Argument `{key}` exceeds limit `{limit}`")
                elif (
                    type_str in _LimitableContainerTypesString
                    and len(self.data[key]) > limit
                ):
                    raise ValueError(
                        f"Argument `{key}` number of elements `{len(self.data[key])}` exceeds limit `{limit}`"
                    )
        return self

    def __exit__(self, exc_ty, exc_val, tb):
        # Nothing to release: no files are opened for non-multipart payloads
        pass

    @classmethod
    def make_data(cls, endpoint: Endpoint, **kwargs) -> Mapping[str, Any]:
        """Returns parsed data as a JSON-compatible dictionary.

        Args:
            endpoint (Endpoint): Endpoint instance.
            kwargs (Mapping[str,Any]): Endpoint arguments.

        Returns:
            Mapping[str, Any]: The parsed argument dictionary.
        """
        with cls(endpoint, **kwargs) as drc:
            return drc.data
from typing import Any, Mapping, Optional, Union
from rmlab_errors import ValueError
from rmlab_http_client import (
Endpoint,
AsyncEndpoint,
)
_EndpointType = Union[Endpoint, AsyncEndpoint]
class Cache:
    """Singleton cache to store credentials and endpoints,
    meant to be initialized once.

    All state is kept in class attributes, so every caller shares the
    same credential and endpoint maps.
    """

    _credentials: Mapping[str, Any] = None
    _endpoints: Mapping[str, _EndpointType] = None

    @classmethod
    def get_credential(cls, cred_key: str) -> Optional[str]:
        """Returns single credential from key if exists

        Args:
            cred_key (str): Credential key.

        Returns:
            Optional[str]: Credential value, or None (implicitly) when unset.
        """
        if cls._credentials is not None and cred_key in cls._credentials:
            return cls._credentials[cred_key]

    @classmethod
    def set_credential(cls, cred_key: str, cred_value: str):
        """Store (or overwrite) a single credential, creating the map lazily."""
        if cls._credentials is None:
            cls._credentials = dict()
        cls._credentials[cred_key] = cred_value

    @classmethod
    def get_endpoint(cls, endpoint_id: str) -> Union[Endpoint, AsyncEndpoint]:
        """Return specific endpoint from its ID.

        Args:
            endpoint_id (str): Endpoint ID

        Raises:
            RuntimeError: If endpoint has not been initialized.

        Returns:
            Union[Endpoint, AsyncEndpoint]: Endpoint instance.
        """
        if cls._endpoints is None or endpoint_id not in cls._endpoints:
            raise RuntimeError(f"Undefined endpoint `{endpoint_id}`")
        return cls._endpoints[endpoint_id]

    @classmethod
    def add_endpoints(cls, **kwargs):
        """Add all endpoints in argument.

        Each keyword is an endpoint id mapping to its JSON description; the
        "communication" field decides which Endpoint class is built.

        Raises:
            ValueError: If endpoint has unknown communication type.
        """
        if cls._endpoints is None:
            cls._endpoints = dict()
        for ep_id, ep_json in kwargs.items():
            if ep_json["communication"] == "sync":
                cls._endpoints[ep_id] = Endpoint.make_from_json(ep_json)
            elif ep_json["communication"] == "async":
                cls._endpoints[ep_id] = AsyncEndpoint.make_from_json(ep_json)
            else:
                raise ValueError(f"Unknown endpoint communication type `{ep_json}`")
# RMM: RimWorld Mod Manager
Do you dislike DRM-based platforms but love RimWorld and its mods? RMM is a cross-platform mod manager that allows you to download, update, auto-sort, and configure mods for the game without relying on the Steam consumer client. RMM has a keyboard-based interface that is easy to use and will be familiar to Linux users and developers.
RMM v1.0 supports Windows, Linux, and MacOS.
## Prerequisites
To use RMM you need:
- SteamCMD installed and in your path. (Linux/Mac Only)
- Set RMM_PATH to game path if game is installed to a not default location.
- Python 3.9+
# Installation for Windows
1. Install latest Python 3 release from `https://www.python.org/downloads/windows/`
- Ensure 'add to PATH' is checked / enabled during installation.
2. Open 'cmd' with Administrator privileges and type `python -m pip install --user rmm-spoons`
- Use with `python -m rmm`
3. (Optional) Add `C:\Users\[username]\AppData\Roaming\Python\[version]\Scripts\` to PATH.
- Use with `rmm`
# Installation for MacOS:
1. Install Python3 with brew.
2. `pip3 install --user rmm-spoons`
3. Use with `python3 -m rmm`
4. Add python bin directory to your path:
``` sh
echo "export PATH=\"$PATH:$HOME/Library/Python/$(python3 --version | awk '{split($2,a,".") ; print a[1] "." a[2] }')/bin\"" >> ~/.zshrc
```
5. Use with `rmm`
## Upgrading on MacOS
Please periodically update RMM with the following command:
`pip3 install --upgrade rmm-spoons`
# Installation for Arch Linux
RMM has an AUR package 'rmm'. The package brings in all dependencies, including steamcmd, and can be installed with makepkg and git or an AUR helper as shown below. No other steps are required:
## Makepkg
``` sh
mkdir -p ~/build ; cd ~/build
git clone https://aur.archlinux.org/rmm.git
cd rmm
makepkg -si
```
## Yay (AUR helper)
``` sh
yay -S rmm
```
# Installation for other Linux distributions (via PyPi)
## 1. Installing SteamCMD on Ubuntu
``` sh
sudo su -c 'apt update && apt upgrade && apt install software-properties-common && add-apt-repository multiverse && dpkg --add-architecture i386 && apt update && apt install lib32gcc1 steamcmd' ;
echo 'export PATH="$PATH:/usr/games"' >> ~/.bashrc ;
exec $SHELL
```
## 1. Installing SteamCMD on Debian
``` sh
sudo su -c 'apt update && apt upgrade && apt install software-properties-common && add-apt-repository non-free && dpkg --add-architecture i386 && apt update && apt install steamcmd' ;
echo 'export PATH="$PATH:/usr/games"' >> ~/.bashrc ;
exec $SHELL
```
## 2. Adding .local/bin to your PATH
RMM can be directly accessed with command `rmm`. In order for this to work, you need to add `~/.local/bin` to your PATH variable, otherwise, your terminal will not find the `rmm` script. If you notice that you cannot run `rmm` after installation, try the following:
``` sh
echo 'export PATH="$PATH:$HOME/.local/bin"' >> ~/.bashrc ; exec $SHELL
```
Alternatively, RMM can always be called with:
``` sh
python -m rmm
```
## 3. Installing package from PIP
``` sh
python -m pip install --user rmm-spoons
```
## Upgrading with PIP
Please periodically update RMM with the following command:
`python -m pip install --user --upgrade rmm-spoons`
# Configuration
## Set RMM_PATH (Optional)
If RimWorld is installed in a directory other than the default ones, you should set the RMM_PATH variable to your game directory for convenience.
Set it permanently in your `bashrc` or `zshrc` files:
``` sh
# Note please update this path to your actual game or mod directory
echo 'export RMM_PATH="$HOME/your/game/path"' >> ~/.bashrc ;
exec $SHELL
```
Temporarily set it during your shell session:
``` sh
export RMM_PATH="~/PATHTOGAME/game/Mods"
```
# Installation for Development (Developers)
Clone repository and install with pip.
```
mkdir -p ~/build
git clone https://github.com/Spoons/rmm.git ~/build/rmm
pip install --user ~/build/rmm
```
# Usage
```
RimWorld Mod Manager
Usage:
rmm [options] config
rmm [options] export [-e]|[-d] <file>
rmm [options] import <file>
rmm [options] enable [-a]|[-f file]|<packageid>|<term>
rmm [options] disable [-a]|[-f file]|<packageid>|<term>
rmm [options] remove [-a]|[-f file]|<packageid>|<term>
rmm [options] list
rmm [options] query [<term>]
rmm [options] search <term>
rmm [options] sort
rmm [options] sync <name>
rmm [options] update
rmm [options] verify
rmm -h | --help
rmm -v | --version
Operations:
config Sort and enable/disable mods with ncurses
export Save mod list to file.
import Install a mod list from a file.
list List installed mods.
query Search installed mods.
remove Remove installed mod.
search Search Workshop.
sort Auto-sort your modlist
sync Install or update a mod.
update Update all mods from Steam.
verify Checks that enabled mods are compatible
enable Enable mods
disable Disable mods
order Lists mod order
Parameters
term Name, author, steamid
file File path for a mod list
name Name of mod.
Flags
-a Performs operation on all mods
-d Export disabled mods to modlist.
-e Export enabled mods to modlist.
-f Specify mods in a mod list
Options:
-p --path DIR RimWorld path.
-w --workshop DIR Workshop Path.
-u --user DIR User config path.
Environment Variables:
RMM_PATH Folder containing Mods
RMM_WORKSHOP_PATH Folder containing Workshop mods (optional)
RMM_USER_PATH Folder containing saves and config
Pathing Preference:
CLI Argument > Environment Variable > Defaults
Tip:
You can use enable, disable, and remove with no
argument to select from all mods.
```
## How To
List installed packages:
```
rmm list
```
Search workshop packages:
```
rmm search term
```
Search locally installed mods
```
rmm query term
```
Install package:
```
rmm sync rimhud
```
Removing a package:
```
rmm remove fuzzy
```
Removing all / a range packages:
``` sh
rmm remove
# all packages will be listed. specify your desired range at the interactive prompt.
```
Saving a mod list
```
rmm export ~/modlist.txt
```
Install mod list:
```
rmm import ~/modlist.txt
```
Update all packages:
```
rmm update
```
Auto sort mods:
``` sh
rmm sort
```
Manually sort mods:
``` sh
rmm config
```
Show mod load order:
``` sh
rmm order
```
### Tips
1. Duplicating a mod setup to a new installation:
``` sh
rmm -p ~/path-to-game export ~/modlist.txt
rmm -p ~/path-to-game import ~/modlist.txt
```
2. It is recommended to auto sort your mods after installation of a mod or modlist.
# Related Projects
- [rwm](https://github.com/AOx0/rwm): Rust rewrite of RMM.
# Contributing
If you would like to contribute your time or efforts towards the project, you are welcome to and your efforts will be appreciated. Please format any code changes through python-black.
# License
This project is licensed under the GPLv3 License - see the [LICENSE](LICENSE) file for details
| /rmm-spoons-1.0.15.tar.gz/rmm-spoons-1.0.15/README.md | 0.678433 | 0.691484 | README.md | pypi |
from contextlib import contextmanager
import re
import shutil
import subprocess
import sys
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Generator, Optional, cast, Union, List
from xml.dom import minidom
def platform() -> Optional[str]:
    """Return the host platform identifier as reported by ``sys.platform``."""
    current = sys.platform
    return current
def execute(cmd) -> Generator[str, None, None]:
    """Run *cmd* in a shell and yield its output lines as they are produced.

    stdout and stderr are merged into a single text stream.

    Args:
        cmd: Shell command line to execute.

    Yields:
        str: One line of combined stdout/stderr output (newline included).

    Raises:
        subprocess.CalledProcessError: If the command exits with a non-zero
            status (raised after all output has been yielded).
    """
    # NOTE(fix): the previous version looped with iter(readline, b"") — a
    # bytes sentinel that never matches in text mode — and broke as soon as
    # poll() reported exit, which could drop output still buffered in the
    # pipe.  Iterating the stream reads until true EOF instead.
    with subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,  # text=True already implies universal newlines
        close_fds=True,
        shell=True,
    ) as proc:
        for line in proc.stdout:
            yield line
    # Popen.__exit__ waits for the process, so returncode is set here.
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(proc.returncode, cmd)
def run_sh(cmd: str) -> str:
    """Run *cmd* in a shell and return its stripped stdout.

    Raises:
        subprocess.CalledProcessError: On a non-zero exit status.
    """
    output = subprocess.check_output(cmd, text=True, shell=True)
    return output.strip()
def copy(source: Path, destination: Path, recursive: bool = False):
    """Copy *source* to *destination*.

    With ``recursive=True`` the source is treated as a directory tree;
    otherwise a single file is copied (metadata included, symlinks followed).
    """
    if not recursive:
        shutil.copy2(source, destination, follow_symlinks=True)
    else:
        shutil.copytree(source, destination)
def move(source: Path, destination: Path):
    """Move (rename) *source* to *destination*, like ``mv``."""
    shutil.move(source, destination)
def remove(dest: Path):
    """Recursively delete the directory tree rooted at *dest*."""
    shutil.rmtree(dest)
def list_set_intersection(a: list, b: list) -> list:
    """Return the intersection of *a* and *b* (order unspecified, duplicates removed)."""
    return list(set(a).intersection(b))
def list_loop_intersection(a: list, b: list) -> list:
    """Return elements of *a* also present in *b*, preserving *a*'s order and duplicates."""
    result = []
    for item in a:
        if item in b:
            result.append(item)
    return result
def list_loop_exclusion(a: list, b: list) -> list:
    """Return elements of *a* not present in *b*, preserving *a*'s order and duplicates."""
    result = []
    for item in a:
        if item not in b:
            result.append(item)
    return result
def list_grab(element: str, root: ET.Element) -> Optional[List[str]]:
    """Return the text of each ``<li>`` child under *element*, or None if absent."""
    node = root.find(element)
    if node is None:
        return None
    return [li.text for li in node.findall("li")]
def element_grab(element: str, root: ET.Element) -> Optional[str]:
    """Return the text content of child *element*, or None if it is absent."""
    node = root.find(element)
    if node is None:
        return None
    return node.text
def et_pretty_xml(root: ET.Element) -> str:
    """Serialize *root* to two-space-indented XML.

    All whitespace runs are stripped from the serialized tree first, so the
    pretty-printer starts from a canonical, whitespace-free document.
    """
    raw = ET.tostring(root, "utf-8").decode()
    collapsed = re.sub(r"[\n\t\s]*", "", raw)
    return minidom.parseString(collapsed).toprettyxml(indent="  ", newl="\n")
def sanitize_path(path: Union[str, Path]) -> Path:
    """Normalize *path* into an expanded :class:`Path`.

    On Windows any double quotes are stripped (paths pasted from a shell are
    often quoted); ``~`` is expanded on every platform.
    """
    if isinstance(path, Path):
        path = str(path)
    if sys.platform == "win32":
        # NOTE(fix): str.replace returns a new string; the previous version
        # discarded the result, so quotes were never actually removed.
        path = path.replace('"', "")
    return Path(path).expanduser()
import curses
class WindowSizeException(Exception):
    # Raised when the terminal is too small (< 40x15) to draw the menu.
    pass
class AbortModOrderException(Exception):
    # Raised when the user quits the menu ('q') without saving changes.
    pass
def multiselect_order_menu(stdscr, data):
    """Interactive curses menu for reordering and enabling/disabling mods.

    Key bindings: arrow up/down move the cursor, 'j'/'k' move the selected
    mod down/up in the load order, Enter toggles enabled state, 'c' commits,
    'q' aborts.

    Args:
        stdscr: Curses standard screen (as supplied by ``curses.wrapper``).
        data: Iterable of mod objects exposing ``.packageid`` and ``.enabled``.

    Returns:
        list: The edited ``(packageid, enabled)`` tuples in the user's chosen
        order, returned when the user presses 'c'.

    Raises:
        WindowSizeException: If the terminal is smaller than 40x15.
        AbortModOrderException: If the user presses 'q'.
    """
    # Work on plain (packageid, enabled) tuples instead of the mod objects.
    data = [ ( n.packageid, n.enabled ) for n in data ]
    k = 0  # last key pressed; 0 on the first pass so no handler branch fires
    # Clear and refresh the screen for a blank canvas
    stdscr.clear()
    stdscr.refresh()
    # Start colors in curses
    curses.start_color()
    curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
    curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
    selection = 0  # index of the highlighted row within `data`
    window_height, window_width = stdscr.getmaxyx()
    # Rows visible in the scrolling list; 10 lines reserved for title/status.
    scroll_window_height = window_height - 10
    scroll_window_position = 0  # index of the first visible row
    if window_width < 40 or window_height < 15:
        raise WindowSizeException()
    def check_bounds(location, data, delta_position):
        # True if moving `location` by the sign of `delta_position` stays in range.
        if delta_position > 0:
            if location < len(data) - 1:
                return True
        if delta_position < 0:
            if location > 0:
                return True
        return False
    def move_selection(selected, l, d):
        # Move the cursor one step in direction `d`, clamped to the list bounds.
        if d > 0 and check_bounds(selected, l, d):
            return selected + 1
        if d < 0:
            if selected > 0 and check_bounds(selected, l, d):
                return selected - 1
        return selected
    def list_swap(data, offset, pos):
        # Swap data[offset] with its neighbor data[offset + pos] in place.
        temp = data[offset], data[offset + pos]
        data[offset + pos] = temp[0]
        data[offset] = temp[1]
    while True:
        # Initialization
        stdscr.clear()
        window_height, window_width = stdscr.getmaxyx()
        # --- Handle the key read at the end of the previous iteration ---
        if k == curses.KEY_DOWN:
            selection = move_selection(selection, data, 1)
        elif k == curses.KEY_UP:
            selection = move_selection(selection, data, -1)
        elif k == ord("j"):
            # Move the selected mod one position down in the load order.
            if check_bounds(selection, data, 1):
                list_swap(data, selection, 1)
                selection = move_selection(selection, data, 1)
        elif k == ord("k"):
            # Move the selected mod one position up in the load order.
            if check_bounds(selection, data, -1):
                list_swap(data, selection, -1)
                selection = move_selection(selection, data, -1)
        elif k == curses.KEY_ENTER or k == 10 or k == 13:
            # Toggle enabled flag, then advance to the next row.
            data[selection] = (data[selection][0], not data[selection][1])
            selection = move_selection(selection, data, 1)
        elif k == ord("c"):
            # Commit: return the edited (packageid, enabled) list.
            return data
        elif k == ord("q"):
            raise AbortModOrderException()
        # --- Keep the selection within the visible scroll window ---
        if scroll_window_position < len(data) - 1 and selection > (
            scroll_window_position + scroll_window_height - 1
        ):
            scroll_window_position += 1
        if scroll_window_position > 0 and selection < (scroll_window_position):
            scroll_window_position -= 1
        statusbarstr = "Press 'c' to accept changes, 'q' to exit without saving, 'Enter' to enable/disable, 'j/k' to change order."
        # Width of the longest package id, used to center the list horizontally.
        max_length = 0
        for n in data:
            if len(n[0]) > max_length:
                max_length = len(n[0])
        scroll_centering_value = (
            scroll_window_height if scroll_window_height < len(data) else len(data)
        )
        start_y = int(
            (window_height // 2)
            - int(scroll_centering_value) // 2
            - int(scroll_centering_value) % 2
        )
        start_x = int((window_width // 2) - int(max_length) // 2)
        # --- Draw the visible slice of the mod list ---
        # NOTE(review): the loop variable `k` shadows the key variable above;
        # harmless only because getch() reassigns `k` before the next
        # iteration's key handling runs.
        for i, (k, v) in enumerate(
            data[scroll_window_position : scroll_window_position + scroll_window_height]
        ):
            if v == False:
                stdscr.addstr(start_y, start_x - 3, "-")
            if v == True:
                stdscr.addstr(start_y, start_x - 3, "+")
            if selection == i + scroll_window_position:
                stdscr.attron(curses.A_STANDOUT)
            stdscr.addstr(start_y, start_x, k)
            if selection == i + scroll_window_position:
                stdscr.attroff(curses.A_STANDOUT)
            start_y += 1
        # Rendering some text
        title = "RMM: Mod Sorting Display"
        stdscr.addstr(0, 0, title, curses.color_pair(1))
        # Render status bar
        stdscr.attron(curses.color_pair(3))
        stdscr.addstr(window_height - 1, 0, statusbarstr[0 : window_width - 1])
        if window_width > len(statusbarstr):
            stdscr.addstr(
                window_height - 1,
                len(statusbarstr),
                " " * (window_width - len(statusbarstr) - 1),
            )
        stdscr.attroff(curses.color_pair(3))
        # Refresh the screen
        stdscr.refresh()
        # Wait for next input
        k = stdscr.getch()
def main():
    """Demo entry point: run the menu on sample data and print the result.

    NOTE(review): multiselect_order_menu reads ``.packageid``/``.enabled``
    attributes from its items; these bare tuples do not provide them, so
    this demo would fail at the first line of the menu — confirm intent.
    """
    sample = [
        ("jaxe.rimhud", True),
        ("fluffies.desirepaths", False),
        ("i eat bagels", False),
        ("chonky.cats", False),
    ]
    result = curses.wrapper(multiselect_order_menu, sample)
    print(result)


if __name__ == "__main__":
    main()
from pathlib import Path
from typing import Optional, List
import rmm.util as util
class PathFinder:
    """Locate RimWorld game, workshop, and config directories.

    Searches either a caller-supplied root (``find_game`` / ``find_workshop``
    / ``find_config``) or a per-platform list of well-known install
    locations (``find_*_defaults``).
    """

    # (path, sys.platform) pairs of well-known install locations.
    DEFAULT_GAME_PATHS = [
        ("~/GOG Games/RimWorld", "linux"),
        ("~/games/rimworld", "linux"),
        ("~/.local/share/Steam/steamapps/common/RimWorld", "linux"),
        ("/Applications/RimWorld.app/Mods", "darwin"),
        ("~/Library/Application Support/Steam/steamapps/common/RimWorld", "darwin"),
        ("C:/GOG Games/RimWorld/Mods", "win32"),
        ("C:/Program Files (x86)/Steam/steamapps/common/RimWorld", "win32"),
        ("C:/Program Files/Steam/steamapps/common/RimWorld", "win32"),
    ]
    DEFAULT_WORKSHOP_PATHS = [
        ("~/.local/share/Steam/steamapps/workshop/content/294100", "linux"),
        (
            "~/Library/Application Support/Steam/steamapps/workshop/content/294100",
            "darwin",
        ),
        (
            "C:/Program Files (x86)/Steam/steamapps/common/workshop/content/294100",
            "win32",
        ),
        ("C:/Program Files/Steam/steamapps/common/workshop/content/294100", "win32"),
    ]
    DEFAULT_CONFIG_PATHS = [
        ("~/Library/Application Support/RimWorld/", "darwin"),
        ("~/.config/unity3d/Ludeon Studios/RimWorld by Ludeon Studios", "linux"),
        ("~/AppData/LocalLow/Ludeon Studios/RimWorld by Ludeon Studios", "win32"),
    ]

    @staticmethod
    def _is_game_dir(p: Path) -> bool:
        """True if *p* is a ``Mods`` folder whose parent contains Version.txt."""
        if p.name == "Mods":
            for n in p.parent.iterdir():
                if n.name == "Version.txt":
                    return True
        return False

    @staticmethod
    def _is_workshop_dir(p: Path) -> bool:
        """True if *p* ends in ``.../workshop/content/294100`` (RimWorld's Steam app id)."""
        return (
            p.name == "294100"
            and p.parts[-2] == "content"
            and p.parts[-3] == "workshop"
        )

    @staticmethod
    def _is_config_dir(p: Path) -> bool:
        """True if *p* contains both a ``Config`` and a ``Saves`` entry."""
        files_to_find = ["Config", "Saves"]
        child_names = [f.name for f in p.iterdir()]
        for target_name in files_to_find:
            # NOTE(fix): `target_name not in ...` replaces the non-idiomatic
            # `not target_name in ...`; behavior is identical.
            if target_name not in child_names:
                return False
        return True

    @staticmethod
    def _search_root(p: Path, f) -> Optional[Path]:
        """Walk *p* recursively; return the first directory for which *f* is true."""
        try:
            p = util.sanitize_path(p)
            for n in p.glob("**/"):
                if f(n):
                    return n
            return None
        except FileNotFoundError:
            return None

    @staticmethod
    def get_workshop_from_game_path(p: Path):
        """Derive the Steam workshop directory from a game path under ``steamapps``.

        Returns ``None`` (implicitly) when *p* is not inside a ``steamapps``
        folder.
        """
        p = util.sanitize_path(p)
        for index, dirname in enumerate(p.parts):
            if dirname == "steamapps":
                return Path(*list(p.parts[0:index])) / Path(
                    "steamapps/workshop/content/294100"
                )

    @staticmethod
    def _search_defaults(defaults: List[tuple], f) -> Optional[Path]:
        """Try each (path, platform) default matching the current platform with *f*.

        NOTE(fix): the parameter was annotated ``List[str]`` although every
        DEFAULT_* entry is a (path, platform) tuple; annotation corrected.
        """
        platform = util.platform()
        for path in [n[0] for n in defaults if n[1] == platform]:
            path = util.sanitize_path(path)
            if path := f(Path(path)):
                return path
        return None

    @classmethod
    def find_game(cls, p: Path) -> Optional[Path]:
        """Search under *p* for a RimWorld game ``Mods`` directory."""
        return cls._search_root(p, cls._is_game_dir)

    @classmethod
    def find_workshop(cls, p: Path) -> Optional[Path]:
        """Search under *p* for a RimWorld Steam workshop directory."""
        return cls._search_root(p, cls._is_workshop_dir)

    @classmethod
    def find_config(cls, p: Path) -> Optional[Path]:
        """Search under *p* for a RimWorld config directory (Config + Saves)."""
        return cls._search_root(p, cls._is_config_dir)

    @classmethod
    def find_game_defaults(cls) -> Optional[Path]:
        """Search the platform's default install locations for the game."""
        return cls._search_defaults(cls.DEFAULT_GAME_PATHS, cls.find_game)

    @classmethod
    def find_workshop_defaults(cls) -> Optional[Path]:
        """Search the platform's default locations for the workshop directory."""
        return cls._search_defaults(cls.DEFAULT_WORKSHOP_PATHS, cls.find_workshop)

    @classmethod
    def find_config_defaults(cls) -> Optional[Path]:
        """Search the platform's default locations for the config directory."""
        return cls._search_defaults(cls.DEFAULT_CONFIG_PATHS, cls.find_config)
import torch.nn as nn
from .basic_layers import ResidualBlock
class AttentionModule(nn.Module):
    """Residual attention module: a trunk branch gated by a soft mask branch.

    The mask branch downsamples three times (max-pool + residual blocks),
    upsamples back with skip connections, and produces sigmoid weights that
    modulate the trunk output as ``(1 + mask) * trunk``.

    Args:
        in_channels: Channels of the input feature map.
        out_channels: Channels produced by the residual blocks.
        size1, size2, size3: Spatial sizes restored at each upsampling stage
            (size1 = input resolution, size3 = resolution after two poolings).
    """

    def __init__(self, in_channels, out_channels, size1, size2, size3):
        super(AttentionModule, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)
        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(out_channels, out_channels),
        )
        # NOTE(fix): forward() references self.mpool1/2/3, but only a single
        # self.mpool used to be defined here, so forward() raised
        # AttributeError on first use.  Define the three pools it needs.
        self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.mpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.mpool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.softmax1_blocks = ResidualBlock(in_channels, out_channels)
        self.skip1_connection_residual_block = ResidualBlock(in_channels, out_channels)
        self.softmax2_blocks = ResidualBlock(in_channels, out_channels)
        self.skip2_connection_residual_block = ResidualBlock(in_channels, out_channels)
        self.softmax3_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels),
        )
        self.interpolation3 = nn.UpsamplingBilinear2d(size=size3)
        self.softmax4_blocks = ResidualBlock(in_channels, out_channels)
        self.interpolation2 = nn.UpsamplingBilinear2d(size=size2)
        self.softmax5_blocks = ResidualBlock(in_channels, out_channels)
        self.interpolation1 = nn.UpsamplingBilinear2d(size=size1)
        self.softmax6_blocks = nn.Sequential(
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, bias=False),
            nn.Sigmoid(),
        )
        self.last_blocks = ResidualBlock(in_channels, out_channels)

    def forward(self, x):
        """Return the attention-modulated feature map ``(1 + mask) * trunk``."""
        x = self.first_residual_blocks(x)
        out_trunk = self.trunk_branches(x)
        # Mask branch: three pooled stages with skip connections...
        out_mpool1 = self.mpool1(x)
        out_softmax1 = self.softmax1_blocks(out_mpool1)
        out_skip1_connection = self.skip1_connection_residual_block(out_softmax1)
        out_mpool2 = self.mpool2(out_softmax1)
        out_softmax2 = self.softmax2_blocks(out_mpool2)
        out_skip2_connection = self.skip2_connection_residual_block(out_softmax2)
        out_mpool3 = self.mpool3(out_softmax2)
        out_softmax3 = self.softmax3_blocks(out_mpool3)
        # ...then three upsampling stages fused with the skips.
        out_interp3 = self.interpolation3(out_softmax3)
        out = out_interp3 + out_skip2_connection
        out_softmax4 = self.softmax4_blocks(out)
        out_interp2 = self.interpolation2(out_softmax4)
        out = out_interp2 + out_skip1_connection
        out_softmax5 = self.softmax5_blocks(out)
        out_interp1 = self.interpolation1(out_softmax5)
        out_softmax6 = self.softmax6_blocks(out_interp1)
        # Gate the trunk with (1 + mask) so the mask refines rather than
        # suppresses the trunk features.
        out = (1 + out_softmax6) * out_trunk
        return self.last_blocks(out)
import torch
import torch.nn as nn
class PreActivateDoubleConv(nn.Module):
    """Two 3x3 convolutions in pre-activation order (BN -> ReLU -> Conv)."""

    def __init__(self, in_channels, out_channels):
        super(PreActivateDoubleConv, self).__init__()
        stages = [
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
        ]
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the pre-activated double convolution."""
        return self.double_conv(x)
class PreActivateResUpBlock(nn.Module):
    """Decoder block: bilinear upsample, concat skip, pre-activated residual conv.

    Output is ``double_conv(x) + ch_avg(x)`` where ``x`` is the concatenated
    feature map and ``ch_avg`` is a 1x1 projection matching the channel count.
    """

    def __init__(self, in_channels, out_channels):
        super(PreActivateResUpBlock, self).__init__()
        # NOTE(fix): ch_avg used to be assigned twice with identical
        # definitions; the first assignment was dead (immediately
        # overwritten under the same attribute name), so it is removed.
        self.up_sample = nn.Upsample(
            scale_factor=2, mode="bilinear", align_corners=True
        )
        self.ch_avg = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
        self.double_conv = PreActivateDoubleConv(in_channels, out_channels)

    def forward(self, down_input, skip_input):
        """Upsample *down_input*, concat with *skip_input*, apply residual conv."""
        x = self.up_sample(down_input)
        x = torch.cat([x, skip_input], dim=1)
        return self.double_conv(x) + self.ch_avg(x)
class PreActivateResBlock(nn.Module):
    """Encoder block: pre-activated residual double conv, then 2x max-pool.

    Returns both the pooled map (for the next encoder stage) and the
    pre-pool map (as the decoder's skip connection).
    """

    def __init__(self, in_channels, out_channels):
        super(PreActivateResBlock, self).__init__()
        # 1x1 projection so the identity branch matches out_channels.
        self.ch_avg = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
        self.double_conv = PreActivateDoubleConv(in_channels, out_channels)
        self.down_sample = nn.MaxPool2d(2)

    def forward(self, x):
        """Return ``(pooled, skip)`` feature maps."""
        shortcut = self.ch_avg(x)
        features = self.double_conv(x) + shortcut
        return self.down_sample(features), features
class DoubleConv(nn.Module):
    """Two (Conv 3x3 -> BN -> ReLU) stages in post-activation order."""

    def __init__(self, in_channels, out_channels):
        super(DoubleConv, self).__init__()

        def stage(cin, cout):
            # One Conv -> BN -> ReLU unit.
            return [
                nn.Conv2d(cin, cout, kernel_size=3, padding=1),
                nn.BatchNorm2d(cout),
                nn.ReLU(inplace=True),
            ]

        self.double_conv = nn.Sequential(
            *stage(in_channels, out_channels), *stage(out_channels, out_channels)
        )

    def forward(self, x):
        """Apply both convolution stages."""
        return self.double_conv(x)
class ResBlock(nn.Module):
    """Encoder residual block: DoubleConv + 1x1 shortcut, ReLU, then 2x max-pool.

    Returns ``(pooled, skip)``.
    """

    def __init__(self, in_channels, out_channels):
        super(ResBlock, self).__init__()
        # 1x1 shortcut projecting the input to out_channels.
        self.downsample = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
        self.double_conv = DoubleConv(in_channels, out_channels)
        self.down_sample = nn.MaxPool2d(2)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Return ``(pooled, skip)`` feature maps."""
        shortcut = self.downsample(x)
        fused = self.relu(self.double_conv(x) + shortcut)
        return self.down_sample(fused), fused
class DownBlock(nn.Module):
    """Plain U-Net encoder block: DoubleConv followed by 2x max-pool.

    Returns ``(pooled, skip)``.
    """

    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        self.double_conv = DoubleConv(in_channels, out_channels)
        self.down_sample = nn.MaxPool2d(2)

    def forward(self, x):
        """Return ``(pooled, skip)`` feature maps."""
        skip_out = self.double_conv(x)
        return self.down_sample(skip_out), skip_out
class UpBlock(nn.Module):
    """Plain U-Net decoder block: bilinear upsample, concat skip, DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super(UpBlock, self).__init__()
        self.up_sample = nn.Upsample(
            scale_factor=2, mode="bilinear", align_corners=True
        )
        self.double_conv = DoubleConv(in_channels, out_channels)

    def forward(self, down_input, skip_input):
        """Upsample *down_input*, concat with *skip_input* on channels, convolve."""
        upsampled = self.up_sample(down_input)
        merged = torch.cat([upsampled, skip_input], dim=1)
        return self.double_conv(merged)
class UNet(nn.Module):
    """Classic U-Net: four down/up stages (64..1024 channels), 1-channel input."""

    def __init__(self, out_classes=1):
        super(UNet, self).__init__()
        # Encoder.
        self.down_conv1 = DownBlock(1, 64)
        self.down_conv2 = DownBlock(64, 128)
        self.down_conv3 = DownBlock(128, 256)
        self.down_conv4 = DownBlock(256, 512)
        # Bottleneck.
        self.double_conv = DoubleConv(512, 1024)
        # Decoder: input channels = upsampled + skip-connection channels.
        self.up_conv4 = UpBlock(512 + 1024, 512)
        self.up_conv3 = UpBlock(256 + 512, 256)
        self.up_conv2 = UpBlock(128 + 256, 128)
        self.up_conv1 = UpBlock(128 + 64, 64)
        self.conv_last = nn.Conv2d(64, out_classes, kernel_size=1)

    def forward(self, x):
        """Return raw per-pixel logits of shape (N, out_classes, H, W)."""
        x, skip1 = self.down_conv1(x)
        x, skip2 = self.down_conv2(x)
        x, skip3 = self.down_conv3(x)
        x, skip4 = self.down_conv4(x)
        x = self.double_conv(x)
        x = self.up_conv4(x, skip4)
        x = self.up_conv3(x, skip3)
        x = self.up_conv2(x, skip2)
        x = self.up_conv1(x, skip1)
        return self.conv_last(x)
class DeepResUNet(nn.Module):
    """U-Net built entirely from pre-activated residual blocks.

    Unlike the sibling nets, the forward pass ends with a channel-wise
    softmax, so the output is a per-pixel probability map.

    Args:
        in_channels: Number of input image channels.
        num_classes: Number of output channels/classes.
    """

    def __init__(self, in_channels=3, num_classes=1):
        super(DeepResUNet, self).__init__()
        # Encoder: four pre-activated residual down stages.
        self.down_conv1 = PreActivateResBlock(in_channels, 64)
        self.down_conv2 = PreActivateResBlock(64, 128)
        self.down_conv3 = PreActivateResBlock(128, 256)
        self.down_conv4 = PreActivateResBlock(256, 512)
        # Bottleneck.
        self.double_conv = PreActivateDoubleConv(512, 1024)
        # Decoder: input channels = upsampled + skip-connection channels.
        self.up_conv4 = PreActivateResUpBlock(512 + 1024, 512)
        self.up_conv3 = PreActivateResUpBlock(256 + 512, 256)
        self.up_conv2 = PreActivateResUpBlock(128 + 256, 128)
        self.up_conv1 = PreActivateResUpBlock(128 + 64, 64)
        self.conv_last = nn.Conv2d(64, num_classes, kernel_size=1)

    def forward(self, x):
        """Return per-pixel class probabilities of shape (N, num_classes, H, W)."""
        x, skip1_out = self.down_conv1(x)
        x, skip2_out = self.down_conv2(x)
        x, skip3_out = self.down_conv3(x)
        x, skip4_out = self.down_conv4(x)
        x = self.double_conv(x)
        x = self.up_conv4(x, skip4_out)
        x = self.up_conv3(x, skip3_out)
        x = self.up_conv2(x, skip2_out)
        x = self.up_conv1(x, skip1_out)
        x = self.conv_last(x)
        # NOTE(review): softmax over dim=1 makes the output a probability
        # map; with the default num_classes=1 it is a constant 1.0 map —
        # confirm callers always pass num_classes >= 2.
        x = torch.softmax(x, dim=1)
        return x
class ResUNet(nn.Module):
    """Hybrid U-Net: residual encoder blocks with plain double-conv decoder blocks."""

    def __init__(self, out_classes=1):
        super(ResUNet, self).__init__()
        # Encoder: residual down stages over a 1-channel input.
        self.down_conv1 = ResBlock(1, 64)
        self.down_conv2 = ResBlock(64, 128)
        self.down_conv3 = ResBlock(128, 256)
        self.down_conv4 = ResBlock(256, 512)
        # Bottleneck.
        self.double_conv = DoubleConv(512, 1024)
        # Decoder: input channels = upsampled + skip-connection channels.
        self.up_conv4 = UpBlock(512 + 1024, 512)
        self.up_conv3 = UpBlock(256 + 512, 256)
        self.up_conv2 = UpBlock(128 + 256, 128)
        self.up_conv1 = UpBlock(128 + 64, 64)
        self.conv_last = nn.Conv2d(64, out_classes, kernel_size=1)

    def forward(self, x):
        """Return raw per-pixel logits of shape (N, out_classes, H, W)."""
        x, skip1 = self.down_conv1(x)
        x, skip2 = self.down_conv2(x)
        x, skip3 = self.down_conv3(x)
        x, skip4 = self.down_conv4(x)
        x = self.double_conv(x)
        x = self.up_conv4(x, skip4)
        x = self.up_conv3(x, skip3)
        x = self.up_conv2(x, skip2)
        x = self.up_conv1(x, skip1)
        return self.conv_last(x)
class ONet(nn.Module):
    """Res-U-Net that additionally conditions on a bounding-mask input.

    The forward pass takes ``(input_tensor, bounding)``: the bounding map,
    scaled by ``alpha``, is added to the input before the encoder; scaled by
    ``beta``, it is concatenated with the decoder output before a final
    fusing 1x1 convolution.

    Args:
        alpha: Scale applied to the bounding map at the input.
            NOTE(review): the defaults 470/40 look empirically tuned — confirm.
        beta: Scale applied to the bounding map at the output fusion.
        out_classes: NOTE(review): unused — the final convolution is
            hard-coded to 1 output channel; confirm whether this is intended.
    """

    def __init__(self, alpha=470, beta=40, out_classes=1):
        super(ONet, self).__init__()
        self.alpha = alpha
        self.beta = beta
        # Encoder over the (bounding-augmented) 1-channel input.
        self.down_conv1 = ResBlock(1, 64)
        self.down_conv2 = ResBlock(64, 128)
        self.down_conv3 = ResBlock(128, 256)
        self.down_conv4 = ResBlock(256, 512)
        # Bottleneck.
        self.double_conv = DoubleConv(512, 1024)
        # Decoder: input channels = upsampled + skip-connection channels.
        self.up_conv4 = UpBlock(512 + 1024, 512)
        self.up_conv3 = UpBlock(256 + 512, 256)
        self.up_conv2 = UpBlock(128 + 256, 128)
        self.up_conv1 = UpBlock(128 + 64, 64)
        self.conv_last = nn.Conv2d(64, 1, kernel_size=1)
        # Fuses the network output with the scaled bounding map (2 -> 1 ch).
        self.input_output_conv = nn.Conv2d(2, 1, kernel_size=1)

    def forward(self, inputs):
        """Return a 1-channel output map from ``inputs = (input_tensor, bounding)``."""
        input_tensor, bounding = inputs
        # Inject the (scaled) bounding prior directly into the input signal.
        x, skip1_out = self.down_conv1(input_tensor + (bounding * self.alpha))
        x, skip2_out = self.down_conv2(x)
        x, skip3_out = self.down_conv3(x)
        x, skip4_out = self.down_conv4(x)
        x = self.double_conv(x)
        x = self.up_conv4(x, skip4_out)
        x = self.up_conv3(x, skip3_out)
        x = self.up_conv2(x, skip2_out)
        x = self.up_conv1(x, skip1_out)
        x = self.conv_last(x)
        # Fuse the network output with the (scaled) bounding prior.
        input_output = torch.cat([x, bounding * self.beta], dim=1)
        x = self.input_output_conv(input_output)
        return x
def deepresunet(in_channels=3, num_classes=2):
    """Factory returning a :class:`DeepResUNet` with the given channels and classes."""
    return DeepResUNet(in_channels=in_channels, num_classes=num_classes)
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import load_state_dict_from_url
# Public API of this module.
__all__ = ["Inception3", "inception_v3"]

# Pretrained weight locations keyed by architecture name.
model_urls = {
    # Inception v3 ported from TensorFlow
    "inception_v3_google": "https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth",
}

# (logits, aux_logits) pair returned when the auxiliary classifier is used.
_InceptionOutputs = namedtuple("InceptionOutputs", ["logits", "aux_logits"])
def inception_v3(pretrained=True, progress=True, **kwargs):
    r"""Inception v3 model architecture from
    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.

    .. note::
        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        aux_logits (bool): If True, add an auxiliary branch that can improve training.
            Default: *True*
        transform_input (bool): If True, preprocesses the input according to the method with which it
            was trained on ImageNet. Default: *False*
    """
    if pretrained:
        if "transform_input" not in kwargs:
            kwargs["transform_input"] = True
        if "aux_logits" in kwargs:
            # The pretrained weights include the aux branch, so it must exist
            # while the state dict is loaded; remember the caller's choice.
            original_aux_logits = kwargs["aux_logits"]
            kwargs["aux_logits"] = True
        else:
            original_aux_logits = True
        model = Inception3(**kwargs)
        state_dict = load_state_dict_from_url(
            model_urls["inception_v3_google"], progress=progress
        )
        model.load_state_dict(state_dict)
        if not original_aux_logits:
            # Drop the aux branch again if the caller did not ask for it.
            model.aux_logits = False
            del model.AuxLogits
        # NOTE(review): the ImageNet 1000-way classifier head is replaced by
        # a freshly-initialized 7-way linear layer — presumably the 7 facial
        # expression classes used elsewhere in this project; confirm against
        # the training code.
        model.fc = nn.Linear(2048, 7)
        return model

    return Inception3(**kwargs)
class Inception3(nn.Module):
    """Inception v3 backbone (torchvision port).

    NOTE(review): the ``num_classes`` constructor argument is deliberately
    overwritten to 1000 below (so pretrained ImageNet weights always fit the
    head), and ``in_channels`` is unused — the stem is hard-coded to 3 input
    channels. Confirm both are intentional before relying on either argument.
    """

    def __init__(
        self, num_classes=1000, aux_logits=True, transform_input=False, in_channels=3
    ):
        super(Inception3, self).__init__()
        # strictly set to 1000: the num_classes argument is ignored.
        num_classes = 1000
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        # Stem: N x 3 x 299 x 299 down to 192 channels.
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        # Inception mixed blocks.
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes)
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.fc = nn.Linear(2048, num_classes)
        # Truncated-normal weight init (torchvision's scheme); scipy is
        # imported lazily, so it is only required at construction time.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats

                stddev = m.stddev if hasattr(m, "stddev") else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
                values = values.view(m.weight.size())
                with torch.no_grad():
                    m.weight.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Return class logits of shape (N, 1000) for input of shape (N, 3, 299, 299)."""
        if self.transform_input:
            # Re-normalize from [-1, 1]-style scaling to the ImageNet mean/std.
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        # N x 3 x 299 x 299
        x = self.Conv2d_1a_3x3(x)
        # N x 32 x 149 x 149
        x = self.Conv2d_2a_3x3(x)
        # N x 32 x 147 x 147
        x = self.Conv2d_2b_3x3(x)
        # N x 64 x 147 x 147
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # N x 64 x 73 x 73
        x = self.Conv2d_3b_1x1(x)
        # N x 80 x 73 x 73
        x = self.Conv2d_4a_3x3(x)
        # N x 192 x 71 x 71
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # N x 192 x 35 x 35
        x = self.Mixed_5b(x)
        # N x 256 x 35 x 35
        x = self.Mixed_5c(x)
        # N x 288 x 35 x 35
        x = self.Mixed_5d(x)
        # N x 288 x 35 x 35
        x = self.Mixed_6a(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6b(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6c(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6d(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6e(x)
        # N x 768 x 17 x 17
        if self.training and self.aux_logits:
            # NOTE(review): the auxiliary head is evaluated but its result is
            # discarded (upstream torchvision assigned it and returned it via
            # the commented-out _InceptionOutputs below) — this is wasted
            # compute unless re-enabling the aux return is planned; confirm.
            self.AuxLogits(x)
        # N x 768 x 17 x 17
        x = self.Mixed_7a(x)
        # N x 1280 x 8 x 8
        x = self.Mixed_7b(x)
        # N x 2048 x 8 x 8
        x = self.Mixed_7c(x)
        # N x 2048 x 8 x 8
        # Adaptive average pooling
        x = F.adaptive_avg_pool2d(x, (1, 1))
        # N x 2048 x 1 x 1
        x = F.dropout(x, training=self.training)
        # N x 2048 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 2048
        x = self.fc(x)
        # N x 1000 (num_classes)
        """
        if self.training and self.aux_logits:
            return _InceptionOutputs(x, aux)
        """
        return x
class InceptionA(nn.Module):
    """Inception block with 1x1, 5x5, double-3x3 and pooled-1x1 branches.

    All branches preserve spatial size; their outputs are concatenated
    along the channel dimension.
    """

    def __init__(self, in_channels, pool_features):
        super(InceptionA, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))
        out_3x3dbl = self.branch3x3dbl_3(
            self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        )
        out_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        )
        # Channel-wise concatenation of the four parallel branches.
        return torch.cat([out_1x1, out_5x5, out_3x3dbl, out_pool], 1)
class InceptionB(nn.Module):
    """Downsampling Inception block: stride-2 conv branches plus max-pool."""

    def __init__(self, in_channels):
        super(InceptionB, self).__init__()
        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)

    def forward(self, x):
        out_3x3 = self.branch3x3(x)
        out_dbl = x
        # Double-3x3 branch applied sequentially; last conv downsamples.
        for stage in (self.branch3x3dbl_1, self.branch3x3dbl_2, self.branch3x3dbl_3):
            out_dbl = stage(out_dbl)
        out_pool = F.max_pool2d(x, kernel_size=3, stride=2)
        return torch.cat([out_3x3, out_dbl, out_pool], 1)
class InceptionC(nn.Module):
    """Inception block with factorized 7x7 convolutions (1x7 and 7x1 pairs)."""

    def __init__(self, in_channels, channels_7x7):
        super(InceptionC, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
        c7 = channels_7x7
        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_7x7 = x
        for stage in (self.branch7x7_1, self.branch7x7_2, self.branch7x7_3):
            out_7x7 = stage(out_7x7)
        out_7x7dbl = x
        for stage in (
            self.branch7x7dbl_1,
            self.branch7x7dbl_2,
            self.branch7x7dbl_3,
            self.branch7x7dbl_4,
            self.branch7x7dbl_5,
        ):
            out_7x7dbl = stage(out_7x7dbl)
        out_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        )
        return torch.cat([out_1x1, out_7x7, out_7x7dbl, out_pool], 1)
class InceptionD(nn.Module):
    """Downsampling Inception block with a factorized 7x7-then-3x3 branch."""

    def __init__(self, in_channels):
        super(InceptionD, self).__init__()
        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)

    def forward(self, x):
        out_3x3 = self.branch3x3_2(self.branch3x3_1(x))
        out_7x7x3 = x
        # 1x1 -> 1x7 -> 7x1 -> stride-2 3x3, applied in order.
        for stage in (
            self.branch7x7x3_1,
            self.branch7x7x3_2,
            self.branch7x7x3_3,
            self.branch7x7x3_4,
        ):
            out_7x7x3 = stage(out_7x7x3)
        out_pool = F.max_pool2d(x, kernel_size=3, stride=2)
        return torch.cat([out_3x3, out_7x7x3, out_pool], 1)
class InceptionE(nn.Module):
    """Inception block whose 3x3 branches split into parallel 1x3 / 3x1 pairs."""

    def __init__(self, in_channels):
        super(InceptionE, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
        self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        # Each 3x3 branch fans out into a 1x3 and a 3x1 conv whose outputs
        # are concatenated channel-wise.
        stem = self.branch3x3_1(x)
        out_3x3 = torch.cat(
            [self.branch3x3_2a(stem), self.branch3x3_2b(stem)], 1
        )
        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        out_3x3dbl = torch.cat(
            [self.branch3x3dbl_3a(stem_dbl), self.branch3x3dbl_3b(stem_dbl)], 1
        )
        out_pool = self.branch_pool(
            F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        )
        return torch.cat([out_1x1, out_3x3, out_3x3dbl, out_pool], 1)
class InceptionAux(nn.Module):
    """Auxiliary classification head attached to an intermediate feature map."""

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
        # ``stddev`` attributes are read by the owning model's truncated-normal
        # weight initialization (see the hasattr(m, "stddev") check there).
        self.conv1.stddev = 0.01
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001

    def forward(self, x):
        # Shrink the 17x17 map, refine with two convs, pool to 1x1, classify.
        pooled = F.avg_pool2d(x, kernel_size=5, stride=3)
        features = self.conv1(self.conv0(pooled))
        features = F.adaptive_avg_pool2d(features, (1, 1))
        flat = torch.flatten(features, 1)
        return self.fc(flat)
class BasicConv2d(nn.Module):
    """Conv2d (no bias) -> BatchNorm2d -> ReLU, the basic Inception building block.

    Extra keyword arguments (``kernel_size``, ``stride``, ``padding``, ...)
    are forwarded verbatim to :class:`nn.Conv2d`.
    """

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        # Bias is redundant here: BatchNorm immediately re-centers the output.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        # Fixed: stray dataset-metadata text fused onto this line made the
        # original file syntactically invalid.
        return F.relu(x, inplace=True)
import torch.nn as nn
from .attention_module import AttentionModule
from .basic_layers import ResidualBlock
class ResidualAttentionModel(nn.Module):
    """Residual attention network: stem, three residual+attention stages,
    three trailing residual blocks, then pooling and a linear classifier."""

    def __init__(self, in_channels=3, num_classes=1000):
        super(ResidualAttentionModel, self).__init__()
        # Stem: stride-2 7x7 conv followed by BN/ReLU.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )
        self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Three stages of residual block + attention module at decreasing
        # spatial resolution.
        self.residual_block1 = ResidualBlock(64, 256)
        self.attention_module1 = AttentionModule(256, 256, (56, 56), (28, 28), (14, 14))
        self.residual_block2 = ResidualBlock(256, 512, 2)
        self.attention_module2 = AttentionModule(512, 512, (28, 28), (14, 14), (7, 7))
        self.residual_block3 = ResidualBlock(512, 1024, 2)
        self.attention_module3 = AttentionModule(1024, 1024, (14, 14), (7, 7), (4, 4))
        # Tail: three residual blocks at 2048 channels.
        self.residual_block4 = ResidualBlock(1024, 2048, 2)
        self.residual_block5 = ResidualBlock(2048, 2048)
        self.residual_block6 = ResidualBlock(2048, 2048)
        self.mpool2 = nn.Sequential(
            nn.BatchNorm2d(2048),
            nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=7, stride=1),
        )
        self.fc = nn.Linear(2048, num_classes)

    def forward(self, x):
        h = self.mpool1(self.conv1(x))
        # Residual block followed by attention refinement, three times.
        h = self.attention_module1(self.residual_block1(h))
        h = self.attention_module2(self.residual_block2(h))
        h = self.attention_module3(self.residual_block3(h))
        # Plain residual tail.
        h = self.residual_block4(h)
        h = self.residual_block5(h)
        h = self.residual_block6(h)
        h = self.mpool2(h)
        h = h.view(h.size(0), -1)
        return self.fc(h)
def res_attention(in_channels=3, num_classes=1000):
    """Factory for :class:`ResidualAttentionModel`.

    Args:
        in_channels: number of input image channels.
        num_classes: output dimension of the final classifier.

    Returns:
        A freshly constructed ``ResidualAttentionModel``.
    """
    # Fixed: stray dataset-metadata text fused onto the return line made the
    # original file syntactically invalid.
    return ResidualAttentionModel(in_channels, num_classes)
import torch
import torch.nn as nn
from .masking import masking
from .resnet import BasicBlock, ResNet
from .utils import load_state_dict_from_url
# Torchvision pretrained ImageNet checkpoint URLs, keyed by architecture name.
# Only "resnet34" is actually used below (see ResMaskingNaive.__init__).
model_urls = {
    "resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
    "resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
    "resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
}
class ResMaskingNaive(ResNet):
    """ResNet-34 backbone initialized from torchvision's ImageNet weights,
    with the classifier replaced by a 7-way linear head.

    NOTE(review): despite the name, no masking is applied — the mask modules
    are constructed in ``__init__`` but every ``x = x * m`` line in
    ``forward`` is commented out, so they only register extra submodules
    (and, presumably, unused trainable parameters); confirm this is
    intentional before shipping.
    """

    def __init__(self, weight_path):
        # NOTE(review): ``weight_path`` is accepted but never used — weights
        # always come from the torchvision URL below; confirm intended.
        # Layers [3, 4, 6, 3] with BasicBlock is the ResNet-34 configuration,
        # matching the "resnet34" checkpoint loaded next.
        super(ResMaskingNaive, self).__init__(
            block=BasicBlock, layers=[3, 4, 6, 3], in_channels=3, num_classes=1000
        )
        # state_dict = torch.load('saved/checkpoints/resnet18_rot30_2019Nov05_17.44')['net']
        # Load ImageNet weights first (while fc is still 1000-way so the
        # checkpoint keys match), then swap in the 7-class head.
        state_dict = load_state_dict_from_url(model_urls["resnet34"], progress=True)
        self.load_state_dict(state_dict)
        self.fc = nn.Linear(512, 7)
        """
        # freeze all net
        for m in self.parameters():
            m.requires_grad = False
        """
        # Mask branches for each ResNet stage (currently unused in forward).
        self.mask1 = masking(64, 64, depth=4)
        self.mask2 = masking(128, 128, depth=3)
        self.mask3 = masking(256, 256, depth=2)
        self.mask4 = masking(512, 512, depth=1)

    def forward(self, x):  # 224
        """Standard ResNet forward; trailing comments give the spatial size
        after each stage for a 224x224 input. Returns (N, 7) logits."""
        x = self.conv1(x)  # 112
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)  # 56
        x = self.layer1(x)  # 56
        # m = self.mask1(x)
        # x = x * m
        x = self.layer2(x)  # 28
        # m = self.mask2(x)
        # x = x * m
        x = self.layer3(x)  # 14
        # m = self.mask3(x)
        # x = x * m
        x = self.layer4(x)  # 7
        # m = self.mask4(x)
        # x = x * m
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
def resmasking_naive_dropout1(in_channels=3, num_classes=7, weight_path=""):
    """Build a :class:`ResMaskingNaive` model with a Dropout + Linear head.

    Args:
        in_channels: accepted for API symmetry with the sibling factories;
            the backbone is fixed to 3 input channels by ``ResMaskingNaive``.
        num_classes: output dimension of the classifier head.
        weight_path: forwarded to ``ResMaskingNaive`` (currently unused there).

    Returns:
        The constructed model.
    """
    model = ResMaskingNaive(weight_path)
    # Fixed: the head size was hard-coded to 7, silently ignoring
    # ``num_classes``; honor the parameter (default 7 preserves behavior).
    # Also removed stray dataset-metadata text fused onto the return line.
    model.fc = nn.Sequential(
        nn.Dropout(0.4),
        nn.Linear(512, num_classes),
    )
    return model
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.