code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import mrl
from mrl.utils.misc import soft_update, flatten_state
from mrl.modules.model import PytorchModel
import numpy as np
import torch
import torch.nn.functional as F
import os
class QValuePolicy(mrl.Module):
  """Epsilon-greedy policy for acting in discrete-action environments.

  Selects argmax actions from a Q-network (optionally its target copy),
  with random actions during warm-up and an annealed epsilon during
  training.
  """

  def __init__(self):
    super().__init__(
        'policy',
        required_agent_modules=[
            'qvalue', 'env', 'replay_buffer'
        ],
        locals=locals())

  def _setup(self):
    # Whether to act using the target Q-network instead of the online one.
    self.use_qvalue_target = self.config.get('use_qvalue_target') or False

  def __call__(self, state, greedy=False):
    res = None
    # Initial Exploration: act uniformly at random until the replay buffer
    # holds `initial_explore` transitions.
    if self.training:
      if self.config.get('initial_explore') and len(
          self.replay_buffer) < self.config.initial_explore:
        res = np.array([self.env.action_space.sample() for _ in range(self.env.num_envs)])
      elif hasattr(self, 'ag_curiosity'):
        # Intrinsic-curiosity module may substitute its own goal into state.
        state = self.ag_curiosity.relabel_state(state)
    state = flatten_state(state)  # flatten goal environments
    if hasattr(self, 'state_normalizer'):
      # NOTE: normalizer statistics are updated even when a random action was
      # already chosen above, so the running mean/std still sees every state.
      state = self.state_normalizer(state, update=self.training)
    if res is not None:
      return res
    state = self.torch(state)
    if self.use_qvalue_target:
      q_values = self.numpy(self.qvalue_target(state))
    else:
      q_values = self.numpy(self.qvalue(state))
    # Epsilon-greedy: random action with probability given by the annealed
    # random_action_prob schedule; otherwise greedy argmax over Q-values.
    if self.training and not greedy and np.random.random() < self.config.random_action_prob(steps=self.config.env_steps):
      action = np.random.randint(self.env.action_space.n, size=[self.env.num_envs])
    else:
      action = np.argmax(q_values, -1)  # Convert to int
    return action
class BaseQLearning(mrl.Module):
  """Generic discrete-action Q-learning algorithm.

  Creates a `<name>_target` copy of every qvalue module, a single Adam
  optimizer over all online Q-network parameters, and handles the
  warm-up gate plus periodic polyak-averaged target updates.
  """

  def __init__(self):
    super().__init__(
        'algorithm',
        required_agent_modules=['qvalue', 'replay_buffer', 'env'],
        locals=locals())

  def _setup(self):
    """ Set up Q-value optimizers and create target network modules."""
    self.targets_and_models = []

    # Q-Value setup: collect every PytorchModel whose name starts with
    # 'qvalue', clone it into a target network, and gather all online
    # parameters into a single optimizer.
    qvalue_params = []
    self.qvalues = []
    for module in list(self.module_dict.values()):
      name = module.module_name
      if name.startswith('qvalue') and isinstance(module, PytorchModel):
        self.qvalues.append(module)
        qvalue_params += list(module.model.parameters())
        target = module.copy(name + '_target')
        target.model.load_state_dict(module.model.state_dict())
        # Freeze target networks with respect to optimizers: they are only
        # ever updated via polyak averaging in `_optimize`. This matches the
        # continuous-control counterpart (OffPolicyActorCritic) and avoids
        # accumulating unused gradients into the target parameters.
        for p in target.model.parameters():
          p.requires_grad = False
        self.agent.set_module(name + '_target', target)
        self.targets_and_models.append((target.model, module.model))

    self.qvalue_opt = torch.optim.Adam(
        qvalue_params,
        lr=self.config.qvalue_lr,
        weight_decay=self.config.qvalue_weight_decay)
    self.qvalue_params = qvalue_params

  def save(self, save_folder: str):
    """Save optimizer state (network weights are saved by their own modules)."""
    path = os.path.join(save_folder, self.module_name + '.pt')
    torch.save({
        'qvalue_opt_state_dict': self.qvalue_opt.state_dict(),
    }, path)

  def load(self, save_folder: str):
    """Restore optimizer state written by `save`."""
    path = os.path.join(save_folder, self.module_name + '.pt')
    checkpoint = torch.load(path)
    self.qvalue_opt.load_state_dict(checkpoint['qvalue_opt_state_dict'])

  def _optimize(self):
    # Only start optimizing once the replay buffer has passed warm-up.
    if len(self.replay_buffer) > self.config.warm_up:
      states, actions, rewards, next_states, gammas = self.replay_buffer.sample(
          self.config.batch_size)

      self.optimize_from_batch(states, actions, rewards, next_states, gammas)

      if self.config.opt_steps % self.config.target_network_update_freq == 0:
        for target_model, model in self.targets_and_models:
          soft_update(target_model, model, self.config.target_network_update_frac)

  def optimize_from_batch(self, states, actions, rewards, next_states, gammas):
    raise NotImplementedError('Subclass this!')
class DQN(BaseQLearning):
  """Standard DQN (Mnih et al., 2015), with optional Double DQN targets."""

  def optimize_from_batch(self, states, actions, rewards, next_states, gammas):
    """One TD-learning step on a sampled batch.

    Args:
      states, next_states: batched (flattened-goal) observations.
      actions: integer actions taken at `states`.
      rewards: rewards received.
      gammas: per-transition discount factors.
    """
    q_next = self.qvalue_target(next_states).detach()

    if self.config.double_q:
      # Double DQN: select the argmax action with the online network, then
      # evaluate it with the target network.
      # FIX: use the documented `keepdim` kwarg; `keepdims` is a numpy-style
      # spelling that is not accepted by all PyTorch versions.
      best_actions = torch.argmax(self.qvalue(next_states), dim=-1, keepdim=True)
      q_next = q_next.gather(-1, best_actions)
    else:
      q_next = q_next.max(-1, keepdim=True)[0]  # action dim is the last dimension

    target = (rewards + gammas * q_next)
    target = torch.clamp(target, *self.config.clip_target_range).detach()

    if hasattr(self, 'logger') and self.config.opt_steps % 1000 == 0:
      self.logger.add_histogram('Optimize/Target_q', target)

    # Q(s, a) for the actions actually taken; gather requires int64 indices.
    q = self.qvalue(states)
    q = q.gather(-1, actions.unsqueeze(-1).to(torch.int64))

    td_loss = F.mse_loss(q, target)

    self.qvalue_opt.zero_grad()
    td_loss.backward()

    # Grad clipping
    if self.config.grad_norm_clipping > 0.:
      torch.nn.utils.clip_grad_norm_(self.qvalue_params, self.config.grad_norm_clipping)
    if self.config.grad_value_clipping > 0.:
      torch.nn.utils.clip_grad_value_(self.qvalue_params, self.config.grad_value_clipping)

    self.qvalue_opt.step()
    return
import mrl
from mrl.utils.misc import soft_update, flatten_state
from mrl.modules.model import PytorchModel
import numpy as np
import torch
import torch.nn.functional as F
import os
class ActorPolicy(mrl.Module):
  """Used for DDPG / TD3 and other deterministic policy variants.

  Maps states to actions via the (online or target) actor network, adding
  action noise and optional epsilon-random exploration during training.
  """

  def __init__(self):
    super().__init__(
        'policy',
        required_agent_modules=[
            'actor', 'action_noise', 'env', 'replay_buffer'
        ],
        locals=locals())

  def _setup(self):
    # Whether to act using the target actor instead of the online actor.
    self.use_actor_target = self.config.get('use_actor_target')

  def __call__(self, state, greedy=False):
    action_scale = self.env.max_action

    # initial exploration and intrinsic curiosity
    res = None
    if self.training:
      # Act uniformly at random until the buffer holds `initial_explore` steps.
      if self.config.get('initial_explore') and len(self.replay_buffer) < self.config.initial_explore:
        res = np.array([self.env.action_space.sample() for _ in range(self.env.num_envs)])
      elif hasattr(self, 'ag_curiosity'):
        state = self.ag_curiosity.relabel_state(state)

    state = flatten_state(state)  # flatten goal environments
    if hasattr(self, 'state_normalizer'):
      # NOTE: normalizer statistics are updated even when a random action was
      # already chosen above, so the running mean/std sees every visited state.
      state = self.state_normalizer(state, update=self.training)

    if res is not None:
      return res

    state = self.torch(state)

    if self.use_actor_target:
      action = self.numpy(self.actor_target(state))
    else:
      action = self.numpy(self.actor(state))

    if self.training and not greedy:
      # Exploration noise, plus optional epsilon-random actions (the epsilon
      # is boosted by go-explore when intrinsic curiosity is active).
      action = self.action_noise(action)
      if self.config.get('eexplore'):
        eexplore = self.config.eexplore
        if hasattr(self, 'ag_curiosity'):
          eexplore = self.ag_curiosity.go_explore * self.config.go_eexplore + eexplore
        mask = (np.random.random((action.shape[0], 1)) < eexplore).astype(np.float32)
        randoms = np.random.random(action.shape) * (2 * action_scale) - action_scale
        action = mask * randoms + (1 - mask) * action

    return np.clip(action, -action_scale, action_scale)
class StochasticActorPolicy(mrl.Module):
  """Used for SAC / learned action noise.

  Like ActorPolicy, but the actor returns (sampled_action, log_prob), so no
  external action-noise module is required.
  """

  def __init__(self):
    super().__init__(
        'policy',
        required_agent_modules=[
            'actor', 'env', 'replay_buffer'
        ],
        locals=locals())

  def _setup(self):
    # Whether to act using the target actor instead of the online actor.
    self.use_actor_target = self.config.get('use_actor_target')

  def __call__(self, state, greedy=False):
    action_scale = self.env.max_action

    # initial exploration and intrinsic curiosity
    res = None
    if self.training:
      if self.config.get('initial_explore') and len(self.replay_buffer) < self.config.initial_explore:
        res = np.array([self.env.action_space.sample() for _ in range(self.env.num_envs)])
      elif hasattr(self, 'ag_curiosity'):
        state = self.ag_curiosity.relabel_state(state)

    state = flatten_state(state)  # flatten goal environments
    if hasattr(self, 'state_normalizer'):
      # NOTE: normalizer statistics are updated even when a random action was
      # already chosen above, so the running mean/std sees every visited state.
      state = self.state_normalizer(state, update=self.training)

    if res is not None:
      return res

    state = self.torch(state)

    # The stochastic actor returns (sampled_action, log_prob); only the
    # action is needed for environment interaction.
    if self.use_actor_target:
      action, _ = self.actor_target(state)
    else:
      action, _ = self.actor(state)
    action = self.numpy(action)

    if self.training and not greedy and self.config.get('eexplore'):
      # Epsilon-random exploration, boosted by go-explore when intrinsic
      # curiosity is active.
      eexplore = self.config.eexplore
      if hasattr(self, 'ag_curiosity'):
        eexplore = self.ag_curiosity.go_explore * self.config.go_eexplore + eexplore
      mask = (np.random.random((action.shape[0], 1)) < eexplore).astype(np.float32)
      randoms = np.random.random(action.shape) * (2 * action_scale) - action_scale
      action = mask * randoms + (1 - mask) * action

    return np.clip(action, -action_scale, action_scale)
class OffPolicyActorCritic(mrl.Module):
  """Base class for off-policy actor-critic algorithms (DDPG / TD3 / SAC).

  Creates frozen `<name>_target` copies of every actor/critic module and one
  shared Adam optimizer per network family. Subclasses implement
  `optimize_from_batch`.
  """

  def __init__(self):
    super().__init__(
        'algorithm',
        required_agent_modules=['actor', 'critic', 'replay_buffer', 'env'],
        locals=locals())

  def _make_targets_and_optimizer(self, prefix, lr, weight_decay):
    """Clone every `<prefix>*` PytorchModel into a frozen target network and
    build one Adam optimizer over all of the online parameters.

    The actor and critic setup were previously duplicated verbatim; this
    helper factors out the shared logic. Returns (modules, params, optimizer).
    """
    modules, params = [], []
    # Snapshot the module dict before adding targets, so newly-added
    # '<name>_target' modules are not themselves iterated.
    for module in list(self.module_dict.values()):
      name = module.module_name
      if name.startswith(prefix) and isinstance(module, PytorchModel):
        modules.append(module)
        params += list(module.model.parameters())
        target = module.copy(name + '_target')
        target.model.load_state_dict(module.model.state_dict())
        # Freeze target networks with respect to optimizers (only update via polyak averaging)
        for p in target.model.parameters():
          p.requires_grad = False
        self.agent.set_module(name + '_target', target)
        self.targets_and_models.append((target.model, module.model))
    opt = torch.optim.Adam(params, lr=lr, weight_decay=weight_decay)
    return modules, params, opt

  def _setup(self):
    """Sets up actor/critic optimizers and creates target network modules"""
    self.targets_and_models = []

    # Actor setup (covers 'actor', and any other module named 'actor*').
    self.actors, self.actor_params, self.actor_opt = self._make_targets_and_optimizer(
        'actor', self.config.actor_lr, self.config.actor_weight_decay)

    # Critic setup (covers 'critic', 'critic2', ...).
    self.critics, self.critic_params, self.critic_opt = self._make_targets_and_optimizer(
        'critic', self.config.critic_lr, self.config.critic_weight_decay)

    self.action_scale = self.env.max_action

  def save(self, save_folder: str):
    """Save optimizer states (network weights are saved by their own modules)."""
    path = os.path.join(save_folder, self.module_name + '.pt')
    torch.save({
        'actor_opt_state_dict': self.actor_opt.state_dict(),
        'critic_opt_state_dict': self.critic_opt.state_dict()
    }, path)

  def load(self, save_folder: str):
    """Restore optimizer states written by `save`."""
    path = os.path.join(save_folder, self.module_name + '.pt')
    checkpoint = torch.load(path)
    self.actor_opt.load_state_dict(checkpoint['actor_opt_state_dict'])
    self.critic_opt.load_state_dict(checkpoint['critic_opt_state_dict'])

  def _optimize(self):
    # Only start optimizing once the replay buffer has passed warm-up.
    if len(self.replay_buffer) > self.config.warm_up:
      states, actions, rewards, next_states, gammas = self.replay_buffer.sample(
          self.config.batch_size)

      self.optimize_from_batch(states, actions, rewards, next_states, gammas)

      if self.config.opt_steps % self.config.target_network_update_freq == 0:
        for target_model, model in self.targets_and_models:
          soft_update(target_model, model, self.config.target_network_update_frac)

  def optimize_from_batch(self, states, actions, rewards, next_states, gammas):
    raise NotImplementedError('Subclass this!')
class DDPG(OffPolicyActorCritic):
  """Deep Deterministic Policy Gradient."""

  def optimize_from_batch(self, states, actions, rewards, next_states, gammas):
    # --- Critic update: one-step TD target from the target networks ---
    with torch.no_grad():
      q_next = self.critic_target(next_states, self.actor_target(next_states))
      target = (rewards + gammas * q_next)
      target = torch.clamp(target, *self.config.clip_target_range)

    if hasattr(self, 'logger') and self.config.opt_steps % 1000 == 0:
      self.logger.add_histogram('Optimize/Target_q', target)

    q = self.critic(states, actions)
    critic_loss = F.mse_loss(q, target)

    self.critic_opt.zero_grad()
    critic_loss.backward()

    # Grad clipping
    # NOTE(review): this clips each parameter tensor's norm *individually*,
    # unlike TD3/SAC in this file, which clip the global norm via
    # torch.nn.utils.clip_grad_norm_. Possibly intentional — confirm before
    # unifying.
    if self.config.grad_norm_clipping > 0.:
      for p in self.critic_params:
        clip_coef = self.config.grad_norm_clipping / (1e-6 + torch.norm(p.grad.detach()))
        if clip_coef < 1:
          p.grad.detach().mul_(clip_coef)
    if self.config.grad_value_clipping > 0.:
      torch.nn.utils.clip_grad_value_(self.critic_params, self.config.grad_value_clipping)

    self.critic_opt.step()

    # --- Actor update: maximize the critic's value of the actor's actions ---
    # Temporarily freeze critic params so the actor loss does not accumulate
    # gradients into the critic.
    for p in self.critic_params:
      p.requires_grad = False

    a = self.actor(states)
    if self.config.get('policy_opt_noise'):
      # Optional smoothing: perturb the actor's action before evaluation.
      noise = torch.randn_like(a) * (self.config.policy_opt_noise * self.action_scale)
      a = (a + noise).clamp(-self.action_scale, self.action_scale)

    actor_loss = -self.critic(states, a)[:,-1].mean()
    if self.config.action_l2_regularization:
      # Penalize action magnitude (normalized by the action scale).
      actor_loss += self.config.action_l2_regularization * F.mse_loss(a / self.action_scale, torch.zeros_like(a))

    self.actor_opt.zero_grad()
    actor_loss.backward()

    # Grad clipping (same per-parameter scheme as the critic above).
    if self.config.grad_norm_clipping > 0.:
      for p in self.actor_params:
        clip_coef = self.config.grad_norm_clipping / (1e-6 + torch.norm(p.grad.detach()))
        if clip_coef < 1:
          p.grad.detach().mul_(clip_coef)
    if self.config.grad_value_clipping > 0.:
      torch.nn.utils.clip_grad_value_(self.actor_params, self.config.grad_value_clipping)

    self.actor_opt.step()

    # Re-enable critic gradients for the next critic update.
    for p in self.critic_params:
      p.requires_grad = True
class TD3(OffPolicyActorCritic):
  """Twin Delayed DDPG: clipped double-Q targets, target policy smoothing,
  and delayed actor updates on top of DDPG."""

  def optimize_from_batch(self, states, actions, rewards, next_states, gammas):
    config = self.config

    # --- Critic update ---
    with torch.no_grad():
      # Target policy smoothing: perturb the target action with clipped noise.
      a_next_max = self.actor(next_states)
      noise = torch.randn_like(a_next_max) * (config.td3_noise * self.action_scale)
      noise = noise.clamp(-config.td3_noise_clip * self.action_scale,
                          config.td3_noise_clip * self.action_scale)
      a_next_max = (a_next_max + noise).clamp(-self.action_scale, self.action_scale)

      # Clipped double-Q: take the minimum of the two target critics.
      q1, q2 = self.critic_target(next_states, a_next_max), self.critic2_target(
          next_states, a_next_max)
      target = (rewards + gammas * torch.min(q1, q2))
      target = torch.clamp(target, *self.config.clip_target_range)

    if hasattr(self, 'logger') and self.config.opt_steps % 1000 == 0:
      self.logger.add_histogram('Optimize/Target_q', target)

    q1, q2 = self.critic(states, actions), self.critic2(states, actions)
    critic_loss = F.mse_loss(q1, target) + F.mse_loss(q2, target)

    self.critic_opt.zero_grad()
    critic_loss.backward()

    # Grad clipping
    if self.config.grad_norm_clipping > 0.:
      torch.nn.utils.clip_grad_norm_(self.critic_params, self.config.grad_norm_clipping)
    if self.config.grad_value_clipping > 0.:
      torch.nn.utils.clip_grad_value_(self.critic_params, self.config.grad_value_clipping)

    self.critic_opt.step()

    # --- Delayed actor update (every `td3_delay` optimize steps) ---
    if config.opt_steps % config.td3_delay == 0:
      # Freeze critics so the actor loss does not push gradients into them.
      for p in self.critic_params:
        p.requires_grad = False

      a = self.actor(states)
      if self.config.get('policy_opt_noise'):
        noise = torch.randn_like(a) * (config.policy_opt_noise * self.action_scale)
        a = (a + noise).clamp(-self.action_scale, self.action_scale)

      actor_loss = -torch.min(self.critic(states, a)[:,-1], self.critic2(states, a)[:,-1]).mean()
      if self.config.action_l2_regularization:
        # Penalize action magnitude (normalized by the action scale).
        actor_loss += self.config.action_l2_regularization * F.mse_loss(a / self.action_scale, torch.zeros_like(a))

      self.actor_opt.zero_grad()
      actor_loss.backward()

      # Grad clipping
      if self.config.grad_norm_clipping > 0.:
        torch.nn.utils.clip_grad_norm_(self.actor_params, self.config.grad_norm_clipping)
      if self.config.grad_value_clipping > 0.:
        torch.nn.utils.clip_grad_value_(self.actor_params, self.config.grad_value_clipping)

      self.actor_opt.step()

      for p in self.critic_params:
        p.requires_grad = True
class SAC(OffPolicyActorCritic):
  """Soft Actor-Critic with a fixed entropy coefficient (config.entropy_coef)."""

  def optimize_from_batch(self, states, actions, rewards, next_states, gammas):
    config = self.config

    # --- Critic update ---
    with torch.no_grad():
      # Target actions come from *current* policy
      a_next, logp_next = self.actor(next_states)
      q1 = self.critic_target(next_states, a_next)
      q2 = self.critic2_target(next_states, a_next)
      # Clipped double-Q with the entropy bonus folded into the target.
      target = rewards + gammas * (torch.min(q1, q2) - config.entropy_coef * logp_next)
      target = torch.clamp(target, *self.config.clip_target_range)

    if hasattr(self, 'logger') and self.config.opt_steps % 1000 == 0:
      self.logger.add_histogram('Optimize/Target_q', target)

    q1, q2 = self.critic(states, actions), self.critic2(states, actions)
    critic_loss = F.mse_loss(q1, target) + F.mse_loss(q2, target)

    self.critic_opt.zero_grad()
    critic_loss.backward()

    # Grad clipping
    if self.config.grad_norm_clipping > 0.:
      torch.nn.utils.clip_grad_norm_(self.critic_params, self.config.grad_norm_clipping)
    if self.config.grad_value_clipping > 0.:
      torch.nn.utils.clip_grad_value_(self.critic_params, self.config.grad_value_clipping)

    self.critic_opt.step()

    # --- Actor update: maximize entropy-regularized Q ---
    # Freeze critics so the actor loss does not push gradients into them.
    for p in self.critic_params:
      p.requires_grad = False

    a, logp = self.actor(states)
    q = torch.min(self.critic(states, a), self.critic2(states, a))

    actor_loss = (config.entropy_coef * logp - q).mean()
    if self.config.action_l2_regularization:
      # Penalize action magnitude (normalized by the action scale).
      actor_loss += self.config.action_l2_regularization * F.mse_loss(a / self.action_scale, torch.zeros_like(a))

    self.actor_opt.zero_grad()
    actor_loss.backward()

    # Grad clipping
    if self.config.grad_norm_clipping > 0.:
      torch.nn.utils.clip_grad_norm_(self.actor_params, self.config.grad_norm_clipping)
    if self.config.grad_value_clipping > 0.:
      torch.nn.utils.clip_grad_value_(self.actor_params, self.config.grad_value_clipping)

    self.actor_opt.step()

    # Re-enable critic gradients for the next critic update.
    for p in self.critic_params:
      p.requires_grad = True
import argparse
import random
from types import LambdaType

import gym
import numpy as np
import torch
from scipy.linalg import block_diag

try:
  import tensorflow as tf
except:
  tf = None
def set_global_seeds(seed):
  """
  Set the seed for python random, torch, tensorflow, numpy and gym spaces.

  :param seed: (int) the seed
  """
  if tf is not None:
    # Support tf2 (tf.random.set_seed), tf1.14+ (compat.v1), and older tf1.
    if hasattr(tf.random, 'set_seed'):
      tf.random.set_seed(seed)
    elif hasattr(tf.compat, 'v1'):
      tf.compat.v1.set_random_seed(seed)
    else:
      tf.set_random_seed(seed)
  # FIX: torch is used throughout this library but its RNG was never seeded
  # here, so "global" seeding did not make torch-dependent runs reproducible.
  torch.manual_seed(seed)
  np.random.seed(seed)
  random.seed(seed)
  # prng was removed in latest gym version
  if hasattr(gym.spaces, 'prng'):
    gym.spaces.prng.seed(seed)
class AttrDict(dict):
  """
  Dictionary with attribute-style read/write access.

  `x["key"]` and `x.key` refer to the same entry, and normal dict iteration
  (`for k, v in x.items()`) works as usual. Subclasses for specific data
  classes must call AttrDict's __init__().
  """

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Alias the instance __dict__ to the dict itself so attribute access and
    # item access share the same storage.
    self.__dict__ = self

  def copy(self):
    """
    Deep-copies unbroken chains of nested AttrDicts, while shallow-copying
    every other value (e.g. numpy arrays are NOT copied).
    """
    copied = {}
    for key, value in self.items():
      copied[key] = value.copy() if isinstance(value, AttrDict) else value
    return type(self)(**copied)
class AnnotatedAttrDict(AttrDict):
  """
  An AttrDict whose values may be given as (value, annotation) tuples, where
  the annotation is a descriptive string retrievable via `get_annotation`.
  """

  def __init__(self, *args, **kwargs):
    argdict = dict(*args, **kwargs)
    valuedict = {}
    annotationdict = {}
    for k, va in argdict.items():
      # FIX: only treat tuples/lists of (value, str) as annotated values.
      # The previous `hasattr(va, '__len__')` check also matched plain
      # strings, so a two-character value like 'ab' was wrongly split into
      # value 'a' with annotation 'b'.
      if isinstance(va, (tuple, list)) and len(va) == 2 and isinstance(va[1], str):
        v, a = va
        valuedict[k] = v
        annotationdict[k] = a
      else:
        valuedict[k] = va
    # FIX: drop the redundant positional `self` that was previously passed
    # through to dict.__init__ (harmless since self was empty, but wrong).
    super().__init__(**valuedict)
    self.annotationdict = annotationdict

  def get_annotation(self, key):
    """Return the annotation string for `key`, or None if there is none."""
    return self.annotationdict.get(key)
def soft_update(target, src, factor):
  """Polyak-average `src` parameters into `target`, in place.

  Each target parameter becomes
  (1 - factor) * target_param + factor * src_param.
  """
  with torch.no_grad():
    for tgt_p, src_p in zip(target.parameters(), src.parameters()):
      tgt_p.data.copy_((1.0 - factor) * tgt_p.data + factor * src_p.data)
def short_timestamp():
  """Return the current time as a compact 'MMDDHHMMSS' digit string."""
  from datetime import datetime
  return datetime.now().strftime('%m%d%H%M%S')
def flatten_state(state):
  """Concatenate observation and desired goal for dict (goal-env) states.

  Non-dict states are returned unchanged.
  """
  if not isinstance(state, dict):
    return state
  return np.concatenate((state['observation'], state['desired_goal']), -1)
def add_config_args(argparser, config: 'AnnotatedAttrDict'):
  """Add a --flag to `argparser` for each str/int/float/bool config entry.

  Annotations from the AnnotatedAttrDict become help strings. Entries of
  other types (schedules, tuples, ...) are skipped.
  TODO: Make this add more types of args automatically?
  """
  for k, v in config.items():
    try:
      if type(v) in (str, int, float):
        argparser.add_argument('--' + k, default=v, type=type(v), help=config.get_annotation(k))
      elif type(v) == bool:
        argparser.add_argument('--' + k, default=v, type=str2bool, help=config.get_annotation(k))
    except argparse.ArgumentError:
      # FIX: the original bare `except: pass` swallowed *every* error. Only
      # duplicate-option registration is expected and safe to skip; any other
      # failure should propagate instead of being silently hidden.
      pass
  return argparser
def str2bool(v):
  """Parse a human-friendly boolean string, for use as an argparse `type`.

  Accepts yes/no, true/false, t/f, y/n, 1/0 (case-insensitive); actual bool
  values pass through untouched.

  Raises:
    argparse.ArgumentTypeError: if the value is not a recognized boolean.
  """
  if isinstance(v, bool):
    return v
  if v.lower() in ('yes', 'true', 't', 'y', '1'):
    return True
  elif v.lower() in ('no', 'false', 'f', 'n', '0'):
    return False
  else:
    # FIX: `argparse` was referenced here but never imported in this module,
    # so this branch previously raised NameError instead of a clean
    # argparse parse error (argparse is now imported at the top of the file).
    raise argparse.ArgumentTypeError('Boolean value expected.')
def merge_args_into_config(args, config: 'AttrDict'):
  """Overwrite config entries with matching command-line args.

  Args without a corresponding config key (and that are not functions, e.g.
  schedule lambdas) are collected into `config.other_args`.
  """
  config.parent_folder = args.parent_folder
  leftovers = {}
  for key, value in vars(args).items():
    if key in config:
      config[key] = value
    elif not isinstance(value, LambdaType):
      leftovers[key] = value
  config.other_args = leftovers
  return config
def make_agent_name(config, attr_list, prefix='agent'):
  """Build an agent name like 'agent_gamma0.99_seed-0' from config attrs.

  Attribute names are shortened to unique prefixes; values are looked up in
  `config` itself or, failing that, in `config.other_args` (with a '-'
  separator).

  Raises:
    ValueError: if an attribute is found in neither place.
  """
  parts = [prefix]
  used = set()
  for attr in attr_list:
    short = shorten_attr(attr, used)
    used.add(short)
    if attr in config:
      parts.append(short + str(config[attr]))
    elif attr in config.other_args:
      parts.append(short + '-' + str(config.other_args[attr]))
    else:
      raise ValueError('Attribute {} not found in config!'.format(attr))
  return '_'.join(parts)
def shorten_attr(attr, set, proposed_len=5):
  """Return the shortest prefix of `attr` (at least `proposed_len` chars)
  that does not already appear in `set`."""
  candidate = attr[:proposed_len]
  # Iterative equivalent of the original recursion: grow the prefix until
  # it no longer collides.
  while candidate in set:
    proposed_len += 1
    candidate = attr[:proposed_len]
  return candidate
def softmax(X, theta=1.0, axis=None):
  """
  Compute the softmax of each element along an axis of X.

  Parameters
  ----------
  X: ND-Array. Probably should be floats.
  theta (optional): float parameter, used as a multiplier
    prior to exponentiation. Default = 1.0
  axis (optional): axis to compute values along. Default is the
    first non-singleton axis.

  Returns an array the same size as X. The result will sum to 1
  along the specified axis.
  """
  y = np.atleast_2d(X)

  # Default to the first non-singleton axis.
  if axis is None:
    axis = next(i for i, dim in enumerate(y.shape) if dim > 1)

  # Scale by theta, then subtract the per-axis max for numerical stability
  # before exponentiating.
  scaled = y * float(theta)
  shifted = scaled - np.max(scaled, axis=axis, keepdims=True)
  exponentiated = np.exp(shifted)

  # Normalize along the chosen axis.
  p = exponentiated / np.sum(exponentiated, axis=axis, keepdims=True)

  # If the input was 1-D, return a 1-D result.
  if len(X.shape) == 1:
    p = p.flatten()
  return p
def make_activ(activ_name):
  """Map an activation name ('relu' | 'gelu' | 'tanh', any case) to a torch
  activation class.

  Raises:
    NotImplementedError: for unrecognized names.
  """
  name = activ_name.lower()
  if name == 'relu':
    return torch.nn.ReLU
  if name == 'gelu':
    # GELU comes from the project's network utilities (imported lazily so
    # the other branches have no mrl dependency).
    from mrl.utils.networks import GELU
    return GELU
  if name == 'tanh':
    return torch.nn.Tanh
  raise NotImplementedError
def batch_block_diag(a, b):
  """
  Batched version of scipy.linalg.block_diag for exactly two arrays.

  2-D inputs fall back to scipy's block_diag; 3-D inputs are interpreted as
  (batch, rows, cols) and combined block-diagonally per batch element.
  https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.block_diag.html
  """
  if len(a.shape) == 2:
    return block_diag(a, b)

  assert len(a.shape) == 3
  assert len(b.shape) == 3
  assert a.shape[0] == b.shape[0]  # batch dimensions must match

  batch, rows_a, cols_a = a.shape
  rows_b, cols_b = b.shape[1], b.shape[2]
  out = np.zeros((batch, rows_a + rows_b, cols_a + cols_b))
  out[:, :rows_a, :cols_a] = a
  out[:, rows_a:, cols_a:] = b
  return out
def batch_block_diag_many(*arrs):
  """
  Batched version of scipy.linalg.block_diag for any number of arrays.

  2-D inputs fall back to scipy's block_diag; 3-D inputs are interpreted as
  (batch, rows, cols) and combined block-diagonally per batch element.
  """
  shapes = np.array([a.shape for a in arrs], dtype=np.int64)
  if len(shapes[0]) == 2:
    return block_diag(*arrs)

  # shapes is num_arrs x 3: one (batch, rows, cols) row per input array.
  out = np.zeros((shapes[0][0], shapes[:, 1].sum(), shapes[:, 2].sum()))
  row = col = 0
  for arr, (_, n_rows, n_cols) in zip(arrs, shapes):
    out[:, row:row + n_rows, col:col + n_cols] = arr
    row += n_rows
    col += n_cols
  return out
from collections import OrderedDict
import numpy as np
from gym import spaces
from . import VecEnv
class DummyVecEnv(VecEnv):
    """
    Creates a simple vectorized wrapper for multiple environments.

    Environments are created eagerly and stepped sequentially in the current
    process — useful when the envs are cheap or multiprocessing is
    undesirable.

    :param env_fns: ([Gym Environment]) the list of environments to vectorize
    """

    def __init__(self, env_fns):
        self.envs = [fn() for fn in env_fns]
        env = self.envs[0]
        VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
        shapes, dtypes = {}, {}
        self.keys = []
        obs_space = env.observation_space
        self.dummy_env = env
        if isinstance(obs_space, spaces.Dict):
            assert isinstance(obs_space.spaces, OrderedDict)
            subspaces = obs_space.spaces
            # Goal-based envs expose compute_reward (and possibly a goal
            # extraction function); surface them on the vectorized wrapper.
            if env.compute_reward is not None:
                self.compute_reward = env.compute_reward
            if hasattr(env, 'goal_extraction_function') and env.goal_extraction_function is not None:
                self.goal_extraction_function = env.goal_extraction_function
        else:
            # Non-dict observation spaces are stored under the key None.
            subspaces = {None: obs_space}
        for key, box in subspaces.items():
            shapes[key] = box.shape
            dtypes[key] = box.dtype
            self.keys.append(key)
        # Pre-allocated per-env buffers for observations, dones and rewards.
        self.buf_obs = {k: np.zeros((self.num_envs, ) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys}
        # FIX: `np.bool` was a deprecated alias removed in NumPy 1.24; the
        # builtin `bool` is the correct dtype here.
        self.buf_dones = np.zeros((self.num_envs, ), dtype=bool)
        self.buf_rews = np.zeros((self.num_envs, ), dtype=np.float32)
        self.buf_infos = [{} for _ in range(self.num_envs)]
        self.actions = None

    def step_async(self, actions):
        self.actions = actions

    def step_wait(self):
        for env_idx in range(self.num_envs):
            obs, self.buf_rews[env_idx], self.buf_dones[env_idx], self.buf_infos[env_idx] =\
                self.envs[env_idx].step(self.actions[env_idx])
            if self.buf_dones[env_idx]:
                # Auto-reset finished envs; the returned obs is the new
                # episode's first observation.
                obs = self.envs[env_idx].reset()
            self._save_obs(env_idx, obs)
        if self.keys == [None]:
            return (np.copy(self._obs_from_buf()), np.copy(self.buf_rews), np.copy(self.buf_dones), self.buf_infos.copy())
        else:
            return ({k: np.copy(v)
                     for k, v in self._obs_from_buf().items()}, np.copy(self.buf_rews), np.copy(self.buf_dones),
                    self.buf_infos.copy())

    def reset(self):
        for env_idx in range(self.num_envs):
            obs = self.envs[env_idx].reset()
            self._save_obs(env_idx, obs)
        if self.keys == [None]:
            return np.copy(self._obs_from_buf())
        else:
            return {k: np.copy(v) for k, v in self._obs_from_buf().items()}

    def close(self):
        return

    def get_images(self):
        return [env.render(mode='rgb_array') for env in self.envs]

    def render(self, *args, **kwargs):
        if self.num_envs == 1:
            return self.envs[0].render(*args, **kwargs)
        else:
            return super().render(*args, **kwargs)

    def _save_obs(self, env_idx, obs):
        # Copy one env's observation into the shared buffers.
        for key in self.keys:
            if key is None:
                self.buf_obs[key][env_idx] = obs
            else:
                self.buf_obs[key][env_idx] = obs[key]

    def _obs_from_buf(self):
        if self.keys == [None]:
            return self.buf_obs[None]
        else:
            return self.buf_obs

    def get_attr(self, attr_name, indices=None):
        """Return attribute from vectorized environment (see base class)."""
        target_envs = self._get_target_envs(indices)
        return [getattr(env_i, attr_name) for env_i in target_envs]

    def set_attr(self, attr_name, value, indices=None):
        """Set attribute inside vectorized environments (see base class)."""
        target_envs = self._get_target_envs(indices)
        for env_i in target_envs:
            setattr(env_i, attr_name, value)

    def env_method(self, method_name, *method_args, indices=None, **method_kwargs):
        """Call instance methods of vectorized environments."""
        target_envs = self._get_target_envs(indices)
        return [getattr(env_i, method_name)(*method_args, **method_kwargs) for env_i in target_envs]

    def _get_target_envs(self, indices):
        indices = self._get_indices(indices)
        return [self.envs[i] for i in indices]
from abc import ABC, abstractmethod
import pickle
import cloudpickle
class AlreadySteppingError(Exception):
    """
    Raised when an asynchronous step is running while
    step_async() is called again.
    """

    def __init__(self):
        super().__init__('already running an async step')
class NotSteppingError(Exception):
    """
    Raised when an asynchronous step is not running but
    step_wait() is called.
    """

    def __init__(self):
        super().__init__('not running an async step')
class VecEnv(ABC):
    """
    An abstract asynchronous, vectorized environment.

    :param num_envs: (int) the number of environments
    :param observation_space: (Gym Space) the observation space
    :param action_space: (Gym Space) the action space
    """

    def __init__(self, num_envs, observation_space, action_space):
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space

    @abstractmethod
    def reset(self):
        """
        Reset all the environments and return an array of
        observations, or a dict of observation arrays (for goal envs).

        If step_async is still doing work, that work will
        be cancelled and step_wait() should not be called
        until step_async() is invoked again.

        :return: ([int] or [float] or dict) observation
        """
        pass

    @abstractmethod
    def step_async(self, actions):
        """
        Tell all the environments to start taking a step
        with the given actions.
        Call step_wait() to get the results of the step.

        You should not call this if a step_async run is
        already pending.
        """
        pass

    @abstractmethod
    def step_wait(self):
        """
        Wait for the step taken with step_async().

        :return: ([int] or [float] or dict, [float], [bool], dict) observation, reward, done, information
        """
        pass

    @abstractmethod
    def close(self):
        """
        Clean up the environment's resources.
        """
        pass

    def step(self, actions):
        """
        Step the environments with the given action
        (convenience wrapper around step_async + step_wait).

        :param actions: ([int] or [float]) the action
        :return: ([int] or [float] or dict, [float], [bool], dict) observation, reward, done, information
        """
        self.step_async(actions)
        return self.step_wait()

    def get_images(self):
        """
        Return RGB images from each environment
        """
        raise NotImplementedError

    def render(self, *args, **kwargs):
        """
        Gym environment rendering (no-op by default; subclasses override).

        :param mode: (str) the rendering type
        """
        pass

    @property
    def unwrapped(self):
        # Recursively unwrap VecEnvWrapper layers down to the base VecEnv.
        if isinstance(self, VecEnvWrapper):
            return self.venv.unwrapped
        else:
            return self

    def _get_indices(self, indices):
        """
        Convert a flexibly-typed reference to environment indices to an implied list of indices.

        :param indices: (None,int,Iterable) refers to indices of envs.
        :return: (list) the implied list of indices.
        """
        if indices is None:
            # None means "all environments".
            indices = range(self.num_envs)
        elif isinstance(indices, int):
            indices = [indices]
        return indices
class VecEnvWrapper(VecEnv):
    """
    Vectorized environment base class for wrappers: delegates stepping and
    rendering to the wrapped VecEnv.

    :param venv: (VecEnv) the vectorized environment to wrap
    :param observation_space: (Gym Space) the observation space (can be None to load from venv)
    :param action_space: (Gym Space) the action space (can be None to load from venv)
    """

    def __init__(self, venv, observation_space=None, action_space=None):
        self.venv = venv
        VecEnv.__init__(self, num_envs=venv.num_envs, observation_space=observation_space or venv.observation_space,
                        action_space=action_space or venv.action_space)

    def step_async(self, actions):
        self.venv.step_async(actions)

    @abstractmethod
    def reset(self):
        pass

    @abstractmethod
    def step_wait(self):
        pass

    def close(self):
        return self.venv.close()

    def render(self, *args, **kwargs):
        return self.venv.render(*args, **kwargs)

    def get_images(self):
        return self.venv.get_images()
class CloudpickleWrapper(object):
    """
    Wraps a value so that multiprocessing serializes it via cloudpickle
    (plain pickle cannot handle lambdas/closures such as env factory fns).

    :param var: (Any) the variable you wish to wrap for pickling with cloudpickle
    """

    def __init__(self, var):
        self.var = var

    def __getstate__(self):
        # Serialize with cloudpickle so arbitrary callables survive transport.
        return cloudpickle.dumps(self.var)

    def __setstate__(self, obs):
        # cloudpickle emits standard pickle data, so pickle.loads suffices.
        self.var = pickle.loads(obs)
import torch
import numpy as np
from mrl.utils.misc import AnnotatedAttrDict
from mrl.utils.schedule import LinearSchedule
# Base DQN configuration. Every entry is a (value, annotation) pair consumed by
# AnnotatedAttrDict, so `config.key` yields the value and the string documents it.
default_dqn_config = lambda: AnnotatedAttrDict(
    device=('cuda' if torch.cuda.is_available() else 'cpu', 'torch device (cpu or gpu)'),
    gamma=(0.99, 'discount factor'),
    qvalue_lr=(1e-3, 'Q-value learning rate'),
    qvalue_weight_decay=(0., 'weight decay to apply to qvalue'),
    optimize_every=(2, 'how often optimize is called, in terms of environment steps'),
    batch_size=(1000, 'batch size for training the Q-values'),
    warm_up=(10000, 'minimum steps in replay buffer needed to optimize'),
    initial_explore=(10000, 'whether to act randomly during warmup'),
    grad_norm_clipping=(-1, 'gradient norm clipping (implemented as backward hook)'),
    grad_value_clipping=(-1, 'gradient value clipping'),
    random_action_prob=(LinearSchedule(1.0, 0.1, 1e4), 'Epsilon decay schedule'),
    target_network_update_frac=(0.005, 'polyak averaging coefficient for target networks'),
    target_network_update_freq=(2, 'how often to update target networks; NOTE: TD3 uses this too!'),
    clip_target_range=((-np.inf, np.inf), 'q/value targets are clipped to this range'),
    go_eexplore=(0.1, 'epsilon exploration bonus from each point of go explore, when using intrinsic curiosity'),
    go_reset_percent=(0.025, 'probability to reset epsiode early for each point of go explore, when using intrinsic curiosity'),
    overshoot_goal_percent=(False, 'if using instrinsic goals, should goal be overshot on success?'),
    direct_overshoots=(False, 'if using overshooting, should it be directed in a straight line?'),
    dg_score_multiplier=(1., 'if using instrinsic goals, score multiplier for goal candidates that are in DG distribution'),
    # FIX: every other entry here is a (value, annotation) pair; the previous bare
    # (0.3, 0.7) would be split as value=0.3 / annotation=0.7, dropping the upper
    # threshold. Wrap the pair so the whole (decrease, increase) tuple is the value.
    cutoff_success_threshold=((0.3, 0.7), 'thresholds for decreasing/increasing the cutoff'),
    initial_cutoff=(-3, 'initial (and minimum) cutoff for intrinsic goal curiosity'),
    double_q=(False, 'Use Double DQN or not. Default: False'),
    activ=('gelu', 'activation to use for hidden layers in networks'),
    curiosity_beta=(-3., 'beta to use for curiosity_alpha module'),
    # Below are args to other modules (maybe should live in those modules?)
    seed=(0, 'random seed'),
    replay_size=(int(1e6), 'maximum size of replay buffer'),
    num_envs=(12, 'number of parallel envs to run'),
    log_every=(5000, 'how often to log things'),
    use_qvalue_target=(False, 'if true, use target network to act in the environment'),
    her=('futureactual_2_2', 'strategy to use for hindsight experience replay'),
    prioritized_mode=('none', 'buffer prioritization strategy'),
    future_warm_up=(20000, 'minimum steps in replay buffer needed to stop doing ONLY future sampling'),
    sparse_reward_shaping=(0., 'coefficient of euclidean distance reward shaping in sparse goal envs'),
    n_step_returns=(1, 'if using n-step returns, how many steps?'),
    slot_based_state=(False, 'if state is organized by slot; i.e., [batch_size, num_slots, slot_feats]')
)
def dqn_config():
    """DQN preset: the default discrete-action config with tuned optimization settings."""
    config = default_dqn_config()
    config.gamma = 0.98
    config.qvalue_lr = 1e-4
    config.qvalue_weight_decay = 0.
    # Hard-ish target updates: large polyak step every 40 optimization steps.
    config.target_network_update_freq = 40
    config.target_network_update_frac = 0.05
    config.optimize_every = 2
    config.batch_size = 1000
    config.warm_up = 2500
    # Epsilon decays linearly from 1.0 to 0.1 over 1e4 steps.
    config.random_action_prob = LinearSchedule(1.0, 0.1, 1e4)
    config.replay_size = int(1e6)
    return config | /rrt_ml-0.0.8-py3-none-any.whl/rrt_ml/deps/libraries/mrl/configs/discrete_off_policy.py | 0.841923 | 0.592784 | discrete_off_policy.py | pypi |
import torch
import numpy as np
from mrl.utils.misc import AnnotatedAttrDict
# Base DDPG/TD3/SAC configuration. Every entry is a (value, annotation) pair
# consumed by AnnotatedAttrDict: `config.key` yields the value, the string documents it.
default_ddpg_config = lambda: AnnotatedAttrDict(
    device=('cuda' if torch.cuda.is_available() else 'cpu', 'torch device (cpu or gpu)'),
    gamma=(0.99, 'discount factor'),
    actor_lr=(1e-3, 'actor learning rate'),
    critic_lr=(1e-3, 'critic learning rate'),
    actor_weight_decay=(0., 'weight decay to apply to actor'),
    action_l2_regularization=(1e-2, 'l2 penalty for action norm'),
    critic_weight_decay=(0., 'weight decay to apply to critic'),
    optimize_every=(2, 'how often optimize is called, in terms of environment steps'),
    batch_size=(2000, 'batch size for training the actors/critics'),
    warm_up=(10000, 'minimum steps in replay buffer needed to optimize'),
    initial_explore=(10000, 'steps that actor acts randomly for at beginning of training'),
    grad_norm_clipping=(-1., 'gradient norm clipping'),
    grad_value_clipping=(-1., 'gradient value clipping'),
    target_network_update_frac=(0.005, 'polyak averaging coefficient for target networks'),
    target_network_update_freq=(1, 'how often to update target networks; NOTE: TD3 uses this too!'),
    clip_target_range=((-np.inf, np.inf), 'q/value targets are clipped to this range'),
    td3_noise=(0.1, 'noise added to next step actions in td3'),
    td3_noise_clip=(0.3, 'amount to which next step noise in td3 is clipped'),
    td3_delay=(2, 'how often the actor is trained, in terms of critic training steps, in td3'),
    entropy_coef=(0.2, 'Entropy regularization coefficient for SAC'),
    policy_opt_noise=(0., 'how much policy noise to add to actor optimization'),
    action_noise=(0.1, 'maximum std of action noise'),
    eexplore=(0., 'how often to do completely random exploration (overrides action noise)'),
    go_eexplore=(0.1, 'epsilon exploration bonus from each point of go explore, when using intrinsic curiosity'),
    go_reset_percent=(0., 'probability to reset episode early for each point of go explore, when using intrinsic curiosity'),
    overshoot_goal_percent=(0., 'if using instrinsic FIRST VISIT goals, should goal be overshot on success?'),
    direct_overshoots=(False, 'if using overshooting, should it be directed in a straight line?'),
    dg_score_multiplier=(1., 'if using instrinsic goals, score multiplier for goal candidates that are in DG distribution'),
    # FIX: every other entry here is a (value, annotation) pair; the previous bare
    # (0.3, 0.7) would be split as value=0.3 / annotation=0.7, dropping the upper
    # threshold. Wrap the pair so the whole (decrease, increase) tuple is the value.
    cutoff_success_threshold=((0.3, 0.7), 'thresholds for decreasing/increasing the cutoff'),
    initial_cutoff=(-3, 'initial (and minimum) cutoff for intrinsic goal curiosity'),
    activ=('gelu', 'activation to use for hidden layers in networks'),
    curiosity_beta=(-3., 'beta to use for curiosity_alpha module'),
    sigma_l2_regularization=(0., 'l2 regularization on sigma critics log variance'),
    # Below are args to other modules (maybe should live in those modules?)
    seed=(0, 'random seed'),
    replay_size=(int(1e6), 'maximum size of replay buffer'),
    save_replay_buf=(False, 'save replay buffer checkpoint during training?'),
    num_envs=(12, 'number of parallel envs to run'),
    num_eval_envs=(1, 'number of parallel eval envs to run'),
    log_every=(5000, 'how often to log things'),
    varied_action_noise=(False, 'if true, action noise for each env in vecenv is interpolated between 0 and action noise'),
    use_actor_target=(False, 'if true, use actor target network to act in the environment'),
    her=('futureactual_2_2', 'strategy to use for hindsight experience replay'),
    prioritized_mode=('none', 'buffer prioritization strategy'),
    future_warm_up=(25000, 'minimum steps in replay buffer needed to stop doing ONLY future sampling'),
    sparse_reward_shaping=(0., 'coefficient of euclidean distance reward shaping in sparse goal envs'),
    n_step_returns=(1, 'if using n-step returns, how many steps?'),
    slot_based_state=(False, 'if state is organized by slot; i.e., [batch_size, num_slots, slot_feats]')
)
def protoge_config():
    """Protoge preset: the default DDPG config with tuned exploration / HER settings."""
    config = default_ddpg_config()
    overrides = dict(
        gamma=0.98,
        actor_lr=1e-3,
        critic_lr=1e-3,
        actor_weight_decay=0.,
        action_l2_regularization=1e-1,
        target_network_update_freq=40,
        target_network_update_frac=0.05,
        optimize_every=1,
        batch_size=2000,
        warm_up=2500,
        initial_explore=5000,
        replay_size=int(1e6),
        clip_target_range=(-50., 0.),
        action_noise=0.1,
        eexplore=0.1,
        go_eexplore=0.1,
        go_reset_percent=0.,
        her='rfaab_1_4_3_1_1',
        grad_value_clipping=5.,
    )
    # setattr is exactly equivalent to `config.key = value` attribute assignment.
    for key, value in overrides.items():
        setattr(config, key, value)
    return config
def best_slide_config():
    """Slide-task preset: protoge config with a bigger buffer and softer clipping."""
    config = protoge_config()
    overrides = dict(
        batch_size=1000,
        eexplore=0.2,
        action_noise=0.1,
        grad_value_clipping=-1,
        her='futureactual_2_2',
        replay_size=int(2.5e6),
        initial_explore=10000,
        warm_up=5000,
        action_l2_regularization=1e-2,
        optimize_every=2,
        target_network_update_freq=10,
        activ='relu',
    )
    # setattr is exactly equivalent to `config.key = value` attribute assignment.
    for key, value in overrides.items():
        setattr(config, key, value)
    return config
def protoge_td3_config():
    """Protoge-style preset for TD3, built on the default DDPG config."""
    config = default_ddpg_config()
    overrides = dict(
        gamma=0.99,
        actor_lr=1e-3,
        critic_lr=1e-3,
        actor_weight_decay=0.,
        target_network_update_freq=40,
        target_network_update_frac=0.05,
        optimize_every=2,
        batch_size=1000,
        warm_up=2500,
        replay_size=int(1e6),
        action_noise=0.1,
        eexplore=0.1,
        grad_value_clipping=5.,
    )
    # setattr is exactly equivalent to `config.key = value` attribute assignment.
    for key, value in overrides.items():
        setattr(config, key, value)
    return config
def spinning_up_td3_config():
    """TD3 preset mirroring the Spinning Up defaults.

    Network details handled elsewhere: hidden sizes = (400, 300), no layer norm.
    """
    config = default_ddpg_config()
    overrides = dict(
        gamma=0.99,
        replay_size=int(1e6),
        target_network_update_frac=0.005,
        target_network_update_freq=2,
        actor_lr=1e-3,
        critic_lr=1e-3,
        action_noise=0.1,
        td3_noise=0.2,
        td3_noise_clip=0.5,
        td3_delay=2,
        batch_size=100,
        warm_up=1000,
        optimize_every=1,
        action_l2_regularization=0,
        activ='relu',
    )
    # setattr is exactly equivalent to `config.key = value` attribute assignment.
    for key, value in overrides.items():
        setattr(config, key, value)
    return config
def spinning_up_sac_config():
    """SAC preset mirroring the Spinning Up defaults."""
    config = default_ddpg_config()
    config.gamma = 0.99
    config.replay_size = int(1e6)
    config.target_network_update_frac = 0.005
    config.target_network_update_freq = 1
    config.actor_lr = 1e-3
    config.critic_lr = 1e-3
    config.action_noise = 0.1
    # NOTE(review): the td3_* settings below look copy-pasted from the TD3 preset;
    # presumably SAC ignores them -- confirm before removing.
    config.td3_noise = 0.2
    config.td3_noise_clip = 0.5
    config.td3_delay = 2
    config.batch_size = 100
    config.warm_up = 1000
    config.optimize_every = 1
    config.action_l2_regularization = 0
    config.activ = 'relu'
    # hidden sizes = (256, 256)
    # no layer norm
    return config
def spinning_up_ddpg_config():
    """DDPG preset: the Spinning Up TD3 preset with per-step target updates."""
    config = spinning_up_td3_config()
    # setattr is exactly equivalent to `config.key = value` attribute assignment.
    for key, value in (('target_network_update_freq', 1), ('activ', 'relu')):
        setattr(config, key, value)
    return config
def td3_config():
    """TD3 preset: the Spinning Up preset with smaller learning rates and a larger batch."""
    config = spinning_up_td3_config()
    config.actor_lr = 3e-4
    config.critic_lr = 3e-4
    config.batch_size = 256
    # hidden sizes = (256, 256)
    # no layer norm
    return config | /rrt_ml-0.0.8-py3-none-any.whl/rrt_ml/deps/libraries/mrl/configs/continuous_off_policy.py | 0.770939 | 0.528533 | continuous_off_policy.py | pypi |
import mrl
import gym
from mrl.replays.core.replay_buffer import ReplayBuffer as Buffer
from typing import Optional
import numpy as np
import torch
import pickle
import os
class OldReplayBuffer(mrl.Module):
    """Uniform-sampling replay buffer agent module (superseded for goal envs by OnlineHERBuffer)."""

    def __init__(self):
        """
        A standard replay buffer (no prioritization / fancy stuff).
        """
        super().__init__('replay_buffer', required_agent_modules=['env'], locals=locals())
        # All of the below are populated lazily in _setup().
        self.size = None
        self.goal_space = None
        self.hindsight_buffer = None
        self.buffer = None
        self.save_buffer = None

    def _setup(self):
        # Allocate the underlying ring buffers once env and config are attached.
        self.size = self.config.replay_size
        env = self.env
        if type(env.observation_space) == gym.spaces.Dict:
            observation_space = env.observation_space.spaces["observation"]
            self.goal_space = env.observation_space.spaces["desired_goal"]
            # Goal-based (Dict) observation spaces are explicitly rejected.
            raise NotImplementedError("This buffer no longer supports goal spaces; use OnlineHERBuffer")
        else:
            observation_space = env.observation_space
        items = [("state", observation_space.shape),
                 ("action", env.action_space.shape), ("reward", (1,)),
                 ("next_state", observation_space.shape), ("done", (1,))]
        self.buffer = Buffer(self.size, items)

    def _process_experience(self, experience):
        # Store one batched (vec-env) transition per call.
        # NOTE(review): getattr(self, 'logger') has no default, so this raises
        # AttributeError rather than skipping when no logger is attached --
        # presumably a logger module is always present; confirm before relying on it.
        if getattr(self, 'logger'):
            self.logger.add_tabular('Replay buffer size', len(self.buffer))
        done = np.expand_dims(experience.done, 1)  # format for replay buffer
        reward = np.expand_dims(experience.reward, 1)  # format for replay buffer
        action = experience.action
        state = experience.state
        next_state = experience.next_state
        self.buffer.add_batch(state, action, reward, next_state, done)

    def sample(self, batch_size, to_torch=True):
        """Sample a uniform batch; returns (states, actions, rewards, next_states, gammas)."""
        states, actions, rewards, next_states, dones = self.buffer.sample(batch_size)
        # Fold termination into the discount: gamma is zeroed on terminal steps.
        gammas = self.config.gamma * (1-dones)
        if hasattr(self, 'state_normalizer'):
            # Normalize with frozen statistics (update=False) at sample time.
            states = self.state_normalizer(states, update=False).astype(np.float32)
            next_states = self.state_normalizer(next_states, update=False).astype(np.float32)
        if to_torch:
            return (self.torch(states), self.torch(actions),
                    self.torch(rewards), self.torch(next_states),
                    self.torch(gammas))
        else:
            return (states, actions, rewards, next_states, gammas)

    def __len__(self):
        return len(self.buffer)

    def save(self, save_folder):
        # Buffers are large, so checkpointing is opt-in via config or the save_buffer flag.
        if self.config.save_replay_buf or self.save_buffer:
            state = self.buffer._get_state()
            with open(os.path.join(save_folder, "{}.pickle".format(self.module_name)), 'wb') as f:
                pickle.dump(state, f)

    def load(self, save_folder):
        load_path = os.path.join(save_folder, "{}.pickle".format(self.module_name))
        if os.path.exists(load_path):
            with open(load_path, 'rb') as f:
                state = pickle.load(f)
            self.buffer._set_state(state)
        else:
            # Loud multi-line warning: training continues with an empty buffer.
            self.logger.log_color('###############################################################', '', color='red')
            self.logger.log_color('WARNING', 'Replay buffer is not being loaded / was not saved.', color='cyan')
            self.logger.log_color('WARNING', 'Replay buffer is not being loaded / was not saved.', color='red')
            self.logger.log_color('WARNING', 'Replay buffer is not being loaded / was not saved.', color='yellow')
            self.logger.log_color('###############################################################', '', color='red') | /rrt_ml-0.0.8-py3-none-any.whl/rrt_ml/deps/libraries/mrl/replays/old_replay_buffer.py | 0.798462 | 0.228382 | old_replay_buffer.py | pypi |
import mrl
import numpy as np
import gym
from mrl.replays.core.replay_buffer import RingBuffer
from mrl.utils.misc import AttrDict
import pickle
import os
from sklearn import mixture
from scipy.stats import rankdata
class EntropyPrioritizedOnlineHERBuffer(mrl.Module):
    """Prioritized-replay helper that scores whole trajectories by an entropy surrogate
    (negative log-likelihood under a Bayesian GMM over achieved-goal trajectories)."""

    def __init__(
        self,
        module_name='prioritized_replay',
        rank_method='dense',
        temperature=1.0
    ):
        """
        Buffer that stores entropy of trajectories for prioritized replay
        :param module_name: agent-module key under which this module registers
        :param rank_method: scipy.stats.rankdata method ('none' disables rank-based sampling)
        :param temperature: softening exponent when converting priorities to probabilities
        """
        super().__init__(module_name, required_agent_modules=['env','replay_buffer'], locals=locals())
        self.goal_space = None
        self.buffer = None
        self.rank_method = rank_method
        self.temperature = temperature
        # Set from the first completed trajectory; all trajectories must share it.
        self.traj_len = None
        self.has_fit_density = False

    def _setup(self):
        # Achieved-goal ring buffer owned by the main replay buffer.
        self.ag_buffer = self.replay_buffer.buffer.BUFF.buffer_ag
        env = self.env
        assert type(env.observation_space) == gym.spaces.Dict
        self.goal_space = env.observation_space.spaces["desired_goal"]
        # Note: for now we apply entropy estimation on the achieved goal (ag) space
        # Define the buffers to store for prioritization
        items = [("entropy", (1,)), ("priority", (1,))]
        self.buffer = AttrDict()
        for name, shape in items:
            self.buffer['buffer_' + name] = RingBuffer(self.ag_buffer.maxlen, shape=shape)
        # One in-progress trajectory accumulator per parallel env.
        self._subbuffers = [[] for _ in range(self.env.num_envs)]
        self.n_envs = self.env.num_envs
        # Define the placeholder for mixture model to estimate trajectory
        # (an int sentinel means "not fitted yet"; see isinstance check below).
        self.clf = 0

    def fit_density_model(self):
        """One-time fit of a Bayesian GMM over full achieved-goal trajectories."""
        if not self.has_fit_density:
            self.has_fit_density = True
            ag = self.ag_buffer.data[0:self.size].copy()
            # One row per episode: flatten each trajectory of achieved goals.
            X_train = ag.reshape(-1, self.traj_len * ag.shape[-1])  # [num_episodes, episode_len * goal_dim]
            self.clf = mixture.BayesianGaussianMixture(weight_concentration_prior_type="dirichlet_distribution", n_components=3)
            self.clf.fit(X_train)
            # Negative log-likelihood serves as the per-trajectory entropy surrogate.
            pred = -self.clf.score_samples(X_train)
            # Shift/clip/normalize; the same constants are reused for later trajectories.
            self.pred_min = pred.min()
            pred = pred - self.pred_min
            pred = np.clip(pred, 0, None)
            self.pred_sum = pred.sum()
            pred = pred / self.pred_sum
            self.pred_avg = (1 / pred.shape[0])
            # Broadcast each episode's score to all of its transitions.
            pred = np.repeat(pred, self.traj_len, axis=0)
            self.buffer.buffer_entropy.data[:self.size] = pred.reshape(-1,1).copy()

    def _process_experience(self, exp):
        # Compute the entropy
        # TODO: Include previous achieved goal too? or use that instead of ag?
        achieved = exp.next_state['achieved_goal']
        for i in range(self.n_envs):
            self._subbuffers[i].append([achieved[i]])
        for i in range(self.n_envs):
            if exp.trajectory_over[i]:
                # TODO: Compute the entropy of the trajectory
                traj_len = len(self._subbuffers[i])
                if self.traj_len is None:
                    self.traj_len = traj_len
                else:
                    # Current implementation assumes the same length for all trajectories
                    assert(traj_len == self.traj_len)
                if not isinstance(self.clf, int):
                    # Density model is fitted: score this trajectory with the same
                    # shift/clip/normalize constants used in fit_density_model.
                    ag = [np.stack(a) for a in zip(*self._subbuffers[i])][0]  # [episode_len, goal_dim]
                    X = ag.reshape(-1, ag.shape[0]*ag.shape[1])
                    pred = -self.clf.score_samples(X)
                    pred = pred - self.pred_min
                    pred = np.clip(pred, 0, None)
                    pred = pred / self.pred_sum  # Shape (1,)
                    entropy = np.ones((traj_len,1)) * pred
                else:
                    # Not enough data to train mixture density yet, set entropy to be zero
                    entropy = np.zeros((traj_len, 1))
                priority = np.zeros((traj_len,1))
                trajectory = [entropy, priority]
                # TODO: Update the trajectory with entropy
                self.add_trajectory(*trajectory)
                self._subbuffers[i] = []
                # TODO: Update the rank here before adding it to the trajectory?
                self.update_priority()

    def add_trajectory(self, *items):
        """
        Append a trajectory of transitions to the buffer.
        :param items: a list of batched transition values to append to the replay buffer,
            in the item order that we initialized the ReplayBuffer with.
        """
        for buffer, batched_values in zip(self.buffer.values(), items):
            buffer.append_batch(batched_values)

    def update_priority(self):
        """
        After adding a trajectory to the replay buffer, update the ranking of transitions
        """
        # Note: 'dense' assigns the next highest element with the rank immediately
        # after those assigned to the tied elements.
        entropy_transition_total = self.buffer.buffer_entropy.data[:self.size]
        entropy_rank = rankdata(entropy_transition_total, method=self.rank_method)
        # rankdata is 1-based; shift to 0-based and store as a column vector.
        entropy_rank = (entropy_rank - 1).reshape(-1, 1)
        self.buffer.buffer_priority.data[:self.size] = entropy_rank

    def __call__(self, batch_size):
        """
        Samples batch_size number of indices from main replay_buffer.
        Args:
            batch_size (int): size of the batch to sample
        Returns:
            batch_idxs: a 1-D numpy array of length batch_size containing indices
                sampled in prioritized manner
        """
        if self.rank_method == 'none':
            entropy_trajectory = self.buffer.buffer_entropy.data[:self.size]
        else:
            entropy_trajectory = self.buffer.buffer_priority.data[:self.size]
        # Factorize out sampling into sampling trajectory according to priority/entropy
        # then sample time uniformly independently
        entropy_trajectory = entropy_trajectory.reshape(-1, self.traj_len)[:,0]
        p_trajectory = np.power(entropy_trajectory, 1/(self.temperature+1e-2))
        # If the density model hasn't been fitted yet, we have p_trajectory all 0's
        # And hence treat them as uniform:
        if not self.has_fit_density:
            p_trajectory = np.ones(p_trajectory.shape) / len(p_trajectory)
        else:
            assert(p_trajectory.sum() != 0.0)
            p_trajectory = p_trajectory / p_trajectory.sum()
        num_trajectories = p_trajectory.shape[0]
        # Two-stage sampling: pick trajectories by priority, then timesteps uniformly.
        batch_tidx = np.random.choice(num_trajectories, size=batch_size, p=p_trajectory)
        batch_idxs = self.traj_len * batch_tidx + np.random.choice(self.traj_len, size=batch_size)
        return batch_idxs

    @property
    def size(self):
        # Number of stored transitions, mirrored from the achieved-goal buffer.
        return len(self.ag_buffer)

    def save(self, save_folder):
        # Checkpointing is opt-in (buffers can be very large).
        if self.config.save_replay_buf:
            state = self.buffer._get_state()
            with open(os.path.join(save_folder, "{}.pickle".format(self.module_name)), 'wb') as f:
                pickle.dump(state, f)

    def load(self, save_folder):
        load_path = os.path.join(save_folder, "{}.pickle".format(self.module_name))
        if os.path.exists(load_path):
            with open(load_path, 'rb') as f:
                state = pickle.load(f)
            self.buffer._set_state(state)
        else:
            # Loud multi-line warning: training continues without restored priorities.
            self.logger.log_color('###############################################################', '', color='red')
            self.logger.log_color('WARNING', 'Replay buffer is not being loaded / was not saved.', color='cyan')
            self.logger.log_color('WARNING', 'Replay buffer is not being loaded / was not saved.', color='red')
            self.logger.log_color('WARNING', 'Replay buffer is not being loaded / was not saved.', color='yellow')
            self.logger.log_color('###############################################################', '', color='red') | /rrt_ml-0.0.8-py3-none-any.whl/rrt_ml/deps/libraries/mrl/replays/prioritized_replay.py | 0.685529 | 0.337013 | prioritized_replay.py | pypi |
import numpy as np
from collections import OrderedDict
from mrl.utils.misc import AttrDict
from multiprocessing import RawValue
class RingBuffer(object):
    """This is a collections.deque in numpy, with pre-allocated memory"""

    def __init__(self, maxlen, shape, dtype=np.float32, data=None):
        """
        A buffer object, when full restarts at the initial position
        :param maxlen: (int) the max number of numpy objects to store
        :param shape: (tuple) the shape of the numpy objects you want to store
        :param dtype: (str) the name of the type of the numpy object you want to store
        """
        self.maxlen = maxlen
        # RawValue('L') gives unsigned-long counters shareable across processes.
        self.start = RawValue('L')
        self.length = RawValue('L')
        self.shape = shape
        if data is not None:
            # Adopt a caller-provided backing array (must match exactly).
            assert data.shape == (maxlen,) + shape
            assert data.dtype == dtype
            self.data = data
        else:
            self.data = np.zeros((maxlen,) + shape, dtype=dtype)

    def _get_state(self):
        # Unwrap the logical contents (oldest first) for checkpointing.
        stop = self.start.value + self.length.value
        order = range(self.start.value, stop)
        return self.start.value, self.length.value, self.data.take(order, axis=0, mode='wrap')

    def _set_state(self, start, length, data):
        # Restore the unwrapped contents, then rotate so element 0 sits at `start`.
        self.start.value = start
        self.length.value = length
        self.data[:length] = data
        self.data = np.roll(self.data, start, axis=0)

    def __len__(self):
        return self.length.value

    def __getitem__(self, idx):
        # Logical indexing: 0 is the oldest stored element.
        if not 0 <= idx < self.length.value:
            raise KeyError()
        return self.data[(self.start.value + idx) % self.maxlen]

    def get_batch(self, idxs):
        """
        get the value at the indexes
        :param idxs: (int or numpy int) the indexes
        :return: (np.ndarray) the stored information in the buffer at the asked positions
        """
        # Wraps modulo the *current* length (matches historical behavior; when the
        # buffer is full, length == maxlen so this is the usual ring arithmetic).
        return self.data[(self.start.value + idxs) % self.length.value]

    def append(self, var):
        """
        Append an object to the buffer
        :param var: (np.ndarray) the object you wish to add
        """
        if self.length.value == self.maxlen:
            # Full: overwrite the oldest slot by advancing the start pointer.
            self.start.value = (self.start.value + 1) % self.maxlen
        elif self.length.value < self.maxlen:
            # Space left: simply grow.
            self.length.value += 1
        else:
            # length > maxlen should be impossible.
            raise RuntimeError()
        self.data[(self.start.value + self.length.value - 1) % self.maxlen] = var

    def _append_batch_with_space(self, var):
        """
        Append a batch of objects to the buffer, *assuming* there is space.
        :param var: (np.ndarray) the batched objects you wish to add
        """
        n = len(var)
        first = (self.start.value + self.length.value) % self.maxlen
        self.data[first:first + n] = var
        if self.length.value == self.maxlen:
            # Already full: the write overwrote the oldest n entries.
            self.start.value = (self.start.value + n) % self.maxlen
        else:
            self.length.value += n
            assert self.length.value <= self.maxlen, "this should never happen!"
        return np.arange(first, first + n)

    def append_batch(self, var):
        """
        Append a batch of objects to the buffer.
        :param var: (np.ndarray) the batched objects you wish to add
        """
        n = len(var)
        assert n < self.maxlen, 'trying to add a batch that is too big!'
        first = (self.start.value + self.length.value) % self.maxlen
        if first + n > self.maxlen:
            # The write wraps past the physical end: split into tail + head writes.
            head, tail = np.split(var, [self.maxlen - first])
            part1 = self._append_batch_with_space(head)
            part2 = self._append_batch_with_space(tail)
            return np.concatenate((part1, part2))
        return self._append_batch_with_space(var)
class ReplayBuffer(object):
    """Transition store: one named RingBuffer per item (state, action, reward, ...)."""

    def __init__(self, limit, item_shape, dtypes=None):
        """
        The replay buffer object
        :param limit: (int) the max number of transitions to store
        :param item_shape: a list of tuples of (str) item name and (tuple) the shape for item
            Ex: [("observations0", env.observation_space.shape),\
                ("actions",env.action_space.shape),\
                ("rewards", (1,)),\
                ("observations1",env.observation_space.shape ),\
                ("terminals1", (1,))]
        :param dtypes: list of dtype tuples; useful for storing things as float16.
        """
        self.limit = limit
        self.items = OrderedDict()
        # Default: store and return float32 for every item.
        if dtypes is None:
            dtypes = [(np.float32, np.float32)] * len(item_shape)
        # in_types: storage dtype; out_types: dtype returned by sample().
        self.in_types, self.out_types = zip(*dtypes)
        for (name, shape), dtype in zip(item_shape, self.in_types):
            self.items[name] = RingBuffer(limit, shape=shape, dtype=dtype)

    def sample(self, batch_size):
        """
        sample a random batch from the buffer
        :param batch_size: (int) the number of element to sample for the batch
        :return: (list) the sampled batch
        """
        if self.size==0:
            return []
        # Uniform sampling with replacement; all item buffers share the indices.
        batch_idxs = np.random.randint(self.size, size=batch_size)
        transition = []
        for buf, dtype in zip(self.items.values(), self.out_types):
            item = buf.get_batch(batch_idxs).astype(dtype)
            transition.append(item)
        return transition

    def add(self, *items):
        """
        Appends a single transition to the buffer
        :param items: a list of values for the transition to append to the replay buffer,
            in the item order that we initialized the ReplayBuffer with.
        """
        for buf, value in zip(self.items.values(), items):
            buf.append(value)

    def add_batch(self, *items):
        """
        Append a batch of transitions to the buffer.
        :param items: a list of batched transition values to append to the replay buffer,
            in the item order that we initialized the ReplayBuffer with.
        """
        # NOTE(review): `(items[0].shape) == 1` compares a tuple to 1 and is always
        # False -- this looks like it was meant to be `len(items[0].shape) == 1`.
        # Size-1 batches are still routed to add() via the len() check.
        if (items[0].shape) == 1 or len(items[0]) == 1:
            self.add(*items)
            return
        for buf, batched_values in zip(self.items.values(), items):
            idxs = buf.append_batch(batched_values)
        # Indices of the last item's write; all buffers wrote the same positions.
        return idxs

    def __len__(self):
        return self.size

    def _get_state(self):
        # Checkpoint: per-item RingBuffer state keyed by item name.
        d = dict()
        for item, buf in self.items.items():
            d[item] = buf._get_state()
        return d

    def _set_state(self, d):
        for item, buf in self.items.items():
            buf._set_state(*d[item])

    @property
    def size(self):
        # Get the size of the RingBuffer on the first item type
        return len(next(iter(self.items.values()))) | /rrt_ml-0.0.8-py3-none-any.whl/rrt_ml/deps/libraries/mrl/replays/core/replay_buffer.py | 0.788909 | 0.520984 | replay_buffer.py | pypi |
import torch
import logging
import warnings
import torch.nn as nn
import torch.nn.functional as F
from ._base import BaseModule, BaseClassifier, BaseRegressor
from ._base import torchensemble_model_doc
from .utils import io
from .utils import set_module
from .utils import operator as op
from .utils.logging import get_tb_logger
__all__ = ["FastGeometricClassifier", "FastGeometricRegressor"]
__fit_doc = """
Parameters
----------
train_loader : torch.utils.data.DataLoader
A :mod:`DataLoader` container that contains the training data.
cycle : int, default=4
The number of cycles used to build each base estimator in the ensemble.
lr_1 : float, default=5e-2
``alpha_1`` in original paper used to adjust the learning rate, also
serves as the initial learning rate of the internal optimizer.
lr_2 : float, default=1e-4
``alpha_2`` in original paper used to adjust the learning rate, also
serves as the smallest learning rate of the internal optimizer.
epochs : int, default=100
The number of training epochs used to fit the dummy base estimator.
log_interval : int, default=100
The number of batches to wait before logging the training status.
test_loader : torch.utils.data.DataLoader, default=None
A :mod:`DataLoader` container that contains the evaluating data.
- If ``None``, no validation is conducted after each real base
estimator being generated.
- If not ``None``, the ensemble will be evaluated on this
dataloader after each base estimator being generated.
save_model : bool, default=True
Specify whether to save the model parameters.
- If test_loader is ``None``, the ensemble fully trained will be
saved.
- If test_loader is not ``None``, the ensemble with the best
validation performance will be saved.
save_dir : string, default=None
Specify where to save the model parameters.
- If ``None``, the model will be saved in the current directory.
- If not ``None``, the model will be saved in the specified
directory: ``save_dir``.
"""
def _fast_geometric_model_doc(header, item="fit"):
    """
    Decorator on obtaining documentation for different fast geometric models.
    """
    def get_doc(item):
        """Return selected item"""
        lookup = {"fit": __fit_doc}
        return lookup[item]

    def adddoc(cls):
        # Prepend the caller-supplied header, then append the shared doc body.
        pieces = [header + "\n\n"]
        pieces.extend(get_doc(item))
        cls.__doc__ = "".join(pieces)
        return cls

    return adddoc
class _BaseFastGeometric(BaseModule):
    """Shared machinery for Fast Geometric Ensembling (FGE) classifiers/regressors."""

    def __init__(
        self, estimator, n_estimators, estimator_args=None, cuda=True
    ):
        # NOTE(review): super(BaseModule, self) deliberately skips BaseModule.__init__
        # and calls the next class in the MRO -- presumably to avoid BaseModule's own
        # setup; confirm against the sibling ensemble classes before changing.
        super(BaseModule, self).__init__()
        self.base_estimator_ = estimator
        self.n_estimators = n_estimators
        self.estimator_args = estimator_args
        if estimator_args and not isinstance(estimator, type):
            # estimator_args only applies when `estimator` is a class to instantiate.
            msg = (
                "The input `estimator_args` will have no effect since"
                " `estimator` is already an object after instantiation."
            )
            warnings.warn(msg, RuntimeWarning)
        self.device = torch.device("cuda" if cuda else "cpu")
        self.logger = logging.getLogger()
        self.tb_logger = get_tb_logger()
        self.estimators_ = nn.ModuleList()
        self.use_scheduler_ = False

    def _forward(self, *x):
        """
        Implementation on the internal data forwarding in fast geometric
        ensemble.
        """
        # Average
        results = [estimator(*x) for estimator in self.estimators_]
        output = op.average(results)
        return output

    def _adjust_lr(
        self, optimizer, epoch, i, n_iters, cycle, alpha_1, alpha_2
    ):
        """
        Set the internal learning rate scheduler for fast geometric ensemble.
        Please refer to the original paper for details.
        """
        def scheduler(i):
            # Triangular schedule within each cycle: ramp from alpha_1 toward
            # alpha_2 during the first half, then back during the second half.
            t = ((epoch % cycle) + i) / cycle
            if t < 0.5:
                return alpha_1 * (1.0 - 2.0 * t) + alpha_2 * 2.0 * t
            else:
                return alpha_1 * (2.0 * t - 1.0) + alpha_2 * (2.0 - 2.0 * t)

        # `i / n_iters` is the fractional progress through the current epoch.
        lr = scheduler(i / n_iters)
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr
        return lr

    @torchensemble_model_doc(
        """Set the attributes on optimizer for Fast Geometric Ensemble.""",
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        # Only stored here; optimizer objects are created later during fit().
        self.optimizer_name = optimizer_name
        self.optimizer_args = kwargs

    @torchensemble_model_doc(
        """Set the attributes on scheduler for Fast Geometric Ensemble.""",
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        msg = (
            "The learning rate scheduler for fast geometirc ensemble will"
            " only be used in the first stage on building the dummy base"
            " estimator."
        )
        warnings.warn(msg, UserWarning)
        self.scheduler_name = scheduler_name
        self.scheduler_args = kwargs
        self.use_scheduler_ = True
@torchensemble_model_doc(
"""Implementation on the FastGeometricClassifier.""", "seq_model"
)
class FastGeometricClassifier(_BaseFastGeometric, BaseClassifier):
@torchensemble_model_doc(
    """Implementation on the data forwarding in FastGeometricClassifier.""",  # noqa: E501
    "classifier_forward",
)
def forward(self, *x):
    # Average the base estimators' outputs, then normalize to class probabilities.
    proba = self._forward(*x)
    return F.softmax(proba, dim=1)
@torchensemble_model_doc(
    (
        """Set the attributes on optimizer for FastGeometricClassifier. """
        + """Notice that keyword arguments specified here will also be """
        + """used in the ensembling stage except the learning rate.."""
    ),
    "set_optimizer",
)
def set_optimizer(self, optimizer_name, **kwargs):
    # Pure delegation; the override exists to attach a class-specific docstring.
    super().set_optimizer(optimizer_name=optimizer_name, **kwargs)
@torchensemble_model_doc(
    (
        """Set the attributes on scheduler for FastGeometricClassifier. """
        + """Notice that this scheduler will only be used in the stage on """  # noqa: E501
        + """fitting the dummy base estimator."""
    ),
    "set_scheduler",
)
def set_scheduler(self, scheduler_name, **kwargs):
    # Pure delegation; the override exists to attach a class-specific docstring.
    super().set_scheduler(scheduler_name=scheduler_name, **kwargs)
@torchensemble_model_doc(
    """Set the training criterion for FastGeometricClassifier.""",
    "set_criterion",
)
def set_criterion(self, criterion):
    # Pure delegation; the override exists to attach a class-specific docstring.
    super().set_criterion(criterion)
@_fast_geometric_model_doc(
"""Implementation on the training stage of FastGeometricClassifier.""", # noqa: E501
"fit",
)
def fit(
self,
train_loader,
cycle=4,
lr_1=5e-2,
lr_2=1e-4,
epochs=100,
log_interval=100,
test_loader=None,
save_model=True,
save_dir=None,
):
self._validate_parameters(epochs, log_interval)
self.n_outputs = self._decide_n_outputs(train_loader)
# ====================================================================
# Train the dummy estimator (estimator_)
# ====================================================================
estimator_ = self._make_estimator()
# Set the optimizer and scheduler
optimizer = set_module.set_optimizer(
estimator_, self.optimizer_name, **self.optimizer_args
)
if self.use_scheduler_:
scheduler = set_module.set_scheduler(
optimizer, self.scheduler_name, **self.scheduler_args
)
# Check the training criterion
if not hasattr(self, "_criterion"):
self._criterion = nn.CrossEntropyLoss()
# Utils
total_iters = 0
for epoch in range(epochs):
# Training
estimator_.train()
for batch_idx, elem in enumerate(train_loader):
data, target = io.split_data_target(elem, self.device)
batch_size = data[0].size(0)
optimizer.zero_grad()
output = estimator_(*data)
loss = self._criterion(output, target)
loss.backward()
optimizer.step()
# Print training status
if batch_idx % log_interval == 0:
with torch.no_grad():
_, predicted = torch.max(output.data, 1)
correct = (predicted == target).sum().item()
msg = (
"Epoch: {:03d} | Batch: {:03d} | Loss: {:.5f} |"
" Correct: {:d}/{:d}"
)
self.logger.info(
msg.format(
epoch,
batch_idx,
loss,
correct,
batch_size,
)
)
if self.tb_logger:
self.tb_logger.add_scalar(
"fast_geometric/Base_Est/Train_Loss",
loss,
total_iters,
)
total_iters += 1
if self.use_scheduler_:
scheduler.step()
# ====================================================================
# Generate the ensemble
# ====================================================================
# Set the internal optimizer
estimator_.zero_grad()
optimizer = set_module.set_optimizer(
estimator_, self.optimizer_name, **self.optimizer_args
)
# Utils
best_acc = 0.0
n_iters = len(train_loader)
updated = False
epoch = 0
while len(self.estimators_) < self.n_estimators:
# Training
estimator_.train()
for batch_idx, elem in enumerate(train_loader):
# Update learning rate
self._adjust_lr(
optimizer, epoch, batch_idx, n_iters, cycle, lr_1, lr_2
)
data, target = io.split_data_target(elem, self.device)
batch_size = data[0].size(0)
optimizer.zero_grad()
output = estimator_(*data)
loss = self._criterion(output, target)
loss.backward()
optimizer.step()
# Print training status
if batch_idx % log_interval == 0:
with torch.no_grad():
_, predicted = torch.max(output.data, 1)
correct = (predicted == target).sum().item()
msg = (
"lr: {:.5f} | Epoch: {:03d} | Batch: {:03d} |"
" Loss: {:.5f} | Correct: {:d}/{:d}"
)
self.logger.info(
msg.format(
optimizer.param_groups[0]["lr"],
epoch,
batch_idx,
loss,
correct,
batch_size,
)
)
if self.tb_logger:
self.tb_logger.add_scalar(
"fast_geometric/Ensemble-Est_{}".format(
len(self.estimators_)
)
+ "/Train_Loss",
loss,
total_iters,
)
total_iters += 1
# Update the ensemble
if (epoch % cycle + 1) == cycle // 2:
base_estimator = self._make_estimator()
base_estimator.load_state_dict(estimator_.state_dict())
self.estimators_.append(base_estimator)
updated = True
total_iters = 0
msg = "Save the base estimator with index: {}"
self.logger.info(msg.format(len(self.estimators_) - 1))
# Validation after each base estimator being added
if test_loader and updated:
self.eval()
with torch.no_grad():
correct = 0
total = 0
for _, elem in enumerate(test_loader):
data, target = io.split_data_target(elem, self.device)
output = self.forward(*data)
_, predicted = torch.max(output.data, 1)
correct += (predicted == target).sum().item()
total += target.size(0)
acc = 100 * correct / total
if acc > best_acc:
best_acc = acc
if save_model:
io.save(self, save_dir, self.logger)
msg = (
"n_estimators: {} | Validation Acc: {:.3f} %"
" | Historical Best: {:.3f} %"
)
self.logger.info(
msg.format(len(self.estimators_), acc, best_acc)
)
if self.tb_logger:
self.tb_logger.add_scalar(
"fast_geometric/Ensemble_Est/Validation_Acc",
acc,
len(self.estimators_),
)
updated = False # reset the updating flag
epoch += 1
if save_model and not test_loader:
io.save(self, save_dir, self.logger)
@torchensemble_model_doc(item="classifier_evaluate")
def evaluate(self, test_loader, return_loss=False):
return super().evaluate(test_loader, return_loss)
@torchensemble_model_doc(item="predict")
def predict(self, *x):
return super().predict(*x)
@torchensemble_model_doc(
    """Implementation on the FastGeometricRegressor.""", "seq_model"
)
class FastGeometricRegressor(_BaseFastGeometric, BaseRegressor):
    # Regressor variant of Fast Geometric Ensembling (FGE); see the
    # classifier twin for the two-stage training scheme.
    @torchensemble_model_doc(
        """Implementation on the data forwarding in FastGeometricRegressor.""",  # noqa: E501
        "regressor_forward",
    )
    def forward(self, *x):
        # ``_forward`` (from _BaseFastGeometric) aggregates the snapshots'
        # predictions; no activation is applied for regression.
        pred = self._forward(*x)
        return pred
    @torchensemble_model_doc(
        (
            """Set the attributes on optimizer for FastGeometricRegressor. """
            + """Notice that keyword arguments specified here will also be """
            + """used in the ensembling stage except the learning rate."""
        ),
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        super().set_optimizer(optimizer_name=optimizer_name, **kwargs)
    @torchensemble_model_doc(
        (
            """Set the attributes on scheduler for FastGeometricRegressor. """
            + """Notice that this scheduler will only be used in the stage on """  # noqa: E501
            + """fitting the dummy base estimator."""
        ),
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        super().set_scheduler(scheduler_name=scheduler_name, **kwargs)
    @torchensemble_model_doc(
        """Set the training criterion for FastGeometricRegressor.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        super().set_criterion(criterion)
    @_fast_geometric_model_doc(
        """Implementation on the training stage of FastGeometricRegressor.""",  # noqa: E501
        "fit",
    )
    def fit(
        self,
        train_loader,
        cycle=4,
        lr_1=5e-2,
        lr_2=1e-4,
        epochs=100,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        self._validate_parameters(epochs, log_interval)
        self.n_outputs = self._decide_n_outputs(train_loader)
        # ====================================================================
        # Stage 1: train the dummy estimator (estimator_) for ``epochs``
        # epochs with the user-configured optimizer / scheduler.
        # ====================================================================
        estimator_ = self._make_estimator()
        # Set the optimizer and scheduler
        optimizer = set_module.set_optimizer(
            estimator_, self.optimizer_name, **self.optimizer_args
        )
        if self.use_scheduler_:
            scheduler = set_module.set_scheduler(
                optimizer, self.scheduler_name, **self.scheduler_args
            )
        # Default to mean squared error unless ``set_criterion`` was called
        if not hasattr(self, "_criterion"):
            self._criterion = nn.MSELoss()
        # Utils
        total_iters = 0
        for epoch in range(epochs):
            # Training
            estimator_.train()
            for batch_idx, elem in enumerate(train_loader):
                data, target = io.split_data_target(elem, self.device)
                optimizer.zero_grad()
                output = estimator_(*data)
                loss = self._criterion(output, target)
                loss.backward()
                optimizer.step()
                # Print training status
                if batch_idx % log_interval == 0:
                    with torch.no_grad():
                        msg = "Epoch: {:03d} | Batch: {:03d} | Loss: {:.5f}"
                        self.logger.info(msg.format(epoch, batch_idx, loss))
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "fast_geometric/Base_Est/Train_Loss",
                                loss,
                                total_iters,
                            )
                        # Only incremented on logged batches, so the
                        # tensorboard x-axis counts log events.
                        total_iters += 1
            if self.use_scheduler_:
                scheduler.step()
        # ====================================================================
        # Stage 2: generate the ensemble. Continue training the dummy
        # estimator with the cyclic LR schedule applied by ``_adjust_lr``.
        # ====================================================================
        # Re-create the optimizer so the Stage-1 scheduler no longer
        # interferes with the cyclic schedule.
        estimator_.zero_grad()
        optimizer = set_module.set_optimizer(
            estimator_, self.optimizer_name, **self.optimizer_args
        )
        # Utils
        best_loss = float("inf")
        n_iters = len(train_loader)
        updated = False
        epoch = 0
        while len(self.estimators_) < self.n_estimators:
            # Training
            estimator_.train()
            for batch_idx, elem in enumerate(train_loader):
                # Update learning rate
                self._adjust_lr(
                    optimizer, epoch, batch_idx, n_iters, cycle, lr_1, lr_2
                )
                data, target = io.split_data_target(elem, self.device)
                optimizer.zero_grad()
                output = estimator_(*data)
                loss = self._criterion(output, target)
                loss.backward()
                optimizer.step()
                # Print training status
                if batch_idx % log_interval == 0:
                    with torch.no_grad():
                        msg = "Epoch: {:03d} | Batch: {:03d} | Loss: {:.5f}"
                        self.logger.info(msg.format(epoch, batch_idx, loss))
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "fast_geometric/Ensemble-Est_{}".format(
                                    len(self.estimators_)
                                )
                                + "/Train_Loss",
                                loss,
                                total_iters,
                            )
                        total_iters += 1
            # Snapshot the current weights at the mid-point of each LR cycle.
            if (epoch % cycle + 1) == cycle // 2:
                base_estimator = self._make_estimator()
                base_estimator.load_state_dict(estimator_.state_dict())
                self.estimators_.append(base_estimator)
                updated = True
                total_iters = 0
                msg = "Save the base estimator with index: {}"
                self.logger.info(msg.format(len(self.estimators_) - 1))
            # Validation after each base estimator being added
            if test_loader and updated:
                self.eval()
                with torch.no_grad():
                    val_loss = 0.0
                    for _, elem in enumerate(test_loader):
                        data, target = io.split_data_target(elem, self.device)
                        output = self.forward(*data)
                        val_loss += self._criterion(output, target)
                    val_loss /= len(test_loader)
                    # Persist only when a new historical best is reached.
                    if val_loss < best_loss:
                        best_loss = val_loss
                        if save_model:
                            io.save(self, save_dir, self.logger)
                    msg = (
                        "Epoch: {:03d} | Validation Loss: {:.5f} |"
                        " Historical Best: {:.5f}"
                    )
                    self.logger.info(msg.format(epoch, val_loss, best_loss))
                    if self.tb_logger:
                        self.tb_logger.add_scalar(
                            "fast_geometric/Ensemble_Est/Validation_Loss",
                            val_loss,
                            len(self.estimators_),
                        )
                updated = False  # reset the updating flag
            epoch += 1
        if save_model and not test_loader:
            io.save(self, save_dir, self.logger)
    @torchensemble_model_doc(item="regressor_evaluate")
    def evaluate(self, test_loader):
        return super().evaluate(test_loader)
    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
import torch
import torch.nn as nn
import torch.nn.functional as F
import warnings
from joblib import Parallel, delayed
from ._base import BaseClassifier, BaseRegressor
from ._base import torchensemble_model_doc
from .utils import io
from .utils import set_module
from .utils import operator as op
__all__ = ["BaggingClassifier", "BaggingRegressor"]
def _parallel_fit_per_epoch(
    train_loader,
    estimator,
    cur_lr,
    optimizer,
    criterion,
    idx,
    epoch,
    log_interval,
    device,
    is_classification,
):
    """Fit one base estimator for a single epoch inside a joblib worker.

    WARNING: Parallelization when fitting large base estimators may cause
    out-of-memory error.
    """
    # The scheduler cannot follow its optimizer across worker boundaries, so
    # the caller forwards the current learning rate explicitly.
    if cur_lr:
        set_module.update_lr(optimizer, cur_lr)
    for batch_idx, elem in enumerate(train_loader):
        data, target = io.split_data_target(elem, device)
        batch_size = data[0].size(0)
        # Bootstrap sample: draw ``batch_size`` indices with replacement,
        # then keep each distinct index exactly once.
        mask = torch.unique(
            torch.randint(
                high=batch_size, size=(int(batch_size),), dtype=torch.int64
            )
        )
        n_sampled = mask.size(0)
        boot_data = [tensor[mask] for tensor in data]
        boot_target = target[mask]
        optimizer.zero_grad()
        boot_output = estimator(*boot_data)
        loss = criterion(boot_output, boot_target)
        loss.backward()
        optimizer.step()
        # Report training status every ``log_interval`` batches.
        if batch_idx % log_interval != 0:
            continue
        if is_classification:
            _, predicted = torch.max(boot_output.data, 1)
            correct = (predicted == boot_target).sum().item()
            msg = (
                "Estimator: {:03d} | Epoch: {:03d} | Batch: {:03d}"
                " | Loss: {:.5f} | Correct: {:d}/{:d}"
            )
            print(
                msg.format(idx, epoch, batch_idx, loss, correct, n_sampled)
            )
        else:
            msg = (
                "Estimator: {:03d} | Epoch: {:03d} | Batch: {:03d}"
                " | Loss: {:.5f}"
            )
            print(msg.format(idx, epoch, batch_idx, loss))
    return estimator, optimizer
@torchensemble_model_doc(
    """Implementation on the BaggingClassifier.""", "model"
)
class BaggingClassifier(BaseClassifier):
    # Ensemble of independently trained classifiers, each fitted on bootstrap
    # samples drawn per batch by ``_parallel_fit_per_epoch``.
    @torchensemble_model_doc(
        """Implementation on the data forwarding in BaggingClassifier.""",
        "classifier_forward",
    )
    def forward(self, *x):
        # Average over class distributions from all base estimators.
        outputs = [
            F.softmax(estimator(*x), dim=1) for estimator in self.estimators_
        ]
        proba = op.average(outputs)
        return proba
    @torchensemble_model_doc(
        """Set the attributes on optimizer for BaggingClassifier.""",
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        super().set_optimizer(optimizer_name, **kwargs)
    @torchensemble_model_doc(
        """Set the attributes on scheduler for BaggingClassifier.""",
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        super().set_scheduler(scheduler_name, **kwargs)
    @torchensemble_model_doc(
        """Set the training criterion for BaggingClassifier.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        super().set_criterion(criterion)
    @torchensemble_model_doc(
        """Implementation on the training stage of BaggingClassifier.""", "fit"
    )
    def fit(
        self,
        train_loader,
        epochs=100,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        self._validate_parameters(epochs, log_interval)
        self.n_outputs = self._decide_n_outputs(train_loader)
        # Instantiate a pool of base estimators, optimizers, and schedulers.
        estimators = []
        for _ in range(self.n_estimators):
            estimators.append(self._make_estimator())
        optimizers = []
        for i in range(self.n_estimators):
            optimizers.append(
                set_module.set_optimizer(
                    estimators[i], self.optimizer_name, **self.optimizer_args
                )
            )
        # Only optimizers[0] is attached to the scheduler; its current LR is
        # broadcast to every worker through ``cur_lr`` below.
        if self.use_scheduler_:
            scheduler_ = set_module.set_scheduler(
                optimizers[0], self.scheduler_name, **self.scheduler_args
            )
        # Check the training criterion
        if not hasattr(self, "_criterion"):
            self._criterion = nn.CrossEntropyLoss()
        # Utils
        best_acc = 0.0
        # Internal helper function on pseudo forward
        def _forward(estimators, *x):
            # Same aggregation as ``self.forward`` but over the local pool,
            # which may be ahead of ``self.estimators_`` during training.
            outputs = [
                F.softmax(estimator(*x), dim=1) for estimator in estimators
            ]
            proba = op.average(outputs)
            return proba
        # Maintain a pool of workers
        with Parallel(n_jobs=self.n_jobs) as parallel:
            # Training loop
            for epoch in range(epochs):
                self.train()
                if self.use_scheduler_:
                    cur_lr = scheduler_.get_last_lr()[0]
                else:
                    cur_lr = None
                if self.n_jobs and self.n_jobs > 1:
                    msg = "Parallelization on the training epoch: {:03d}"
                    self.logger.info(msg.format(epoch))
                # One epoch of training per estimator, fanned out to workers.
                rets = parallel(
                    delayed(_parallel_fit_per_epoch)(
                        train_loader,
                        estimator,
                        cur_lr,
                        optimizer,
                        self._criterion,
                        idx,
                        epoch,
                        log_interval,
                        self.device,
                        True,
                    )
                    for idx, (estimator, optimizer) in enumerate(
                        zip(estimators, optimizers)
                    )
                )
                # Re-collect the estimator/optimizer pairs returned by the
                # workers so the next epoch continues from their state.
                estimators, optimizers = [], []
                for estimator, optimizer in rets:
                    estimators.append(estimator)
                    optimizers.append(optimizer)
                # Validation
                if test_loader:
                    self.eval()
                    with torch.no_grad():
                        correct = 0
                        total = 0
                        for _, elem in enumerate(test_loader):
                            data, target = io.split_data_target(
                                elem, self.device
                            )
                            output = _forward(estimators, *data)
                            _, predicted = torch.max(output.data, 1)
                            correct += (predicted == target).sum().item()
                            total += target.size(0)
                        acc = 100 * correct / total
                        # ``self.estimators_`` is refreshed (and optionally
                        # saved) only on a new historical best accuracy.
                        if acc > best_acc:
                            best_acc = acc
                            self.estimators_ = nn.ModuleList()
                            self.estimators_.extend(estimators)
                            if save_model:
                                io.save(self, save_dir, self.logger)
                        msg = (
                            "Epoch: {:03d} | Validation Acc: {:.3f}"
                            " % | Historical Best: {:.3f} %"
                        )
                        self.logger.info(msg.format(epoch, acc, best_acc))
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "bagging/Validation_Acc", acc, epoch
                            )
                # Update the scheduler
                with warnings.catch_warnings():
                    # UserWarning raised by PyTorch is ignored because
                    # scheduler does not have a real effect on the optimizer.
                    warnings.simplefilter("ignore", UserWarning)
                    if self.use_scheduler_:
                        scheduler_.step()
        # Expose the final pool of trained estimators.
        self.estimators_ = nn.ModuleList()
        self.estimators_.extend(estimators)
        if save_model and not test_loader:
            io.save(self, save_dir, self.logger)
    @torchensemble_model_doc(item="classifier_evaluate")
    def evaluate(self, test_loader, return_loss=False):
        return super().evaluate(test_loader, return_loss)
    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
@torchensemble_model_doc(
    """Implementation on the BaggingRegressor.""", "model"
)
class BaggingRegressor(BaseRegressor):
    # Ensemble of independently trained regressors, each fitted on bootstrap
    # samples drawn per batch by ``_parallel_fit_per_epoch``.
    @torchensemble_model_doc(
        """Implementation on the data forwarding in BaggingRegressor.""",
        "regressor_forward",
    )
    def forward(self, *x):
        # Average over predictions from all base estimators.
        outputs = [estimator(*x) for estimator in self.estimators_]
        pred = op.average(outputs)
        return pred
    @torchensemble_model_doc(
        """Set the attributes on optimizer for BaggingRegressor.""",
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        super().set_optimizer(optimizer_name, **kwargs)
    @torchensemble_model_doc(
        """Set the attributes on scheduler for BaggingRegressor.""",
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        super().set_scheduler(scheduler_name, **kwargs)
    @torchensemble_model_doc(
        """Set the training criterion for BaggingRegressor.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        super().set_criterion(criterion)
    @torchensemble_model_doc(
        """Implementation on the training stage of BaggingRegressor.""", "fit"
    )
    def fit(
        self,
        train_loader,
        epochs=100,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        self._validate_parameters(epochs, log_interval)
        self.n_outputs = self._decide_n_outputs(train_loader)
        # Instantiate a pool of base estimators, optimizers, and schedulers.
        estimators = []
        for _ in range(self.n_estimators):
            estimators.append(self._make_estimator())
        optimizers = []
        for i in range(self.n_estimators):
            optimizers.append(
                set_module.set_optimizer(
                    estimators[i], self.optimizer_name, **self.optimizer_args
                )
            )
        # Only optimizers[0] is attached to the scheduler; its current LR is
        # broadcast to every worker through ``cur_lr`` below.
        if self.use_scheduler_:
            scheduler_ = set_module.set_scheduler(
                optimizers[0], self.scheduler_name, **self.scheduler_args
            )
        # Check the training criterion
        if not hasattr(self, "_criterion"):
            self._criterion = nn.MSELoss()
        # Utils
        best_loss = float("inf")
        # Internal helper function on pseudo forward
        def _forward(estimators, *x):
            # Same aggregation as ``self.forward`` but over the local pool,
            # which may be ahead of ``self.estimators_`` during training.
            outputs = [estimator(*x) for estimator in estimators]
            pred = op.average(outputs)
            return pred
        # Maintain a pool of workers
        with Parallel(n_jobs=self.n_jobs) as parallel:
            # Training loop
            for epoch in range(epochs):
                self.train()
                if self.use_scheduler_:
                    cur_lr = scheduler_.get_last_lr()[0]
                else:
                    cur_lr = None
                if self.n_jobs and self.n_jobs > 1:
                    msg = "Parallelization on the training epoch: {:03d}"
                    self.logger.info(msg.format(epoch))
                # One epoch of training per estimator, fanned out to workers.
                rets = parallel(
                    delayed(_parallel_fit_per_epoch)(
                        train_loader,
                        estimator,
                        cur_lr,
                        optimizer,
                        self._criterion,
                        idx,
                        epoch,
                        log_interval,
                        self.device,
                        False,
                    )
                    for idx, (estimator, optimizer) in enumerate(
                        zip(estimators, optimizers)
                    )
                )
                # Re-collect the estimator/optimizer pairs returned by the
                # workers so the next epoch continues from their state.
                estimators, optimizers = [], []
                for estimator, optimizer in rets:
                    estimators.append(estimator)
                    optimizers.append(optimizer)
                # Validation
                if test_loader:
                    self.eval()
                    with torch.no_grad():
                        val_loss = 0.0
                        for _, elem in enumerate(test_loader):
                            data, target = io.split_data_target(
                                elem, self.device
                            )
                            output = _forward(estimators, *data)
                            val_loss += self._criterion(output, target)
                        val_loss /= len(test_loader)
                        # ``self.estimators_`` is refreshed (and optionally
                        # saved) only on a new historical best loss.
                        if val_loss < best_loss:
                            best_loss = val_loss
                            self.estimators_ = nn.ModuleList()
                            self.estimators_.extend(estimators)
                            if save_model:
                                io.save(self, save_dir, self.logger)
                        msg = (
                            "Epoch: {:03d} | Validation Loss:"
                            " {:.5f} | Historical Best: {:.5f}"
                        )
                        self.logger.info(
                            msg.format(epoch, val_loss, best_loss)
                        )
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "bagging/Validation_Loss", val_loss, epoch
                            )
                # Update the scheduler
                with warnings.catch_warnings():
                    # UserWarning raised by PyTorch is ignored because
                    # scheduler does not have a real effect on the optimizer.
                    warnings.simplefilter("ignore", UserWarning)
                    if self.use_scheduler_:
                        scheduler_.step()
        # Expose the final pool of trained estimators.
        self.estimators_ = nn.ModuleList()
        self.estimators_.extend(estimators)
        if save_model and not test_loader:
            io.save(self, save_dir, self.logger)
    @torchensemble_model_doc(item="regressor_evaluate")
    def evaluate(self, test_loader):
        return super().evaluate(test_loader)
    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
import abc
import torch
import logging
import warnings
import torch.nn as nn
import torch.nn.functional as F
from joblib import Parallel, delayed
from ._base import BaseModule, BaseClassifier, BaseRegressor
from ._base import torchensemble_model_doc
from .utils import io
from .utils import set_module
from .utils import operator as op
from .utils.logging import get_tb_logger
__all__ = ["SoftGradientBoostingClassifier", "SoftGradientBoostingRegressor"]
__model_doc = """
Parameters
----------
estimator : torch.nn.Module
The class or object of your base estimator.
- If :obj:`class`, it should inherit from :mod:`torch.nn.Module`.
- If :obj:`object`, it should be instantiated from a class inherited
from :mod:`torch.nn.Module`.
n_estimators : int
The number of base estimators in the ensemble.
estimator_args : dict, default=None
The dictionary of hyper-parameters used to instantiate base
estimators. This parameter will have no effect if ``estimator`` is a
base estimator object after instantiation.
shrinkage_rate : float, default=1
The shrinkage rate used in gradient boosting.
cuda : bool, default=True
- If ``True``, use GPU to train and evaluate the ensemble.
- If ``False``, use CPU to train and evaluate the ensemble.
n_jobs : int, default=None
The number of workers for training the ensemble. This input
argument is used for parallel ensemble methods such as
:mod:`voting` and :mod:`bagging`. Setting it to an integer larger
than ``1`` enables ``n_jobs`` base estimators to be trained
simultaneously.
Attributes
----------
estimators_ : torch.nn.ModuleList
An internal container that stores all fitted base estimators.
"""
__fit_doc = """
Parameters
----------
train_loader : torch.utils.data.DataLoader
A data loader that contains the training data.
epochs : int, default=100
The number of training epochs per base estimator.
use_reduction_sum : bool, default=True
Whether to set ``reduction="sum"`` for the internal mean squared
error used to fit each base estimator.
log_interval : int, default=100
The number of batches to wait before logging the training status.
test_loader : torch.utils.data.DataLoader, default=None
A data loader that contains the evaluating data.
- If ``None``, no validation is conducted after each base
estimator being trained.
- If not ``None``, the ensemble will be evaluated on this
dataloader after each base estimator being trained.
save_model : bool, default=True
Specify whether to save the model parameters.
- If test_loader is ``None``, the ensemble containing
``n_estimators`` base estimators will be saved.
- If test_loader is not ``None``, the ensemble with the best
validation performance will be saved.
save_dir : string, default=None
Specify where to save the model parameters.
- If ``None``, the model will be saved in the current directory.
- If not ``None``, the model will be saved in the specified
directory: ``save_dir``.
"""
def _soft_gradient_boosting_model_doc(header, item="model"):
    """Build a class decorator that attaches documentation to sGBM models.

    ``header`` is a one-line introduction and ``item`` selects which shared
    docstring fragment ("model" or "fit") is appended to it.
    """
    def _select(which):
        """Return the shared docstring fragment named by ``which``."""
        return {"model": __model_doc, "fit": __fit_doc}[which]
    def _decorator(cls):
        cls.__doc__ = header + "\n\n" + _select(item)
        return cls
    return _decorator
def _parallel_compute_pseudo_residual(
    output, target, estimator_idx, shrinkage_rate, n_outputs, is_classification
):
    """Compute the pseudo residual fitted by estimator ``estimator_idx``.

    The residual is taken with respect to the shrunken sum of the outputs of
    all preceding base estimators (``output[0 .. estimator_idx - 1]``); run
    inside a joblib worker, one call per ensemble member.
    """
    preceding = torch.zeros_like(output[0], device=output[0].device)
    for member_out in output[:estimator_idx]:
        preceding = preceding + shrinkage_rate * member_out
    if is_classification:
        return op.pseudo_residual_classification(target, preceding, n_outputs)
    return op.pseudo_residual_regression(target, preceding)
class _BaseSoftGradientBoosting(BaseModule):
    """Base class for soft gradient boosting machines (sGBM).

    Unlike classic gradient boosting, all base estimators are instantiated
    up front and trained jointly: each estimator fits the pseudo residual of
    the shrunken sum of its predecessors' outputs, and a single optimizer
    updates every estimator at once.
    """
    def __init__(
        self,
        estimator,
        n_estimators,
        estimator_args=None,
        shrinkage_rate=1.0,
        cuda=True,
        n_jobs=None,
    ):
        # NOTE: deliberately skips ``BaseModule.__init__`` (calls the class
        # after BaseModule in the MRO); all attributes are set manually here.
        super(BaseModule, self).__init__()
        self.base_estimator_ = estimator
        self.n_estimators = n_estimators
        self.estimator_args = estimator_args
        # ``estimator_args`` only makes sense when ``estimator`` is a class
        # that still needs instantiating.
        if estimator_args and not isinstance(estimator, type):
            msg = (
                "The input `estimator_args` will have no effect since"
                " `estimator` is already an object after instantiation."
            )
            warnings.warn(msg, RuntimeWarning)
        self.shrinkage_rate = shrinkage_rate
        self.device = torch.device("cuda" if cuda else "cpu")
        self.n_jobs = n_jobs
        self.logger = logging.getLogger()
        self.tb_logger = get_tb_logger()
        self.estimators_ = nn.ModuleList()
        self.use_scheduler_ = False
    def _validate_parameters(self, epochs, log_interval):
        """Validate hyper-parameters on training the ensemble."""
        if not epochs > 0:
            msg = (
                "The number of training epochs = {} should be strictly"
                " positive."
            )
            self.logger.error(msg.format(epochs))
            raise ValueError(msg.format(epochs))
        if not log_interval > 0:
            msg = (
                "The number of batches to wait before printting the"
                " training status should be strictly positive, but got {}"
                " instead."
            )
            self.logger.error(msg.format(log_interval))
            raise ValueError(msg.format(log_interval))
        if not 0 < self.shrinkage_rate <= 1:
            msg = (
                "The shrinkage rate should be in the range (0, 1], but got"
                " {} instead."
            )
            self.logger.error(msg.format(self.shrinkage_rate))
            raise ValueError(msg.format(self.shrinkage_rate))
    @abc.abstractmethod
    def _evaluate_during_fit(self, test_loader, epoch):
        """Evaluate the ensemble after each training epoch."""
    def fit(
        self,
        train_loader,
        epochs=100,
        use_reduction_sum=True,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        """Jointly train all base estimators with soft gradient boosting."""
        # Instantiate base estimators and set attributes
        for _ in range(self.n_estimators):
            self.estimators_.append(self._make_estimator())
        self._validate_parameters(epochs, log_interval)
        self.n_outputs = self._decide_n_outputs(train_loader)
        # Utils — MSE between each estimator's output and its pseudo residual
        criterion = (
            nn.MSELoss(reduction="sum") if use_reduction_sum else nn.MSELoss()
        )
        total_iters = 0
        # Set up optimizer and learning rate scheduler. The optimizer is
        # built over ``self``, i.e. the parameters of every base estimator.
        optimizer = set_module.set_optimizer(
            self, self.optimizer_name, **self.optimizer_args
        )
        if self.use_scheduler_:
            scheduler = set_module.set_scheduler(
                optimizer,
                self.scheduler_name,
                **self.scheduler_args  # noqa: E501
            )
        for epoch in range(epochs):
            self.train()
            for batch_idx, elem in enumerate(train_loader):
                data, target = io.split_data_target(elem, self.device)
                # Forward every base estimator on the same batch.
                output = [estimator(*data) for estimator in self.estimators_]
                # Compute pseudo residuals in parallel
                rets = Parallel(n_jobs=self.n_jobs)(
                    delayed(_parallel_compute_pseudo_residual)(
                        output,
                        target,
                        i,
                        self.shrinkage_rate,
                        self.n_outputs,
                        self.is_classification,
                    )
                    for i in range(self.n_estimators)
                )
                # Compute sGBM loss: sum over estimators of the MSE between
                # each estimator's output and its own pseudo residual.
                loss = torch.tensor(0.0, device=self.device)
                for idx, estimator in enumerate(self.estimators_):
                    loss += criterion(output[idx], rets[idx])
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # Print training status
                if batch_idx % log_interval == 0:
                    with torch.no_grad():
                        msg = "Epoch: {:03d} | Batch: {:03d} | RegLoss: {:.5f}"
                        self.logger.info(msg.format(epoch, batch_idx, loss))
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "sGBM/Train_Loss", loss, total_iters
                            )
                        total_iters += 1
            # Validation — subclasses return True on a new historical best.
            if test_loader:
                flag = self._evaluate_during_fit(test_loader, epoch)
                if save_model and flag:
                    io.save(self, save_dir, self.logger)
            # Update the scheduler
            if self.use_scheduler_:
                scheduler.step()
        if save_model and not test_loader:
            io.save(self, save_dir, self.logger)
@_soft_gradient_boosting_model_doc(
    """Implementation on the SoftGradientBoostingClassifier.""", "model"
)
class SoftGradientBoostingClassifier(
    _BaseSoftGradientBoosting, BaseClassifier
):
    # Classification front-end of sGBM: cross-entropy residuals, softmax
    # output, accuracy-based model selection during validation.
    def __init__(
        self,
        estimator,
        n_estimators,
        estimator_args=None,
        shrinkage_rate=1.0,
        cuda=True,
        n_jobs=None,
    ):
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            estimator_args=estimator_args,
            shrinkage_rate=shrinkage_rate,
            cuda=cuda,
            n_jobs=n_jobs,
        )
        self._criterion = nn.CrossEntropyLoss()
        # Selects the classification branch in the pseudo-residual helper.
        self.is_classification = True
        # Stored on the instance, so validation history survives across
        # successive ``fit`` calls.
        self.best_acc = 0.0
    @torch.no_grad()
    def _evaluate_during_fit(self, test_loader, epoch):
        """Return True when this epoch sets a new best validation accuracy."""
        self.eval()
        correct = 0
        total = 0
        flag = False
        for _, elem in enumerate(test_loader):
            data, target = io.split_data_target(elem, self.device)
            output = self.forward(*data)
            _, predicted = torch.max(output.data, 1)
            correct += (predicted == target).sum().item()
            total += target.size(0)
        acc = 100 * correct / total
        if acc > self.best_acc:
            self.best_acc = acc
            flag = True
        msg = (
            "Epoch: {:03d} | Validation Acc: {:.3f}"
            " % | Historical Best: {:.3f} %"
        )
        self.logger.info(msg.format(epoch, acc, self.best_acc))
        if self.tb_logger:
            self.tb_logger.add_scalar(
                "soft_gradient_boosting/Validation_Acc", acc, epoch
            )
        return flag
    @torchensemble_model_doc(
        """Set the attributes on optimizer for SoftGradientBoostingClassifier.""",  # noqa: E501
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        super().set_optimizer(optimizer_name, **kwargs)
    @torchensemble_model_doc(
        """Set the attributes on scheduler for SoftGradientBoostingClassifier.""",  # noqa: E501
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        super().set_scheduler(scheduler_name, **kwargs)
    @torchensemble_model_doc(
        """Set the training criterion for SoftGradientBoostingClassifier.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        super().set_criterion(criterion)
    @_soft_gradient_boosting_model_doc(
        """Implementation on the training stage of SoftGradientBoostingClassifier.""",  # noqa: E501
        "fit",
    )
    def fit(
        self,
        train_loader,
        epochs=100,
        use_reduction_sum=True,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        super().fit(
            train_loader=train_loader,
            epochs=epochs,
            use_reduction_sum=use_reduction_sum,
            log_interval=log_interval,
            test_loader=test_loader,
            save_model=save_model,
            save_dir=save_dir,
        )
    @torchensemble_model_doc(
        """Implementation on the data forwarding in SoftGradientBoostingClassifier.""",  # noqa: E501
        "classifier_forward",
    )
    def forward(self, *x):
        # Shrunken sum of all base estimators' outputs, then softmax.
        output = [estimator(*x) for estimator in self.estimators_]
        output = op.sum_with_multiplicative(output, self.shrinkage_rate)
        proba = F.softmax(output, dim=1)
        return proba
    @torchensemble_model_doc(item="classifier_evaluate")
    def evaluate(self, test_loader, return_loss=False):
        return super().evaluate(test_loader, return_loss)
    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
@_soft_gradient_boosting_model_doc(
    """Implementation on the SoftGradientBoostingRegressor.""", "model"
)
class SoftGradientBoostingRegressor(_BaseSoftGradientBoosting, BaseRegressor):
    # Regression front-end of sGBM: MSE residuals, raw summed output,
    # MSE-based model selection during validation.
    def __init__(
        self,
        estimator,
        n_estimators,
        estimator_args=None,
        shrinkage_rate=1.0,
        cuda=True,
        n_jobs=None,
    ):
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            estimator_args=estimator_args,
            shrinkage_rate=shrinkage_rate,
            cuda=cuda,
            n_jobs=n_jobs,
        )
        self._criterion = nn.MSELoss()
        # Selects the regression branch in the pseudo-residual helper.
        self.is_classification = False
        # Stored on the instance, so validation history survives across
        # successive ``fit`` calls.
        self.best_mse = float("inf")
    @torch.no_grad()
    def _evaluate_during_fit(self, test_loader, epoch):
        """Return True when this epoch sets a new best validation MSE."""
        self.eval()
        mse = 0.0
        flag = False
        criterion = nn.MSELoss()
        for _, elem in enumerate(test_loader):
            data, target = io.split_data_target(elem, self.device)
            output = self.forward(*data)
            mse += criterion(output, target)
        mse /= len(test_loader)
        if mse < self.best_mse:
            self.best_mse = mse
            flag = True
        msg = (
            "Epoch: {:03d} | Validation MSE: {:.5f} | Historical Best: {:.5f}"
        )
        self.logger.info(msg.format(epoch, mse, self.best_mse))
        if self.tb_logger:
            self.tb_logger.add_scalar(
                "soft_gradient_boosting/Validation_MSE", mse, epoch
            )
        return flag
    @torchensemble_model_doc(
        """Set the attributes on optimizer for SoftGradientBoostingRegressor.""",  # noqa: E501
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        super().set_optimizer(optimizer_name, **kwargs)
    @torchensemble_model_doc(
        """Set the attributes on scheduler for SoftGradientBoostingRegressor.""",  # noqa: E501
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        super().set_scheduler(scheduler_name, **kwargs)
    @torchensemble_model_doc(
        """Set the training criterion for SoftGradientBoostingRegressor.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        super().set_criterion(criterion)
    @_soft_gradient_boosting_model_doc(
        """Implementation on the training stage of SoftGradientBoostingRegressor.""",  # noqa: E501
        "fit",
    )
    def fit(
        self,
        train_loader,
        epochs=100,
        use_reduction_sum=True,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        super().fit(
            train_loader=train_loader,
            epochs=epochs,
            use_reduction_sum=use_reduction_sum,
            log_interval=log_interval,
            test_loader=test_loader,
            save_model=save_model,
            save_dir=save_dir,
        )
    @torchensemble_model_doc(
        """Implementation on the data forwarding in SoftGradientBoostingRegressor.""",  # noqa: E501
        "regressor_forward",
    )
    def forward(self, *x):
        # Shrunken sum of all base estimators' predictions.
        outputs = [estimator(*x) for estimator in self.estimators_]
        pred = op.sum_with_multiplicative(outputs, self.shrinkage_rate)
        return pred
    @torchensemble_model_doc(item="regressor_evaluate")
    def evaluate(self, test_loader):
        return super().evaluate(test_loader)
    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
import abc
import copy
import torch
import logging
import warnings
import numpy as np
import torch.nn as nn
from . import _constants as const
from .utils.io import split_data_target
from .utils.logging import get_tb_logger
def torchensemble_model_doc(header="", item="model"):
    """
    A decorator on obtaining documentation for different methods in the
    ensemble. This decorator is modified from `sklearn.py` in XGBoost.
    Parameters
    ----------
    header: string
        Introduction to the decorated class or method.
    item : string
        Type of the docstring item.
    """

    def _lookup(which):
        """Return the docstring template registered under ``which``."""
        templates = {
            "model": const.__model_doc,
            "seq_model": const.__seq_model_doc,
            "fit": const.__fit_doc,
            "predict": const.__predict_doc,
            "set_optimizer": const.__set_optimizer_doc,
            "set_scheduler": const.__set_scheduler_doc,
            "set_criterion": const.__set_criterion_doc,
            "classifier_forward": const.__classification_forward_doc,
            "classifier_evaluate": const.__classification_evaluate_doc,
            "regressor_forward": const.__regression_forward_doc,
            "regressor_evaluate": const.__regression_evaluate_doc,
        }
        return templates[which]

    def _attach(obj):
        # Prepend the header, then splice in the selected template.
        obj.__doc__ = "".join([header + "\n\n", *_lookup(item)])
        return obj

    return _attach
class BaseModule(nn.Module):
    """Base class for all ensembles.
    WARNING: This class cannot be used directly.
    Please use the derived classes instead.
    """

    def __init__(
        self,
        estimator,
        n_estimators,
        estimator_args=None,
        cuda=True,
        n_jobs=None,
    ):
        # `estimator` is either a class (instantiated on demand with
        # `estimator_args`) or an nn.Module instance (deep-copied on demand);
        # see `_make_estimator`.
        super(BaseModule, self).__init__()
        self.base_estimator_ = estimator
        self.n_estimators = n_estimators
        self.estimator_args = estimator_args
        # Args are only consumed when `estimator` is a class, so warn when
        # they would silently be ignored.
        if estimator_args and not isinstance(estimator, type):
            msg = (
                "The input `estimator_args` will have no effect since"
                " `estimator` is already an object after instantiation."
            )
            warnings.warn(msg, RuntimeWarning)
        self.device = torch.device("cuda" if cuda else "cpu")
        self.n_jobs = n_jobs
        self.logger = logging.getLogger()
        self.tb_logger = get_tb_logger()
        # Fitted base estimators; populated during `fit`.
        self.estimators_ = nn.ModuleList()
        self.use_scheduler_ = False

    def __len__(self):
        """
        Return the number of base estimators in the ensemble. The real number
        of base estimators may not match `self.n_estimators` because of the
        early stopping stage in several ensembles such as Gradient Boosting.
        """
        return len(self.estimators_)

    def __getitem__(self, index):
        """Return the `index`-th base estimator in the ensemble."""
        return self.estimators_[index]

    @abc.abstractmethod
    def _decide_n_outputs(self, train_loader):
        """Decide the number of outputs according to the `train_loader`."""

    def _make_estimator(self):
        """Make and configure a copy of `self.base_estimator_`."""
        # Call `deepcopy` to make a base estimator
        if not isinstance(self.base_estimator_, type):
            estimator = copy.deepcopy(self.base_estimator_)
        # Call `__init__` to make a base estimator
        else:
            # Without params
            if self.estimator_args is None:
                estimator = self.base_estimator_()
            # With params
            else:
                estimator = self.base_estimator_(**self.estimator_args)
        return estimator.to(self.device)

    def _validate_parameters(self, epochs, log_interval):
        """Validate hyper-parameters on training the ensemble.

        Raises ``ValueError`` (after logging) when either value is not
        strictly positive.
        """
        if not epochs > 0:
            msg = (
                "The number of training epochs should be strictly positive"
                ", but got {} instead."
            )
            self.logger.error(msg.format(epochs))
            raise ValueError(msg.format(epochs))
        if not log_interval > 0:
            msg = (
                "The number of batches to wait before printing the"
                " training status should be strictly positive, but got {}"
                " instead."
            )
            self.logger.error(msg.format(log_interval))
            raise ValueError(msg.format(log_interval))

    def set_criterion(self, criterion):
        """Set the training criterion."""
        self._criterion = criterion

    def set_optimizer(self, optimizer_name, **kwargs):
        """Set the parameter optimizer."""
        # Only recorded here; the actual optimizer is built inside `fit`.
        self.optimizer_name = optimizer_name
        self.optimizer_args = kwargs

    def set_scheduler(self, scheduler_name, **kwargs):
        """Set the learning rate scheduler."""
        # Only recorded here; the actual scheduler is built inside `fit`.
        self.scheduler_name = scheduler_name
        self.scheduler_args = kwargs
        self.use_scheduler_ = True

    @abc.abstractmethod
    def forward(self, *x):
        """
        Implementation on the data forwarding in the ensemble. Notice
        that the input ``x`` should be a data batch instead of a standalone
        data loader that contains many data batches.
        """

    @abc.abstractmethod
    def fit(
        self,
        train_loader,
        epochs=100,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        """
        Implementation on the training stage of the ensemble.
        """

    @torch.no_grad()
    def predict(self, *x):
        """Docstrings decorated by downstream ensembles."""
        self.eval()
        # Copy data
        x_device = []
        for data in x:
            if isinstance(data, torch.Tensor):
                x_device.append(data.to(self.device))
            elif isinstance(data, np.ndarray):
                x_device.append(torch.Tensor(data).to(self.device))
            else:
                msg = (
                    "The type of input X should be one of {{torch.Tensor,"
                    " np.ndarray}}."
                )
                raise ValueError(msg)
        pred = self.forward(*x_device)
        # Predictions are always returned on CPU.
        pred = pred.cpu()
        return pred
class BaseClassifier(BaseModule):
    """Base class for all ensemble classifiers.
    WARNING: This class cannot be used directly.
    Please use the derived classes instead.
    """

    def _decide_n_outputs(self, train_loader):
        """
        Decide the number of outputs according to the `train_loader`.
        The number of outputs equals the number of distinct classes for
        classifiers.
        """
        if hasattr(train_loader.dataset, "classes"):
            n_outputs = len(train_loader.dataset.classes)
        # Infer `n_outputs` from the dataloader
        else:
            # Fallback: one full pass over the loader to count the distinct
            # labels actually present.
            labels = []
            for _, elem in enumerate(train_loader):
                _, target = split_data_target(elem, self.device)
                labels.append(target)
            labels = torch.unique(torch.cat(labels))
            n_outputs = labels.size(0)
        return n_outputs

    @torch.no_grad()
    def evaluate(self, test_loader, return_loss=False):
        """Docstrings decorated by downstream models."""
        self.eval()
        correct = 0
        total = 0
        loss = 0.0
        for _, elem in enumerate(test_loader):
            data, target = split_data_target(elem, self.device)
            output = self.forward(*data)
            _, predicted = torch.max(output.data, 1)
            correct += (predicted == target).sum().item()
            total += target.size(0)
            # Loss is accumulated even when `return_loss` is False, so
            # `self._criterion` must already be set by this point.
            loss += self._criterion(output, target)
        acc = 100 * correct / total
        loss /= len(test_loader)
        if return_loss:
            return acc, float(loss)
        return acc
class BaseRegressor(BaseModule):
    """Base class for all ensemble regressors.
    WARNING: This class cannot be used directly.
    Please use the derived classes instead.
    """

    def _decide_n_outputs(self, train_loader):
        """
        Decide the number of outputs according to the `train_loader`.
        The number of outputs equals the number of target variables for
        regressors (e.g., `1` in univariate regression).
        """
        # Inspect only the first batch — the target shape fixes the output
        # dimensionality.
        for _, elem in enumerate(train_loader):
            _, target = split_data_target(elem, self.device)
            if len(target.size()) == 1:
                n_outputs = 1  # univariate regression
            else:
                n_outputs = target.size(1)  # multivariate regression
            break
        return n_outputs

    @torch.no_grad()
    def evaluate(self, test_loader):
        """Docstrings decorated by downstream ensembles."""
        self.eval()
        loss = 0.0
        # Average of per-batch criterion values over the whole loader.
        for _, elem in enumerate(test_loader):
            data, target = split_data_target(elem, self.device)
            output = self.forward(*data)
            loss += self._criterion(output, target)
        return float(loss) / len(test_loader)
import torch
import torch.nn as nn
import torch.nn.functional as F
from ._base import BaseClassifier, BaseRegressor
from ._base import torchensemble_model_doc
from .utils import io
from .utils import set_module
from .utils import operator as op
__all__ = ["FusionClassifier", "FusionRegressor"]
@torchensemble_model_doc(
    """Implementation on the FusionClassifier.""", "model"
)
class FusionClassifier(BaseClassifier):
    # Fusion ensemble: all base estimators are trained *jointly* through one
    # optimizer over the averaged output, unlike voting, where each estimator
    # is trained independently.

    def _forward(self, *x):
        """
        Implementation on the internal data forwarding in FusionClassifier.
        """
        # Average
        outputs = [estimator(*x) for estimator in self.estimators_]
        output = op.average(outputs)
        return output

    @torchensemble_model_doc(
        """Implementation on the data forwarding in FusionClassifier.""",
        "classifier_forward",
    )
    def forward(self, *x):
        # Averaged outputs -> class probabilities.
        output = self._forward(*x)
        proba = F.softmax(output, dim=1)
        return proba

    @torchensemble_model_doc(
        """Set the attributes on optimizer for FusionClassifier.""",
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        super().set_optimizer(optimizer_name, **kwargs)

    @torchensemble_model_doc(
        """Set the attributes on scheduler for FusionClassifier.""",
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        super().set_scheduler(scheduler_name, **kwargs)

    @torchensemble_model_doc(
        """Set the training criterion for FusionClassifier.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        super().set_criterion(criterion)

    @torchensemble_model_doc(
        """Implementation on the training stage of FusionClassifier.""", "fit"
    )
    def fit(
        self,
        train_loader,
        epochs=100,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        # Instantiate base estimators and set attributes
        for _ in range(self.n_estimators):
            self.estimators_.append(self._make_estimator())
        self._validate_parameters(epochs, log_interval)
        self.n_outputs = self._decide_n_outputs(train_loader)
        # One optimizer over the parameters of *all* base estimators.
        optimizer = set_module.set_optimizer(
            self, self.optimizer_name, **self.optimizer_args
        )
        # Set the scheduler if `set_scheduler` was called before
        if self.use_scheduler_:
            self.scheduler_ = set_module.set_scheduler(
                optimizer, self.scheduler_name, **self.scheduler_args
            )
        # Check the training criterion
        if not hasattr(self, "_criterion"):
            self._criterion = nn.CrossEntropyLoss()
        # Utils
        best_acc = 0.0
        total_iters = 0
        # Training loop
        for epoch in range(epochs):
            self.train()
            for batch_idx, elem in enumerate(train_loader):
                data, target = io.split_data_target(elem, self.device)
                batch_size = data[0].size(0)
                optimizer.zero_grad()
                # Trained on the raw averaged output (`_forward`); prediction
                # applies softmax on top (`forward`).
                output = self._forward(*data)
                loss = self._criterion(output, target)
                loss.backward()
                optimizer.step()
                # Print training status
                if batch_idx % log_interval == 0:
                    with torch.no_grad():
                        _, predicted = torch.max(output.data, 1)
                        correct = (predicted == target).sum().item()
                        msg = (
                            "Epoch: {:03d} | Batch: {:03d} | Loss:"
                            " {:.5f} | Correct: {:d}/{:d}"
                        )
                        self.logger.info(
                            msg.format(
                                epoch, batch_idx, loss, correct, batch_size
                            )
                        )
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "fusion/Train_Loss", loss, total_iters
                            )
                total_iters += 1
            # Validation
            if test_loader:
                self.eval()
                with torch.no_grad():
                    correct = 0
                    total = 0
                    for _, elem in enumerate(test_loader):
                        data, target = io.split_data_target(elem, self.device)
                        output = self.forward(*data)
                        _, predicted = torch.max(output.data, 1)
                        correct += (predicted == target).sum().item()
                        total += target.size(0)
                    acc = 100 * correct / total
                    # Checkpoint only when validation accuracy improves.
                    if acc > best_acc:
                        best_acc = acc
                        if save_model:
                            io.save(self, save_dir, self.logger)
                    msg = (
                        "Epoch: {:03d} | Validation Acc: {:.3f}"
                        " % | Historical Best: {:.3f} %"
                    )
                    self.logger.info(msg.format(epoch, acc, best_acc))
                    if self.tb_logger:
                        self.tb_logger.add_scalar(
                            "fusion/Validation_Acc", acc, epoch
                        )
            # Update the scheduler
            if hasattr(self, "scheduler_"):
                self.scheduler_.step()
        # Without validation data, save the fully-trained ensemble instead.
        if save_model and not test_loader:
            io.save(self, save_dir, self.logger)

    @torchensemble_model_doc(item="classifier_evaluate")
    def evaluate(self, test_loader, return_loss=False):
        return super().evaluate(test_loader, return_loss)

    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
@torchensemble_model_doc("""Implementation on the FusionRegressor.""", "model")
class FusionRegressor(BaseRegressor):
    # Fusion ensemble for regression: one optimizer trains all base
    # estimators jointly on the averaged prediction.

    @torchensemble_model_doc(
        """Implementation on the data forwarding in FusionRegressor.""",
        "regressor_forward",
    )
    def forward(self, *x):
        # Average
        outputs = [estimator(*x) for estimator in self.estimators_]
        pred = op.average(outputs)
        return pred

    @torchensemble_model_doc(
        """Set the attributes on optimizer for FusionRegressor.""",
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        super().set_optimizer(optimizer_name, **kwargs)

    @torchensemble_model_doc(
        """Set the attributes on scheduler for FusionRegressor.""",
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        super().set_scheduler(scheduler_name, **kwargs)

    @torchensemble_model_doc(
        """Set the training criterion for FusionRegressor.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        super().set_criterion(criterion)

    @torchensemble_model_doc(
        """Implementation on the training stage of FusionRegressor.""", "fit"
    )
    def fit(
        self,
        train_loader,
        epochs=100,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        # Instantiate base estimators and set attributes
        for _ in range(self.n_estimators):
            self.estimators_.append(self._make_estimator())
        self._validate_parameters(epochs, log_interval)
        self.n_outputs = self._decide_n_outputs(train_loader)
        # One optimizer over the parameters of *all* base estimators.
        optimizer = set_module.set_optimizer(
            self, self.optimizer_name, **self.optimizer_args
        )
        # Set the scheduler if `set_scheduler` was called before
        if self.use_scheduler_:
            self.scheduler_ = set_module.set_scheduler(
                optimizer, self.scheduler_name, **self.scheduler_args
            )
        # Check the training criterion
        if not hasattr(self, "_criterion"):
            self._criterion = nn.MSELoss()
        # Utils
        best_loss = float("inf")
        total_iters = 0
        # Training loop
        for epoch in range(epochs):
            self.train()
            for batch_idx, elem in enumerate(train_loader):
                data, target = io.split_data_target(elem, self.device)
                optimizer.zero_grad()
                output = self.forward(*data)
                loss = self._criterion(output, target)
                loss.backward()
                optimizer.step()
                # Print training status
                if batch_idx % log_interval == 0:
                    with torch.no_grad():
                        msg = "Epoch: {:03d} | Batch: {:03d} | Loss: {:.5f}"
                        self.logger.info(msg.format(epoch, batch_idx, loss))
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "fusion/Train_Loss", loss, total_iters
                            )
                total_iters += 1
            # Validation
            if test_loader:
                self.eval()
                with torch.no_grad():
                    val_loss = 0.0
                    for _, elem in enumerate(test_loader):
                        data, target = io.split_data_target(elem, self.device)
                        output = self.forward(*data)
                        val_loss += self._criterion(output, target)
                    val_loss /= len(test_loader)
                    # Checkpoint only when validation loss improves.
                    if val_loss < best_loss:
                        best_loss = val_loss
                        if save_model:
                            io.save(self, save_dir, self.logger)
                    msg = (
                        "Epoch: {:03d} | Validation Loss: {:.5f} |"
                        " Historical Best: {:.5f}"
                    )
                    self.logger.info(msg.format(epoch, val_loss, best_loss))
                    if self.tb_logger:
                        self.tb_logger.add_scalar(
                            "fusion/Validation_Loss", val_loss, epoch
                        )
            # Update the scheduler
            if hasattr(self, "scheduler_"):
                self.scheduler_.step()
        # Without validation data, save the fully-trained ensemble instead.
        if save_model and not test_loader:
            io.save(self, save_dir, self.logger)

    @torchensemble_model_doc(item="regressor_evaluate")
    def evaluate(self, test_loader):
        return super().evaluate(test_loader)

    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
import torch
import torch.nn as nn
import torch.nn.functional as F
import warnings
from joblib import Parallel, delayed
from ._base import BaseClassifier, BaseRegressor
from ._base import torchensemble_model_doc
from .utils import io
from .utils import set_module
from .utils import operator as op
__all__ = ["VotingClassifier", "VotingRegressor"]
def _parallel_fit_per_epoch(
    train_loader,
    estimator,
    cur_lr,
    optimizer,
    criterion,
    idx,
    epoch,
    log_interval,
    device,
    is_classification,
):
    """Train one base estimator for a single epoch (run in a joblib worker).

    WARNING: Parallelization when fitting large base estimators may cause
    out-of-memory error.  Returns the updated ``(estimator, optimizer)``
    pair so the caller can adopt the worker's copies.
    """
    # Workers get *copies* of the optimizer, which breaks its binding with
    # the scheduler held by the caller — re-sync the learning rate here.
    if cur_lr:
        set_module.update_lr(optimizer, cur_lr)
    for step, batch in enumerate(train_loader):
        inputs, labels = io.split_data_target(batch, device)
        n_samples = inputs[0].size(0)
        # One standard gradient update on this mini-batch.
        optimizer.zero_grad()
        preds = estimator(*inputs)
        batch_loss = criterion(preds, labels)
        batch_loss.backward()
        optimizer.step()
        if step % log_interval != 0:
            continue
        # Periodic progress report; printed because workers share no logger.
        if is_classification:
            n_correct = (torch.max(preds.data, 1)[1] == labels).sum().item()
            msg = (
                "Estimator: {:03d} | Epoch: {:03d} | Batch: {:03d}"
                " | Loss: {:.5f} | Correct: {:d}/{:d}"
            )
            print(
                msg.format(idx, epoch, step, batch_loss, n_correct, n_samples)
            )
        else:
            msg = (
                "Estimator: {:03d} | Epoch: {:03d} | Batch: {:03d}"
                " | Loss: {:.5f}"
            )
            print(msg.format(idx, epoch, step, batch_loss))
    return estimator, optimizer
@torchensemble_model_doc(
    """Implementation on the VotingClassifier.""", "model"
)
class VotingClassifier(BaseClassifier):
    # Voting ensemble: each base estimator is trained independently (possibly
    # in parallel via joblib); prediction soft-votes over their class
    # probability distributions.

    @torchensemble_model_doc(
        """Implementation on the data forwarding in VotingClassifier.""",
        "classifier_forward",
    )
    def forward(self, *x):
        # Average over class distributions from all base estimators.
        outputs = [
            F.softmax(estimator(*x), dim=1) for estimator in self.estimators_
        ]
        proba = op.average(outputs)
        return proba

    @torchensemble_model_doc(
        """Set the attributes on optimizer for VotingClassifier.""",
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        super().set_optimizer(optimizer_name, **kwargs)

    @torchensemble_model_doc(
        """Set the attributes on scheduler for VotingClassifier.""",
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        super().set_scheduler(scheduler_name, **kwargs)

    @torchensemble_model_doc(
        """Set the training criterion for VotingClassifier.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        super().set_criterion(criterion)

    @torchensemble_model_doc(
        """Implementation on the training stage of VotingClassifier.""", "fit"
    )
    def fit(
        self,
        train_loader,
        epochs=100,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        self._validate_parameters(epochs, log_interval)
        self.n_outputs = self._decide_n_outputs(train_loader)
        # Instantiate a pool of base estimators, optimizers, and schedulers.
        estimators = []
        for _ in range(self.n_estimators):
            estimators.append(self._make_estimator())
        optimizers = []
        for i in range(self.n_estimators):
            optimizers.append(
                set_module.set_optimizer(
                    estimators[i], self.optimizer_name, **self.optimizer_args
                )
            )
        # A single scheduler is bound to `optimizers[0]` only; its learning
        # rate is re-broadcast to every worker each epoch (see `cur_lr`).
        if self.use_scheduler_:
            scheduler_ = set_module.set_scheduler(
                optimizers[0], self.scheduler_name, **self.scheduler_args
            )
        # Check the training criterion
        if not hasattr(self, "_criterion"):
            self._criterion = nn.CrossEntropyLoss()
        # Utils
        best_acc = 0.0

        # Internal helper function on pesudo forward
        def _forward(estimators, *x):
            outputs = [
                F.softmax(estimator(*x), dim=1) for estimator in estimators
            ]
            proba = op.average(outputs)
            return proba

        # Maintain a pool of workers
        with Parallel(n_jobs=self.n_jobs) as parallel:
            # Training loop
            for epoch in range(epochs):
                self.train()
                if self.use_scheduler_:
                    cur_lr = scheduler_.get_last_lr()[0]
                else:
                    cur_lr = None
                if self.n_jobs and self.n_jobs > 1:
                    msg = "Parallelization on the training epoch: {:03d}"
                    self.logger.info(msg.format(epoch))
                # Train every base estimator for one epoch, independently.
                rets = parallel(
                    delayed(_parallel_fit_per_epoch)(
                        train_loader,
                        estimator,
                        cur_lr,
                        optimizer,
                        self._criterion,
                        idx,
                        epoch,
                        log_interval,
                        self.device,
                        True,
                    )
                    for idx, (estimator, optimizer) in enumerate(
                        zip(estimators, optimizers)
                    )
                )
                # Workers return their own copies; adopt them for next epoch.
                estimators, optimizers = [], []
                for estimator, optimizer in rets:
                    estimators.append(estimator)
                    optimizers.append(optimizer)
                # Validation
                if test_loader:
                    self.eval()
                    with torch.no_grad():
                        correct = 0
                        total = 0
                        for _, elem in enumerate(test_loader):
                            data, target = io.split_data_target(
                                elem, self.device
                            )
                            output = _forward(estimators, *data)
                            _, predicted = torch.max(output.data, 1)
                            correct += (predicted == target).sum().item()
                            total += target.size(0)
                        acc = 100 * correct / total
                        # Checkpoint the best-performing set of estimators.
                        if acc > best_acc:
                            best_acc = acc
                            self.estimators_ = nn.ModuleList()
                            self.estimators_.extend(estimators)
                            if save_model:
                                io.save(self, save_dir, self.logger)
                        msg = (
                            "Epoch: {:03d} | Validation Acc: {:.3f}"
                            " % | Historical Best: {:.3f} %"
                        )
                        self.logger.info(msg.format(epoch, acc, best_acc))
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "voting/Validation_Acc", acc, epoch
                            )
                # Update the scheduler
                with warnings.catch_warnings():
                    # UserWarning raised by PyTorch is ignored because
                    # scheduler does not have a real effect on the optimizer.
                    warnings.simplefilter("ignore", UserWarning)
                    if self.use_scheduler_:
                        scheduler_.step()
        self.estimators_ = nn.ModuleList()
        self.estimators_.extend(estimators)
        if save_model and not test_loader:
            io.save(self, save_dir, self.logger)

    @torchensemble_model_doc(item="classifier_evaluate")
    def evaluate(self, test_loader, return_loss=False):
        return super().evaluate(test_loader, return_loss)

    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
@torchensemble_model_doc("""Implementation on the VotingRegressor.""", "model")
class VotingRegressor(BaseRegressor):
    # Voting ensemble for regression: base estimators trained independently;
    # prediction averages their outputs.

    @torchensemble_model_doc(
        """Implementation on the data forwarding in VotingRegressor.""",
        "regressor_forward",
    )
    def forward(self, *x):
        # Average over predictions from all base estimators.
        outputs = [estimator(*x) for estimator in self.estimators_]
        pred = op.average(outputs)
        return pred

    @torchensemble_model_doc(
        """Set the attributes on optimizer for VotingRegressor.""",
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        super().set_optimizer(optimizer_name, **kwargs)

    @torchensemble_model_doc(
        """Set the attributes on scheduler for VotingRegressor.""",
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        super().set_scheduler(scheduler_name, **kwargs)

    @torchensemble_model_doc(
        """Set the training criterion for VotingRegressor.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        super().set_criterion(criterion)

    @torchensemble_model_doc(
        """Implementation on the training stage of VotingRegressor.""", "fit"
    )
    def fit(
        self,
        train_loader,
        epochs=100,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        self._validate_parameters(epochs, log_interval)
        self.n_outputs = self._decide_n_outputs(train_loader)
        # Instantiate a pool of base estimators, optimizers, and schedulers.
        estimators = []
        for _ in range(self.n_estimators):
            estimators.append(self._make_estimator())
        optimizers = []
        for i in range(self.n_estimators):
            optimizers.append(
                set_module.set_optimizer(
                    estimators[i], self.optimizer_name, **self.optimizer_args
                )
            )
        # A single scheduler is bound to `optimizers[0]` only; its learning
        # rate is re-broadcast to every worker each epoch (see `cur_lr`).
        if self.use_scheduler_:
            scheduler_ = set_module.set_scheduler(
                optimizers[0], self.scheduler_name, **self.scheduler_args
            )
        # Check the training criterion
        if not hasattr(self, "_criterion"):
            self._criterion = nn.MSELoss()
        # Utils
        best_loss = float("inf")

        # Internal helper function on pesudo forward
        def _forward(estimators, *x):
            outputs = [estimator(*x) for estimator in estimators]
            pred = op.average(outputs)
            return pred

        # Maintain a pool of workers
        with Parallel(n_jobs=self.n_jobs) as parallel:
            # Training loop
            for epoch in range(epochs):
                self.train()
                if self.use_scheduler_:
                    cur_lr = scheduler_.get_last_lr()[0]
                else:
                    cur_lr = None
                if self.n_jobs and self.n_jobs > 1:
                    msg = "Parallelization on the training epoch: {:03d}"
                    self.logger.info(msg.format(epoch))
                # Train every base estimator for one epoch, independently.
                rets = parallel(
                    delayed(_parallel_fit_per_epoch)(
                        train_loader,
                        estimator,
                        cur_lr,
                        optimizer,
                        self._criterion,
                        idx,
                        epoch,
                        log_interval,
                        self.device,
                        False,
                    )
                    for idx, (estimator, optimizer) in enumerate(
                        zip(estimators, optimizers)
                    )
                )
                # Workers return their own copies; adopt them for next epoch.
                estimators, optimizers = [], []
                for estimator, optimizer in rets:
                    estimators.append(estimator)
                    optimizers.append(optimizer)
                # Validation
                if test_loader:
                    self.eval()
                    with torch.no_grad():
                        val_loss = 0.0
                        for _, elem in enumerate(test_loader):
                            data, target = io.split_data_target(
                                elem, self.device
                            )
                            output = _forward(estimators, *data)
                            val_loss += self._criterion(output, target)
                        val_loss /= len(test_loader)
                        # Checkpoint the best-performing set of estimators.
                        if val_loss < best_loss:
                            best_loss = val_loss
                            self.estimators_ = nn.ModuleList()
                            self.estimators_.extend(estimators)
                            if save_model:
                                io.save(self, save_dir, self.logger)
                        msg = (
                            "Epoch: {:03d} | Validation Loss:"
                            " {:.5f} | Historical Best: {:.5f}"
                        )
                        self.logger.info(
                            msg.format(epoch, val_loss, best_loss)
                        )
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "voting/Validation_Loss", val_loss, epoch
                            )
                # Update the scheduler
                with warnings.catch_warnings():
                    # The scheduler's optimizer copy is stale in the workers,
                    # so PyTorch's ordering warning is irrelevant here.
                    warnings.simplefilter("ignore", UserWarning)
                    if self.use_scheduler_:
                        scheduler_.step()
        self.estimators_ = nn.ModuleList()
        self.estimators_.extend(estimators)
        if save_model and not test_loader:
            io.save(self, save_dir, self.logger)

    @torchensemble_model_doc(item="regressor_evaluate")
    def evaluate(self, test_loader):
        return super().evaluate(test_loader)

    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
import torch
import torch.nn as nn
import torch.nn.functional as F
import warnings
from joblib import Parallel, delayed
from ._base import BaseModule, BaseClassifier, BaseRegressor
from ._base import torchensemble_model_doc
from .utils import io
from .utils import set_module
from .utils import operator as op
__all__ = ["AdversarialTrainingClassifier", "AdversarialTrainingRegressor"]
__fit_doc = """
Parameters
----------
train_loader : torch.utils.data.DataLoader
A :mod:`torch.utils.data.DataLoader` container that contains the
training data.
epochs : int, default=100
The number of training epochs.
epsilon : float, default=0.01
The step used to generate adversarial samples in the fast gradient
sign method (FGSM), which should be in the range [0, 1].
log_interval : int, default=100
The number of batches to wait before logging the training status.
test_loader : torch.utils.data.DataLoader, default=None
A :mod:`torch.utils.data.DataLoader` container that contains the
evaluating data.
- If ``None``, no validation is conducted after each training
epoch.
- If not ``None``, the ensemble will be evaluated on this
dataloader after each training epoch.
save_model : bool, default=True
Specify whether to save the model parameters.
- If test_loader is ``None``, the ensemble fully trained will be
saved.
- If test_loader is not ``None``, the ensemble with the best
validation performance will be saved.
save_dir : string, default=None
Specify where to save the model parameters.
- If ``None``, the model will be saved in the current directory.
- If not ``None``, the model will be saved in the specified
directory: ``save_dir``.
"""
def _adversarial_training_model_doc(header, item="fit"):
    """
    Decorator on obtaining documentation for different adversarial training
    models.
    """

    def _attach(obj):
        # Prepend the header, then splice in the selected template.
        templates = {"fit": __fit_doc}
        obj.__doc__ = "".join([header + "\n\n", *templates[item]])
        return obj

    return _attach
def _parallel_fit_per_epoch(
    train_loader,
    epsilon,
    estimator,
    cur_lr,
    optimizer,
    criterion,
    idx,
    epoch,
    log_interval,
    device,
    is_classification,
):
    """
    Private function used to fit base estimators in parallel.
    WARNING: Parallelization when fitting large base estimators may cause
    out-of-memory error.
    """
    if cur_lr:
        # Parallelization corrupts the binding between optimizer and scheduler
        set_module.update_lr(optimizer, cur_lr)
    for batch_idx, elem in enumerate(train_loader):
        data, target = io.split_data_target(elem, device)
        batch_size = data[0].size(0)
        # Track gradients w.r.t. the *inputs* so FGSM can perturb them.
        for tensor in data:
            tensor.requires_grad = True
        # Get adversarial samples
        _output = estimator(*data)
        _loss = criterion(_output, target)
        _loss.backward()
        data_grad = [tensor.grad.data for tensor in data]
        adv_data = _get_fgsm_samples(data, epsilon, data_grad)
        # Compute the training loss
        # (zero_grad also discards the parameter gradients accumulated by
        # the adversarial-sample backward pass above)
        optimizer.zero_grad()
        org_output = estimator(*data)
        adv_output = estimator(*adv_data)
        # Joint objective: clean-sample loss + adversarial-sample loss.
        loss = criterion(org_output, target) + criterion(adv_output, target)
        loss.backward()
        optimizer.step()
        # Print training status
        if batch_idx % log_interval == 0:
            # Classification
            if is_classification:
                _, predicted = torch.max(org_output.data, 1)
                correct = (predicted == target).sum().item()
                msg = (
                    "Estimator: {:03d} | Epoch: {:03d} | Batch: {:03d}"
                    " | Loss: {:.5f} | Correct: {:d}/{:d}"
                )
                print(
                    msg.format(
                        idx, epoch, batch_idx, loss, correct, batch_size
                    )
                )
            # Regression
            else:
                msg = (
                    "Estimator: {:03d} | Epoch: {:03d} | Batch: {:03d}"
                    " | Loss: {:.5f}"
                )
                print(msg.format(idx, epoch, batch_idx, loss))
    return estimator, optimizer
def _get_fgsm_samples(sample_list, epsilon, sample_grad_list):
"""
Private functions used to generate adversarial samples with fast gradient
sign method (FGSM).
"""
perturbed_sample_list = []
for sample, sample_grad in zip(sample_list, sample_grad_list):
# Check the input range of `sample`
min_value, max_value = torch.min(sample), torch.max(sample)
if not 0 <= min_value < max_value <= 1:
msg = (
"The input range of samples passed to adversarial training"
" should be in the range [0, 1], but got [{:.3f}, {:.3f}]"
" instead."
)
raise ValueError(msg.format(min_value, max_value))
sign_sample_grad = sample_grad.sign()
perturbed_sample = sample + epsilon * sign_sample_grad
perturbed_sample = torch.clamp(perturbed_sample, 0, 1)
perturbed_sample_list.append(perturbed_sample)
return perturbed_sample_list
class _BaseAdversarialTraining(BaseModule):
    """Shared validation logic for the adversarial training ensembles,
    which take an extra FGSM step size ``epsilon``."""

    def _validate_parameters(self, epochs, epsilon, log_interval):
        """Validate hyper-parameters on training the ensemble.

        Parameters
        ----------
        epochs : int
            Number of training epochs; must be strictly positive.
        epsilon : float
            FGSM perturbation step; must lie in (0, 1].
        log_interval : int
            Batches to wait between status prints; must be strictly positive.

        Raises
        ------
        ValueError
            If any hyper-parameter is out of range (also logged as an error).
        """
        if not epochs > 0:
            msg = (
                "The number of training epochs = {} should be strictly"
                " positive."
            )
            self.logger.error(msg.format(epochs))
            raise ValueError(msg.format(epochs))
        if not 0 < epsilon <= 1:
            msg = (
                "The step used to generate adversarial samples in FGSM"
                " should be in the range (0, 1], but got {} instead."
            )
            self.logger.error(msg.format(epsilon))
            raise ValueError(msg.format(epsilon))
        if not log_interval > 0:
            # Fix: "printting" -> "printing" (now matches the identical
            # message in BaseModule._validate_parameters).
            msg = (
                "The number of batches to wait before printing the"
                " training status should be strictly positive, but got {}"
                " instead."
            )
            self.logger.error(msg.format(log_interval))
            raise ValueError(msg.format(log_interval))
@torchensemble_model_doc(
"""Implementation on the AdversarialTrainingClassifier.""", # noqa: E501
"model",
)
class AdversarialTrainingClassifier(_BaseAdversarialTraining, BaseClassifier):
@torchensemble_model_doc(
"""Implementation on the data forwarding in AdversarialTrainingClassifier.""", # noqa: E501
"classifier_forward",
)
def forward(self, *x):
# Take the average over class distributions from all base estimators.
outputs = [
F.softmax(estimator(*x), dim=1) for estimator in self.estimators_
]
proba = op.average(outputs)
return proba
@torchensemble_model_doc(
"""Set the attributes on optimizer for AdversarialTrainingClassifier.""", # noqa: E501
"set_optimizer",
)
    def set_optimizer(self, optimizer_name, **kwargs):
        # Records the optimizer configuration; delegates to the base class.
        super().set_optimizer(optimizer_name, **kwargs)
@torchensemble_model_doc(
"""Set the attributes on scheduler for AdversarialTrainingClassifier.""", # noqa: E501
"set_scheduler",
)
def set_scheduler(self, scheduler_name, **kwargs):
super().set_scheduler(scheduler_name, **kwargs)
@torchensemble_model_doc(
"""Set the training criterion for AdversarialTrainingClassifier.""",
"set_criterion",
)
def set_criterion(self, criterion):
super().set_criterion(criterion)
@_adversarial_training_model_doc(
"""Implementation on the training stage of AdversarialTrainingClassifier.""", # noqa: E501
"fit",
)
def fit(
self,
train_loader,
epochs=100,
epsilon=0.5,
log_interval=100,
test_loader=None,
save_model=True,
save_dir=None,
):
self._validate_parameters(epochs, epsilon, log_interval)
self.n_outputs = self._decide_n_outputs(train_loader)
# Instantiate a pool of base estimators, optimizers, and schedulers.
estimators = []
for _ in range(self.n_estimators):
estimators.append(self._make_estimator())
optimizers = []
for i in range(self.n_estimators):
optimizers.append(
set_module.set_optimizer(
estimators[i], self.optimizer_name, **self.optimizer_args
)
)
if self.use_scheduler_:
scheduler_ = set_module.set_scheduler(
optimizers[0], self.scheduler_name, **self.scheduler_args
)
# Check the training criterion
if not hasattr(self, "_criterion"):
self._criterion = nn.CrossEntropyLoss()
# Utils
best_acc = 0.0
# Internal helper function on pesudo forward
def _forward(estimators, *x):
outputs = [
F.softmax(estimator(*x), dim=1) for estimator in estimators
]
proba = op.average(outputs)
return proba
# Maintain a pool of workers
with Parallel(n_jobs=self.n_jobs) as parallel:
# Training loop
for epoch in range(epochs):
self.train()
if self.use_scheduler_:
cur_lr = scheduler_.get_last_lr()[0]
else:
cur_lr = None
if self.n_jobs and self.n_jobs > 1:
msg = "Parallelization on the training epoch: {:03d}"
self.logger.info(msg.format(epoch))
rets = parallel(
delayed(_parallel_fit_per_epoch)(
train_loader,
epsilon,
estimator,
cur_lr,
optimizer,
self._criterion,
idx,
epoch,
log_interval,
self.device,
False,
)
for idx, (estimator, optimizer) in enumerate(
zip(estimators, optimizers)
)
)
estimators, optimizers = [], []
for estimator, optimizer in rets:
estimators.append(estimator)
optimizers.append(optimizer)
# Validation
if test_loader:
self.eval()
with torch.no_grad():
correct = 0
total = 0
for _, elem in enumerate(test_loader):
data, target = io.split_data_target(
elem, self.device
)
output = _forward(estimators, *data)
_, predicted = torch.max(output.data, 1)
correct += (predicted == target).sum().item()
total += target.size(0)
acc = 100 * correct / total
if acc > best_acc:
best_acc = acc
self.estimators_ = nn.ModuleList() # reset
self.estimators_.extend(estimators)
if save_model:
io.save(self, save_dir, self.logger)
msg = (
"Epoch: {:03d} | Validation Acc: {:.3f}"
" % | Historical Best: {:.3f} %"
)
self.logger.info(msg.format(epoch, acc, best_acc))
if self.tb_logger:
self.tb_logger.add_scalar(
"adversarial_training/Validation_Acc",
acc,
epoch,
)
# Update the scheduler
with warnings.catch_warnings():
# UserWarning raised by PyTorch is ignored because
# scheduler does not have a real effect on the optimizer.
warnings.simplefilter("ignore", UserWarning)
if self.use_scheduler_:
scheduler_.step()
self.estimators_ = nn.ModuleList()
self.estimators_.extend(estimators)
if save_model and not test_loader:
io.save(self, save_dir, self.logger)
@torchensemble_model_doc(item="classifier_evaluate")
def evaluate(self, test_loader, return_loss=False):
return super().evaluate(test_loader, return_loss)
@torchensemble_model_doc(item="predict")
def predict(self, *x):
return super().predict(*x)
# Adversarial-training ensemble for regression: every base estimator is
# fitted on both the clean batches and their FGSM-perturbed counterparts (the
# actual per-epoch routine lives in ``_parallel_fit_per_epoch``).
@torchensemble_model_doc(
    """Implementation on the AdversarialTrainingRegressor.""", # noqa: E501
    "model",
)
class AdversarialTrainingRegressor(_BaseAdversarialTraining, BaseRegressor):
    @torchensemble_model_doc(
        """Implementation on the data forwarding in AdversarialTrainingRegressor.""", # noqa: E501
        "regressor_forward",
    )
    def forward(self, *x):
        # Take the average over predictions from all base estimators.
        outputs = [estimator(*x) for estimator in self.estimators_]
        pred = op.average(outputs)
        return pred

    @torchensemble_model_doc(
        """Set the attributes on optimizer for AdversarialTrainingRegressor.""", # noqa: E501
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        # Thin wrapper; exists so the decorator can attach the shared docs.
        super().set_optimizer(optimizer_name, **kwargs)

    @torchensemble_model_doc(
        """Set the attributes on scheduler for AdversarialTrainingRegressor.""", # noqa: E501
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        # Thin wrapper; exists so the decorator can attach the shared docs.
        super().set_scheduler(scheduler_name, **kwargs)

    @torchensemble_model_doc(
        """Set the training criterion for AdversarialTrainingRegressor.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        # Thin wrapper; exists so the decorator can attach the shared docs.
        super().set_criterion(criterion)

    @_adversarial_training_model_doc(
        """Implementation on the training stage of AdversarialTrainingRegressor.""", # noqa: E501
        "fit",
    )
    def fit(
        self,
        train_loader,
        epochs=100,
        epsilon=0.5,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        # Sanity-check hyper-parameters before any work is done.
        self._validate_parameters(epochs, epsilon, log_interval)
        self.n_outputs = self._decide_n_outputs(train_loader)
        # Instantiate a pool of base estimators, optimizers, and schedulers.
        estimators = []
        for _ in range(self.n_estimators):
            estimators.append(self._make_estimator())
        optimizers = []
        for i in range(self.n_estimators):
            optimizers.append(
                set_module.set_optimizer(
                    estimators[i], self.optimizer_name, **self.optimizer_args
                )
            )
        # NOTE(review): a single scheduler is attached to optimizers[0] only;
        # its current lr is broadcast to every worker through ``cur_lr`` below.
        if self.use_scheduler_:
            scheduler_ = set_module.set_scheduler(
                optimizers[0], self.scheduler_name, **self.scheduler_args
            )
        # Check the training criterion
        if not hasattr(self, "_criterion"):
            self._criterion = nn.MSELoss()
        # Utils
        best_loss = float("inf")

        # Internal helper function on pseudo forward
        def _forward(estimators, *x):
            # Same averaging rule as ``forward``, but over the local list of
            # in-training estimators instead of ``self.estimators_``.
            outputs = [estimator(*x) for estimator in estimators]
            pred = op.average(outputs)
            return pred

        # Maintain a pool of workers
        with Parallel(n_jobs=self.n_jobs) as parallel:
            # Training loop
            for epoch in range(epochs):
                self.train()
                if self.use_scheduler_:
                    cur_lr = scheduler_.get_last_lr()[0]
                else:
                    cur_lr = None
                if self.n_jobs and self.n_jobs > 1:
                    msg = "Parallelization on the training epoch: {:03d}"
                    self.logger.info(msg.format(epoch))
                # Train every base estimator for one epoch, possibly in
                # parallel; the trailing ``True`` selects the regression
                # branch of ``_parallel_fit_per_epoch`` (the classifier
                # variant passes ``False``).
                rets = parallel(
                    delayed(_parallel_fit_per_epoch)(
                        train_loader,
                        epsilon,
                        estimator,
                        cur_lr,
                        optimizer,
                        self._criterion,
                        idx,
                        epoch,
                        log_interval,
                        self.device,
                        True,
                    )
                    for idx, (estimator, optimizer) in enumerate(
                        zip(estimators, optimizers)
                    )
                )
                # Collect the updated estimators/optimizers from the workers.
                estimators, optimizers = [], []
                for estimator, optimizer in rets:
                    estimators.append(estimator)
                    optimizers.append(optimizer)
                # Validation
                if test_loader:
                    self.eval()
                    with torch.no_grad():
                        val_loss = 0.0
                        for _, elem in enumerate(test_loader):
                            data, target = io.split_data_target(
                                elem, self.device
                            )
                            output = _forward(estimators, *data)
                            val_loss += self._criterion(output, target)
                        val_loss /= len(test_loader)
                        # Snapshot the ensemble whenever validation loss
                        # improves on the historical best.
                        if val_loss < best_loss:
                            best_loss = val_loss
                            self.estimators_ = nn.ModuleList()
                            self.estimators_.extend(estimators)
                            if save_model:
                                io.save(self, save_dir, self.logger)
                        msg = (
                            "Epoch: {:03d} | Validation Loss:"
                            " {:.5f} | Historical Best: {:.5f}"
                        )
                        self.logger.info(
                            msg.format(epoch, val_loss, best_loss)
                        )
                        if self.tb_logger:
                            # Fix: tag was misspelled "adversirial_training";
                            # now consistent with the classifier's tag prefix.
                            self.tb_logger.add_scalar(
                                "adversarial_training/Validation_Loss",
                                val_loss,
                                epoch,
                            )
                # Update the scheduler
                with warnings.catch_warnings():
                    # UserWarning raised by PyTorch is ignored because
                    # scheduler does not have a real effect on the optimizer.
                    warnings.simplefilter("ignore", UserWarning)
                    if self.use_scheduler_:
                        scheduler_.step()
        # Expose the final estimators; when a validation loader was given,
        # the best snapshot has already been saved above.
        self.estimators_ = nn.ModuleList()
        self.estimators_.extend(estimators)
        if save_model and not test_loader:
            io.save(self, save_dir, self.logger)

    @torchensemble_model_doc(item="regressor_evaluate")
    def evaluate(self, test_loader):
        return super().evaluate(test_loader)

    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
import abc
import torch
import logging
import warnings
import torch.nn as nn
import torch.nn.functional as F
from ._base import BaseModule, BaseClassifier, BaseRegressor
from ._base import torchensemble_model_doc
from .utils import io
from .utils import set_module
from .utils import operator as op
from .utils.logging import get_tb_logger
__all__ = ["GradientBoostingClassifier", "GradientBoostingRegressor"]
# ---------------------------------------------------------------------------
# Shared documentation templates: ``_gradient_boosting_model_doc`` (defined
# below) injects these into the docstrings of the gradient boosting models.
# ---------------------------------------------------------------------------
__model_doc = """
Parameters
----------
estimator : torch.nn.Module
The class or object of your base estimator.
- If :obj:`class`, it should inherit from :mod:`torch.nn.Module`.
- If :obj:`object`, it should be instantiated from a class inherited
from :mod:`torch.nn.Module`.
n_estimators : int
The number of base estimators in the ensemble.
estimator_args : dict, default=None
The dictionary of hyper-parameters used to instantiate base
estimators. This parameter will have no effect if ``estimator`` is a
base estimator object after instantiation.
shrinkage_rate : float, default=1
The shrinkage rate used in gradient boosting.
cuda : bool, default=True
- If ``True``, use GPU to train and evaluate the ensemble.
- If ``False``, use CPU to train and evaluate the ensemble.
Attributes
----------
estimators_ : torch.nn.ModuleList
An internal container that stores all fitted base estimators.
"""
__fit_doc = """
Parameters
----------
train_loader : torch.utils.data.DataLoader
A data loader that contains the training data.
epochs : int, default=100
The number of training epochs per base estimator.
use_reduction_sum : bool, default=True
Whether to set ``reduction="sum"`` for the internal mean squared
error used to fit each base estimator.
log_interval : int, default=100
The number of batches to wait before logging the training status.
test_loader : torch.utils.data.DataLoader, default=None
A data loader that contains the evaluating data.
- If ``None``, no validation is conducted after each base
estimator being trained.
- If not ``None``, the ensemble will be evaluated on this
dataloader after each base estimator being trained.
early_stopping_rounds : int, default=2
Specify the number of tolerant rounds for early stopping. When the
validation performance of the ensemble does not improve after
adding the base estimator fitted in current iteration, the internal
counter on early stopping will increase by one. When the value of
the internal counter reaches ``early_stopping_rounds``, the
training stage will terminate instantly.
save_model : bool, default=True
Specify whether to save the model parameters.
- If test_loader is ``None``, the ensemble containing
``n_estimators`` base estimators will be saved.
- If test_loader is not ``None``, the ensemble with the best
validation performance will be saved.
save_dir : string, default=None
Specify where to save the model parameters.
- If ``None``, the model will be saved in the current directory.
- If not ``None``, the model will be saved in the specified
directory: ``save_dir``.
"""
def _gradient_boosting_model_doc(header, item="model"):
    """
    Decorator on obtaining documentation for different gradient boosting
    models.
    """

    def _select_doc(which):
        """Return the documentation template matching ``which``."""
        templates = {"model": __model_doc, "fit": __fit_doc}
        return templates[which]

    def adddoc(cls):
        """Attach ``header`` plus the selected template as the docstring."""
        cls.__doc__ = header + "\n\n" + _select_doc(item)
        return cls

    return adddoc
class _BaseGradientBoosting(BaseModule):
    """Shared logic for gradient boosting ensembles.
    Base estimators are fitted sequentially: each one is trained with an
    internal MSE criterion to predict the pseudo residuals of the partial
    ensemble built so far (``_pseudo_residual`` is supplied by subclasses),
    and predictions are the shrinkage-weighted sum of all estimator outputs.
    """
    def __init__(
        self,
        estimator,
        n_estimators,
        estimator_args=None,
        shrinkage_rate=1.0,
        cuda=True,
    ):
        # NOTE(review): ``super(BaseModule, self)`` deliberately skips
        # ``BaseModule.__init__`` and calls the next initializer in the MRO
        # (presumably ``nn.Module``) -- confirm this is intentional.
        super(BaseModule, self).__init__()
        self.base_estimator_ = estimator
        self.n_estimators = n_estimators
        self.estimator_args = estimator_args
        if estimator_args and not isinstance(estimator, type):
            msg = (
                "The input `estimator_args` will have no effect since"
                " `estimator` is already an object after instantiation."
            )
            warnings.warn(msg, RuntimeWarning)
        self.shrinkage_rate = shrinkage_rate
        self.device = torch.device("cuda" if cuda else "cpu")
        self.logger = logging.getLogger()
        self.tb_logger = get_tb_logger()
        # Container of fitted base estimators; may be truncated on early stop.
        self.estimators_ = nn.ModuleList()
        self.use_scheduler_ = False
    def _validate_parameters(
        self, epochs, log_interval, early_stopping_rounds
    ):
        """Validate hyper-parameters on training the ensemble."""
        if not epochs > 0:
            msg = (
                "The number of training epochs = {} should be strictly"
                " positive."
            )
            self.logger.error(msg.format(epochs))
            raise ValueError(msg.format(epochs))
        if not log_interval > 0:
            msg = (
                "The number of batches to wait before printting the"
                " training status should be strictly positive, but got {}"
                " instead."
            )
            self.logger.error(msg.format(log_interval))
            raise ValueError(msg.format(log_interval))
        if not early_stopping_rounds >= 1:
            msg = (
                "The number of tolerant rounds before triggering the"
                " early stopping should at least be 1, but got {} instead."
            )
            self.logger.error(msg.format(early_stopping_rounds))
            raise ValueError(msg.format(early_stopping_rounds))
        if not 0 < self.shrinkage_rate <= 1:
            msg = (
                "The shrinkage rate should be in the range (0, 1], but got"
                " {} instead."
            )
            self.logger.error(msg.format(self.shrinkage_rate))
            raise ValueError(msg.format(self.shrinkage_rate))
    @abc.abstractmethod
    def _handle_early_stopping(self, test_loader, est_idx):
        """Decide whether to trigger the internal counter on early stopping."""
    def _staged_forward(self, est_idx, *x):
        """
        Return the accumulated outputs from the first `est_idx+1` base
        estimators.
        """
        if est_idx >= self.n_estimators:
            msg = (
                "est_idx = {} should be an integer smaller than the"
                " number of base estimators = {}."
            )
            self.logger.error(msg.format(est_idx, self.n_estimators))
            raise ValueError(msg.format(est_idx, self.n_estimators))
        outputs = [
            estimator(*x) for estimator in self.estimators_[: est_idx + 1]
        ]
        out = op.sum_with_multiplicative(outputs, self.shrinkage_rate)
        return out
    def fit(
        self,
        train_loader,
        epochs=100,
        use_reduction_sum=True,
        log_interval=100,
        test_loader=None,
        early_stopping_rounds=2,
        save_model=True,
        save_dir=None,
    ):
        # Instantiate base estimators and set attributes
        for _ in range(self.n_estimators):
            self.estimators_.append(self._make_estimator())
        self._validate_parameters(epochs, log_interval, early_stopping_rounds)
        self.n_outputs = self._decide_n_outputs(train_loader)
        # Utils
        criterion = (
            nn.MSELoss(reduction="sum") if use_reduction_sum else nn.MSELoss()
        )
        n_counter = 0  # a counter on early stopping
        # Fit estimators one by one, each on the residuals of its
        # predecessors.
        for est_idx, estimator in enumerate(self.estimators_):
            # Initialize a optimizer and scheduler for each base estimator to
            # avoid unexpected dependencies.
            learner_optimizer = set_module.set_optimizer(
                estimator, self.optimizer_name, **self.optimizer_args
            )
            if self.use_scheduler_:
                learner_scheduler = set_module.set_scheduler(
                    learner_optimizer,
                    self.scheduler_name,
                    **self.scheduler_args # noqa: E501
                )
            # Training loop
            estimator.train()
            total_iters = 0
            for epoch in range(epochs):
                for batch_idx, elem in enumerate(train_loader):
                    data, target = io.split_data_target(elem, self.device)
                    # Compute the learning target of the current estimator
                    residual = self._pseudo_residual(est_idx, target, *data)
                    output = estimator(*data)
                    loss = criterion(output, residual)
                    learner_optimizer.zero_grad()
                    loss.backward()
                    learner_optimizer.step()
                    # Print training status
                    if batch_idx % log_interval == 0:
                        msg = (
                            "Estimator: {:03d} | Epoch: {:03d} | Batch:"
                            " {:03d} | RegLoss: {:.5f}"
                        )
                        self.logger.info(
                            msg.format(est_idx, epoch, batch_idx, loss)
                        )
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "gradient_boosting/Est_{}/Train_Loss".format(
                                    est_idx
                                ),
                                loss,
                                total_iters,
                            )
                        total_iters += 1
                if self.use_scheduler_:
                    learner_scheduler.step()
            # Validation
            if test_loader:
                flag = self._handle_early_stopping(test_loader, est_idx)
                if flag:
                    n_counter += 1
                    msg = "Early stopping counter: {} out of {}"
                    self.logger.info(
                        msg.format(n_counter, early_stopping_rounds)
                    )
                    if n_counter == early_stopping_rounds:
                        msg = "Handling early stopping..."
                        self.logger.info(msg)
                        # Early stopping
                        # Keep estimators up to the last improving round and
                        # discard the non-improving (and untrained) tail.
                        offset = est_idx - n_counter
                        self.estimators_ = self.estimators_[: offset + 1]
                        self.n_estimators = len(self.estimators_)
                        break
                else:
                    # Reset the counter if the performance improves
                    n_counter = 0
        # Post-processing
        msg = "The optimal number of base estimators: {}"
        self.logger.info(msg.format(len(self.estimators_)))
        if save_model:
            io.save(self, save_dir, self.logger)
@_gradient_boosting_model_doc(
    """Implementation on the GradientBoostingClassifier.""", "model"
)
class GradientBoostingClassifier(_BaseGradientBoosting, BaseClassifier):
    def _pseudo_residual(self, est_idx, y, *x):
        """Compute pseudo residuals in classification."""
        # Accumulated (shrinkage-weighted) output of the first ``est_idx``
        # fitted estimators.
        output = torch.zeros(y.size(0), self.n_outputs).to(self.device)
        # Before fitting the first estimator, we simply assume that GBM
        # outputs 0 for any input (i.e., a null output).
        if est_idx > 0:
            results = [
                estimator(*x) for estimator in self.estimators_[:est_idx]
            ]
            output += op.sum_with_multiplicative(results, self.shrinkage_rate)
        pseudo_residual = op.pseudo_residual_classification(
            y, output, self.n_outputs
        )
        return pseudo_residual
    def _handle_early_stopping(self, test_loader, est_idx):
        # Compute the validation accuracy of base estimators fitted so far
        self.eval()
        correct = 0
        total = 0
        flag = False
        with torch.no_grad():
            for _, elem in enumerate(test_loader):
                data, target = io.split_data_target(elem, self.device)
                output = F.softmax(self._staged_forward(est_idx, *data), dim=1)
                _, predicted = torch.max(output.data, 1)
                correct += (predicted == target).sum().item()
                total += target.size(0)
        acc = 100 * correct / total
        # The first fitted estimator initializes the historical best; later
        # rounds flag early stopping whenever accuracy fails to improve.
        if est_idx == 0:
            self.best_acc = acc
        else:
            if acc > self.best_acc:
                self.best_acc = acc
            else:
                flag = True
        msg = "Validation Acc: {:.3f} % | Historical Best: {:.3f} %"
        self.logger.info(msg.format(acc, self.best_acc))
        if self.tb_logger:
            self.tb_logger.add_scalar(
                "gradient_boosting/Validation_Acc", acc, est_idx
            )
        return flag
    @torchensemble_model_doc(
        """Set the attributes on optimizer for GradientBoostingClassifier.""",
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        # Thin wrapper; exists so the decorator can attach the shared docs.
        super().set_optimizer(optimizer_name, **kwargs)
    @torchensemble_model_doc(
        """Set the attributes on scheduler for GradientBoostingClassifier.""",
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        # Thin wrapper; exists so the decorator can attach the shared docs.
        super().set_scheduler(scheduler_name, **kwargs)
    @_gradient_boosting_model_doc(
        """Implementation on the training stage of GradientBoostingClassifier.""", # noqa: E501
        "fit",
    )
    def fit(
        self,
        train_loader,
        epochs=100,
        use_reduction_sum=True,
        log_interval=100,
        test_loader=None,
        early_stopping_rounds=2,
        save_model=True,
        save_dir=None,
    ):
        # ``_criterion`` is recorded for evaluation; the boosting stage
        # itself fits each estimator with an internal MSE on residuals.
        self._criterion = nn.CrossEntropyLoss()
        super().fit(
            train_loader=train_loader,
            epochs=epochs,
            use_reduction_sum=use_reduction_sum,
            log_interval=log_interval,
            test_loader=test_loader,
            early_stopping_rounds=early_stopping_rounds,
            save_model=save_model,
            save_dir=save_dir,
        )
    @torchensemble_model_doc(
        """Implementation on the data forwarding in GradientBoostingClassifier.""", # noqa: E501
        "classifier_forward",
    )
    def forward(self, *x):
        # Shrinkage-weighted sum of estimator outputs, then softmax.
        output = [estimator(*x) for estimator in self.estimators_]
        output = op.sum_with_multiplicative(output, self.shrinkage_rate)
        proba = F.softmax(output, dim=1)
        return proba
    @torchensemble_model_doc(item="classifier_evaluate")
    def evaluate(self, test_loader, return_loss=False):
        return super().evaluate(test_loader, return_loss)
    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
@_gradient_boosting_model_doc(
    """Implementation on the GradientBoostingRegressor.""", "model"
)
class GradientBoostingRegressor(_BaseGradientBoosting, BaseRegressor):
    def _pseudo_residual(self, est_idx, y, *x):
        """Compute pseudo residuals in regression."""
        # A null (zero) output is assumed before any estimator is fitted.
        output = torch.zeros_like(y).to(self.device)
        if est_idx > 0:
            results = [
                estimator(*x) for estimator in self.estimators_[:est_idx]
            ]
            output = op.sum_with_multiplicative(results, self.shrinkage_rate)
        pseudo_residual = op.pseudo_residual_regression(y, output)
        return pseudo_residual
    def _handle_early_stopping(self, test_loader, est_idx):
        # Compute the validation MSE of base estimators fitted so far
        self.eval()
        mse = 0.0
        flag = False
        criterion = nn.MSELoss()
        with torch.no_grad():
            for _, elem in enumerate(test_loader):
                data, target = io.split_data_target(elem, self.device)
                output = self._staged_forward(est_idx, *data)
                mse += criterion(output, target)
        mse /= len(test_loader)
        # The first fitted estimator initializes the historical best; later
        # rounds flag early stopping whenever the MSE fails to improve.
        if est_idx == 0:
            self.best_mse = mse
        else:
            assert hasattr(self, "best_mse")
            if mse < self.best_mse:
                self.best_mse = mse
            else:
                flag = True
        msg = "Validation MSE: {:.5f} | Historical Best: {:.5f}"
        self.logger.info(msg.format(mse, self.best_mse))
        if self.tb_logger:
            self.tb_logger.add_scalar(
                "gradient_boosting/Validation_MSE", mse, est_idx
            )
        return flag
    @torchensemble_model_doc(
        """Set the attributes on optimizer for GradientBoostingRegressor.""",
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        # Thin wrapper; exists so the decorator can attach the shared docs.
        super().set_optimizer(optimizer_name, **kwargs)
    @torchensemble_model_doc(
        """Set the attributes on scheduler for GradientBoostingRegressor.""",
        "set_scheduler",
    )
    def set_scheduler(self, scheduler_name, **kwargs):
        # Thin wrapper; exists so the decorator can attach the shared docs.
        super().set_scheduler(scheduler_name, **kwargs)
    @_gradient_boosting_model_doc(
        """Implementation on the training stage of GradientBoostingRegressor.""", # noqa: E501
        "fit",
    )
    def fit(
        self,
        train_loader,
        epochs=100,
        use_reduction_sum=True,
        log_interval=100,
        test_loader=None,
        early_stopping_rounds=2,
        save_model=True,
        save_dir=None,
    ):
        # ``_criterion`` is recorded for evaluation; the boosting stage
        # itself fits each estimator with an internal MSE on residuals.
        self._criterion = nn.MSELoss()
        super().fit(
            train_loader=train_loader,
            epochs=epochs,
            use_reduction_sum=use_reduction_sum,
            log_interval=log_interval,
            test_loader=test_loader,
            early_stopping_rounds=early_stopping_rounds,
            save_model=save_model,
            save_dir=save_dir,
        )
    @torchensemble_model_doc(
        """Implementation on the data forwarding in GradientBoostingRegressor.""", # noqa: E501
        "regressor_forward",
    )
    def forward(self, *x):
        # Shrinkage-weighted sum of the raw estimator predictions.
        outputs = [estimator(*x) for estimator in self.estimators_]
        pred = op.sum_with_multiplicative(outputs, self.shrinkage_rate)
        return pred
    @torchensemble_model_doc(item="regressor_evaluate")
    def evaluate(self, test_loader):
        return super().evaluate(test_loader)
    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
import math
import torch
import logging
import warnings
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
from ._base import BaseModule, BaseClassifier, BaseRegressor
from ._base import torchensemble_model_doc
from .utils import io
from .utils import set_module
from .utils import operator as op
from .utils.logging import get_tb_logger
__all__ = ["SnapshotEnsembleClassifier", "SnapshotEnsembleRegressor"]
# ---------------------------------------------------------------------------
# Shared documentation template: ``_snapshot_ensemble_model_doc`` (defined
# below) injects this into the ``fit`` docstrings of the snapshot ensembles.
# ---------------------------------------------------------------------------
__fit_doc = """
Parameters
----------
train_loader : torch.utils.data.DataLoader
A :mod:`DataLoader` container that contains the training data.
lr_clip : list or tuple, default=None
Specify the accepted range of learning rate. When the learning rate
determined by the scheduler is out of this range, it will be clipped.
- The first element should be the lower bound of learning rate.
- The second element should be the upper bound of learning rate.
epochs : int, default=100
The number of training epochs.
log_interval : int, default=100
The number of batches to wait before logging the training status.
test_loader : torch.utils.data.DataLoader, default=None
A :mod:`DataLoader` container that contains the evaluating data.
- If ``None``, no validation is conducted after each snapshot
being generated.
- If not ``None``, the ensemble will be evaluated on this
dataloader after each snapshot being generated.
save_model : bool, default=True
Specify whether to save the model parameters.
- If test_loader is ``None``, the ensemble with
``n_estimators`` base estimators will be saved.
- If test_loader is not ``None``, the ensemble with the best
validation performance will be saved.
save_dir : string, default=None
Specify where to save the model parameters.
- If ``None``, the model will be saved in the current directory.
- If not ``None``, the model will be saved in the specified
directory: ``save_dir``.
"""
def _snapshot_ensemble_model_doc(header, item="fit"):
    """
    Decorator on obtaining documentation for different snapshot ensemble
    models.
    """

    def _select_doc(which):
        """Return the documentation template matching ``which``."""
        templates = {"fit": __fit_doc}
        return templates[which]

    def adddoc(cls):
        """Attach ``header`` plus the selected template as the docstring."""
        cls.__doc__ = header + "\n\n" + _select_doc(item)
        return cls

    return adddoc
class _BaseSnapshotEnsemble(BaseModule):
    """Shared logic for snapshot ensembles.
    A single network is trained with a cyclic learning rate schedule, and a
    snapshot of its weights is collected at the end of each cycle; the
    ensemble prediction averages the outputs of all snapshots.
    """
    def __init__(
        self, estimator, n_estimators, estimator_args=None, cuda=True
    ):
        # NOTE(review): ``super(BaseModule, self)`` deliberately skips
        # ``BaseModule.__init__`` and calls the next initializer in the MRO
        # (presumably ``nn.Module``) -- confirm this is intentional.
        super(BaseModule, self).__init__()
        self.base_estimator_ = estimator
        self.n_estimators = n_estimators
        self.estimator_args = estimator_args
        if estimator_args and not isinstance(estimator, type):
            msg = (
                "The input `estimator_args` will have no effect since"
                " `estimator` is already an object after instantiation."
            )
            warnings.warn(msg, RuntimeWarning)
        self.device = torch.device("cuda" if cuda else "cpu")
        self.logger = logging.getLogger()
        self.tb_logger = get_tb_logger()
        # Container of collected snapshots, filled during ``fit``.
        self.estimators_ = nn.ModuleList()
    def _validate_parameters(self, lr_clip, epochs, log_interval):
        """Validate hyper-parameters on training the ensemble."""
        if lr_clip:
            if not (isinstance(lr_clip, list) or isinstance(lr_clip, tuple)):
                msg = "lr_clip should be a list or tuple with two elements."
                self.logger.error(msg)
                raise ValueError(msg)
            if len(lr_clip) != 2:
                msg = (
                    "lr_clip should only have two elements, one for lower"
                    " bound, and another for upper bound."
                )
                self.logger.error(msg)
                raise ValueError(msg)
            if not lr_clip[0] < lr_clip[1]:
                msg = (
                    "The first element = {} should be smaller than the"
                    " second element = {} in lr_clip."
                )
                self.logger.error(msg.format(lr_clip[0], lr_clip[1]))
                raise ValueError(msg.format(lr_clip[0], lr_clip[1]))
        if not epochs > 0:
            msg = (
                "The number of training epochs = {} should be strictly"
                " positive."
            )
            self.logger.error(msg.format(epochs))
            raise ValueError(msg.format(epochs))
        if not log_interval > 0:
            msg = (
                "The number of batches to wait before printting the"
                " training status should be strictly positive, but got {}"
                " instead."
            )
            self.logger.error(msg.format(log_interval))
            raise ValueError(msg.format(log_interval))
        # Epochs must split evenly into ``n_estimators`` learning rate
        # cycles, one snapshot per cycle.
        if not epochs % self.n_estimators == 0:
            msg = (
                "The number of training epochs = {} should be a multiple"
                " of n_estimators = {}."
            )
            self.logger.error(msg.format(epochs, self.n_estimators))
            raise ValueError(msg.format(epochs, self.n_estimators))
    def _forward(self, *x):
        """
        Implementation on the internal data forwarding in snapshot ensemble.
        """
        # Average
        results = [estimator(*x) for estimator in self.estimators_]
        output = op.average(results)
        return output
    def _clip_lr(self, optimizer, lr_clip):
        """Clip the learning rate of the optimizer according to `lr_clip`."""
        if not lr_clip:
            return optimizer
        for param_group in optimizer.param_groups:
            if param_group["lr"] < lr_clip[0]:
                param_group["lr"] = lr_clip[0]
            if param_group["lr"] > lr_clip[1]:
                param_group["lr"] = lr_clip[1]
        return optimizer
    def _set_scheduler(self, optimizer, n_iters):
        """
        Set the learning rate scheduler for snapshot ensemble.
        Please refer to the equation (2) in original paper for details.
        """
        # T_M: number of iterations per learning rate cycle (one snapshot
        # is taken at the end of each cycle).
        T_M = math.ceil(n_iters / self.n_estimators)
        lr_lambda = lambda iteration: 0.5 * ( # noqa: E731
            torch.cos(torch.tensor(math.pi * (iteration % T_M) / T_M)) + 1
        )
        scheduler = LambdaLR(optimizer, lr_lambda=lr_lambda)
        return scheduler
    def set_scheduler(self, scheduler_name, **kwargs):
        """Warn that snapshot ensembles manage their own scheduler."""
        msg = (
            "The learning rate scheduler for Snapshot Ensemble will be"
            " automatically set. Calling this function has no effect on"
            " the training stage of Snapshot Ensemble."
        )
        warnings.warn(msg, RuntimeWarning)
# Snapshot ensemble classifier: one network is trained with a cyclic
# (cosine-annealed) learning rate and a weight snapshot is collected at the
# end of every cycle to form the ensemble members.
@torchensemble_model_doc(
    """Implementation on the SnapshotEnsembleClassifier.""", "seq_model"
)
class SnapshotEnsembleClassifier(_BaseSnapshotEnsemble, BaseClassifier):
    @torchensemble_model_doc(
        """Implementation on the data forwarding in SnapshotEnsembleClassifier.""", # noqa: E501
        "classifier_forward",
    )
    def forward(self, *x):
        # Average the snapshots' outputs, then normalize into a distribution.
        proba = self._forward(*x)
        return F.softmax(proba, dim=1)

    @torchensemble_model_doc(
        """Set the attributes on optimizer for SnapshotEnsembleClassifier.""",
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        # Thin wrapper; exists so the decorator can attach the shared docs.
        super().set_optimizer(optimizer_name, **kwargs)

    @torchensemble_model_doc(
        """Set the training criterion for SnapshotEnsembleClassifier.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        # Thin wrapper; exists so the decorator can attach the shared docs.
        super().set_criterion(criterion)

    @_snapshot_ensemble_model_doc(
        """Implementation on the training stage of SnapshotEnsembleClassifier.""", # noqa: E501
        "fit",
    )
    def fit(
        self,
        train_loader,
        lr_clip=None,
        epochs=100,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        self._validate_parameters(lr_clip, epochs, log_interval)
        self.n_outputs = self._decide_n_outputs(train_loader)
        estimator = self._make_estimator()
        # Set the optimizer and scheduler
        optimizer = set_module.set_optimizer(
            estimator, self.optimizer_name, **self.optimizer_args
        )
        scheduler = self._set_scheduler(optimizer, epochs * len(train_loader))
        # Check the training criterion
        if not hasattr(self, "_criterion"):
            self._criterion = nn.CrossEntropyLoss()
        # Utils
        best_acc = 0.0
        counter = 0  # a counter on generating snapshots
        total_iters = 0
        n_iters_per_estimator = epochs * len(train_loader) // self.n_estimators
        # Training loop
        estimator.train()
        for epoch in range(epochs):
            for batch_idx, elem in enumerate(train_loader):
                data, target = io.split_data_target(elem, self.device)
                batch_size = data[0].size(0)
                # Clip the learning rate
                optimizer = self._clip_lr(optimizer, lr_clip)
                optimizer.zero_grad()
                output = estimator(*data)
                loss = self._criterion(output, target)
                loss.backward()
                optimizer.step()
                # Print training status
                if batch_idx % log_interval == 0:
                    with torch.no_grad():
                        _, predicted = torch.max(output.data, 1)
                        correct = (predicted == target).sum().item()
                        msg = (
                            "lr: {:.5f} | Epoch: {:03d} | Batch: {:03d} |"
                            " Loss: {:.5f} | Correct: {:d}/{:d}"
                        )
                        self.logger.info(
                            msg.format(
                                optimizer.param_groups[0]["lr"],
                                epoch,
                                batch_idx,
                                loss,
                                correct,
                                batch_size,
                            )
                        )
                        # Fix: removed a leftover debug ``else: print("None")``
                        # branch here -- it spammed stdout on every logged
                        # batch whenever no TensorBoard logger was configured
                        # (the regressor variant has no such branch).
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "snapshot_ensemble/Train_Loss",
                                loss,
                                total_iters,
                            )
                # Snapshot ensemble updates the learning rate per iteration
                # instead of per epoch.
                scheduler.step()
                counter += 1
                total_iters += 1
                if counter % n_iters_per_estimator == 0:
                    # Generate and save the snapshot at the end of each
                    # learning rate cycle.
                    snapshot = self._make_estimator()
                    snapshot.load_state_dict(estimator.state_dict())
                    self.estimators_.append(snapshot)
                    msg = "Save the snapshot model with index: {}"
                    self.logger.info(msg.format(len(self.estimators_) - 1))
                # Validation after each snapshot model being generated
                if test_loader and counter % n_iters_per_estimator == 0:
                    self.eval()
                    with torch.no_grad():
                        correct = 0
                        total = 0
                        for _, elem in enumerate(test_loader):
                            data, target = io.split_data_target(elem, self.device)
                            output = self.forward(*data)
                            _, predicted = torch.max(output.data, 1)
                            correct += (predicted == target).sum().item()
                            total += target.size(0)
                        acc = 100 * correct / total
                        # Persist the ensemble whenever validation accuracy
                        # improves on the historical best.
                        if acc > best_acc:
                            best_acc = acc
                            if save_model:
                                io.save(self, save_dir, self.logger)
                        msg = (
                            "n_estimators: {} | Validation Acc: {:.3f} %"
                            " | Historical Best: {:.3f} %"
                        )
                        self.logger.info(
                            msg.format(len(self.estimators_), acc, best_acc)
                        )
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "snapshot_ensemble/Validation_Acc",
                                acc,
                                len(self.estimators_),
                            )
        if save_model and not test_loader:
            io.save(self, save_dir, self.logger)

    @torchensemble_model_doc(item="classifier_evaluate")
    def evaluate(self, test_loader, return_loss=False):
        return super().evaluate(test_loader, return_loss)

    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        return super().predict(*x)
@torchensemble_model_doc(
    """Implementation on the SnapshotEnsembleRegressor.""", "seq_model"
)
class SnapshotEnsembleRegressor(_BaseSnapshotEnsemble, BaseRegressor):
    # Regression variant of the snapshot ensemble: a single base network is
    # trained with a per-iteration LR schedule, and a copy ("snapshot") of its
    # weights is appended to `self.estimators_` every
    # `epochs * len(train_loader) // n_estimators` optimization steps.
    @torchensemble_model_doc(
        """Implementation on the data forwarding in SnapshotEnsembleRegressor.""", # noqa: E501
        "regressor_forward",
    )
    def forward(self, *x):
        # Delegates to `_BaseSnapshotEnsemble._forward`, which combines the
        # predictions of all stored snapshots.
        pred = self._forward(*x)
        return pred
    @torchensemble_model_doc(
        """Set the attributes on optimizer for SnapshotEnsembleRegressor.""",
        "set_optimizer",
    )
    def set_optimizer(self, optimizer_name, **kwargs):
        super().set_optimizer(optimizer_name, **kwargs)
    @torchensemble_model_doc(
        """Set the training criterion for SnapshotEnsembleRegressor.""",
        "set_criterion",
    )
    def set_criterion(self, criterion):
        super().set_criterion(criterion)
    @_snapshot_ensemble_model_doc(
        """Implementation on the training stage of SnapshotEnsembleRegressor.""", # noqa: E501
        "fit",
    )
    def fit(
        self,
        train_loader,
        lr_clip=None,
        epochs=100,
        log_interval=100,
        test_loader=None,
        save_model=True,
        save_dir=None,
    ):
        # Docstring is injected by the decorator.
        self._validate_parameters(lr_clip, epochs, log_interval)
        self.n_outputs = self._decide_n_outputs(train_loader)
        # Only ONE estimator is trained; snapshots of it become the members.
        estimator = self._make_estimator()
        # Set the optimizer and scheduler
        optimizer = set_module.set_optimizer(
            estimator, self.optimizer_name, **self.optimizer_args
        )
        scheduler = self._set_scheduler(optimizer, epochs * len(train_loader))
        # Check the training criterion
        if not hasattr(self, "_criterion"):
            self._criterion = nn.MSELoss()
        # Utils
        best_loss = float("inf")
        counter = 0 # a counter on generating snapshots
        total_iters = 0
        # Number of optimization steps between two consecutive snapshots
        n_iters_per_estimator = epochs * len(train_loader) // self.n_estimators
        # Training loop
        estimator.train()
        for epoch in range(epochs):
            for batch_idx, elem in enumerate(train_loader):
                data, target = io.split_data_target(elem, self.device)
                # Clip the learning rate
                optimizer = self._clip_lr(optimizer, lr_clip)
                optimizer.zero_grad()
                output = estimator(*data)
                loss = self._criterion(output, target)
                loss.backward()
                optimizer.step()
                # Print training status
                if batch_idx % log_interval == 0:
                    with torch.no_grad():
                        msg = (
                            "lr: {:.5f} | Epoch: {:03d} | Batch: {:03d}"
                            " | Loss: {:.5f}"
                        )
                        self.logger.info(
                            msg.format(
                                optimizer.param_groups[0]["lr"],
                                epoch,
                                batch_idx,
                                loss,
                            )
                        )
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "snapshot_ensemble/Train_Loss",
                                loss,
                                total_iters,
                            )
                # Snapshot ensemble updates the learning rate per iteration
                # instead of per epoch.
                scheduler.step()
                counter += 1
                total_iters += 1
                if counter % n_iters_per_estimator == 0:
                    # Generate and save the snapshot
                    snapshot = self._make_estimator()
                    snapshot.load_state_dict(estimator.state_dict())
                    self.estimators_.append(snapshot)
                    msg = "Save the snapshot model with index: {}"
                    self.logger.info(msg.format(len(self.estimators_) - 1))
                # Validation after each snapshot model being generated
                if test_loader and counter % n_iters_per_estimator == 0:
                    # NOTE(review): `self.eval()` switches the stored snapshots
                    # to eval mode; `estimator` is a separate, unregistered
                    # module, so its training mode is unaffected — confirm
                    # this is the intended behavior.
                    self.eval()
                    with torch.no_grad():
                        # `val_loss` accumulates as a 0-dim tensor; `{:.5f}`
                        # formatting below relies on Tensor.__format__.
                        val_loss = 0.0
                        for _, elem in enumerate(test_loader):
                            data, target = io.split_data_target(elem, self.device)
                            output = self.forward(*data)
                            val_loss += self._criterion(output, target)
                        val_loss /= len(test_loader)
                        if val_loss < best_loss:
                            best_loss = val_loss
                            if save_model:
                                io.save(self, save_dir, self.logger)
                        msg = (
                            "n_estimators: {} | Validation Loss: {:.5f} |"
                            " Historical Best: {:.5f}"
                        )
                        self.logger.info(
                            msg.format(len(self.estimators_), val_loss, best_loss)
                        )
                        if self.tb_logger:
                            self.tb_logger.add_scalar(
                                "snapshot_ensemble/Validation_Loss",
                                val_loss,
                                len(self.estimators_),
                            )
        # Persist the fully-trained ensemble when no validation set is given
        if save_model and not test_loader:
            io.save(self, save_dir, self.logger)
    @torchensemble_model_doc(item="regressor_evaluate")
    def evaluate(self, test_loader):
        # Docstring is injected by the decorator; delegate to the base class.
        return super().evaluate(test_loader)
    @torchensemble_model_doc(item="predict")
    def predict(self, *x):
        # Docstring is injected by the decorator; delegate to the base class.
        return super().predict(*x)
# ---------------------------------------------------------------------------
# Shared docstring templates. These module-level strings are injected into
# the ensemble classes by the `torchensemble_model_doc` decorator family.
# ---------------------------------------------------------------------------

# Constructor parameters for parallel ensembles (voting/bagging, etc.).
__model_doc = """
    Parameters
    ----------
    estimator : torch.nn.Module
        The class or object of your base estimator.
        - If :obj:`class`, it should inherit from :mod:`torch.nn.Module`.
        - If :obj:`object`, it should be instantiated from a class inherited
          from :mod:`torch.nn.Module`.
    n_estimators : int
        The number of base estimators in the ensemble.
    estimator_args : dict, default=None
        The dictionary of hyper-parameters used to instantiate base
        estimators. This parameter will have no effect if ``estimator`` is a
        base estimator object after instantiation.
    cuda : bool, default=True
        - If ``True``, use GPU to train and evaluate the ensemble.
        - If ``False``, use CPU to train and evaluate the ensemble.
    n_jobs : int, default=None
        The number of workers for training the ensemble. This input
        argument is used for parallel ensemble methods such as
        :mod:`voting` and :mod:`bagging`. Setting it to an integer larger
        than ``1`` enables ``n_jobs`` base estimators to be trained
        simultaneously.
    Attributes
    ----------
    estimators_ : torch.nn.ModuleList
        An internal container that stores all fitted base estimators.
"""

# Constructor parameters for sequential ensembles (no ``n_jobs``).
__seq_model_doc = """
    Parameters
    ----------
    estimator : torch.nn.Module
        The class or object of your base estimator.
        - If :obj:`class`, it should inherit from :mod:`torch.nn.Module`.
        - If :obj:`object`, it should be instantiated from a class inherited
          from :mod:`torch.nn.Module`.
    n_estimators : int
        The number of base estimators in the ensemble.
    estimator_args : dict, default=None
        The dictionary of hyper-parameters used to instantiate base
        estimators. This parameter will have no effect if ``estimator`` is a
        base estimator object after instantiation.
    cuda : bool, default=True
        - If ``True``, use GPU to train and evaluate the ensemble.
        - If ``False``, use CPU to train and evaluate the ensemble.
    Attributes
    ----------
    estimators_ : torch.nn.ModuleList
        An internal container that stores all fitted base estimators.
"""

# Template for ``set_optimizer``.
__set_optimizer_doc = """
    Parameters
    ----------
    optimizer_name : string
        The name of the optimizer, should be one of {``Adadelta``, ``Adagrad``,
        ``Adam``, ``AdamW``, ``Adamax``, ``ASGD``, ``RMSprop``, ``Rprop``,
        ``SGD``}.
    **kwargs : keyword arguments
        Keyword arguments on setting the optimizer, should be in the form:
        ``lr=1e-3, weight_decay=5e-4, ...``. These keyword arguments
        will be directly passed to :mod:`torch.optim.Optimizer`.
"""

# Template for ``set_scheduler``.
__set_scheduler_doc = """
    Parameters
    ----------
    scheduler_name : string
        The name of the scheduler, should be one of {``LambdaLR``,
        ``MultiplicativeLR``, ``StepLR``, ``MultiStepLR``, ``ExponentialLR``,
        ``CosineAnnealingLR``, ``ReduceLROnPlateau``}.
    **kwargs : keyword arguments
        Keyword arguments on setting the scheduler. These keyword arguments
        will be directly passed to :mod:`torch.optim.lr_scheduler`.
"""

# Template for ``set_criterion``.
__set_criterion_doc = """
    Parameters
    ----------
    criterion : torch.nn.loss
        The customized training criterion object.
"""

# Template for the generic ``fit`` signature.
__fit_doc = """
    Parameters
    ----------
    train_loader : torch.utils.data.DataLoader
        A :mod:`torch.utils.data.DataLoader` container that contains the
        training data.
    epochs : int, default=100
        The number of training epochs.
    log_interval : int, default=100
        The number of batches to wait before logging the training status.
    test_loader : torch.utils.data.DataLoader, default=None
        A :mod:`torch.utils.data.DataLoader` container that contains the
        evaluating data.
        - If ``None``, no validation is conducted during the training
          stage.
        - If not ``None``, the ensemble will be evaluated on this
          dataloader after each training epoch.
    save_model : bool, default=True
        Specify whether to save the model parameters.
        - If test_loader is ``None``, the ensemble fully trained will be
          saved.
        - If test_loader is not ``None``, the ensemble with the best
          validation performance will be saved.
    save_dir : string, default=None
        Specify where to save the model parameters.
        - If ``None``, the model will be saved in the current directory.
        - If not ``None``, the model will be saved in the specified
          directory: ``save_dir``.
"""

# Template for ``predict``.
__predict_doc = """
    Return the predictions of the ensemble given the testing data.
    Parameters
    ----------
    X : {tensor, numpy array}
        A data batch in the form of tensor or numpy array.
    Returns
    -------
    pred : tensor of shape (n_samples, n_outputs)
        For classifiers, ``n_outputs`` is the number of distinct classes. For
        regressors, ``n_output`` is the number of target variables.
"""

# Template for classifier ``forward``.
__classification_forward_doc = """
    Parameters
    ----------
    X : tensor
        An input batch of data, which should be a valid input data batch
        for base estimators in the ensemble.
    Returns
    -------
    proba : tensor of shape (batch_size, n_classes)
        The predicted class distribution.
"""

# Template for classifier ``evaluate``.
__classification_evaluate_doc = """
    Compute the classification accuracy of the ensemble given the testing
    dataloader and optionally the average cross-entropy loss.
    Parameters
    ----------
    test_loader : torch.utils.data.DataLoader
        A data loader that contains the testing data.
    return_loss : bool, default=False
        Whether to return the average cross-entropy loss over all batches
        in the ``test_loader``.
    Returns
    -------
    accuracy : float
        The classification accuracy of the fitted ensemble on ``test_loader``.
    loss : float
        The average cross-entropy loss of the fitted ensemble on
        ``test_loader``, only available when ``return_loss`` is True.
"""

# Template for regressor ``forward``.
__regression_forward_doc = """
    Parameters
    ----------
    X : tensor
        An input batch of data, which should be a valid input data batch
        for base estimators in the ensemble.
    Returns
    -------
    pred : tensor of shape (batch_size, n_outputs)
        The predicted values.
"""

# Template for regressor ``evaluate``.
__regression_evaluate_doc = """
    Compute the mean squared error (MSE) of the ensemble given the testing
    dataloader.
    Parameters
    ----------
    test_loader : torch.utils.data.DataLoader
        A data loader that contains the testing data.
    Returns
    -------
    mse : float
        The testing mean squared error of the fitted ensemble on
        ``test_loader``.
"""
import importlib
def set_optimizer(model, optimizer_name, **kwargs):
    """
    Build and return a parameter optimizer for ``model``.

    Reference: https://pytorch.org/docs/stable/optim.html#algorithms

    :param model: the module whose parameters will be optimized
    :param optimizer_name: name of a class in :mod:`torch.optim`
    :param kwargs: forwarded verbatim to the optimizer constructor
    :raises NotImplementedError: if ``optimizer_name`` is not supported
    """
    supported = (
        "Adadelta",
        "Adagrad",
        "Adam",
        "AdamW",
        "Adamax",
        "ASGD",
        "RMSprop",
        "Rprop",
        "SGD",
    )
    if optimizer_name not in supported:
        msg = "Unrecognized optimizer: {}, should be one of {}."
        raise NotImplementedError(
            msg.format(optimizer_name, ",".join(supported))
        )
    # Resolve the optimizer class by name from torch.optim and instantiate it.
    optim_module = importlib.import_module("torch.optim")
    optimizer_cls = getattr(optim_module, optimizer_name)
    return optimizer_cls(model.parameters(), **kwargs)
def update_lr(optimizer, lr):
    """
    Manually set the learning rate of every parameter group of ``optimizer``.

    Used when parallelization corrupts the bindings between the optimizer
    and the scheduler.

    :param optimizer: the optimizer to mutate in place
    :param lr: new learning rate, must be strictly positive
    :return: the same optimizer object
    :raises ValueError: if ``lr`` is not strictly positive
    """
    # `not lr > 0` (rather than `lr <= 0`) also rejects NaN values.
    if not lr > 0:
        raise ValueError(
            "The learning rate should be strictly positive, but got"
            " {} instead.".format(lr)
        )
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr
    return optimizer
def set_scheduler(optimizer, scheduler_name, **kwargs):
    """
    Build a learning-rate scheduler attached to ``optimizer``.

    Reference:
    https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate

    :param optimizer: the optimizer whose learning rate will be scheduled
    :param scheduler_name: name of a class in :mod:`torch.optim.lr_scheduler`
    :param kwargs: forwarded verbatim to the scheduler constructor
    :raises NotImplementedError: if ``scheduler_name`` is not supported
    """
    supported = (
        "LambdaLR",
        "MultiplicativeLR",
        "StepLR",
        "MultiStepLR",
        "ExponentialLR",
        "CosineAnnealingLR",
        "ReduceLROnPlateau",
        "CyclicLR",
        "OneCycleLR",
        "CosineAnnealingWarmRestarts",
    )
    if scheduler_name not in supported:
        msg = "Unrecognized scheduler: {}, should be one of {}."
        raise NotImplementedError(
            msg.format(scheduler_name, ",".join(supported))
        )
    # Resolve the scheduler class by name and instantiate it.
    lr_module = importlib.import_module("torch.optim.lr_scheduler")
    return getattr(lr_module, scheduler_name)(optimizer, **kwargs)
import os
import torch
def save(model, save_dir, logger):
    """Implement model serialization to the specified directory.

    The checkpoint file name is derived from the ensemble class name, the
    base estimator name and ``n_estimators``; ``load`` below reconstructs
    the same name, so both must stay in sync.
    """
    if save_dir is None:
        save_dir = "./"
    if not os.path.isdir(save_dir):
        os.mkdir(save_dir)
    # Decide the base estimator name
    if isinstance(model.base_estimator_, type):
        base_estimator_name = model.base_estimator_.__name__
    else:
        base_estimator_name = model.base_estimator_.__class__.__name__
    # {Ensemble_Model_Name}_{Base_Estimator_Name}_{n_estimators}
    filename = "{}_{}_{}_ckpt.pth".format(
        type(model).__name__,
        base_estimator_name,
        model.n_estimators,
    )
    # The real number of base estimators in some ensembles is not same as
    # `n_estimators`.
    state = {
        "n_estimators": len(model.estimators_),
        "model": model.state_dict(),
        # NOTE(review): the criterion object itself is pickled into the
        # checkpoint, so a custom criterion must be picklable.
        "_criterion": model._criterion,
    }
    save_dir = os.path.join(save_dir, filename)
    logger.info("Saving the model to `{}`".format(save_dir))
    # Save
    torch.save(state, save_dir)
    return
def load(model, save_dir="./", logger=None):
    """Implement model deserialization from the specified directory.

    Mutates ``model`` in place: pre-allocates ``n_estimators`` fresh base
    estimators, then loads the saved state dict and criterion into it.
    """
    if not os.path.exists(save_dir):
        # NOTE(review): raises FileExistsError (not FileNotFoundError) for a
        # missing path — callers may depend on this exception type.
        raise FileExistsError("`{}` does not exist".format(save_dir))
    # Decide the base estimator name
    if isinstance(model.base_estimator_, type):
        base_estimator_name = model.base_estimator_.__name__
    else:
        base_estimator_name = model.base_estimator_.__class__.__name__
    # {Ensemble_Model_Name}_{Base_Estimator_Name}_{n_estimators}
    filename = "{}_{}_{}_ckpt.pth".format(
        type(model).__name__,
        base_estimator_name,
        model.n_estimators,
    )
    save_dir = os.path.join(save_dir, filename)
    if logger:
        logger.info("Loading the model from `{}`".format(save_dir))
    # NOTE(review): no `map_location` is passed, so a checkpoint saved on GPU
    # presumably cannot be loaded on a CPU-only machine — confirm.
    state = torch.load(save_dir)
    n_estimators = state["n_estimators"]
    model_params = state["model"]
    model._criterion = state["_criterion"]
    # Pre-allocate and load all base estimators
    for _ in range(n_estimators):
        model.estimators_.append(model._make_estimator())
    model.load_state_dict(model_params)
def split_data_target(element, device, logger=None):
    """Split one dataloader batch into ``(data, target)`` on ``device``.

    :param element: a batch yielded by the dataloader; must be a list or
        tuple whose last item is the target and whose preceding item(s)
        are the input data
    :param device: target device passed to ``Tensor.to``
    :param logger: optional logger used to record error messages
    :return: tuple ``(data, target)`` where ``data`` is always a *list* of
        tensors (even for a single input) and ``target`` is a tensor
    :raises ValueError: if ``element`` is not a list/tuple or has fewer
        than two items
    """
    # Idiomatic single isinstance check instead of an `or` chain.
    if not isinstance(element, (list, tuple)):
        msg = (
            "Invalid dataloader, please check if the input dataloader is valid."
        )
        if logger:
            logger.error(msg)
        raise ValueError(msg)
    if len(element) < 2:
        msg = (
            "The input dataloader should at least contain two tensors - data"
            " and target."
        )
        if logger:
            logger.error(msg)
        raise ValueError(msg)
    # One unified path: everything but the last item is data. This replaces
    # the original duplicated len == 2 / len > 2 branches.
    *data, target = element
    data_device = [tensor.to(device) for tensor in data]
    return data_device, target.to(device)
from typing import Iterable
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
from matplotlib.ticker import MaxNLocator
from rrt_ml.utilities.configs import *
from rrt_ml.utilities.hints import *
from rrt_ml.utilities.infos import *
from rrt_ml.utilities.paths import *
class Node:
    """
    Node (or pose) for the RRT tree.

    Holds a 2-D pose, a link to its parent node, path costs, and the
    matplotlib bookkeeping used by the map plotting helpers.
    """
    def __init__(
        self,
        pose: Vector3,
        parent: "Node | None" = None,
        cost_from_parent: "float | None" = None,
        cost_from_root: None | float = None,
        origin: None | str = None,
    ):
        """
        Initialize.

        :param pose: vector [x y theta]
        :param parent: parent pose
        :param cost_from_parent: cost of moving from parent to this pose
        :param cost_from_root: cost of moving from root node to this pose
        :param origin: whether the node comes from sampler, random, etc
        """
        self.pose = pose # type: Vector3
        self.parent = parent # type: None | Node
        self.cost_from_parent = cost_from_parent # type: None | float
        self.cost_from_root = cost_from_root # type: None | float
        self.origin = origin # type: None | str
        # Child nodes in the tree; kept consistent with `parent` links.
        self.children = [] # type: list[Node, ...]
        # Stats describing how this node was reached (set via `set_info`).
        self.info = None # type: None | NodeReachInfo
        # Flags and matplotlib artists used only by the plotting helpers.
        self._draw_cost_from_root = False
        self._draw_cost_from_parent = False
        self._draw_color = 'black'
        self._draw_info = False
        self._draw_temp_distance = False
        self._draw_temp_info = False
        self._draw_temp_cost_from_root = False
        self._mpl_txts = []
        self._mpl_patches = []
        self._mpl_lines = []
        self._temp_distance = None
        self._temp_info = None
        self._temp_cost_from_root = None
    def set_cost_change_propagation(self):
        """
        When the 'cost_from_root' changes we need to update the 'cost_from_root' of all children.

        Recursively walks the subtree below this node.
        """
        for node_child in self.children:
            node_child.cost_from_root = (
                self.cost_from_root + node_child.cost_from_parent
            )
            node_child.set_cost_change_propagation()
    def set_remove_me_as_child(self):
        """
        This node may belong to a list of children, and we may want to remove it.
        """
        # Root nodes have no parent, so there is nothing to detach from.
        if self.parent is not None:
            self.parent.children.remove(self)
    def set_info(self, node_reach_info: 'NodeReachInfo'):
        """
        Add stats on how this node was reached from parent node.
        """
        # Get copy of bullet stats
        self.info = node_reach_info
    def __repr__(self):
        """
        Print node.
        """
        # Root nodes (no parent) print a shorter summary.
        if self.parent is not None:
            return (
                f"Pose: {self.pose}\n"
                f"Parent: {self.parent.pose}\n"
                f"Cost from parent: {self.cost_from_parent}\n"
                f"Cost from root: {self.cost_from_root}\n\n"
            )
        else:
            return f"Pose: {self.pose}\n" \
                   f"Cost from root: {self.cost_from_root}\n\n"
class Map:
    """
    Rectangular-obstacle map for the RRT planner.

    Stores wall rectangles (``vertices``/``orientations``), the list of tree
    ``nodes``, and provides matplotlib helpers to plot the tree, branches,
    obstacles and temporary debug artists.
    """
    def __init__(self, cfg: MasterConfig):
        """
        Initialize the map from the master config (selects the map layout).

        :param cfg: master config; ``cfg.maps.general.map_name`` selects
            which walls are generated.
        """
        self.cfg = cfg
        # Temporary matplotlib artists (cleared via set_clear_temp_objs).
        self.temp_arrows = []
        self.temp_lines = []
        self.temp_texts = []
        # RRT nodes and wall rectangles.
        self.nodes = []
        self.vertices = []
        self.orientations = []
        self._setup()
    def set_add_nodes(self, nodes: Iterable['Node'] | 'Node'):
        """
        Add node(s) to list of nodes.

        :param nodes: node(s) to add.
        """
        if isinstance(nodes, Iterable):
            for node in nodes:
                self.nodes.append(node)
        else:
            self.nodes.append(nodes)
    def set_add_states(self, states: Iterable[Vector4] | Vector4, origin: str = 'sl'):
        """
        Add states to list of nodes

        :param states: list of states to add
        :param origin: where the states come from
        """
        # Handle single state case
        if isinstance(states[0], Iterable):
            pass
        else:
            states = [states]
        # Iterate and add to map's list of nodes
        # State layout here is [x, y, sin, cos] -> pose [x, y, theta].
        for st in states:
            pose = [st[0], st[1], np.arctan2(st[2], st[3])]
            self.nodes.append(Node(pose, None, None, None, origin))
    def get_plot_tree(self, ax: plt.Axes | None = None) -> tuple[plt.Figure, plt.Axes] | plt.Axes:
        """
        Plot map with obstacles and nodes/connections as a tree.

        :return: figure and axes.
        """
        # Handle cases where axis is not given and must return a new figure
        fig = None
        if ax is None:
            fig, ax = plt.subplots()
        # Set up axis layout
        self._set_plot_layout(ax)
        # Add obstacles
        self._set_plot_obstacles(ax)
        # Add branches
        self._set_plot_branches(ax)
        if fig is None:
            return ax
        else:
            return fig, ax
    def get_plot_lines(self, ax: plt.Axes | None = None) -> tuple[plt.Figure, plt.Axes] | plt.Axes:
        """
        Plot map with nodes and line connections.

        :return: figure and axes or axes
        """
        # Handle cases where axis is not given and must return a new figure
        fig = None
        if ax is None:
            fig, ax = plt.subplots()
        # Set up axis layout
        self._set_plot_layout(ax)
        # Add obstacles
        self._set_plot_obstacles(ax)
        # Add nodes
        self._set_plot_nodes(ax)
        # Add connections
        self._set_plot_connections_lines(ax)
        if fig is None:
            return ax
        else:
            return fig, ax
    def set_add_temp_line(self, ax: plt.Axes, node: Node, color: str):
        """
        Add temporary node to the tree (for debugging)
        """
        self._setup_ignore_static_warnings()
        # Plot line
        line = ax.plot(node.info.xs, node.info.ys, '--', color=color)
        # Add arrow to list of temp arrows
        self.temp_lines.append(line)
    def set_add_temp_arrows(self, ax: plt.Axes, nodes: Node | list[Node, ...], color: str):
        """
        Add temporary node to the tree (for debugging)
        """
        self._setup_ignore_static_warnings()
        if not isinstance(nodes, Iterable):
            nodes = [nodes]
        # Set params for different types of nodes
        kw = dict(width=0.01, head_width=0.15, head_length=0.15, ec=color, fc=color)
        for node in nodes:
            # Get arrow params
            x, y = node.pose[0:2]
            # Arrow length 0.45, oriented along the node's heading.
            dx, dy = 0.45 * np.cos(node.pose[2]), 0.45 * np.sin(node.pose[2])
            # Plot arrow
            arrow = ax.arrow(x, y, dx, dy, **kw)
            # Add arrow to list of temp arrows
            self.temp_arrows.append(arrow)
    def set_add_temp_text(self, ax: plt.Axes, pose, text, color):
        """
        Add temporary text to the tree (for debugging)
        """
        self._setup_ignore_static_warnings()
        # Plot text
        xp, yp = pose[:2]
        # xt, yt = pose[0] + 1, pose[1] + 1
        txt = ax.annotate(text=text, xy=(xp, yp), xycoords='data', color=color)
        # txt = ax.annotate(text=text, xy=(xp, yp), xycoords='data', xytext=(xt, yt), textcoords='data')
        # Add arrow to list of temp arrows
        self.temp_texts.append(txt)
    def set_clear_temp_objs(self):
        """
        Clear matplotlib temporary objects.
        """
        # `ax.plot` returns a list of Line2D, hence line[0].
        for line in self.temp_lines:
            line[0].remove()
        for arrow in self.temp_arrows:
            arrow.remove()
        for text in self.temp_texts:
            text.remove()
        self.temp_lines, self.temp_arrows, self.temp_texts = [], [], []
    def _set_plot_branches(self, ax: plt.Axes):
        """
        Add nodes and connections as branches.

        :param ax: axes to plot
        """
        # Plot connection for all nodes
        for node in self.nodes: # type: Node
            # Initial node has no parent
            if node.parent is not None:
                # Now we take (x, y) pairs, i.e. the trajectory, from stats
                xs = node.info.xs
                ys = node.info.ys
                # Plot on ax
                ax.plot(xs, ys, color='grey', alpha=0.5)
    def _set_plot_branches_for_list(self, ax: plt.Axes, list_nodes: list['Node', ...]):
        """
        Plot branches for a list of nodes only.

        :param list_nodes: list of nodes to plot branches.
        """
        # Ignore warnings
        self._setup_ignore_static_warnings()
        # Loop provided nodes
        for node in list_nodes:
            # Initial node has no parent
            if node.parent is not None:
                # Now we take (x, y) pairs, i.e. the trajectory, from stats
                xs = node.info.xs
                ys = node.info.ys
                # Plot on ax
                ax.plot(xs, ys, color='blue')
    def _set_plot_connections_lines(self, ax: plt.Axes):
        """
        Add connections between nodes.

        :param ax: axes do draw connections
        """
        for node in self.nodes:
            # If node has no parent continue
            if node.parent is None:
                continue
            # Get positions
            x_i, y_i, orn_i = node.pose
            x_f, y_f, orn_f = node.parent.pose
            # Add a line from root to destination point
            ax.plot([x_i, x_f], [y_i, y_f], color="k", linewidth=1.5, alpha=0.2)
    def _set_plot_layout(self, ax: plt.Axes):
        """
        Adjust aspect ratio, grid, etc.

        :param ax: axis to change.
        """
        # Make limits according to map size
        match self.cfg.maps.general.map_name:
            case 'narrow':
                # self.cfg.maps.narrow.size
                ax.set_xlim(-0.5, 0.5 + self.cfg.maps.narrow.size)
                ax.set_ylim(-0.5, 0.5 + self.cfg.maps.narrow.size)
            case _:
                pass
        # Keep figure height and width equal
        ax.set_aspect("equal", adjustable="box")
        # Tick integer values only
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        self._setup_ignore_static_warnings()
        return ax
    def _set_plot_nodes(self, ax: plt.Axes, nodes: list[Node, ...] | None = None):
        """
        Add nodes to plot.

        :param ax: axes to plot.
        """
        # Set list of nodes to plot
        if nodes is None:
            nodes = self.nodes
        # Set params for different types of nodes
        kw_init = dict(width=0.03, head_width=0.3, head_length=0.3, ec="g", fc="g", alpha=1)
        kw_final = dict(width=0.03, head_width=0.3, head_length=0.3, ec="r", fc="r", alpha=1)
        kw_final_new = dict(width=0.01, head_width=0.15, head_length=0.15, ec="r", fc="r")
        kw_sl = dict(width=0.01, head_width=0.15, head_length=0.15, ec="k", fc="k")
        kw_random = dict(width=0.01, head_width=0.15, head_length=0.15, ec="k", fc="k")
        # NOTE(review): nodes with an origin outside these keys raise KeyError.
        origin_kwargs = dict(init=kw_init, final=kw_final, final_new=kw_final_new, sl=kw_sl, random=kw_random)
        # Iterate over nodes
        for node in nodes:
            # Get arrow params
            x, y = node.pose[0:2]
            dx, dy = 0.45*np.cos(node.pose[2]), 0.45*np.sin(node.pose[2])
            # Set higher z-order for init and final node
            zorder = 3 if node.origin == 'final' or node.origin == 'init' else 1
            # Plot arrow
            ax.arrow(x, y, dx, dy, **origin_kwargs[node.origin], zorder=zorder)
    def _set_plot_obstacles(self, ax: plt.Axes):
        """
        Get rectangles patches.

        :param ax: axes to plot.
        """
        patches = []
        for v, o in zip(self.vertices, self.orientations):
            # Get params: vertices are stored as [x_tl, y_tl, x_br, y_br].
            width = v[2] - v[0]
            height = v[1] - v[3]
            x = v[0]
            y = v[1] - height
            # Create patch and add to list
            # NOTE(review): `o` is passed as the positional `angle` argument;
            # recent matplotlib makes `angle` keyword-only — confirm the
            # pinned matplotlib version accepts this.
            rect = Rectangle((x, y), width, height, o)
            patches.append(rect)
        # Create black patch collection
        patches = PatchCollection(patches)
        patches.set_color("black")
        # Add to plot
        ax.add_collection(patches)
    def _set_wall(self, top_left_bottom_right: Vector4, space_snwe: Vector4 = (0, 0, 0, 0)):
        """
        Set a wall to map as a rectangle, making it shorter to not collide with other walls

        :param top_left_bottom_right: top left and bottom right vertices positions
        :param space_snwe: cut this much from each side of the rectangle (south-north-west-east)
        """
        # TopLeft and BottomRight aliases
        tl = top_left_bottom_right[:2]
        br = top_left_bottom_right[2:]
        # SpaceNorth, SpaceSouth, SpaceWest and SpaceEast aliases
        ss, sn, sw, se = space_snwe
        # Move top left vertex
        tl = [tl[0] + sw, tl[1] - sn]
        # Move bottom right vertex
        br = [br[0] - se, br[1] + ss]
        self.vertices.append([*tl, *br])
        self.orientations.append(0)
    def _setup(self):
        """
        Set up.

        Dispatch on the configured map name.
        """
        match self.cfg.maps.general.map_name:
            case 'arena':
                self._setup_arena()
            case 'narrow':
                self._setup_narrow()
            case _:
                raise NotImplementedError
    def _setup_arena(self, length=None, width=None, offset=None):
        """
        Set a rectangular arena.

        :param length: length of arena
        :param width: width of arena
        :param offset: offset center of arena
        """
        # Config constants
        if length is None:
            length = self.cfg.maps.arena.length
        if width is None:
            width = self.cfg.maps.arena.width
        if offset is None:
            offset = self.cfg.maps.arena.offset
        ox, oy = offset
        # Fine tune constants: WallThickness, Spacing
        wt = 0.3
        s = 1e-3
        # Make vertices
        left_wall_pos = [-wt + ox, width + oy, ox, oy]
        right_wall_pos = [length + ox, width + oy, length + wt + ox, oy]
        bottom_wall_pos = [ox, oy, length + ox, -wt + oy]
        top_wall_pos = [ox, width + wt + oy, length + ox, width]
        # Set walls with spacing
        self._set_wall(bottom_wall_pos, space_snwe=[0, s, 0, 0])
        self._set_wall(top_wall_pos, space_snwe=[s, 0, 0, 0])
        self._set_wall(left_wall_pos, space_snwe=[0, 0, 0, s])
        self._set_wall(right_wall_pos, space_snwe=[0, 0, s, 0])
    def _setup_narrow(self):
        """
        Set up narrow map.

        Square arena with two vertical walls, each with a narrow passage.
        """
        # Config constants
        size = self.cfg.maps.narrow.size
        x1, y1 = self.cfg.maps.narrow.narrow1_pos
        x2, y2 = self.cfg.maps.narrow.narrow2_pos
        ww = self.cfg.maps.narrow.wall_width
        pw = self.cfg.maps.narrow.passage_width
        # Fine tune constants: Spacing
        s = 1e-3
        # Box around map
        self._setup_arena(size, size, offset=(0, 0))
        # Left lower, left upper, right lower and right upper walls
        ll = [x1 - ww / 2, y1 - pw / 2, x1 + ww / 2, 0]
        lu = [x1 - ww / 2, size, x1 + ww / 2, y1 + pw / 2]
        rl = [x2 - ww / 2, y2 - pw / 2, x2 + ww / 2, 0]
        ru = [x2 - ww / 2, size, x2 + ww / 2, y2 + pw / 2]
        # Set walls
        self._set_wall(ll, space_snwe=[s, 0, 0, 0])
        self._set_wall(lu, space_snwe=[0, s, 0, 0])
        self._set_wall(rl, space_snwe=[s, 0, 0, 0])
        self._set_wall(ru, space_snwe=[0, s, 0, 0])
    def _setup_ignore_static_warnings(self):
        """
        Ignore static warnings.

        No-op called from methods that do not use `self`, so linters do not
        flag them as candidates for @staticmethod.
        """
        pass
from coral_pytorch.losses import corn_loss
from rrt_ml.utilities.configs import *
from rrt_ml.utilities.hints import *
def get_ackermann_v_rf_lr_phi_lr(v_ref: float, phi_ref: float, cfg: MasterConfig) -> Vector6:
    """
    Ackermann formula to get velocity for each wheel and steering angles.

    :param v_ref: desired linear velocity
    :param phi_ref: desired steering angle
    :param cfg: master config that contains car parameters
    :return: [vrl vrr vfl vfr phil phir] (f: forward, r: rear/right, l: left)
    """
    car = cfg.env.car
    # Saturate the steering command at the mechanical limit, keeping its sign
    if abs(phi_ref) > car.phi_max:
        phi_ref = np.sign(phi_ref) * car.phi_max
    # Clip linear velocity and convert it to wheel angular velocity (rad/s)
    v_ref = np.clip(v_ref, -car.v_max, +car.v_max) / car.wheel_radius
    # Turning radius of the rear axle midpoint (huge value ~ straight line)
    r = 99999999 if phi_ref == 0 else car.axis_dist / np.tan(phi_ref)
    # Per-wheel steering angles (inner and outer front wheels differ)
    phir = np.arctan(car.axis_dist / (r + car.wheel_dist / 2))
    phil = np.arctan(car.axis_dist / (r - car.wheel_dist / 2))
    # Per-wheel target angular velocities
    vrr = v_ref * (1 + car.wheel_dist / (2 * r))
    vrl = v_ref * (1 - car.wheel_dist / (2 * r))
    vfr = vrr / np.cos(phir)
    vfl = vrl / np.cos(phil)
    return vrl, vrr, vfl, vfr, phil, phir
def quaternion_to_theta(quaternion: Vector4) -> float:
    """
    Convert quaternion to theta (orientation in the xy-plane)

    :param quaternion: orientation from bullet
    :return: theta angle
    """
    # Yaw magnitude from the scalar part (w is the 4th component in bullet)
    theta = 2 * np.arccos(quaternion[3])
    # Recover the rotation sign from the z component; when z == 0 the sign is
    # undefined and the magnitude is returned unchanged.
    z = quaternion[2]
    if z != 0:
        theta = theta * z / np.abs(z)
    return theta
def path_length(xs, ys):
    """
    Get path length using the arc length formula (sum of segment lengths).

    :param xs: sequence of x coordinates
    :param ys: sequence of y coordinates
    :return: total path length as a float (0.0 for empty/single-point paths)
    """
    # Vectorized replacement for the original Python loop: hypotenuse of
    # consecutive coordinate differences, summed.
    return float(np.hypot(np.diff(xs), np.diff(ys)).sum())
def pose3_to_state4(pose: Vector3) -> Vector4:
    """
    Unpack angle into sine and cosine, maintaining x and y coordinates.

    :param pose: [x y theta]
    :return: [x y sin_theta cos_theta]
    """
    x, y, theta = pose[0], pose[1], pose[2]
    return np.array([x, y, np.sin(theta), np.cos(theta)])
def pose3_to_state4_sl(pose: Vector3 | np.ndarray) -> Vector4 | np.ndarray:
    """
    State in SL is in the form [x y cos sin]

    :param pose: pose as [x y theta]
    :return: state in SL form
    """
    # Normalize the input to an ndarray first, then dispatch on rank
    # (replaces the original recursion for the non-ndarray case).
    if not isinstance(pose, np.ndarray):
        pose = np.array(pose)
    if len(pose.shape) == 1:
        # Single pose: [x, y, theta] -> [x, y, cos, sin]
        return np.array([pose[0], pose[1], np.cos(pose[2]), np.sin(pose[2])])
    if len(pose.shape) == 2:
        # Batch of poses: one row per pose
        out = np.zeros(shape=(pose.shape[0], 4))
        out[:, 0] = pose[:, 0]
        out[:, 1] = pose[:, 1]
        out[:, 2] = np.cos(pose[:, 2])
        out[:, 3] = np.sin(pose[:, 2])
        return out
    raise NotImplementedError
def scale_to_range(x: float, src_range: Vector, dst_range: Vector):
    """
    Scale a number from a source range to a destination range.

    :param x: input number
    :param src_range: source range
    :param dst_range: destination range
    :return: scaled vector
    """
    src_lo, src_hi = src_range[0], src_range[1]
    dst_lo, dst_hi = dst_range[0], dst_range[1]
    # Linear map: relative position in src projected onto dst
    # (same operation order as before, to keep float results identical).
    return (x - src_lo) * (dst_hi - dst_lo) / (src_hi - src_lo) + dst_lo
def state4_to_pose3(state: Vector4) -> Vector3:
    """
    Convert [x y sin_theta cos_theta] to [x y theta].

    :param state: vector with unpacked angle
    :return: pose
    """
    # Recover the angle from its sine/cosine pair
    theta = np.arctan2(state[2], state[3])
    return np.array([state[0], state[1], theta])
def state6_to_pose3(state: Vector6) -> Vector3:
    """
    Convert [x y sin_theta cos_theta v phi] to [x y theta]

    :param state: vector with 6 components, including velocity and steering angle
    :return: corresponding position and orientation
    """
    # Velocity and steering angle (components 4 and 5) are discarded
    theta = np.arctan2(state[2], state[3])
    return np.array([state[0], state[1], theta])
def state4_sl_to_state4_rl(state_sl: Vector4 | np.ndarray) -> Vector4 | np.ndarray:
"""
State in SL is in the form [x y cos sin], and in RL is [x y sin cos]
:param state_sl: state on SL dataset
:return: state in RL form
"""
# Single state case
if len(state_sl.shape) == 1:
cos = state_sl[2]
sin = state_sl[3]
state_rl = np.array([state_sl[0], state_sl[1], sin, cos])
elif len(state_sl.shape) == 2:
cos = state_sl[:, 2]
sin = state_sl[:, 3]
state_rl = np.copy(state_sl)
state_rl[:, 2] = sin
state_rl[:, 3] = cos
else:
raise NotImplementedError
return state_rl
def state4_sl_to_pose3(state_sl: Vector4 | np.ndarray) -> Vector3 | np.ndarray:
"""
State in SL is in the form [x y cos sin], so we convert to [x y theta]
:param state_sl: state on SL dataset
:return: pose [x y theta]
"""
# Single state case
if len(state_sl.shape) == 1:
cos = state_sl[2]
sin = state_sl[3]
state_rl = np.array([state_sl[0], state_sl[1], np.arctan2(sin, cos)])
elif len(state_sl.shape) == 2:
cos = state_sl[:, 2]
sin = state_sl[:, 3]
state_rl = np.zeros(shape=(state_sl.shape[0], 3))
state_rl[:, 0] = state_sl[:, 0]
state_rl[:, 1] = state_sl[:, 1]
state_rl[:, 2] = np.arctan2(sin, cos)
else:
raise NotImplementedError
return state_rl
def transform_to_origin(obs=None, node_from=None, node_to=None):
    """
    Transform target pose to relative pose wrt car.

    :param obs: env observation
    :param node_from: node of car origin
    :param node_to: node of target pose
    :return: new target pose
    """
    # Resolve the two poses either from the node pair or from the observation
    if obs is None:
        assert node_from is not None and node_to is not None
        car_pose = node_from.pose
        target_pose = node_to.pose
    else:
        assert node_from is None and node_to is None
        car_pose = state4_to_pose3(obs['achieved_goal'])
        target_pose = state4_to_pose3(obs['desired_goal'])
    # Translate so the car sits at the origin
    dx = target_pose[0] - car_pose[0]
    dy = target_pose[1] - car_pose[1]
    # Rotate by -theta so the car's heading aligns with the x axis
    ct = np.cos(-car_pose[2])
    st = np.sin(-car_pose[2])
    return np.array([
        dx * ct - dy * st,
        dy * ct + dx * st,
        target_pose[2] - car_pose[2],
    ])
from typing import Any
import numpy as np
from rrt_ml.utilities.formulas import *
from rrt_ml.utilities.hints import *
from rrt_ml.utilities.stats import *
class RRTEpochInfo(BaseStats):
    """Train/validation losses collected for one RRT-related epoch."""

    # Annotations fixed from `list[float, ...]`, which is not a valid
    # typing form for list (the `...` syntax belongs to tuple).
    train_losses: list[float] | None = None
    val_losses: list[float] | None = None

    @classmethod
    def new(cls, train_losses, val_losses):
        """Build an instance from train/val loss lists."""
        epoch_info = cls()
        epoch_info.train_losses = train_losses
        epoch_info.val_losses = val_losses
        return epoch_info
class SLEpochInfo(BaseStats):
    """Per-epoch losses (total / KL / reconstruction) for an SL run."""

    epoch_n: int | None = None
    train_loss: Vector | None = None
    val_loss: Vector | None = None
    train_kl_loss: Vector | None = None
    val_kl_loss: Vector | None = None
    train_recon_loss: Vector | None = None
    val_recon_loss: Vector | None = None

    @classmethod
    def new(cls, epoch_n, t_l, v_l, t_kl_l, v_kl_l, t_r_l, v_r_l):
        """Bundle the epoch number with its train/val loss values."""
        info = cls()
        info.epoch_n = epoch_n
        # Paired train/val assignments per loss component.
        info.train_loss, info.val_loss = t_l, v_l
        info.train_kl_loss, info.val_kl_loss = t_kl_l, v_kl_l
        info.train_recon_loss, info.val_recon_loss = t_r_l, v_r_l
        return info
class RLEpochInfo(BaseStats):
    """Validation results recorded after an RL training epoch."""

    det_val_reward: float | None = None
    rand_val_reward: float | None = None
    env_stats: EnvStats | None = None
    train_timestep_num: int | None = None

    @classmethod
    def new(cls, det_val_reward, rand_val_reward, env_stats, train_timestep_num):
        """Bundle deterministic/random validation rewards with env stats."""
        info = cls()
        assignments = (
            ('det_val_reward', det_val_reward),
            ('rand_val_reward', rand_val_reward),
            ('env_stats', env_stats),
            ('train_timestep_num', train_timestep_num),
        )
        for field_name, value in assignments:
            setattr(info, field_name, value)
        return info
class NodeReachInfo(BaseStats):
    """Outcome of steering towards one RRT node: cost, path and raw signals."""

    cost: None | float | int = None  # timesteps (bullet) or path length (RS)
    done_info: None | dict = None
    length: None | float = None
    rewards: None | np.ndarray = None
    steers1: None | np.ndarray = None
    steers2: None | np.ndarray = None
    timesteps: None | np.ndarray = None
    vs: None | np.ndarray = None
    xs: None | np.ndarray = None
    yaws: None | np.ndarray = None
    ys: None | np.ndarray = None

    @classmethod
    def new_from_bullet(cls, timesteps, done_info, env_stats: EnvStats):
        """Build reach info from a simulated (bullet) episode; cost = timesteps."""
        # Instantiate and assign args
        stats = cls()
        stats.done_info = done_info
        stats.timesteps = timesteps
        # Secondary stats: episode 0 signals with zero padding trimmed.
        # NOTE(review): np.trim_zeros strips zeros at BOTH ends by default, so
        # genuine zero-valued samples at the edges would be dropped — confirm.
        stats.rewards = np.trim_zeros(env_stats.bullet.rewards[0, :, 0])
        stats.steers1 = np.trim_zeros(env_stats.bullet.car_steers[0, :, 0])
        stats.steers2 = np.trim_zeros(env_stats.bullet.car_steers[0, :, 1])
        stats.vs = np.trim_zeros(env_stats.bullet.car_velocities[0, :, 0])
        stats.xs = np.trim_zeros(env_stats.bullet.car_poses[0, :, 0])
        stats.yaws = np.trim_zeros(env_stats.bullet.car_poses[0, :, 2])
        stats.ys = np.trim_zeros(env_stats.bullet.car_poses[0, :, 1])
        # Length of the traced path
        stats.length = path_length(stats.xs, stats.ys)
        # Cost for bullet episodes is the number of timesteps
        stats.cost = stats.timesteps
        return stats

    @classmethod
    def new_from_rs(cls, timesteps, done_info, poses, rewards):
        """Build reach info from an analytic (RS — presumably Reeds-Shepp,
        confirm) path; cost = path length."""
        # Instantiate and assign args
        stats = cls()
        stats.timesteps = timesteps
        stats.done_info = done_info
        poses = np.array(poses)
        stats.xs = np.array(poses[:, 0])
        stats.yaws = np.array(poses[:, 2])
        stats.ys = np.array(poses[:, 1])
        stats.rewards = np.array(rewards)
        # Control signals are not available for analytic paths
        stats.steers1 = None
        stats.steers2 = None
        stats.vs = None
        # Length of the traced path
        stats.length = path_length(stats.xs, stats.ys)
        # Cost for analytic paths is the path length
        stats.cost = stats.length
        return stats
class RRTSolutionInfo(BaseStats):
    """Outcome of a single RRT planning run."""

    ep_stats: None | EnvStats = None
    cpu_time: None | float = None
    # Annotation fixed from `list[Any, ...]` (invalid typing form for list).
    list_nodes: None | list[Any] = None
    success: None | bool = None

    @classmethod
    def new(cls, ep_stats, cpu_time, list_nodes, success):
        """Bundle episode stats, timing, tree nodes and the success flag."""
        stats = cls()
        stats.ep_stats = ep_stats
        stats.cpu_time = cpu_time
        stats.list_nodes = list_nodes
        stats.success = success
        return stats | /rrt_ml-0.0.8-py3-none-any.whl/rrt_ml/utilities/infos.py | 0.922774 | 0.3398 | infos.py | pypi
from glob import glob
from pathlib import Path
from typing import Any
import numpy as np
from joblib import dump, load
from pydantic import BaseModel
from scipy.spatial.distance import euclidean
from rrt_ml.utilities.configs import *
from rrt_ml.utilities.hints import *
from rrt_ml.utilities.misc import *
class BaseStats(BaseModel):
    """Base class for stats containers.

    (De)serializes itself via joblib, splitting large pickles into numbered
    part files of at most ~96 MB each.
    """

    class Config:
        # Allow numpy arrays and other non-pydantic types as field values.
        arbitrary_types_allowed = True

    # NOTE(review): placeholder field; purpose unclear from this file — confirm.
    z: None = None

    def save_to_file(self, path: Path | str):
        """
        Pickle this object's dict to *path*, split the pickle into numbered
        part files, then delete the single (possibly large) pickle file.
        :param path: destination path (str or Path).
        """
        # Convert to string if needed
        if isinstance(path, Path):
            path = str(path)
        # Get dict and save to file (context manager guarantees the handle
        # is closed before splitting; the original relied on GC to close it)
        d = self.dict()
        with open(path, 'wb') as f:
            dump(d, f)
        # Keep only the split parts on disk
        self._split(path)
        Path(path).unlink(missing_ok=True)

    @classmethod
    def load_from_file(cls, path: Path):
        """
        Rebuild an instance from the numbered part files next to *path*.
        :param path: path the object was saved to.
        :return: new instance.
        """
        # Reassemble the single pickle from its parts
        cls()._join(str(path))
        # Instantiate class from file dict
        with open(str(path), 'rb') as f:
            d = load(f)
        new = cls(**d)
        # Remove the temporary joined file again
        path.unlink(missing_ok=True)
        return new

    @staticmethod
    def _split(path: str):
        """
        Split a large file into part files named path + NN (01, 02, ...).
        zfill(2) caps the scheme at 99 parts, matching _join's glob pattern.
        """
        counter = 0
        with open(path, 'rb') as input_file:
            while True:
                # Read a portion of the input file
                chunk = input_file.read(96000000)
                # Stop at EOF
                if not chunk:
                    break
                counter += 1
                part_name = path + str(counter).zfill(2)
                with open(part_name, 'wb') as dest_file:
                    dest_file.write(chunk)

    @staticmethod
    def _join(path: str):
        """
        Concatenate the part files (path + NN) back into a single file.

        Parts are processed in sorted order: the original iterated glob()
        results directly, and glob's ordering is not guaranteed on all
        platforms, which could reassemble the pickle out of order.
        """
        parts = sorted(glob(path + '[0-9][0-9]'))
        with open(path, 'wb') as output_file:
            for part in parts:
                with open(part, 'rb') as input_file:
                    while True:
                        b = input_file.read(96000000)
                        if not b:
                            break
                        output_file.write(b)
class EnvStats(BaseStats):
    """Per-episode simulator (bullet) and MDP transition statistics.

    All arrays are indexed as ep_n x timestep x feature and are zero-padded
    past the episode's last recorded timestep.
    """

    class _bullet(BaseStats):
        car_poses: np.ndarray | None = None  # ep_n x timestep x pose (3)
        car_steers: np.ndarray | None = None  # ep_n x timestep x position (2)
        car_velocities: np.ndarray | None = None  # ep_n x timestep x velocity (1)
        dones: np.ndarray | None = None  # ep_n x timestep x done (bool, dict[str, bool])
        rewards: np.ndarray | None = None  # ep_n x timestep x reward (1)
        target_poses: np.ndarray | None = None  # ep_n x timestep x pose (3)
        time: np.ndarray | None = None  # ep_n x timestep x time (1)
        wall_time: np.ndarray | None = None  # ep_n x timestep x wall_time (1)

    class _mdp(BaseStats):
        achieved_goals: np.ndarray | None = None  # ep_n x timestep x goal (4)
        actions: np.ndarray | None = None  # ep_n x timestep x action (2)
        desired_goals: np.ndarray | None = None  # ep_n x timestep x goal (4)
        dones: np.ndarray | None = None  # ep_n x timestep x done (bool, dict[str, bool])
        next_achieved_goals: np.ndarray | None = None  # ep_n x timestep x goal (4)
        next_desired_goals: np.ndarray | None = None  # ep_n x timestep x goal (4)
        next_states: np.ndarray | None = None  # ep_n x timestep x state(6)
        rewards: np.ndarray | None = None  # ep_n x timestep x reward (1)
        states: np.ndarray | None = None  # ep_n x timestep x state (6)

    bullet: _bullet = _bullet()
    mdp: _mdp = _mdp()

    @classmethod
    def new(cls, cfg: MasterConfig):
        """Allocate zero-filled stats arrays sized from the config."""
        self = cls()
        n_episodes = cfg.rl.val.n_episodes
        n_mdp_timesteps = cfg.env.general.max_timestep
        # The simulator steps at 240 Hz, so one MDP step spans
        # 1 / (f_val * (1/240)) bullet steps.
        n_bullet_timesteps = n_mdp_timesteps * (int(1 / (cfg.env.car.f_val * (1 / 240))))
        # MDP attributes
        self.mdp.states = np.zeros((n_episodes, n_mdp_timesteps, 6))
        self.mdp.achieved_goals = np.zeros((n_episodes, n_mdp_timesteps, 4))
        self.mdp.desired_goals = np.zeros((n_episodes, n_mdp_timesteps, 4))
        self.mdp.next_states = np.zeros((n_episodes, n_mdp_timesteps, 6))
        self.mdp.next_achieved_goals = np.zeros((n_episodes, n_mdp_timesteps, 4))
        self.mdp.next_desired_goals = np.zeros((n_episodes, n_mdp_timesteps, 4))
        self.mdp.actions = np.zeros((n_episodes, n_mdp_timesteps, 2))
        self.mdp.rewards = np.zeros((n_episodes, n_mdp_timesteps, 1))
        self.mdp.dones = np.empty((n_episodes, n_mdp_timesteps, 2), dtype=object)
        # Bullet attributes
        self.bullet.car_poses = np.zeros((n_episodes, n_bullet_timesteps, 3))
        self.bullet.car_steers = np.zeros((n_episodes, n_bullet_timesteps, 2))
        self.bullet.car_velocities = np.zeros((n_episodes, n_bullet_timesteps, 1))
        self.bullet.dones = np.empty((n_episodes, n_bullet_timesteps, 2), dtype=object)
        self.bullet.rewards = np.zeros((n_episodes, n_bullet_timesteps, 1))
        self.bullet.target_poses = np.zeros((n_episodes, n_bullet_timesteps, 3))
        self.bullet.time = np.zeros((n_episodes, n_bullet_timesteps, 1))
        self.bullet.wall_time = np.zeros((n_episodes, n_bullet_timesteps, 1))
        return self

    # The original defined this method twice, byte-for-byte identical; the
    # second definition silently overrode the first. One copy is kept.
    def get_distance_traveled(self, ep_num: int) -> float:
        """
        Calculate total distance traveled in an episode.
        :param ep_num: episode number
        :return: distance in meters
        """
        # Get xs and ys
        poses = self.bullet.car_poses[ep_num, ...]
        xs = poses[:, 0].flatten()
        ys = poses[:, 1].flatten()
        # Remove padding zeros
        xs = remove_trailing_zeros(xs)
        ys = remove_trailing_zeros(ys)
        # Sum consecutive point-to-point distances
        d = 0
        for i, (x, y) in enumerate(zip(xs[:-1], ys[:-1])):
            d += euclidean([x, y], [xs[i + 1], ys[i + 1]])
        return d

    def get_time_to_reach(self, ep_num: int):
        """
        Get total time to reach goal in an episode.
        :param ep_num: episode number
        :return: total simulation time to reach
        """
        # Get list of times
        l_times = self.bullet.time[ep_num, :, 0]
        # Get last non-zero element of array (remove zeros and flat)
        return l_times.ravel()[np.flatnonzero(l_times)][-1]
class RLStats(BaseStats):
    """Accumulated validation statistics over a whole RL training run."""

    # Annotations fixed from `list[float, ...]`-style forms, which are not
    # valid typing syntax for list.
    det_val_rewards: list[float] | None = None
    rand_val_rewards: list[float] | None = None
    env_stats: list[EnvStats] | None = None
    total_val_rewards: list[float] | None = None
    train_timestep_nums: list[int] | None = None

    @classmethod
    def new(cls, cfg):
        """Create a holder with empty lists.
        NOTE(review): cfg is accepted but unused — presumably kept for
        signature symmetry with the other stats classes; confirm."""
        stats = cls()
        stats.det_val_rewards = []
        stats.rand_val_rewards = []
        stats.env_stats = []
        stats.total_val_rewards = []
        stats.train_timestep_nums = []
        return stats
class SLStats(BaseStats):
    """Training/validation statistics accumulated over an SL run."""

    arr_epoch_idx_state_dim: None | np.ndarray = None  # epoch x idx x sample_n x state
    train_loss: Vector | None = None
    train_kl_loss: Vector | None = None
    train_recon_loss: Vector | None = None
    val_loss: Vector | None = None
    val_kl_loss: Vector | None = None
    val_recon_loss: Vector | None = None

    @classmethod
    def new(cls, cfg: MasterConfig):
        """Create an empty stats holder sized from the config."""
        holder = cls()
        # Array of generated states: epoch x val-map x generated-state x dim.
        array_shape = (
            cfg.sl.train.n_epochs,
            cfg.sl.val.n_maps,
            cfg.sl.val.n_states,
            cfg.sl.dim.state,
        )
        holder.arr_epoch_idx_state_dim = np.zeros(shape=array_shape)
        # Every loss field starts as an empty list.
        for loss_field in ('train_loss', 'train_kl_loss', 'train_recon_loss',
                           'val_loss', 'val_kl_loss', 'val_recon_loss'):
            setattr(holder, loss_field, [])
        return holder
class RRTStats(BaseStats):
    """Per-run RRT statistics: node lists and wall-clock times."""

    # Annotations fixed from `list[..., ...]`-style forms, which are not
    # valid typing syntax for list.
    l_nodes: list[list[Any]] | None = None
    l_wall_times: list[float] | None = None

    @classmethod
    def new(cls, cfg):
        """Create a holder with empty lists (cfg is accepted but unused)."""
        stats = cls()
        stats.l_nodes = []
        stats.l_wall_times = []
        return stats | /rrt_ml-0.0.8-py3-none-any.whl/rrt_ml/utilities/stats.py | 0.809163 | 0.325976 | stats.py | pypi
from pathlib import Path
from pydantic import BaseModel
class Paths(BaseModel):
home: Path = Path(__file__).parents[1]
configs: Path = home / 'configs'
configs_rl = configs / 'rl'
configs_rrt = configs / 'rrt'
configs_sl = configs / 'sl'
configs_hyper = configs / 'hyper'
data: Path = home / 'data'
data_rl: Path = data / 'rl'
data_rl_distance: Path = data_rl / 'distance'
data_sl: Path = data / 'sl'
data_sl_narrow: Path = data_sl / 'narrow'
data_sl_narrow_train_csv: Path = data_sl_narrow / 'train.csv'
data_sl_narrow_val_csv: Path = data_sl_narrow / 'val.csv'
deps: Path = home / 'deps'
deps_libraries: Path = deps / 'libraries'
deps_models: Path = deps / 'models'
environments: Path = home / 'environments'
experiments: Path = home / 'experiments'
experiments_rl: Path = experiments / 'rl'
experiments_sl: Path = experiments / 'sl'
experiments_rrt: Path = experiments / 'rrt'
utilities: Path = home / 'utilities'
def cfg(self, alg: str, name: str, hyper: bool):
if hyper:
return self.configs_hyper / (name + '.yaml')
else:
match alg:
case 'rl':
path = self.configs_rl / (name + '.yaml')
case 'sl':
path = self.configs_sl / (name + '.yaml')
case 'rrt':
path = self.configs_rrt / (name + '.yaml')
case _:
raise NotImplementedError
return path
def exp(self, alg: str, name: str):
match alg:
case 'rl':
path = self.experiments_rl / name
case 'sl':
path = self.experiments_sl / name
case 'rrt':
path = self.experiments_rrt / name
case _:
raise NotImplementedError
return path
def exp_fig(self, alg: str, name: str):
return self.exp(alg=alg, name=name) / 'figs'
def exp_tensorboard(self, alg: str, name: str):
match alg:
case 'rl': # In MRL tensorboard file is on the parent folder
return self.exp(alg=alg, name=name)
case _:
return self.exp(alg=alg, name=name) / 'tensorboard'
def exp_checkpoint(self, alg: str, name: str):
return self.exp(alg=alg, name=name) / 'checkpoint'
def exp_stats(self, alg: str, name: str):
return self.exp(alg=alg, name=name) / 'stats'
class PathsIntellisense:
    """
    Visualize all paths and files.
    Recursively mirrors a directory tree as object attributes so IDEs can
    autocomplete paths; calling a node returns its folder path.
    """

    def __init__(self, *sub_folders):
        """
        Recursively build attributes for files and folders.
        :param sub_folders: chain of folder names below cwd's parent
            (supplied by recursive calls).
        """
        home = Path().cwd().parent
        for folder in sub_folders:
            home = home / folder
        self.path = home
        skip_folders = ['.git', '.idea']
        for file_or_dir in home.glob('*'):
            # Path.name is portable; the original split str(path) on a
            # literal backslash, which only works on Windows.
            name = file_or_dir.name
            if name in skip_folders:
                continue
            elif not file_or_dir.is_dir():
                # Strip dots so the name is a valid attribute identifier
                # (e.g. 'setup.py' -> 'setuppy').
                name = name.replace('.', '')
                self.__setattr__(name, file_or_dir)
            else:
                # Recurse into sub-folders, extending the folder chain.
                self.__setattr__(name, PathsIntellisense(*sub_folders, name))

    def __call__(self, *args, **kwargs):
        """
        Call object to get the path of folders.
        :param args: None.
        :param kwargs: None.
        :return: path to folder.
        """
        return self.path
import numpy as np
import pandas as pd
import torch as t
from coral_pytorch.dataset import corn_label_from_logits
from torch.nn import functional as f
from rrt_ml.utilities.configs import *
from rrt_ml.utilities.paths import *
class CVAE(t.nn.Module):
    """Conditional VAE: encodes (state, condition) into a latent z and
    decodes (z, condition) back into a state."""

    def __init__(self, cfg: MasterConfig):
        """
        Initialize.
        :param cfg: SL config.
        """
        super(CVAE, self).__init__()
        # Attributes (the networks are created in _setup)
        self.cfg = cfg
        self.device = None
        self.mlp_encoder_shared = None
        self.mlp_encoder_mu = None
        self.mlp_encoder_sigma = None
        self.mlp_decoder = None
        self.beta_norm = None
        self._setup()

    def forward(self, x, y):
        """
        Forward.
        :param x: state vector.
        :param y: conditions vector.
        :return: x_hat, mu and log_sigma.
        """
        # Concatenate input and condition passing through encoder MLP
        x_and_y = t.cat((x, y), dim=1)
        encoder_shared_out = self.mlp_encoder_shared(x_and_y)
        # Get mu and log sigma outputs
        mu = self.mlp_encoder_mu(encoder_shared_out)
        log_sigma = self.mlp_encoder_sigma(encoder_shared_out)
        # log_sigma is used as the log-VARIANCE here: sigma = exp(log_var / 2)
        sigma = t.exp(log_sigma / 2)
        # Re-parametrization trick: z = mu + eps * sigma, eps ~ N(0, I)
        epsilon = t.randn_like(mu)
        z = mu + epsilon * sigma
        # Concatenate latent vector and condition
        z_and_y = t.cat((z, y), dim=1)
        # Pass through decoder MLP
        x_hat = self.mlp_decoder(z_and_y)
        return x_hat, mu, log_sigma

    def loss(self, x_recon, x_real, mu, log_sigma):
        """
        Calculate loss.
        :return: (total, kl, reconstruction) loss tuple.
        """
        # KL( N(mu, sigma^2) || N(0, I) ), summed over latent dims and
        # averaged over the batch, weighted by the normalized beta.
        # NOTE(review): the usual 1/2 factor is omitted — presumably
        # absorbed into beta; confirm.
        kl_loss = self.beta_norm * t.mean(
            t.sum(t.exp(log_sigma) + mu ** 2 - 1 - log_sigma, dim=1), dim=0
        )
        recon_loss = f.mse_loss(x_recon, x_real)
        # Weighted loss
        loss = kl_loss + recon_loss
        return loss, kl_loss, recon_loss

    def _setup(self):
        """
        Setup model.
        """
        self._setup_device()
        self._setup_encoder()
        self._setup_decoder()
        self._setup_beta()
        self._setup_float()

    def _setup_device(self):
        """
        Setup device, GPU if cuda available.
        """
        # Check if cuda available
        self.device = "cuda" if t.cuda.is_available() else "cpu"

    def _setup_encoder(self):
        """
        Setup encoder MLP (shared trunk plus mu/sigma heads).
        LazyLinear infers input sizes at the first forward pass.
        """
        # Encoder MLP
        mlp_encoder = []
        for i, n_units in enumerate(self.cfg.sl.net.mlp_units):
            mlp_encoder.extend([t.nn.LazyLinear(n_units), t.nn.ReLU()])
        self.mlp_encoder_shared = t.nn.Sequential(*mlp_encoder)
        # Encoder mu and sigma heads, one output per latent dimension
        self.mlp_encoder_mu = t.nn.LazyLinear(self.cfg.sl.dim.latent)
        self.mlp_encoder_sigma = t.nn.LazyLinear(self.cfg.sl.dim.latent)

    def _setup_decoder(self):
        """
        Setup decoder MLP; final layer maps back to state dimension.
        """
        # Decoder
        mlp_decoder = []
        for i, n_units in enumerate(self.cfg.sl.net.mlp_units):
            mlp_decoder.extend([t.nn.LazyLinear(n_units), t.nn.ReLU()])
        mlp_decoder.append(t.nn.LazyLinear(self.cfg.sl.dim.state))
        self.mlp_decoder = t.nn.Sequential(*mlp_decoder)

    def _setup_beta(self):
        """
        Setup KL balancing: beta scaled by latent/state dimensionality.
        """
        self.beta_norm = self.cfg.sl.loss.beta * self.cfg.sl.dim.latent / self.cfg.sl.dim.state

    def _setup_float(self):
        """
        Change model parameters to 32 bit.
        """
        self.float()
class Ordinal(t.nn.Module):
    """MLP ordinal (CORN) regressor predicting timesteps-to-reach."""

    def __init__(self, cfg: MasterConfig = MasterConfig()):
        """
        Initialize ordinal MLP regressor.
        :param cfg: master config. NOTE(review): the default instance is
            created once at import time and shared between calls — confirm
            this is intended.
        """
        super().__init__()
        self.cfg = cfg
        self.timesteps_range = None  # type: None | list[int]
        self.net = None  # built in _setup_net
        self._setup()

    def forward(self, x):
        """
        Forward.
        :param x: input batch.
        :return: CORN logits.
        """
        return self.net(x)

    def get_distance(self, x: np.ndarray):
        """
        Predict distance (in timesteps) for a batch of inputs.
        :param x: input array.
        :return: distance predictions as actual timestep values.
        """
        # Convert to tensor
        x_t = t.tensor(x, dtype=t.float32).to("cuda")
        # Forward in eval mode, no gradients
        with t.no_grad():
            self.eval()
            y = self(x_t)
            y = corn_label_from_logits(y)
        # Training labels were shifted by the dataset minimum
        # (timesteps - min in DistanceOrdinalDataset), so the minimum must be
        # ADDED back to recover actual values. The original subtracted it,
        # contradicting both its own comment and the dataset scaling.
        pred = y.cpu().numpy().flatten() + self.timesteps_range[0]
        return pred

    def _setup(self):
        """
        Run all setup steps in order.
        """
        self._setup_timesteps_range()
        self._setup_net()
        self._setup_cuda()

    def _setup_cuda(self):
        """
        Move model parameters to the GPU.
        """
        self.cuda()

    def _setup_timesteps_range(self):
        """
        Read the training csv and record [min, max] timesteps.
        """
        # Column names of the distance dataset csv
        col_names = ['x', 'y', 'sin_theta', 'cos_theta', 'success', 'time', 'timesteps', 'length']
        # Read data
        path = Paths().data_rl_distance / self.cfg.rrt.names.rl / 'train.csv'
        df = pd.read_csv(path, names=col_names, index_col=None)
        # Get min and max values for timesteps
        self.timesteps_range = [df['timesteps'].min(), df['timesteps'].max()]

    def _setup_net(self):
        """
        Build the MLP; output width is the number of ordinal classes.
        """
        # Number of ordinal classes spanned by the training data
        max_timesteps = int(self.timesteps_range[1] - self.timesteps_range[0] + 1)
        # Net: LazyLinear layers with GELU activations
        units = []
        for n_units in self.cfg.rrt.ordinal.layers:
            units.extend([t.nn.LazyLinear(n_units), t.nn.GELU()])
        units.append(t.nn.LazyLinear(max_timesteps))
        self.net = t.nn.Sequential(*units)
        # Ensure 32-bit parameters
        self.float()
        self.net.float()
class OrdinalEnsemble:
def __init__(self, cfg: MasterConfig = MasterConfig()):
"""
Initialize.
:param cfg: master config.
"""
self.models = []
def get_distance(self, x):
"""
Get distance prediction from ensemble.
:param x: input array [[x_car y_car sin_car cos_car]]
:return: distance prediction
"""
# Convert to tensor
x_t = t.tensor(x, dtype=t.float32, device='cuda', requires_grad=False)
# Initialize array
batch_size = x_t.size()[0]
all_pred = np.zeros(shape=(20, batch_size))
# Forward
with t.no_grad():
for i, model in enumerate(self.models):
model.eval()
all_pred[i, :] = corn_label_from_logits(model(x_t)).detach().cpu().numpy()
# Get mean value
all_pred = np.mean(all_pred, axis=0)
# Add minimum timesteps from dataset to get actual values
all_pred = all_pred - self.models[0].timesteps_range[0]
return all_pred | /rrt_ml-0.0.8-py3-none-any.whl/rrt_ml/utilities/models.py | 0.921025 | 0.431464 | models.py | pypi |
import numpy as np
import pandas as pd
import torch as t
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset
from rrt_ml.utilities.configs import *
from rrt_ml.utilities.paths import *
class NarrowCVAEDataset(Dataset):
    """CVAE dataset of (state, condition) pairs for the narrow-passage task."""

    def __init__(self, cfg: MasterConfig, train: bool):
        """
        Initialize.
        :param cfg: master config.
        :param train: if True use the training split, else validation.
        """
        self.cfg = cfg
        self.train = train
        self.data_ratio = cfg.sl.general.data_ratio
        self.path = None
        self.x = None
        self.y = None
        self._setup()

    def __getitem__(self, index: int) -> dict:
        """
        Get sample.
        :param index: sample index.
        :return: sample dict with 'x' (state) and 'y' (condition) tensors.
        """
        sample = dict()
        sample['x'] = t.tensor(self.x[index, :], dtype=t.float32, device='cuda')
        sample['y'] = t.tensor(self.y[index, :], dtype=t.float32, device='cuda')
        return sample

    def __len__(self):
        """
        Len of dataset.
        :return: number of samples.
        """
        return self.x.shape[0]

    def _setup(self):
        """
        Set up dataset: resolve the csv path, then load the arrays.
        """
        self._setup_path()
        self._setup_arrays()

    def _setup_path(self):
        """
        Pick the train or validation csv path.
        """
        if self.train:
            self.path = Paths().data_sl_narrow_train_csv
        else:
            self.path = Paths().data_sl_narrow_val_csv

    def _setup_arrays(self):
        """
        Load the csv and split it into x (states) and y (conditions).
        """
        # Read from master file
        arr = pd.read_csv(self.path, sep=',').to_numpy()
        # Condition = two states + obstacle description; x = one state
        y_dim = 2 * self.cfg.sl.dim.state + self.cfg.sl.dim.obstacle
        x_dim = self.cfg.sl.dim.state
        # Use self.data_ratio captured in __init__; the original stored it
        # there but then re-read the config here, risking drift.
        # (Assume data start at index 1 and are ordered y then x.)
        stop_at = int(self.data_ratio * len(arr))
        self.x = arr[:stop_at, -x_dim:]
        self.y = arr[:stop_at, 1:(y_dim + 1)]
class DistanceOrdinalDataset(Dataset):
    """(relative state, shifted-timesteps label) pairs for the ordinal
    regressor. Labels are `timesteps - min(timesteps)` so classes start at 0
    as required by the CORN formulation."""

    def __init__(self,
                 cfg: MasterConfig,
                 split_type: str,
                 scaler: None | StandardScaler = None,
                 timesteps_range: None | list[int] | str = None):
        """
        Initialize.
        :param cfg: master config.
        :param split_type: 'train', 'val' or 'test'.
        :param scaler: fitted feature scaler; None fits a new one (train).
        :param timesteps_range: [min, max] timesteps; None computes it from
            this split's data (annotation fixed from `list[int, int]`).
        """
        self.cfg = cfg
        self.split_type = split_type
        self.scaler = scaler
        self.timesteps_range = timesteps_range
        self.x = None
        self.y = None
        self._setup()

    def __getitem__(self, index):
        """
        Get sample.
        :param index: sample index.
        :return: [features tensor, label tensor] on the GPU.
        """
        x = t.tensor(self.x[index, :], dtype=t.float32, device="cuda")
        y = t.tensor(self.y[index], dtype=t.float32, device="cuda")
        return [x, y]

    def __len__(self):
        """
        Len of dataset.
        :return: number of samples.
        """
        return self.x.shape[0]

    def _setup(self):
        """
        Set up: resolve path, load arrays, scale features.
        """
        self._setup_path()
        self._setup_arrays()
        self._setup_scaling()

    def _setup_arrays(self):
        """
        Load the csv and split it into x (features) and y (class labels).
        """
        col_names = ['x', 'y', 'sin_theta', 'cos_theta', 'success', 'time', 'timesteps', 'length']
        # Read and remove some columns
        df = pd.read_csv(self.path, names=col_names, index_col=None)
        df.drop(columns=['success', 'time', 'length'], axis=1, inplace=True)
        # Remove above cutoff
        df = df[df['timesteps'] < self.cfg.rrt.ordinal.max_timesteps]
        # Get or set min and max timestep values
        if self.timesteps_range is None:
            self.timesteps_range = [df['timesteps'].min(), df['timesteps'].max()]
        # Shift labels so the smallest class is 0.
        # NOTE(review): when a train-split range is passed in, val/test rows
        # below the train minimum become negative labels — confirm upstream.
        df['timesteps'] = df['timesteps'] - self.timesteps_range[0]
        # Convert to numpy array and keep only the configured data ratio
        stop_at = int(self.cfg.rrt.ordinal.data_ratio * len(df))
        self.x = df.iloc[:stop_at, :-1].to_numpy()
        self.y = df.iloc[:stop_at, -1].to_numpy().astype(int)

    def _setup_path(self):
        """
        Set up appropriate csv path for this split.
        """
        match self.split_type:
            case 'train':
                self.path = Paths().data_rl_distance / self.cfg.rrt.names.rl / "train.csv"
            case 'val':
                self.path = Paths().data_rl_distance / self.cfg.rrt.names.rl / "val.csv"
            case 'test':
                self.path = Paths().data_rl_distance / self.cfg.rrt.names.rl / "test.csv"
            case _:
                raise NotImplementedError

    def _setup_scaling(self):
        """
        Fit (train) or reuse (val/test) the feature scaler, then transform x.
        """
        # Instantiate and fit scaler for training
        if self.scaler is None:
            self.scaler = StandardScaler().fit(self.x)
        # Transform x data
        self.x = self.scaler.transform(self.x) | /rrt_ml-0.0.8-py3-none-any.whl/rrt_ml/utilities/datasets.py | 0.841191 | 0.441252 | datasets.py | pypi
from dataclasses import dataclass
from typing import Optional, List, Any
from enum import Enum
@dataclass
class ChannelMessage:
    """A service message as published on one distribution channel."""
    ChannelId: Optional[int] = None
    ChannelMessageTranslations: Optional[List[Any]] = None
    Message: Optional[str] = None


@dataclass
class Message:
    """A rider-facing service alert attached to routes and signs.
    Field names mirror the upstream transit API's JSON keys (generated model).
    """
    # Annotated as plain `None` with no default: required constructor
    # arguments that are always null in the observed feed.
    URL: None
    DetourId: None
    SharedMessageKey: None
    Cause: Optional[int] = None
    CauseReportLabel: Optional[str] = None
    Header: Optional[str] = None
    ChannelMessages: Optional[List[ChannelMessage]] = None
    DaysOfWeek: Optional[int] = None
    Effect: Optional[int] = None
    EffectReportLabel: Optional[str] = None
    FromDate: Optional[str] = None
    FromTime: Optional[str] = None
    Message: Optional[str] = None
    MessageId: Optional[int] = None
    MessageTranslations: Optional[List[Any]] = None
    Priority: Optional[int] = None
    PublicAccess: Optional[int] = None
    Published: Optional[bool] = None
    Routes: Optional[List[int]] = None
    Signs: Optional[List[int]] = None
    ToDate: Optional[str] = None
    ToTime: Optional[str] = None
    IsPrimaryRecord: Optional[bool] = None
class TextColor(Enum):
    """Route text colors seen in the feed (hex RGB)."""
    FFFFFF = "FFFFFF"
    the000000 = "000000"  # generated name: '000000' is not a valid identifier


class CommStatus(Enum):
    """Vehicle communication status values seen in the feed."""
    GOOD = "GOOD"


class Direction(Enum):
    """Single-letter direction codes (see DirectionLong)."""
    I = "I"
    L = "L"
    O = "O"


class DirectionLong(Enum):
    """Long-form direction names."""
    Inbound = "Inbound"
    Loop = "Loop"
    Outbound = "Outbound"


class OccupancyStatusReportLabel(Enum):
    """Occupancy labels seen in the feed."""
    Empty = "Empty"


class OpStatus(Enum):
    """Vehicle operational status values seen in the feed."""
    EARLY = "EARLY"
    ONTIME = "ONTIME"
    TRIPSTART = "TRIP START"


class PropertyName(Enum):
    """Transit agency identifier."""
    RRTA = "RRTA"
@dataclass
class Vehicle:
    """Real-time state of one transit vehicle (generated API model)."""
    # Required always-null fields (annotated `None`, no default).
    Deviation: None
    DisplayStatus: None
    CurrentStatus: None
    DriverLastName: None
    DriverFirstName: None
    OnBoard: None
    BlockFareboxId: Optional[int] = None
    CommStatus: Optional[CommStatus] = None
    Destination: Optional[str] = None
    Direction: Optional[Direction] = None
    DirectionLong: Optional[DirectionLong] = None
    StopId: Optional[int] = None
    DriverName: Optional[str] = None
    DriverFareboxId: Optional[int] = None
    VehicleFareboxId: Optional[int] = None
    GPSStatus: Optional[int] = None
    Heading: Optional[int] = None  # NOTE(review): presumably degrees — confirm
    LastStop: Optional[str] = None
    LastUpdated: Optional[str] = None
    Latitude: Optional[float] = None
    Longitude: Optional[float] = None
    Name: Optional[int] = None
    OccupancyStatus: Optional[int] = None
    OpStatus: Optional[OpStatus] = None
    RouteId: Optional[int] = None
    RunId: Optional[int] = None
    Speed: Optional[int] = None
    TripId: Optional[int] = None
    VehicleId: Optional[int] = None
    SeatingCapacity: Optional[int] = None
    TotalCapacity: Optional[int] = None
    PropertyName: Optional[PropertyName] = None
    OccupancyStatusReportLabel: Optional[OccupancyStatusReportLabel] = None
@dataclass
class Route:
    """A transit route with its alerts and live vehicles (generated model)."""
    # Required always-null fields (annotated `None`, no default).
    Directions: None
    GoogleDescription: None
    Group: None
    RouteStops: None
    RouteTraceHash64: None
    Stops: None
    Color: Optional[str] = None
    IncludeInGoogle: Optional[bool] = None
    IsHeadway: Optional[bool] = None
    IsHeadwayMonitored: Optional[bool] = None
    IsVisible: Optional[bool] = None
    IvrDescription: Optional[str] = None
    LongName: Optional[str] = None
    Messages: Optional[List[Message]] = None
    RouteAbbreviation: Optional[str] = None
    RouteId: Optional[int] = None
    RouteRecordId: Optional[int] = None
    RouteTraceFilename: Optional[str] = None
    SortOrder: Optional[int] = None
    ShortName: Optional[int] = None
    TextColor: Optional[TextColor] = None
    Vehicles: Optional[List[Vehicle]] = None
    DetourActiveMessageCount: Optional[int] = None | /models/route.py | 0.836521 | 0.268496 | route.py | pypi
from enum import Enum
from dataclasses import dataclass
from typing import Optional, List
from datetime import datetime
class ModeReportLabel(Enum):
    """Departure mode labels seen in the feed."""
    Normal = "Normal"


class PropertyName(Enum):
    """Transit agency identifier."""
    RRTA = "RRTA"


class PStatusReportLabel(Enum):
    """Trip/stop status labels seen in the feed."""
    Scheduled = "Scheduled"


class DirectionCode(Enum):
    """Single-letter direction codes observed here (Loop, Outbound)."""
    L = "L"
    O = "O"
@dataclass
class Trip:
    """A scheduled trip associated with a stop departure (generated model)."""
    IVRServiceDesc: None  # required always-null field
    BlockFareboxId: Optional[int] = None
    GtfsTripId: Optional[str] = None
    InternalSignDesc: Optional[str] = None
    InternetServiceDesc: Optional[str] = None
    StopSequence: Optional[int] = None
    TripDirection: Optional[DirectionCode] = None
    TripId: Optional[int] = None
    TripRecordId: Optional[int] = None
    TripStartTime: Optional[str] = None
    TripStartTimeLocalTime: Optional[datetime] = None
    TripStatus: Optional[int] = None
    TripStatusReportLabel: Optional[PStatusReportLabel] = None


@dataclass
class Departure:
    """One scheduled/estimated departure from a stop (generated API model).
    The S*/E*/A* field prefixes appear to denote scheduled/estimated/actual
    times, with *LocalTime their localized variants — confirm against the API.
    """
    # Required always-null fields (annotated `None`, no default).
    ADT: None
    ADTLocalTime: None
    ATA: None
    ATALocalTime: None
    Bay: None
    Dev: Optional[datetime] = None  # NOTE(review): typed datetime but named like a deviation — confirm
    EDT: Optional[str] = None
    EDTLocalTime: Optional[datetime] = None
    ETA: Optional[str] = None
    ETALocalTime: Optional[datetime] = None
    IsCompleted: Optional[bool] = None
    IsLastStopOnTrip: Optional[bool] = None
    LastUpdated: Optional[str] = None
    LastUpdatedLocalTime: Optional[datetime] = None
    Mode: Optional[int] = None
    ModeReportLabel: Optional[ModeReportLabel] = None
    PropogationStatus: Optional[int] = None  # spelling mirrors the API key
    SDT: Optional[str] = None
    SDTLocalTime: Optional[datetime] = None
    STA: Optional[str] = None
    STALocalTime: Optional[datetime] = None
    StopFlag: Optional[int] = None
    StopStatus: Optional[int] = None
    StopStatusReportLabel: Optional[PStatusReportLabel] = None
    Trip: Optional[Trip] = None
    PropertyName: Optional[PropertyName] = None
@dataclass
class RouteDirection:
    """Departures for one route/direction at a stop (generated API model)."""
    HeadwayDepartures: None  # required always-null field
    Departures: Optional[List[Departure]] = None
    Direction: Optional[str] = None
    DirectionCode: Optional[DirectionCode] = None
    IsDone: Optional[bool] = None
    IsHeadway: Optional[bool] = None
    IsHeadwayMonitored: Optional[bool] = None
    RouteId: Optional[int] = None
    RouteRecordId: Optional[int] = None


@dataclass
class StopDeparture:
    """All departures at a stop, grouped by route direction."""
    LastUpdated: Optional[str] = None
    RouteDirections: Optional[List[RouteDirection]] = None
    StopId: Optional[int] = None
    StopRecordId: Optional[int] = None | /models/stopdepartures.py | 0.837421 | 0.279988 | stopdepartures.py | pypi
from enum import Enum
from dataclasses import dataclass
from typing import Optional, List, Any
class Dir(Enum):
    """Single-letter direction code (only Loop observed in this model)."""
    L = "L"


@dataclass
class Direction:
    """A route direction descriptor (generated API model)."""
    # Required always-null fields (annotated `None`, no default).
    DirectionDesc: None
    DirectionIconFileName: None
    Dir: Optional[Dir] = None


@dataclass
class ChannelMessage:
    """A service message as published on one distribution channel."""
    ChannelId: Optional[int] = None
    ChannelMessageTranslations: Optional[List[Any]] = None
    Message: Optional[str] = None


@dataclass
class Message:
    """A rider-facing service alert (generated API model)."""
    # Required always-null fields (annotated `None`, no default).
    URL: None
    DetourId: None
    SharedMessageKey: None
    Cause: Optional[int] = None
    CauseReportLabel: Optional[str] = None
    Header: Optional[str] = None
    ChannelMessages: Optional[List[ChannelMessage]] = None
    DaysOfWeek: Optional[int] = None
    Effect: Optional[int] = None
    EffectReportLabel: Optional[str] = None
    FromDate: Optional[str] = None
    FromTime: Optional[str] = None
    Message: Optional[str] = None
    MessageId: Optional[int] = None
    MessageTranslations: Optional[List[Any]] = None
    Priority: Optional[int] = None
    PublicAccess: Optional[int] = None
    Published: Optional[bool] = None
    Routes: Optional[List[int]] = None
    Signs: Optional[List[int]] = None
    ToDate: Optional[str] = None
    ToTime: Optional[str] = None
    IsPrimaryRecord: Optional[bool] = None
@dataclass
class RouteStop:
    """Association of a stop with a route/direction and its sort order."""
    Direction: Optional[Dir] = None
    RouteId: Optional[int] = None
    SortOrder: Optional[int] = None
    StopId: Optional[int] = None


@dataclass
class Stop:
    """A physical transit stop (generated API model)."""
    Description: Optional[str] = None
    IsTimePoint: Optional[bool] = None
    Latitude: Optional[float] = None
    Longitude: Optional[float] = None
    Name: Optional[str] = None
    StopId: Optional[int] = None
    StopRecordId: Optional[int] = None
@dataclass
class Vehicle:
    """Real-time state of one transit vehicle (generated API model).
    NOTE(review): status/direction fields are typed plain str here, while the
    sibling route.py model uses enums — generator inconsistency; confirm.
    """
    # Required always-null fields (annotated `None`, no default).
    Deviation: None
    DisplayStatus: None
    CurrentStatus: None
    DriverLastName: None
    DriverFirstName: None
    OnBoard: None
    BlockFareboxId: Optional[int] = None
    CommStatus: Optional[str] = None
    Destination: Optional[str] = None
    Direction: Optional[Dir] = None
    DirectionLong: Optional[str] = None
    StopId: Optional[int] = None
    DriverName: Optional[str] = None
    DriverFareboxId: Optional[int] = None
    VehicleFareboxId: Optional[int] = None
    GPSStatus: Optional[int] = None
    Heading: Optional[int] = None
    LastStop: Optional[str] = None
    LastUpdated: Optional[str] = None
    Latitude: Optional[float] = None
    Longitude: Optional[float] = None
    Name: Optional[int] = None
    OccupancyStatus: Optional[int] = None
    OpStatus: Optional[str] = None
    RouteId: Optional[int] = None
    RunId: Optional[int] = None
    Speed: Optional[int] = None
    TripId: Optional[int] = None
    VehicleId: Optional[int] = None
    SeatingCapacity: Optional[int] = None
    TotalCapacity: Optional[int] = None
    PropertyName: Optional[str] = None
    OccupancyStatusReportLabel: Optional[str] = None
@dataclass
class RouteDetails:
    """Full route record: metadata plus nested stops, messages and vehicles.

    Field names mirror the upstream API payload (PascalCase).
    """
    # The following three fields were previously annotated as bare ``None``,
    # which made them *required* constructor arguments of type NoneType.
    # They now default to None like every other field; this is
    # backward-compatible for callers that passed them explicitly.
    GoogleDescription: Optional[Any] = None
    Group: Optional[Any] = None
    RouteTraceHash64: Optional[Any] = None
    # NOTE(review): the four fields below are typed Optional[int] although
    # the names suggest text values -- confirm against the live API.
    IvrDescription: Optional[int] = None
    LongName: Optional[int] = None
    RouteAbbreviation: Optional[int] = None
    ShortName: Optional[int] = None
    Color: Optional[str] = None
    Directions: Optional[List[Direction]] = None
    IncludeInGoogle: Optional[bool] = None
    IsHeadway: Optional[bool] = None
    IsHeadwayMonitored: Optional[bool] = None
    IsVisible: Optional[bool] = None
    Messages: Optional[List[Message]] = None
    RouteId: Optional[int] = None
    RouteRecordId: Optional[int] = None
    RouteStops: Optional[List[RouteStop]] = None
    RouteTraceFilename: Optional[str] = None
    SortOrder: Optional[int] = None
    Stops: Optional[List[Stop]] = None
    TextColor: Optional[str] = None
    Vehicles: Optional[List[Vehicle]] = None
    DetourActiveMessageCount: Optional[int] = None
from __future__ import division
from builtins import map
from builtins import object
import numpy as np
import cv2
"""
#im1: object image, im2: scenery image
Tpt = cv2.perspectiveTransform(np.float32([[point]]), H) # point: [col,row] -> [x,y]
TM = cv2.getPerspectiveTransform(bx1,bx2) # box points: np.float32([Top_left,Top_right,Bottom_left,Bottom_right])
Tim = cv2.warpPerspective(im1,TM,(w,h)) # h,w = im2.shape
#Tpt: transformed point, TM: transformation matrix, Tim: transformed image
"""
def getSOpointRelation(source_shape, destine_shape, asMatrix = False):
    """
    Return parameters to change scaled points to original points.

    # destine_domain = relation * source_domain

    :param source_shape: image shape (rows, cols, ...) for the source domain
    :param destine_shape: image shape (rows, cols, ...) for the destine domain
    :param asMatrix: if True return a 3x3 homogeneous transformation matrix H
    :return: (x_relation, y_relation), or H when asMatrix is True

    .. note:: Used to get relations to convert scaled points to original
        points of an image.
    """
    scale_y = destine_shape[0] / source_shape[0]  # rows ratio
    scale_x = destine_shape[1] / source_shape[1]  # cols ratio
    if asMatrix:
        return np.array([[scale_x, 0, 0],
                         [0, scale_y, 0],
                         [0, 0, 1]])
    return scale_x, scale_y
def spoint2opointfunc(source_shape, destine_shape):
    """
    Return a function that converts scaled points to original points.

    :param source_shape: shape of the scaled image
    :param destine_shape: shape of the original image
    :return: callable mapping a point [x, y] to the rescaled point

    Example::

        forefunc = scaled2realfunc(imgf.shape, bgr.shape)
        backfunc = scaled2realfunc(imgb.shape, back.shape)
        p1fore = np.array([forefunc(i) for i in p1])
        p2back = np.array([backfunc(i) for i in p2])
    """
    x_rel, y_rel = getSOpointRelation(source_shape, destine_shape)
    relation = np.array((x_rel, y_rel), dtype=np.float32)

    def scaled2original(point):
        # rx = sx * rW / sW ; ry = sy * rH / sH
        return point * relation

    return scaled2original
def sh2oh(sH, osrc_sh, sscr_sh, odst_sh, sdst_sh):
    """
    Convert scaled transformation matrix (sH) to original (oH).

    :param sH: scaled transformation matrix
    :param osrc_sh: original source's shape
    :param sscr_sh: scaled source's shape
    :param odst_sh: original destine's shape
    :param sdst_sh: scaled destine's shape
    :return: oH, transformation matrix for the original (unscaled) images
    """
    oH = sH.copy()
    # Height, Width of each image
    Hos, Wos = (osrc_sh[0]), (osrc_sh[1])  # original source
    Hss, Wss = (sscr_sh[0]), (sscr_sh[1])  # scaled source
    Hod, Wod = (odst_sh[0]), (odst_sh[1])  # original destine
    Hsd, Wsd = (sdst_sh[0]), (sdst_sh[1])  # scaled destine
    # (the previous comments had rows and columns swapped:
    #  oH[:, i] selects a column, oH[i] selects a row)
    oH[:, 0] = oH[:, 0] * Wss / Wos  # first column: rescale source x
    oH[:, 1] = oH[:, 1] * Hss / Hos  # second column: rescale source y
    oH[0] = oH[0] * Wod / Wsd  # first row: rescale destine x
    oH[1] = oH[1] * Hod / Hsd  # second row: rescale destine y
    return oH
def invertH(H):
    """
    Invert a transformation matrix (i.e. the inverse perspective mapping).

    :param H: transformation matrix
    :return: inverted matrix
    """
    return np.linalg.inv(H)
def conv3H4H(M):
    """
    Convert a 3D transformation matrix (TM) to 4D TM.

    :param M: 3x3 matrix
    :return: 4x4 matrix

    NOTE(review): the result's last row is [0, 0, 1, 0] and its last column
    is all zeros, so the returned matrix is singular; confirm whether
    [0, 0, 0, 1] was intended for a proper homogeneous matrix.
    """
    with_row = np.append(M.copy(), [[0, 0, 1]], 0)  # add bottom row
    zero_col = [[0], [0], [0], [0]]
    return np.append(with_row, zero_col, 1)  # add right column
def apply2kp_pairs(kp_pairs, kp1_rel, kp2_rel, func=None):
    """
    Apply point transformations to each keypoint pair.

    :param kp_pairs: list of (kp1, kp2) pairs
    :param kp1_rel: (x, y) relation tuple or function to apply to kp1
    :param kp2_rel: (x, y) relation tuple or function to apply to kp2
    :param func: optional factory to build a new copy of each keypoint
    :return: transformed kp_pairs
    """
    def withtupple(keypoint, kp_op):
        # kp_op is an (x, y) scaling tuple
        if func:
            keypoint = func(keypoint)
        try:
            # dict-like keypoint (e.g. a serialized keypoint)
            keypoint = keypoint.copy()
            keypoint["pt"] = np.multiply(keypoint["pt"], kp_op)
        except (AttributeError, TypeError, KeyError):
            # was a bare `except:`; narrowed to the failures the dict path
            # can actually produce (cv2.KeyPoint-like object instead)
            x, y = keypoint.pt
            rx, ry = kp_op
            keypoint.pt = (x * rx, y * ry)
        return keypoint

    def withfunc(keypoint, kp_op):
        # kp_op is a callable mapping (x, y) -> new point
        if func:
            keypoint = func(keypoint)
        try:
            keypoint = keypoint.copy()
            keypoint["pt"] = kp_op(*keypoint["pt"])
        except (AttributeError, TypeError, KeyError):
            x, y = keypoint.pt
            keypoint.pt = kp_op(x, y)
        return keypoint

    # tuples are treated as (x, y) scale operands, anything else as a callable
    kp1_func = withtupple if type(kp1_rel) is tuple else withfunc
    kp2_func = withtupple if type(kp2_rel) is tuple else withfunc
    return [(kp1_func(i, kp1_rel), kp2_func(j, kp2_rel)) for i, j in kp_pairs]
def spairs2opairs(kp_pairs, osrc_sh, sscr_sh, odst_sh, sdst_sh, func=None):
    """
    Convert scaled kp_pairs to original kp_pairs.

    :param kp_pairs: list of kp_pairs
    :param osrc_sh: original source's shape
    :param sscr_sh: scaled source's shape
    :param odst_sh: original destine's shape
    :param sdst_sh: scaled destine's shape
    :param func: function to build a new copy of each keypoint
    :return: transformed kp_pairs
    """
    fore_relation = getSOpointRelation(sscr_sh, osrc_sh)  # scaled -> original source
    back_relation = getSOpointRelation(sdst_sh, odst_sh)  # scaled -> original destine
    return apply2kp_pairs(kp_pairs, fore_relation, back_relation, func=func)
def keyPoint2tuple(keypoint):
    """Serialize a keypoint to (pt, size, angle, response, octave, class_id)."""
    return (keypoint.pt, keypoint.size, keypoint.angle,
            keypoint.response, keypoint.octave, keypoint.class_id)
def tuple2keyPoint(points, func = cv2.KeyPoint):
    """Build a keypoint from (pt, size, angle, response, octave, class_id).

    KeyPoint([x, y, _size[, _angle[, _response[, _octave[, _class_id]]]]])
    """
    args = (points[0][0], points[0][1], points[1],
            points[2], points[3], points[4], points[5])
    return func(*args)
def dict2keyPoint(d, func = cv2.KeyPoint):
    """Build a keypoint from a serialized keypoint dictionary.

    KeyPoint([x, y, _size[, _angle[, _response[, _octave[, _class_id]]]]])
    """
    args = (d["pt"][0], d["pt"][1], d["size"], d["angle"],
            d["response"], d["octave"], d["class_id"])
    return func(*args)
class SimKeyPoint(object):
    """
    Simulates an OpenCV keypoint (allows manipulation, conversion and
    serialization of keypoints).

    .. note:: Used for conversions and data persistence.
    """
    # FIXME: correct for memoizer: some warnings are created if the script
    # is run as __main__; it would be great if cv2.KeyPoint did not have
    # pickling incompatibilities

    def __init__(self, *args):
        if len(args) == 1:
            source = args[0]
            if isinstance(source, dict):
                # built from a serialized dictionary
                self.__dict__.update(source)
                return
            if isinstance(source, tuple):
                # broadcast the tuple exactly like cv2.KeyPoint arguments
                args = source
            else:
                # assume a cv2.KeyPoint-like object: copy its attributes
                for attr in ("angle", "class_id", "octave",
                             "pt", "response", "size"):
                    setattr(self, attr, getattr(source, attr))
                return
        # positional arguments, same order as cv2.KeyPoint
        self.pt = args[0]
        self.size = args[1]
        self.angle = args[2]
        self.response = args[3]
        self.octave = args[4]
        self.class_id = args[5]
def contour2points(contours):
    """
    Convert contours to points. (cnt2pts)

    :param contours: array of contours (cnt) ([[x,y]] only for openCV)
    :return: array of points

    Example::

        contours = np.array([[[0, 0]], [[1, 0]]])
        points = contour2points(contours)
        print(points)  # np.array([[0, 0], [1, 0]])
    """
    return contours.reshape(-1, 2)


cnt2pts = contour2points  # short alias kept for compatibility reasons
def points2contour(points):
    """
    Convert points to contours. (pts2cnt)

    :param points: array of points ([x,y] for openCV, [y,x] for numpy)
    :return: array of contours

    Example::

        points = np.array([[0, 0], [1, 0]])
        contours = points2contour(points)
        print(contours)  # np.array([[[0, 0]], [[1, 0]]])
    """
    return points.reshape(-1, 1, 2)


pts2cnt = points2contour  # short alias kept for compatibility reasons
def toTupple(obj):
    """
    Convert recursively to tuple.

    :param obj: numpy array, list structure, iterators, etc.
    :return: tuple representation of obj; non-iterable leaves are returned
        unchanged. Strings and bytes are treated as atomic leaves -- iterating
        a 1-character string yields itself, so recursing into them never
        terminated (RecursionError in the previous version).
    """
    if isinstance(obj, (str, bytes)):
        return obj
    try:
        return tuple(map(toTupple, obj))
    except TypeError:
        # not iterable: a scalar leaf
        return obj
def points2vectos(pts, origin=None):
    """
    Convert points to vectors with respect to origin.

    :param pts: array of points.
    :param origin: point of origin; defaults to the zero vector.
    :return: vectors (pts - origin).
    """
    pts = np.array(pts)
    if origin is None:
        # NOTE: was `origin or np.zeros_like(pts)`, which raised
        # "truth value of an array is ambiguous" for numpy-array origins
        origin = np.zeros_like(pts)
    return pts - np.asarray(origin)
def vectos2points(vecs, origin=None):
    """
    Convert vectors to points with respect to origin.

    :param vecs: array of vectors.
    :param origin: point of origin; defaults to the zero vector.
    :return: points (vecs + origin).
    """
    vecs = np.array(vecs)
    if origin is None:
        # NOTE: was `origin or np.zeros_like(vecs)`, which raised
        # "truth value of an array is ambiguous" for numpy-array origins
        origin = np.zeros_like(vecs)
    return vecs + np.asarray(origin)
# Maps a (sign(x), sign(y)) quadrant pair to a human-readable direction name;
# used as the default translation table by translateQuadrants.
quadrantmap = {(0,0):"origin",(1,0):"right",(1,1):"right-up",(0,1):"up",(-1,1):"left-up",
(-1,0):"left",(-1,-1):"left-down",(0,-1):"down",(1,-1):"right-down"}
def translateQuadrants(quadrants, quadrantmap=quadrantmap):
    """
    Convert quadrants into human readable data.

    :param quadrants: array of (sign(x), sign(y)) quadrant pairs.
    :param quadrantmap: dictionary map to translate quadrants, of the form::

        {(0,0):"origin",(1,0):"right",(1,1):"right-up",(0,1):"up",(-1,1):"left-up",
         (-1,0):"left",(-1,-1):"left-down",(0,-1):"down",(1,-1):"right-down"}

    :return: list of translated quadrants.
    """
    translated = []
    for quadrant in toTupple(quadrants):
        translated.append(quadrantmap[quadrant])
    return translated
import os
import re
import sys
from typing import List, Dict, Text, Union, Any
from loguru import logger
from rrtv_httprunner import exceptions, globalvar
from rrtv_httprunner.loader import load_project_meta, convert_relative_project_root_dir
from rrtv_httprunner.parser import parse_data
from rrtv_httprunner.utils import sort_dict_by_custom_order
def convert_variables(
        raw_variables: Union[Dict, List, Text], test_path: Text
) -> Dict[Text, Any]:
    """Normalize testcase variables to a single dict.

    Accepts the v2/v3 styles: a dict is returned unchanged, a list of
    one-item dicts is merged, and a string is parsed as a function call,
    e.g. ``${get_variables()}``.

    :raises exceptions.TestCaseFormatError: for any other format
    """
    if isinstance(raw_variables, Dict):
        return raw_variables

    if isinstance(raw_variables, List):
        # e.g. [{"var1": 1}, {"var2": 2}] -> {"var1": 1, "var2": 2}
        merged: Dict[Text, Any] = {}
        for var_item in raw_variables:
            if not isinstance(var_item, Dict) or len(var_item) != 1:
                raise exceptions.TestCaseFormatError(
                    f"Invalid variables format: {raw_variables}"
                )
            merged.update(var_item)
        return merged

    if isinstance(raw_variables, Text):
        # get variables by function, e.g. ${get_variables()}
        project_meta = load_project_meta(test_path)
        return parse_data(raw_variables, {}, project_meta.functions)

    raise exceptions.TestCaseFormatError(
        f"Invalid variables format: {raw_variables}"
    )
def _convert_jmespath(raw: Text) -> Text:
    """Convert a v2 extractor expression to a v3 jmespath expression.

    ``content.xx`` / ``json.xx`` become ``body.xx``; fields containing ``-``
    are quoted, and numeric segments become list indexes
    (``lst.0.name`` -> ``lst[0].name``).

    :raises exceptions.TestCaseFormatError: when raw is not a string
    """
    if not isinstance(raw, Text):
        raise exceptions.TestCaseFormatError(f"Invalid jmespath extractor: {raw}")

    # content.xx/json.xx => body.xx
    for prefix in ("content", "json"):
        if raw.startswith(prefix):
            raw = f"body{raw[len(prefix):]}"
            break

    converted = []
    for part in raw.split("."):
        if "-" in part:
            # add quotes for a field containing a separator,
            # e.g. headers.Content-Type => headers."Content-Type"
            converted.append('"{}"'.format(part.strip('"')))
        elif part.isdigit():
            # a numeric segment indexes the previous field: lst.0.name -> lst[0].name
            if not converted:
                logger.error(f"Invalid jmespath: {raw}")
                sys.exit(1)
            converted[-1] = f"{converted[-1]}[{part}]"
        else:
            converted.append(part)

    return ".".join(converted)
def _convert_extractors(extractors: Union[List, Dict]) -> Dict:
    """Convert an extract list (v2) to a dict (v3).

    Args:
        extractors: [{"varA": "content.varA"}, {"varB": "json.varB"}]

    Returns:
        {"varA": "body.varA", "varB": "body.varB"}

    Note: a dict input is converted *in place* and returned.
    """
    if isinstance(extractors, Dict):
        # already v3 shape, e.g. {"varA": "body.varA"}
        v3_extractors = extractors
    elif isinstance(extractors, List):
        v3_extractors = {}
        for extractor in extractors:
            if not isinstance(extractor, Dict):
                logger.error(f"Invalid extractor: {extractors}")
                sys.exit(1)
            v3_extractors.update(extractor)
    else:
        logger.error(f"Invalid extractor: {extractors}")
        sys.exit(1)

    for key in v3_extractors:
        v3_extractors[key] = _convert_jmespath(v3_extractors[key])
    return v3_extractors
def _convert_validators(validators: List) -> List:
    """Convert v2 validator 'check' expressions to v3 jmespath, in place."""
    for validator in validators:
        if "check" in validator and "expect" in validator:
            # format1: {"check": "content.abc", "assert": "eq", "expect": 201}
            validator["check"] = _convert_jmespath(validator["check"])
        elif len(validator) == 1:
            # format2: {'eq': ['status_code', 201]}
            comparator = next(iter(validator))
            validator[comparator][0] = _convert_jmespath(validator[comparator][0])
    return validators
def _sort_request_by_custom_order(request: Dict) -> Dict:
    """Reorder request keys canonically (method/url first) for readability."""
    custom_order = [
        "method", "url", "params", "headers", "cookies",
        "data", "json", "files", "timeout", "allow_redirects",
        "proxies", "verify", "stream", "auth", "cert",
    ]
    return sort_dict_by_custom_order(request, custom_order)
def _sort_step_by_custom_order(step: Dict) -> Dict:
    """Reorder step keys canonically (name/variables/request first)."""
    custom_order = [
        "name", "variables", "request", "testcase",
        "setup_hooks", "teardown_hooks",
        "extract", "validate", "validate_script",
    ]
    return sort_dict_by_custom_order(step, custom_order)
def _ensure_step_attachment(step: Dict) -> Dict:
    """Copy the optional v2 step attributes into a v3 test dict,
    converting extractors and validators on the way."""
    test_dict = {"name": step["name"]}

    # attributes copied through unchanged
    for key in ("variables", "setup_hooks", "teardown_hooks"):
        if key in step:
            test_dict[key] = step[key]

    if "extract" in step:
        test_dict["extract"] = _convert_extractors(step["extract"])

    if "export" in step:
        test_dict["export"] = step["export"]

    if "validate" in step:
        if not isinstance(step["validate"], List):
            raise exceptions.TestCaseFormatError(
                f'Invalid teststep validate: {step["validate"]}'
            )
        test_dict["validate"] = _convert_validators(step["validate"])

    if "validate_script" in step:
        test_dict["validate_script"] = step["validate_script"]

    return test_dict
def ensure_testcase_v3_api(api_content: Dict) -> Dict:
    """Wrap a v2 api definition as a single-step v3 testcase."""
    logger.info("convert api in v2 to testcase format v3")

    teststep = {"request": _sort_request_by_custom_order(api_content["request"])}
    teststep.update(_ensure_step_attachment(api_content))
    teststep = _sort_step_by_custom_order(teststep)

    config = {"name": api_content["name"]}
    extracted_names = list(teststep.get("extract", {}).keys())
    if extracted_names:
        # extracted variables are exported so referencing testcases can use them
        config["export"] = extracted_names

    return {"config": config, "teststeps": [teststep]}
def ensure_testcase_v3(test_content: Dict) -> Dict:
    """Convert a v2 testcase structure to v3, step by step."""
    logger.info("ensure compatibility with testcase format v2")

    if "teststeps" not in test_content:
        logger.error(f"Miss teststeps: {test_content}")
        sys.exit(1)

    steps = test_content["teststeps"]
    if not isinstance(steps, list):
        logger.error(
            f"teststeps should be list type, got {type(steps)}: {steps}"
        )
        sys.exit(1)

    v3_content = {"config": test_content["config"], "teststeps": []}
    for step in steps:
        teststep = {}
        # exactly one of request/api/testcase identifies the step kind
        if "request" in step:
            teststep["request"] = _sort_request_by_custom_order(step.pop("request"))
        elif "api" in step:
            teststep["testcase"] = step.pop("api")
        elif "testcase" in step:
            teststep["testcase"] = step.pop("testcase")
        else:
            raise exceptions.TestCaseFormatError(f"Invalid teststep: {step}")

        teststep.update(_ensure_step_attachment(step))
        v3_content["teststeps"].append(_sort_step_by_custom_order(teststep))

    return v3_content
def ensure_cli_args(args: List) -> List:
    """ ensure compatibility with deprecated cli args in v2
    """
    # NOTE: args is mutated in place (pop/replace) and also returned.
    # remove deprecated --failfast
    if "--failfast" in args:
        logger.warning(f"remove deprecated argument: --failfast")
        args.pop(args.index("--failfast"))
    # convert --report-file to --html
    if "--report-file" in args:
        logger.warning(f"replace deprecated argument --report-file with --html")
        index = args.index("--report-file")
        args[index] = "--html"
        args.append("--self-contained-html")
    # keep compatibility with --save-tests in v2
    if "--save-tests" in args:
        logger.warning(
            f"generate conftest.py keep compatibility with --save-tests in v2"
        )
        args.pop(args.index("--save-tests"))
        _generate_conftest_for_summary(args)
    # write api information to Excel --toexcel
    # NOTE(review): `"--toexcel" in arg` is a substring test, so any argument
    # merely containing "--toexcel" matches -- confirm a prefix match was intended.
    for arg in args:
        toexcel = globalvar.get_value("toexcel", None)
        if toexcel is None and "--toexcel" in arg:
            index = args.index(arg)
            excel_regex_compile = re.compile(r"--toexcel=([\s\S\r\n]*)")
            func_match = excel_regex_compile.match(arg, 0)
            if func_match:
                # remember the target path and remove any stale output file
                globalvar.set_value("toexcel", str(func_match.group(1)))
                path = globalvar.get_value("toexcel")
                if os.path.exists(path):
                    os.remove(path)
            args.pop(index)
            break
    if "--todb" in args:
        logger.debug("测试用例写入数据库")  # i.e. "write test cases to the database"
        args.pop(args.index("--todb"))
        globalvar.set_value("todb", True)
    # env config --env
    for arg in args:
        env = globalvar.get_value("env", None)
        if env is None and "--env" in arg:
            index = args.index(arg)
            env_regex_compile = re.compile(r"--env=([\s\S\r\n]*)")
            func_match = env_regex_compile.match(arg, 0)
            if func_match:
                globalvar.set_value("env", str(func_match.group(1)))
            else:
                # the flag was present but not in --env=VALUE form
                logger.error(f"Miss env:{arg}")
                sys.exit(1)
            args.pop(index)
            break
    return args
def _generate_conftest_for_summary(args: List):
    """Generate a conftest.py in the project root that collects a v2-style
    summary.json after the pytest session (compatibility with --save-tests)."""
    # take the first existing path in args as the test path
    for arg in args:
        if os.path.exists(arg):
            test_path = arg
            # FIXME: several test paths maybe specified
            break
    else:
        logger.error(f"No valid test path specified! \nargs: {args}")
        sys.exit(1)

    # template written verbatim to conftest.py; the placeholder below is
    # replaced with the computed summary path
    conftest_content = '''# NOTICE: Generated By HttpRunner.
import json
import os
import time
import pytest
from loguru import logger
from rrtv_httprunner.utils import get_platform, ExtendJSONEncoder


@pytest.fixture(scope="session", autouse=True)
def session_fixture(request):
    """setup and teardown each task"""
    logger.info(f"start running testcases ...")
    start_at = time.time()
    yield
    logger.info(f"task finished, generate task summary for --save-tests")

    summary = {
        "success": True,
        "stat": {
            "testcases": {"total": 0, "success": 0, "fail": 0},
            "teststeps": {"total": 0, "failures": 0, "successes": 0},
        },
        "time": {"start_at": start_at, "duration": time.time() - start_at},
        "platform": get_platform(),
        "details": [],
    }

    for item in request.node.items:
        testcase_summary = item.instance.get_summary()
        summary["success"] &= testcase_summary.success

        summary["stat"]["testcases"]["total"] += 1
        summary["stat"]["teststeps"]["total"] += len(testcase_summary.step_datas)
        if testcase_summary.success:
            summary["stat"]["testcases"]["success"] += 1
            summary["stat"]["teststeps"]["successes"] += len(
                testcase_summary.step_datas
            )
        else:
            summary["stat"]["testcases"]["fail"] += 1
            summary["stat"]["teststeps"]["successes"] += (
                len(testcase_summary.step_datas) - 1
            )
            summary["stat"]["teststeps"]["failures"] += 1

        testcase_summary_json = testcase_summary.dict()
        testcase_summary_json["records"] = testcase_summary_json.pop("step_datas")
        summary["details"].append(testcase_summary_json)

    summary_path = "{{SUMMARY_PATH_PLACEHOLDER}}"
    summary_dir = os.path.dirname(summary_path)
    os.makedirs(summary_dir, exist_ok=True)
    with open(summary_path, "w", encoding="utf-8") as f:
        json.dump(summary, f, indent=4, ensure_ascii=False, cls=ExtendJSONEncoder)
    logger.info(f"generated task summary: {summary_path}")
'''
    project_meta = load_project_meta(test_path)
    project_root_dir = project_meta.RootDir
    conftest_path = os.path.join(project_root_dir, "conftest.py")

    test_path = os.path.abspath(test_path)
    logs_dir_path = os.path.join(project_root_dir, "logs")
    test_path_relative_path = convert_relative_project_root_dir(test_path)

    # derive the summary.json location under logs/, mirroring the test layout
    if os.path.isdir(test_path):
        file_foder_path = os.path.join(logs_dir_path, test_path_relative_path)
        dump_file_name = "all.summary.json"
    else:
        file_relative_folder_path, test_file = os.path.split(test_path_relative_path)
        file_foder_path = os.path.join(logs_dir_path, file_relative_folder_path)
        test_file_name, _ = os.path.splitext(test_file)
        dump_file_name = f"{test_file_name}.summary.json"

    summary_path = os.path.join(file_foder_path, dump_file_name)
    conftest_content = conftest_content.replace(
        "{{SUMMARY_PATH_PLACEHOLDER}}", summary_path
    )

    dir_path = os.path.dirname(conftest_path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    with open(conftest_path, "w", encoding="utf-8") as f:
        f.write(conftest_content)

    logger.info("generated conftest.py to generate summary.json")
def ensure_path_sep(path: Text) -> Text:
    """Normalize both '/' and backslash separators in *path* to os.sep,
    for compatibility between Linux and Windows."""
    for sep in ("/", "\\"):
        if sep in path:
            path = os.sep.join(path.split(sep))
    return path
import csv
import importlib
import json
import os
import sys
import types
from typing import Tuple, Dict, Union, Text, List, Callable
import yaml
from loguru import logger
from pydantic import ValidationError
from rrtv_httprunner import builtin, utils
from rrtv_httprunner import exceptions
from rrtv_httprunner.models import TestCase, ProjectMeta, TestSuite, GlobalConfig
from rrtv_httprunner.utils import loads_json
try:
    # PyYAML version >= 5.1
    # ref: https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
    # silence the yaml.load() deprecation warning
    yaml.warnings({"YAMLLoadWarning": False})
except AttributeError:
    # older PyYAML has no yaml.warnings(); nothing to silence
    pass


# cached project meta, populated once by load_project_meta()
project_meta: Union[ProjectMeta, None] = None
def _load_yaml_file(yaml_file: Text) -> Dict:
    """ load yaml file and check file content format

    :param yaml_file: path to the YAML file
    :raises exceptions.FileFormatError: if the content is not valid YAML
    """
    with open(yaml_file, mode="rb") as stream:
        try:
            # pass Loader explicitly: a bare yaml.load() is deprecated since
            # PyYAML 5.1 (which this module targets, see the warnings guard
            # above) and unsafe on untrusted input
            yaml_content = yaml.load(stream, Loader=yaml.FullLoader)
        except yaml.YAMLError as ex:
            err_msg = f"YAMLError:\nfile: {yaml_file}\nerror: {ex}"
            logger.error(err_msg)
            raise exceptions.FileFormatError
    return yaml_content
def _load_json_file(json_file: Text) -> Dict:
    """Load a JSON file and check its content format.

    :raises exceptions.FileFormatError: if the content is not valid JSON
    """
    with open(json_file, mode="rb") as data_file:
        try:
            return json.load(data_file)
        except json.JSONDecodeError as ex:
            raise exceptions.FileFormatError(
                f"JSONDecodeError:\nfile: {json_file}\nerror: {ex}"
            )
def load_test_file(test_file: Text) -> Dict:
    """Load testcase/testsuite file content (YAML or JSON, by suffix).

    :raises exceptions.FileNotFound: when the file does not exist
    :raises exceptions.FileFormatError: for any other suffix
    """
    if not os.path.isfile(test_file):
        raise exceptions.FileNotFound(f"test file not exists: {test_file}")

    suffix = os.path.splitext(test_file)[1].lower()
    if suffix == ".json":
        return _load_json_file(test_file)
    if suffix in (".yaml", ".yml"):
        return _load_yaml_file(test_file)

    # '' or any other suffix is unsupported
    raise exceptions.FileFormatError(
        f"testcase/testsuite file should be YAML/JSON format, invalid format file: {test_file}"
    )
def load_testcase(testcase: Dict) -> TestCase:
    """Validate a testcase dict against the pydantic TestCase model.

    :raises exceptions.TestCaseFormatError: when validation fails
    """
    try:
        return TestCase.parse_obj(testcase)
    except ValidationError as ex:
        raise exceptions.TestCaseFormatError(
            f"TestCase ValidationError:\nerror: {ex}\ncontent: {testcase}"
        )
def load_testcase_file(testcase_file: Text) -> TestCase:
    """Load a testcase file, validate it, and remember its path on config."""
    content = load_test_file(testcase_file)
    testcase_obj = load_testcase(content)
    testcase_obj.config.path = testcase_file
    return testcase_obj
def load_testsuite(testsuite: Dict) -> TestSuite:
    """Validate a testsuite dict against the pydantic TestSuite model.

    :raises exceptions.TestSuiteFormatError: when validation fails
    """
    path = testsuite["config"]["path"]
    try:
        return TestSuite.parse_obj(testsuite)
    except ValidationError as ex:
        raise exceptions.TestSuiteFormatError(
            f"TestSuite ValidationError:\nfile: {path}\nerror: {ex}"
        )
def load_dot_env_file(dot_env_path: Text) -> Dict:
    """ load .env file.

    Each non-empty line is either ``NAME=VALUE`` or ``NAME:VALUE``; loaded
    variables are also exported into os.environ via utils.set_os_environ.

    Args:
        dot_env_path (str): .env file path

    Returns:
        dict: environment variables mapping, e.g.

        {
            "UserName": "debugtalk",
            "Password": "123456",
            "PROJECT_KEY": "ABCDEFGH"
        }

    Raises:
        exceptions.FileFormatError: If .env file format is invalid.
    """
    if not os.path.isfile(dot_env_path):
        # missing .env is not an error
        return {}

    logger.info(f"Loading environment variables from {dot_env_path}")
    env_variables_mapping = {}

    with open(dot_env_path, mode="rb") as fp:
        for line in fp:
            # split on the first '=' (preferred) or ':' only
            if b"=" in line:
                variable, value = line.split(b"=", 1)
            elif b":" in line:
                variable, value = line.split(b":", 1)
            else:
                raise exceptions.FileFormatError(".env format error")
            name = variable.strip().decode("utf-8")
            env_variables_mapping[name] = value.strip().decode("utf-8")

    utils.set_os_environ(env_variables_mapping)
    return env_variables_mapping
def load_csv_file(csv_file: Text) -> List[Dict]:
    """ load csv file and check file content format

    Relative paths are resolved against the project RootDir, so
    load_project_meta() must have been called first in that case.

    Args:
        csv_file (str): csv file path

    Returns:
        list: list of parameters, each parameter is in dict format

    Examples:
        >>> cat csv_file
        username,password
        test1,111111
        test2,222222
        >>> load_csv_file(csv_file)
        [
            {'username': 'test1', 'password': '111111'},
            {'username': 'test2', 'password': '222222'}
        ]

    Raises:
        exceptions.CSVNotFound: If the csv file does not exist.
    """
    if not os.path.isabs(csv_file):
        global project_meta
        if project_meta is None:
            raise exceptions.MyBaseFailure("load_project_meta() has not been called!")
        # make compatible with Windows/Linux
        csv_file = os.path.join(project_meta.RootDir, *csv_file.split("/"))

    if not os.path.isfile(csv_file):
        # file path not exist
        raise exceptions.CSVNotFound(csv_file)

    with open(csv_file, encoding="utf-8") as csvfile:
        return list(csv.DictReader(csvfile))
def load_folder_files(folder_path: Text, recursive: bool = True) -> List:
    """ load folder path, return all files ending with .yml/.yaml/.json/_test.py.

    Args:
        folder_path (str): folder to scan; a list/set of folders is also accepted
        recursive (bool): descend into subfolders when True

    Returns:
        list: matching file paths
    """
    if isinstance(folder_path, (list, set)):
        # fan out over a collection of folders (deduplicated)
        collected = []
        for path in set(folder_path):
            collected.extend(load_folder_files(path, recursive))
        return collected

    if not os.path.exists(folder_path):
        return []

    file_list = []
    for dirpath, _dirnames, filenames in os.walk(folder_path):
        file_list.extend(
            os.path.join(dirpath, filename)
            for filename in filenames
            if filename.lower().endswith((".yml", ".yaml", ".json", "_test.py"))
        )
        if not recursive:
            # only the top-level folder was requested
            break
    return file_list
def load_module_functions(module) -> Dict[Text, Callable]:
    """ load python module functions.

    Only plain Python functions (types.FunctionType) are collected;
    builtins, classes and other attributes are skipped.

    Args:
        module: python module

    Returns:
        dict: functions mapping for the module, e.g.

        {
            "func1_name": func1,
            "func2_name": func2
        }
    """
    return {
        name: attr
        for name, attr in vars(module).items()
        if isinstance(attr, types.FunctionType)
    }
def load_builtin_functions() -> Dict[Text, Callable]:
    """Return the function mapping of the builtin helpers module."""
    return load_module_functions(builtin)
def locate_file(start_path: Text, file_name: Text) -> Text:
    """ locate filename and return absolute file path.

    Searches recursively upward until the system root dir.

    Args:
        file_name (str): target locate file name
        start_path (str): start locating path, maybe file path or directory path

    Returns:
        str: located file path (absolute).

    Raises:
        exceptions.FileNotFound: If failed to locate file.
    """
    if os.path.isfile(start_path):
        start_dir_path = os.path.dirname(start_path)
    elif os.path.isdir(start_path):
        start_dir_path = start_path
    else:
        raise exceptions.FileNotFound(f"invalid path: {start_path}")

    candidate = os.path.join(start_dir_path, file_name)
    if os.path.isfile(candidate):
        # ensure absolute
        return os.path.abspath(candidate)

    parent_dir = os.path.dirname(start_dir_path)
    if parent_dir == start_dir_path:
        # reached the system root ('/' on Linux/Darwin, e.g. 'E:\\' on Windows)
        raise exceptions.FileNotFound(f"{file_name} not found in {start_path}")

    # keep searching one directory up
    return locate_file(parent_dir, file_name)
def locate_debugtalk_py(start_path: Text) -> Text:
    """ locate debugtalk.py file

    Args:
        start_path (str): start locating path,
            maybe testcase file path or directory path

    Returns:
        str: debugtalk.py file path, None if not found
    """
    try:
        return locate_file(start_path, "debugtalk.py")
    except exceptions.FileNotFound:
        return None
def locate_custom_py(start_path: Text, project_root_directory: Text) -> Text:
    """ locate {custom}.py file

    The hooks file name is the name of start_path's parent directory, e.g.
    for ``testcases/login/case.yml`` this resolves ``<root>/hooks/login.py``.
    The file's existence is NOT checked here.

    Args:
        start_path (str): start locating path,
            maybe testcase file path or directory path
        project_root_directory (str): project RootDir containing the hooks folder

    Returns:
        str: {custom}.py file path
    """
    # os.path.basename instead of the previous split("/")[-1], which broke
    # on Windows paths using backslash separators; the old try/except around
    # this block was dead code (os.path calls here never raise FileNotFound)
    start_dir_path = os.path.dirname(start_path)
    custom_module_name = os.path.basename(start_dir_path)
    custom_dir_path = os.path.join(project_root_directory, "hooks")
    return os.path.join(custom_dir_path, custom_module_name + ".py")
def locate_project_root_directory(test_path: Text) -> Tuple[str, str]:
    """ locate debugtalk.py path as project root directory

    Args:
        test_path: specified testfile path

    Returns:
        (str, str): debugtalk.py path (or None), project_root_directory
    """
    def prepare_path(path):
        # validate existence and make the path absolute
        if not os.path.exists(path):
            err_msg = f"path not exist: {path}"
            logger.error(err_msg)
            raise exceptions.FileNotFound(err_msg)
        if not os.path.isabs(path):
            path = os.path.join(os.getcwd(), path)
        return path

    test_path = prepare_path(test_path)

    debugtalk_path = locate_debugtalk_py(test_path)
    if debugtalk_path:
        # the folder containing debugtalk.py is treated as project RootDir
        project_root_directory = os.path.dirname(debugtalk_path)
    else:
        # debugtalk.py not found: fall back to the current working directory
        project_root_directory = os.getcwd()

    return debugtalk_path, project_root_directory
def load_debugtalk_functions() -> Dict[Text, Callable]:
    """ load project debugtalk.py module functions

    debugtalk.py should be located in the project root directory (which is
    already on sys.path). Exits the process if the module fails to import.

    Returns:
        dict: debugtalk module functions mapping, e.g.

        {
            "func1_name": func1,
            "func2_name": func2
        }
    """
    try:
        imported_module = importlib.import_module("debugtalk")
    except Exception as ex:
        logger.error(f"error occurred in debugtalk.py: {ex}")
        sys.exit(1)

    # reload to refresh a previously imported debugtalk module
    imported_module = importlib.reload(imported_module)
    return load_module_functions(imported_module)
def load_hooks_functions_by_path(hooks_paths: List = None) -> Dict[Text, Callable]:
    """Import the hook modules listed in *hooks_paths* and merge their
    functions into one mapping. Exits the process on any import error."""
    batch_hooks_functions = {}
    try:
        for path in hooks_paths or []:
            if path != "":
                # e.g. "hooks/foo.py" -> "hooks.foo"
                module_name = path.replace("/", ".").split(".py")[0]
                imported_module = importlib.import_module(module_name)
                batch_hooks_functions.update(load_module_functions(imported_module))
    except Exception as ex:
        logger.error(f"error occurred in hooks.py: {ex}")
        sys.exit(1)
    return batch_hooks_functions
def load_hooks_functions_default(start_path: Text = None) -> Dict[Text, Callable]:
    """Load hook functions from every .py file directly under
    <start_path>/hooks. Exits the process on any import error."""
    batch_hooks_functions = {}
    try:
        hooks_dir = os.path.join(start_path, "hooks")
        if os.path.exists(hooks_dir):
            for entry in os.listdir(hooks_dir):
                entry_path = os.path.join(hooks_dir, entry)
                if os.path.isfile(entry_path):
                    # "foo.py" -> module "hooks.foo"
                    module_name = entry.split(".py")[0]
                    imported_module = importlib.import_module(f"hooks.{module_name}")
                    batch_hooks_functions.update(load_module_functions(imported_module))
    except Exception as ex:
        logger.error(f"error occurred in hooks.py: {ex}")
        sys.exit(1)
    return batch_hooks_functions
def load_self_functions_default(start_path: Text = None, test_path: Text = None) -> Dict[Text, Callable]:
    """
    load project self module functions

    Loads the functions defined in the test .py file itself (the file at
    test_path), importing it as a module relative to start_path.
    Returns {} when test_path is not an existing .py file; exits the
    process on any import error.
    """
    try:
        # pick the platform's path separator for the string surgery below
        separator = "/"
        if sys.platform.startswith("win"):
            separator = "\\"
        if os.path.isabs(test_path) is False:
            test_path = os.path.abspath(test_path)
        if os.path.exists(test_path) and test_path.endswith(".py"):
            batch_self_functions = {}
            # ensure start_path ends with a separator so the split below
            # yields a path relative to it
            if start_path[-1] != separator:
                start_path = start_path + separator
            # NOTE(review): assumes test_path lies under start_path;
            # otherwise split()[1] raises IndexError (caught -> sys.exit)
            self_py_dir = test_path.split(start_path)[1]
            # "sub/case.py" -> importable module name "sub.case"
            path = self_py_dir.replace(separator, ".").split(".py")[0]
            imported_module = importlib.import_module(path)
            batch_self_functions.update(load_module_functions(imported_module))
            return batch_self_functions
    except Exception as ex:
        logger.error(f"error occurred in {test_path}: {ex}")
        sys.exit(1)
    return {}
def load_project_meta(test_path: Text, reload: bool = False) -> ProjectMeta:
    """ load testcases, .env, debugtalk.py functions.
    testcases folder is relative to project_root_directory
    by default, project_meta will be loaded only once, unless set reload to true.
    Args:
        test_path (str): test file/folder path, locate project RootDir from this path.
        reload: reload project meta if set true, default to false
    Returns:
        project loaded api/testcases definitions,
        environments and debugtalk.py functions.
    """
    global project_meta
    if project_meta and (not reload):
        if not test_path:
            return project_meta
        # Cached meta: still merge the current test file's own functions so
        # per-file helpers are available on every call.
        debugtalk_path, project_root_directory = locate_project_root_directory(test_path)
        self_functions = load_self_functions_default(project_root_directory, test_path)
        project_meta.functions.update(self_functions)
        return project_meta
    project_meta = ProjectMeta()
    if not test_path:
        return project_meta
    if sys.platform.startswith("win"):
        # normalize path separators for Windows
        test_path = test_path.replace("/", "\\")
    debugtalk_path, project_root_directory = locate_project_root_directory(test_path)
    # add project RootDir to sys.path
    sys.path.insert(0, project_root_directory)
    # load .env file
    # NOTICE:
    # environment variable maybe loaded in debugtalk.py
    # thus .env file should be loaded before loading debugtalk.py
    dot_env_path = os.path.join(project_root_directory, ".env")
    dot_env = load_dot_env_file(dot_env_path)
    global_config_path = os.path.join(project_root_directory, "config.json")
    global_config = loads_json(global_config_path)
    hooks_functions = {}
    hooks_path = None
    datasource = {}
    if dot_env:
        project_meta.env = dot_env
        project_meta.dot_env_path = dot_env_path
    if global_config:
        # config.json may point at explicit hook modules and middleware
        # datasource settings.
        if "hooks" in global_config:
            if "paths" in global_config["hooks"]:
                hooks_path = global_config["hooks"]["paths"]
                hooks_functions = load_hooks_functions_by_path(hooks_path)
        if "middleware" in global_config:
            if "datasource" in global_config["middleware"]:
                datasource = global_config["middleware"]["datasource"]
    # load {hooks}.py functions
    if hooks_functions == {}:
        hooks_functions = load_hooks_functions_default(project_root_directory)
    else:
        hooks_functions.update(load_hooks_functions_default(project_root_directory))
    if debugtalk_path:
        # load debugtalk.py functions
        debugtalk_functions = load_debugtalk_functions()
    else:
        debugtalk_functions = {}
    self_functions = load_self_functions_default(project_root_directory, test_path)
    # locate project RootDir and load debugtalk.py functions
    # NOTE: later updates win -- hooks override debugtalk, self overrides both.
    project_meta.RootDir = project_root_directory
    project_meta.functions = debugtalk_functions
    project_meta.functions.update(hooks_functions)
    project_meta.functions.update(self_functions)
    project_meta.debugtalk_path = debugtalk_path
    project_meta.custom_path = hooks_path
    project_meta.self_path = test_path
    project_meta.datasource = datasource
    return project_meta
def convert_relative_project_root_dir(abs_path: Text) -> Text:
    """ convert absolute path to relative path, based on project_meta.RootDir
    Args:
        abs_path: absolute path
    Returns: relative path based on project_meta.RootDir
    Raises:
        exceptions.ParamsError: if abs_path is not under project_meta.RootDir
    """
    _project_meta = load_project_meta(abs_path)
    if not abs_path.startswith(_project_meta.RootDir):
        raise exceptions.ParamsError(
            f"failed to convert absolute path to relative path based on project_meta.RootDir\n"
            f"abs_path: {abs_path}\n"
            f"project_meta.RootDir: {_project_meta.RootDir}"
        )
    # +1 strips the path separator between RootDir and the relative part
    return abs_path[len(_project_meta.RootDir) + 1:]
from elasticsearch5 import Elasticsearch, Transport
from loguru import logger
class ESHandler(Elasticsearch):
    """Elasticsearch 5.x client with convenience search/update/delete helpers.

    All helpers run against ``self.index_name``, taken from the ``index``
    kwarg at construction time (default ``cms_season_test``).
    """

    def __init__(self, hosts=None, transport_class=Transport, **kwargs):
        """
        :arg hosts: list of nodes we should connect to. Node should be a
            dictionary ({"host": "localhost", "port": 9200}), the entire dictionary
            will be passed to the :class:`~elasticsearch.Connection` class as
            kwargs, or a string in the format of ``host[:port]`` which will be
            translated to a dictionary automatically. If no value is given the
            :class:`~elasticsearch.Urllib3HttpConnection` class defaults will be used.
        :arg transport_class: :class:`~elasticsearch.Transport` subclass to use.
        :arg kwargs: any additional arguments will be passed on to the
            :class:`~elasticsearch.Transport` class and, subsequently, to the
            :class:`~elasticsearch.Connection` instances.
        """
        if "index" in kwargs and kwargs["index"] is not None:
            self.index_name = kwargs["index"]
        else:
            self.index_name = "cms_season_test"
        super().__init__(hosts, transport_class, **kwargs)

    @staticmethod
    def _ids_body(id):
        # Query body selecting a single document by its _id.
        return {"query": {"ids": {"type": "_doc", "values": [id]}}}

    def term_search(self, condition):
        """Exact (term) query; return the raw search response."""
        body = {"query": {"term": condition}}
        return self.search(index=self.index_name, body=body)

    def term_searchHits(self, condition):
        """Exact (term) query; return the first hit, or None when empty.

        For text fields match against the keyword sub-field, e.g.
        ``{"name.keyword": "..."}``.
        """
        result = self.term_search(condition)
        logger.debug(result)
        hits = result["hits"]["hits"]
        # Reuse the response instead of re-issuing the same query for the hit.
        return hits[0] if hits else None

    def match_search(self, condition):
        """Full-text (match) query; return the raw search response."""
        body = {"query": {"match": condition}}
        result = self.search(index=self.index_name, body=body)
        logger.debug(result)
        return result

    def match_searchHits(self, condition):
        """Full-text (match) query; return the hit list, or None when empty.

        Bug fix: the original indexed ``[0]`` on the *empty* hit list in the
        no-result branch, which always raised IndexError.
        """
        result = self.match_search(condition)
        hits = result["hits"]["hits"]
        return hits if hits else None

    def searchById(self, id):
        """Return the raw search response for a single document id."""
        return self.search(index=self.index_name, body=self._ids_body(id))

    def searchHitsById(self, id):
        """Return the first hit for ``id``, or the empty hit list if absent."""
        hits = self.searchById(id)["hits"]["hits"]
        return hits[0] if hits else hits

    def term_delete(self, condition):
        """Delete all documents matching an exact (term) query."""
        body = {"query": {"term": condition}}
        result = self.delete_by_query(index=self.index_name, body=body)
        logger.debug(result)
        return result

    def match_delete(self, condition):
        """Delete documents matching a match query; warn when none deleted."""
        body = {"query": {"match": condition}}
        result = self.delete_by_query(index=self.index_name, body=body)
        logger.debug(result)
        if result["deleted"] == 0:
            logger.warning("删除失败")
        return result

    def deleteById(self, id):
        """Delete a single document by id."""
        result = self.delete_by_query(index=self.index_name, body=self._ids_body(id))
        logger.debug(result)
        return result

    def _update_first_hit(self, data, field, value, empty_msg):
        # Shared update path: patch ``field`` of the first hit with ``value``.
        if not data["hits"]["hits"]:
            logger.warning(empty_msg)
            return None
        hit = data["hits"]["hits"][0]
        source = hit["_source"]
        # ``field`` is an item-access suffix such as '["a"]["b"]'.
        # NOTE(review): exec on a caller-supplied string -- only acceptable
        # for trusted test code.
        exec(f"""content{field} =value""", {"content": source, "value": value})
        result = self.update(
            index=self.index_name, doc_type="_doc", id=hit["_id"], body={"doc": source}
        )
        logger.debug(result)
        if result["_shards"]["successful"] == 0:
            logger.warning("更新失败")
        return result

    def term_update(self, condition, field, value):
        """Update the first document matching an exact (term) query.

        Bug fix: the original passed the already-wrapped query body back into
        :meth:`term_search`, which wraps again -- double-wrapping the query.
        """
        body = {"query": {"term": condition}}
        data = self.term_search(condition)
        return self._update_first_hit(
            data, field, value, f"根据{body}查询的数据为空,更新失败"
        )

    def updateById(self, id, field, value):
        """Update ``field`` of the document with the given id."""
        data = self.searchById(id)
        return self._update_first_hit(data, field, value, f"id为{id}的数据为空,更新失败")
import re
from typing import Text, Any, Union, Dict
from deepdiff import DeepDiff
from jsonschema import validate
from loguru import logger
def equal(check_value: Any, expect_value: Any, message: Text = ""):
    """Assert ``check_value == expect_value``."""
    assert check_value == expect_value, message
def greater_than(
    check_value: Union[int, float], expect_value: Union[int, float], message: Text = ""
):
    """Assert ``check_value > expect_value``."""
    assert check_value > expect_value, message
def less_than(
    check_value: Union[int, float], expect_value: Union[int, float], message: Text = ""
):
    """Assert ``check_value < expect_value``."""
    assert check_value < expect_value, message
def greater_or_equals(
    check_value: Union[int, float], expect_value: Union[int, float], message: Text = ""
):
    """Assert ``check_value >= expect_value``."""
    assert check_value >= expect_value, message
def less_or_equals(
    check_value: Union[int, float], expect_value: Union[int, float], message: Text = ""
):
    """Assert ``check_value <= expect_value``."""
    assert check_value <= expect_value, message
def not_equal(check_value: Any, expect_value: Any, message: Text = ""):
    """Assert ``check_value != expect_value``."""
    assert check_value != expect_value, message
def string_equals(check_value: Text, expect_value: Any, message: Text = ""):
    """Assert both values are equal after ``str()`` coercion."""
    assert str(check_value) == str(expect_value), message
def length_equal(check_value: Text, expect_value: int, message: Text = ""):
    """Assert ``len(check_value) == expect_value`` (expect_value must be int)."""
    assert isinstance(expect_value, int), "expect_value should be int type"
    assert len(check_value) == expect_value, message
def length_greater_than(
    check_value: Text, expect_value: Union[int, float], message: Text = ""
):
    """Assert ``len(check_value) > expect_value`` (expect_value must be numeric)."""
    assert isinstance(
        expect_value, (int, float)
    ), "expect_value should be int/float type"
    assert len(check_value) > expect_value, message
def length_greater_or_equals(
    check_value: Text, expect_value: Union[int, float], message: Text = ""
):
    """Assert ``len(check_value) >= expect_value`` (expect_value must be numeric)."""
    assert isinstance(
        expect_value, (int, float)
    ), "expect_value should be int/float type"
    assert len(check_value) >= expect_value, message
def length_less_than(
    check_value: Text, expect_value: Union[int, float], message: Text = ""
):
    """Assert ``len(check_value) < expect_value`` (expect_value must be numeric)."""
    assert isinstance(
        expect_value, (int, float)
    ), "expect_value should be int/float type"
    assert len(check_value) < expect_value, message
def length_less_or_equals(
    check_value: Text, expect_value: Union[int, float], message: Text = ""
):
    """Assert ``len(check_value) <= expect_value`` (expect_value must be numeric)."""
    assert isinstance(
        expect_value, (int, float)
    ), "expect_value should be int/float type"
    assert len(check_value) <= expect_value, message
def contains(check_value: Any, expect_value: Any, message: Text = ""):
    """Assert that ``expect_value`` is contained in ``check_value``.

    ``check_value`` must be a container (list/tuple/dict/str/bytes).
    """
    # Bug fix: the isinstance check is on check_value, but the original
    # assertion message blamed expect_value.
    assert isinstance(
        check_value, (list, tuple, dict, str, bytes)
    ), "check_value should be list/tuple/dict/str/bytes type"
    assert expect_value in check_value, message
def contained_by(check_value: Any, expect_value: Any, message: Text = ""):
    """Assert that ``check_value`` is a member of container ``expect_value``."""
    assert isinstance(
        expect_value, (list, tuple, dict, str, bytes)
    ), "expect_value should be list/tuple/dict/str/bytes type"
    assert check_value in expect_value, message
def type_match(check_value: Any, expect_value: Any, message: Text = ""):
    """Assert that ``check_value``'s exact type matches ``expect_value``.

    ``expect_value`` may be a type object or a builtin type name string.
    ``"None"``/``"NoneType"``/``None`` assert that ``check_value`` is None.

    Raises:
        ValueError: if ``expect_value`` names no builtin or is neither a
            type nor a string.
    """
    import builtins

    def get_type(name):
        if isinstance(name, type):
            return name
        elif isinstance(name, str):
            # Bug fix: ``__builtins__`` is a dict in imported modules but a
            # module in __main__; the ``builtins`` module works everywhere.
            try:
                return getattr(builtins, name)
            except AttributeError:
                raise ValueError(name)
        else:
            raise ValueError(name)

    if expect_value in ["None", "NoneType", None]:
        assert check_value is None, message
    else:
        # Exact type equality on purpose (e.g. bool should not match int).
        assert type(check_value) == get_type(expect_value), message
def regex_match(check_value: Text, expect_value: Any, message: Text = ""):
    """Assert ``check_value`` matches pattern ``expect_value``.

    Uses ``re.match``, i.e. the pattern is anchored at the start only.
    """
    assert isinstance(expect_value, str), "expect_value should be Text type"
    assert isinstance(check_value, str), "check_value should be Text type"
    assert re.match(expect_value, check_value), message
def startswith(check_value: Any, expect_value: Any, message: Text = ""):
    """Assert ``str(check_value)`` starts with ``str(expect_value)``."""
    assert str(check_value).startswith(str(expect_value)), message
def endswith(check_value: Text, expect_value: Any, message: Text = ""):
    """Assert ``str(check_value)`` ends with ``str(expect_value)``."""
    assert str(check_value).endswith(str(expect_value)), message
def diff(t1: Dict, t2: Dict, kwargs=None):
    """Assert that two dicts are identical according to DeepDiff.

    ``kwargs`` are forwarded to DeepDiff, except ``validate_value`` (default
    False): unless set, value and type differences are ignored and only
    structural differences (added/removed items) fail the assertion.
    """
    if kwargs is None:
        kwargs = {}
    validate_value = kwargs.pop("validate_value", False)
    info = dict(DeepDiff(t1, t2, **kwargs))
    # 不校验value -- drop value/type diffs unless explicitly requested
    if not validate_value:
        info.pop("values_changed", None)
        info.pop("type_changes", None)
    if info != {}:
        logger.error(info)
    assert info == {}
def schema(json_data, _schema):
    """Validate ``json_data`` against a JSON Schema; raises on mismatch."""
    validate(instance=json_data, schema=_schema)
import json
import sys
from json.decoder import JSONDecodeError
from urllib.parse import unquote
import yaml
from loguru import logger
def load_har_log_entries(file_path):
    """Read a HAR file and return its ``log.entries`` list.

    Each entry is a dict with ``request`` and ``response`` keys. The process
    exits when the file is not valid JSON or lacks ``log.entries``.
    """
    with open(file_path, mode="rb") as f:
        try:
            har = json.load(f)
            return har["log"]["entries"]
        except (TypeError, JSONDecodeError) as ex:
            logger.error(f"failed to load HAR file {file_path}: {ex}")
            sys.exit(1)
        except KeyError:
            logger.error(f"log entries not found in HAR file: {har}")
            sys.exit(1)
def x_www_form_urlencoded(post_data):
    """Serialize a dict as ``key=value&...``; pass non-dicts through unchanged.

    Example: ``{"a": 1, "b": 2}`` -> ``"a=1&b=2"``.
    """
    if not isinstance(post_data, dict):
        return post_data
    return "&".join("{}={}".format(key, value) for key, value in post_data.items())
def convert_x_www_form_urlencoded_to_dict(post_data):
    """Parse ``a=1&b=2`` form data into a dict; pass non-strings through.

    Values are percent-decoded with ``unquote``. Each pair is split on the
    first ``=`` only (bug fix), so values containing ``=`` (e.g. base64
    payloads) survive intact.

    Raises:
        Exception: if a pair contains no ``=`` at all.
    """
    if not isinstance(post_data, str):
        return post_data
    converted_dict = {}
    for k_v in post_data.split("&"):
        try:
            # maxsplit=1: only the first "=" separates key from value
            key, value = k_v.split("=", 1)
        except ValueError:
            raise Exception(
                "Invalid x_www_form_urlencoded data format: {}".format(post_data)
            )
        converted_dict[key] = unquote(value)
    return converted_dict
def convert_list_to_dict(origin_list):
    """Collapse HAR name/value records into a plain mapping.

    ``[{"name": "v", "value": "1"}]`` -> ``{"v": "1"}``; a record without a
    ``value`` key maps to None.
    """
    mapping = {}
    for record in origin_list:
        mapping[record["name"]] = record.get("value")
    return mapping
def dump_yaml(testcase, yaml_file):
    """ dump HAR entries to yaml testcase
    """
    logger.info("dump testcase to YAML format.")
    # allow_unicode so non-ASCII testcase content is written verbatim
    with open(yaml_file, "w", encoding="utf-8") as outfile:
        yaml.dump(
            testcase, outfile, allow_unicode=True, default_flow_style=False, indent=4
        )
    logger.info("Generate YAML testcase successfully: {}".format(yaml_file))
def dump_json(testcase, json_file):
    """ dump HAR entries to json testcase
    """
    logger.info("dump testcase to JSON format.")
    with open(json_file, "w", encoding="utf-8") as outfile:
        # json.dumps always returns str on Python 3; the old bytes-decode
        # branch was dead Python 2 code and has been removed.
        outfile.write(json.dumps(testcase, ensure_ascii=False, indent=4))
    logger.info("Generate JSON testcase successfully: {}".format(json_file))
import os
import sys
from typing import Text, NoReturn
from loguru import logger
from rrtv_httprunner.models import TStep, FunctionsMapping
from rrtv_httprunner.parser import parse_variables_mapping
# Optional upload dependencies: when missing, UPLOAD_READY stays False and
# ensure_upload_ready() exits with installation instructions.
try:
    import filetype
    from requests_toolbelt import MultipartEncoder
    UPLOAD_READY = True
except ModuleNotFoundError:
    UPLOAD_READY = False
def ensure_upload_ready():
    """Exit the process with an install hint if upload deps are missing."""
    if UPLOAD_READY:
        return

    msg = """
    uploader extension dependencies uninstalled, install first and try again.
    install with pip:
    $ pip install requests_toolbelt filetype
    or you can install rrtv_httprunner with optional upload dependencies:
    $ pip install "rrtv_httprunner[upload]"
    """
    logger.error(msg)
    sys.exit(1)
def prepare_upload_step(step: TStep, functions: FunctionsMapping) -> "NoReturn":
    """Rewrite a teststep's ``upload`` spec into a MultipartEncoder request.

    Every upload entry becomes a step variable, an ``m_encoder`` variable is
    added that builds the encoder from those variables, and the request's
    Content-Type header and data are pointed at it. No-op when the step has
    no upload spec.

    Args:
        step: teststep whose ``request.upload`` maps field names to values
            (file paths or plain form values).
        functions: functions mapping used to resolve the variable templates.
    """
    if not step.request.upload:
        return
    ensure_upload_ready()

    # Expose every upload field as a step variable referenced as $key
    for key, value in step.request.upload.items():
        step.variables[key] = value
    params_str = ", ".join(f"{key}=${key}" for key in step.request.upload)
    step.variables["m_encoder"] = "${multipart_encoder(" + params_str + ")}"

    # parse variables
    step.variables = parse_variables_mapping(step.variables, functions)

    step.request.headers["Content-Type"] = "${multipart_content_type($m_encoder)}"
    step.request.data = "$m_encoder"
def multipart_encoder(**kwargs):
    """ initialize MultipartEncoder with uploading fields.

    Each kwarg value that resolves to an existing file (absolute, or relative
    to the project RootDir) is attached as a file part; anything else is sent
    as a plain form field.

    Returns:
        MultipartEncoder: initialized MultipartEncoder object
    """

    def get_filetype(file_path):
        # Guess MIME type from file content; fall back to text/html.
        file_type = filetype.guess(file_path)
        if file_type:
            return file_type.mime
        else:
            return "text/html"

    ensure_upload_ready()
    fields_dict = {}
    for key, value in kwargs.items():
        if os.path.isabs(value):
            # value is absolute file path
            _file_path = value
            is_exists_file = os.path.isfile(value)
        else:
            # value is not absolute file path, check if it is relative file path
            from rrtv_httprunner.loader import load_project_meta

            project_meta = load_project_meta("")
            _file_path = os.path.join(project_meta.RootDir, value)
            is_exists_file = os.path.isfile(_file_path)
        if is_exists_file:
            # value is file path to upload
            filename = os.path.basename(_file_path)
            mime_type = get_filetype(_file_path)
            # TODO: fix ResourceWarning for unclosed file
            # The handle must stay open while MultipartEncoder streams it;
            # closing it here would break the upload.
            file_handler = open(_file_path, "rb")
            fields_dict[key] = (filename, file_handler, mime_type)
        else:
            # non-file values are sent as plain form fields
            fields_dict[key] = value
    return MultipartEncoder(fields=fields_dict)
def multipart_content_type(m_encoder) -> Text:
    """ prepare Content-Type for request headers
    Args:
        m_encoder: MultipartEncoder object
    Returns:
        content type string including the multipart boundary
    """
    ensure_upload_ready()
    return m_encoder.content_type
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import shutil
import subprocess
from tempfile import NamedTemporaryFile
import reciprocalspaceship as rs
def parse_arguments():
    """Construct the command-line parser for the scaleit wrapper.

    Note: returns the parser itself; callers invoke ``.parse_args()``.
    """
    p = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter, description=__doc__
    )
    p.add_argument(
        "-r", "--refmtz",
        nargs=3,
        metavar=("ref", "data_col", "sig_col"),
        required=True,
        help=(
            "MTZ to be used as reference for scaling using given data columns. "
            "Specified as (filename, F, SigF) or (filename, I, SigI)"
        ),
    )
    p.add_argument(
        "-i", "--inputmtz",
        nargs=3,
        metavar=("mtz", "data_col", "sig_col"),
        action="append",
        required=True,
        help=(
            "MTZ to be scaled to reference using given data columns. "
            "Specified as (filename, F, SigF) or (filename, I, SigI)"
        ),
    )
    p.add_argument(
        "-o", "--outfile",
        default="scaled.mtz",
        help="MTZ file to which scaleit output will be written",
    )
    return p
def load_mtz(mtzpath, data_col, sig_col):
    """Load an MTZ and return canonical F/SIGF columns.

    Intensity columns are French-Wilson scaled first; amplitude columns are
    passed through. Raises ValueError when the columns have the wrong dtypes.
    """
    mtz = rs.read_mtz(mtzpath)

    # Validate column dtypes up front
    if not isinstance(
        mtz[data_col].dtype, (rs.StructureFactorAmplitudeDtype, rs.IntensityDtype)
    ):
        raise ValueError(
            f"{data_col} must specify an intensity or |F| column in {mtzpath}"
        )
    if not isinstance(mtz[sig_col].dtype, rs.StandardDeviationDtype):
        raise ValueError(
            f"{sig_col} must specify a standard deviation column in {mtzpath}"
        )

    if isinstance(mtz[data_col].dtype, rs.IntensityDtype):
        # Intensities: French-Wilson scale, then expose FW columns as F/SIGF
        scaled = rs.algorithms.scale_merged_intensities(
            mtz, data_col, sig_col, mean_intensity_method="anisotropic"
        )
        out = scaled.loc[:, ["FW-F", "FW-SIGF"]]
        out.rename(columns={"FW-F": "F", "FW-SIGF": "SIGF"}, inplace=True)
    else:
        # Amplitudes: pass through under canonical names
        out = mtz.loc[:, [data_col, sig_col]]
        out.rename(columns={data_col: "F", sig_col: "SIGF"}, inplace=True)
    return out
def run_scaleit(joined, outfile, n_mtzs):
    """
    Run scaleit on given data
    Parameters
    ----------
    joined : rs.DataSet
        Dataset with reference and input columns, written to a temporary MTZ
    outfile : filename, str
        Filename for scaled MTZ output
    n_mtzs : int
        Number of datasets being scaled to reference
    """
    # Column assignments FPH1..FPHn for scaleit's LABIN card
    columns = [f"FPH{i}=FPH{i} SIGFPH{i}=SIGFPH{i}" for i in range(1, n_mtzs + 1)]
    labin = " ".join(columns)
    with NamedTemporaryFile(suffix=".mtz") as tmp:
        joined.write_mtz(tmp.name)
        # NOTE(review): shell=True with interpolated filenames -- tmp.name is
        # trusted, but `outfile` comes from the CLI; do not pass untrusted
        # values here.
        subprocess.call(
            f"scaleit HKLIN {tmp.name} HKLOUT {outfile} <<EOF\nrefine anisotropic\nLABIN FP=FP SIGFP=SIGFP {labin}\nEOF",
            shell=True,
        )
    return
def main():
    """CLI entry point: load, merge on common reflections, and run scaleit."""
    # Parse commandline arguments
    args = parse_arguments().parse_args()

    # Test whether scaleit is on PATH
    if shutil.which("scaleit") is None:
        raise EnvironmentError(
            "Cannot find executable, scaleit. Please set up your CCP4 environment."
        )

    # Load reference
    ref = load_mtz(*args.refmtz)
    ref.rename(columns={"F": "FP", "SIGF": "SIGFP"}, inplace=True)

    # Load input datasets, numbering columns FPH1.., SIGFPH1..
    mtzs = []
    for i, inputmtz in enumerate(args.inputmtz, 1):
        mtz = load_mtz(*inputmtz)
        mtz.rename(columns={"F": f"FPH{i}", "SIGF": f"SIGFPH{i}"}, inplace=True)
        mtzs.append(mtz)

    # Join on common Miller indices
    common = None
    for mtz in mtzs:
        if common is None:
            # seed with reflections shared by the reference and first dataset
            common = ref.index.intersection(mtz.index)
        else:
            common = common.intersection(mtz.index)
    common = common.sort_values()
    print(f"Number of common reflections: {len(common)}")
    mtzs = [mtz.loc[common] for mtz in mtzs]
    joined = rs.concat([ref.loc[common]] + mtzs, axis=1)

    # Run scaleit
    run_scaleit(joined, args.outfile, len(mtzs))
# Script entry point (extraction junk removed from this line)
if __name__ == "__main__":
    main()
import argparse
import numpy as np
import reciprocalspaceship as rs
from rsbooster.diffmaps.weights import compute_weights
from rsbooster.utils.io import subset_to_FSigF
def parse_arguments():
    """Build the command-line parser for isomorphous difference maps.

    Note: returns the parser; the caller invokes ``.parse_args()``.
    """
    p = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter, description=__doc__
    )

    # Required arguments
    p.add_argument(
        "-on", "--onmtz",
        nargs=3, metavar=("mtz", "data_col", "sig_col"), required=True,
        help=("MTZ to be used as `on` data. Specified as (filename, F, SigF)"),
    )
    p.add_argument(
        "-off", "--offmtz",
        nargs=3, metavar=("mtz", "data_col", "sig_col"), required=True,
        help=("MTZ to be used as `off` data. Specified as (filename, F, SigF)"),
    )
    p.add_argument(
        "-r", "--refmtz",
        nargs=2, metavar=("ref", "phi_col"), required=True,
        help=(
            "MTZ containing isomorphous phases to be used. "
            "Specified as (filename, Phi)."
        ),
    )

    # Optional arguments
    p.add_argument(
        "-a", "--alpha",
        type=float, default=0.0,
        help="alpha value for computing difference map weights (default=0.0)",
    )
    p.add_argument(
        "-d", "--dmax",
        type=float, default=None,
        help="If set, dmax to truncate difference map",
    )
    p.add_argument(
        "-o", "--outfile", default="diffmap.mtz", help="Output MTZ filename"
    )
    return p
def main():
    """Compute a weighted on-minus-off isomorphous difference map MTZ."""
    args = parse_arguments().parse_args()
    refmtz, phi_col = args.refmtz

    # Read MTZ files and reduce to canonical F/SigF columns
    onmtz = subset_to_FSigF(*args.onmtz, {args.onmtz[1]: "F", args.onmtz[2]: "SigF"})
    offmtz = subset_to_FSigF(
        *args.offmtz, {args.offmtz[1]: "F", args.offmtz[2]: "SigF"}
    )

    ref = rs.read_mtz(refmtz)
    ref.rename(columns={phi_col: "Phi"}, inplace=True)
    ref = ref.loc[:, ["Phi"]]
    if not isinstance(ref["Phi"].dtype, rs.PhaseDtype):
        # Bug fix: the original message referenced nonexistent attributes
        # (args.Phi / args.mtz2), raising AttributeError instead of this error.
        raise ValueError(
            f"{phi_col} is not a phases column in {refmtz}. Try again."
        )

    # Difference structure factors with propagated uncertainties
    diff = onmtz.merge(offmtz, on=["H", "K", "L"], suffixes=("_on", "_off"))
    diff["DF"] = diff["F_on"] - diff["F_off"]
    diff["SigDF"] = np.sqrt((diff["SigF_on"] ** 2) + (diff["SigF_off"] ** 2))

    # Compute weights
    diff["W"] = compute_weights(diff["DF"], diff["SigDF"], alpha=args.alpha)
    diff["W"] = diff["W"].astype("Weight")

    # Join with phases on common Miller indices and write map
    common = diff.index.intersection(ref.index).sort_values()
    diff = diff.loc[common]
    diff["Phi"] = ref.loc[common, "Phi"]
    diff.infer_mtz_dtypes(inplace=True)

    # Useful for PyMOL
    diff["wDF"] = (diff["DF"] * diff["W"]).astype("SFAmplitude")

    if args.dmax is not None:
        # optional high-resolution truncation
        diff = diff.loc[diff.compute_dHKL()["dHKL"] < args.dmax]
    diff.write_mtz(args.outfile)
# Script entry point (extraction junk removed from this line)
if __name__ == "__main__":
    main()
import argparse
import numpy as np
import reciprocalspaceship as rs
import gemmi
from rsbooster.diffmaps.weights import compute_weights
from rsbooster.utils.io import subset_to_FSigF
def parse_arguments():
    """Assemble the CLI parser for internal difference maps.

    Note: returns the parser; the caller invokes ``.parse_args()``.
    """
    p = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter, description=__doc__
    )

    # Required arguments
    p.add_argument(
        "-i", "--inputmtz",
        nargs=3, metavar=("mtz", "data_col", "sig_col"), required=True,
        help=(
            "MTZ to be used for internal difference map. "
            "Specified as (filename, F, SigF)"
        ),
    )
    p.add_argument(
        "-r", "--refmtz",
        nargs=2, metavar=("ref", "phi_col"), required=True,
        help=(
            "MTZ containing isomorphous phases to be used. "
            "Specified as (filename, Phi)."
        ),
    )
    p.add_argument(
        "-op", "--symop",
        required=True,
        help=(
            "Symmetry operation to use to compute internal difference map. "
            "Can be given as ISYM if used with a `spacegroup` argument."
        ),
    )

    # Optional arguments
    p.add_argument(
        "-a", "--alpha",
        type=float, default=0.0,
        help="alpha value for computing difference map weights (default=0.0)",
    )
    p.add_argument(
        "-d", "--dmax",
        type=float, default=None,
        help="If set, dmax to truncate difference map",
    )
    p.add_argument(
        "-sg", "--spacegroup",
        help="Spacegroup to use for symmetry operation (only necessary if `op` specifies an ISYM).",
    )
    p.add_argument(
        "-o", "--outfile", default="internal_diffmap.mtz", help="Output MTZ filename"
    )
    return p
def main():
    """Compute a weighted internal difference map across a symmetry operation."""
    args = parse_arguments().parse_args()
    refmtz, phi_col = args.refmtz

    # Read MTZ files
    mtz = subset_to_FSigF(
        *args.inputmtz, {args.inputmtz[1]: "F", args.inputmtz[2]: "SigF"}
    )
    ref = rs.read_mtz(refmtz)

    # Canonicalize column names
    ref.rename(columns={phi_col: "Phi"}, inplace=True)
    ref = ref[["Phi"]]

    # Error checking of datatypes
    if not isinstance(ref["Phi"].dtype, rs.PhaseDtype):
        # Bug fix: the original f-string referenced nonexistent attributes
        # (args.Phi / args.mtz2) and raised AttributeError instead.
        raise ValueError(
            f"{phi_col} is not a phases column in {refmtz}. Try again."
        )

    # Interpret --op either as an ISYM index (with --spacegroup) or as a
    # gemmi triplet such as "x,y,z".
    try:
        isym = int(args.symop)
        sg = gemmi.SpaceGroup(args.spacegroup)
        op = sg.operations().sym_ops[isym]
    except ValueError:
        op = gemmi.Operation(args.symop)

    # Compare each reflection with its symmetry mate
    internal = mtz.merge(
        mtz.apply_symop(op).hkl_to_asu(), on=["H", "K", "L"], suffixes=("1", "2")
    )
    internal["DF"] = internal["F1"] - internal["F2"]
    internal["SigDF"] = np.sqrt((internal["SigF1"] ** 2) + (internal["SigF2"] ** 2))

    # Compute weights
    internal["W"] = compute_weights(internal["DF"], internal["SigDF"], alpha=args.alpha)
    internal["W"] = internal["W"].astype("Weight")

    # Join with phases on common Miller indices and write map
    common = internal.index.intersection(ref.index).sort_values()
    internal = internal.loc[common]
    internal["Phi"] = ref.loc[common, "Phi"]
    internal.infer_mtz_dtypes(inplace=True)

    # Useful for PyMOL
    internal["wDF"] = (internal["DF"] * internal["W"]).astype("SFAmplitude")

    if args.dmax is not None:
        # optional high-resolution truncation
        internal = internal.loc[internal.compute_dHKL()["dHKL"] < args.dmax]
    internal.write_mtz(args.outfile)
# Script entry point (extraction junk removed from this line)
if __name__ == "__main__":
    main()
from argparse import ArgumentParser
import reciprocalspaceship as rs
def parse_arguments():
    """Build the CLI parser for converting Precognition output to MTZ.

    Note: returns the parser; the caller invokes ``.parse_args()``.
    """
    # Typo fixes: "ingegration" -> "integration", "mergning" -> "merging".
    desc = """Convert precognition integration results to `.mtz` files for merging in Careless."""
    parser = ArgumentParser(description=desc)
    parser.add_argument(
        "--remove-sys-absences",
        action="store_true",
        default=False,
        help="Optionally remove systematic absences from the data according to --spacegroup or --spacegroup-for-absences if supplied.",
    )
    parser.add_argument(
        "--spacegroup-for-absences",
        type=str,
        default=None,
        help="Optionally use a different spacegroup to compute systematic absences. This may be useful for some EF-X data.",
    )
    parser.add_argument(
        "--spacegroup", type=str, required=True, help="The spacegroup of the data"
    )
    # Bug fix: the usage example previously showed "--spacegroup" for a cell.
    parser.add_argument(
        "--cell",
        type=float,
        required=True,
        nargs=6,
        help="The unit cell supplied as six floats. "
        "For example, --cell 34. 45. 98. 90. 90. 90.",
    )
    parser.add_argument(
        "ii_in",
        nargs="+",
        type=str,
        help="Precognition `.ii` file(s)",
    )
    parser.add_argument(
        "-o",
        "--mtz-out",
        type=str,
        default="integrated.mtz",
        help="Name of the output mtz file.",
    )
    return parser
def make_dataset(filenames, spacegroup, cell):
    """Concatenate Precognition `.ii` files into a single rs.DataSet.

    Files are sorted by name and assigned 1-based BATCH numbers in that
    order.

    Parameters
    ----------
    filenames : list or tuple
        Strings naming Precognition `ii` files.
    spacegroup : gemmi.SpaceGroup or similar
    cell : gemmi.UnitCell or similar

    Returns
    -------
    dataset : rs.DataSet
        Laue data from all files, with a BATCH column per input file.
    """
    parts = []
    for batch, filename in enumerate(sorted(filenames), start=1):
        part = rs.read_precognition(filename, spacegroup=spacegroup, cell=cell)
        part["BATCH"] = batch
        part["BATCH"] = part["BATCH"].astype(rs.BatchDtype())
        parts.append(part)
    return rs.concat(parts)
def main():
    """Entry point: convert Precognition `.ii` files into a single MTZ."""
    args = parse_arguments().parse_args()

    # argparse nargs="+" always yields a list, so the old str special-case
    # was dead code.
    filenames = args.ii_in

    ds = make_dataset(filenames, args.spacegroup, args.cell)

    if args.remove_sys_absences:
        # Optionally compute absences in an alternate spacegroup (useful for
        # some EF-X data), then restore the nominal spacegroup.
        absence_sg = args.spacegroup_for_absences
        if absence_sg is None:
            absence_sg = args.spacegroup
        ds.spacegroup = absence_sg
        ds.remove_absences(inplace=True)
        ds.spacegroup = args.spacegroup

    ds.write_mtz(args.mtz_out)
# Script entry point (extraction junk removed from this line)
if __name__ == "__main__":
    main()
import argparse
import matplotlib.pyplot as plt
import reciprocalspaceship as rs
import seaborn as sns
from rsbooster.stats.parser import BaseParser
class ArgumentParser(BaseParser):
    """Command-line parser for the CCanom crossvalidation analysis."""

    def __init__(self):
        super().__init__(
            description=__doc__
        )

        # Required arguments
        self.add_argument(
            "mtz",
            nargs="+",
            help="MTZs containing crossvalidation data from careless",
        )

        # Correlation method (rank-based spearman by default)
        self.add_argument(
            "-m",
            "--method",
            default="spearman",
            choices=["spearman", "pearson"],
            help=("Method for computing correlation coefficient (spearman or pearson)"),
        )
def make_halves_ccanom(mtz, bins=10):
    """Build anomalous-difference half-datasets for CCanom.

    Splits on the ``half`` column, forms DF = F(+) - F(-) in each half,
    merges the halves on Miller index and repeat, and assigns resolution
    bins. Returns the merged frame (DF1/DF2 columns) and the bin labels.
    """
    halves = []
    for h in (0, 1):
        half = mtz.loc[mtz.half == h].copy()
        half["DF"] = half["F(+)"] - half["F(-)"]
        halves.append(half)
    merged = halves[0][["DF", "repeat"]].merge(
        halves[1][["DF", "repeat"]], on=["H", "K", "L", "repeat"], suffixes=("1", "2")
    )
    merged, labels = merged.assign_resolution_bins(bins)
    return merged, labels
def analyze_ccanom_mtz(mtzpath, bins=10, return_labels=True, method="spearman"):
    """Compute CCanom per resolution bin from 2-fold cross-validation data.

    Args:
        mtzpath: path to a careless crossvalidation MTZ, or a loaded DataSet.
        bins: number of resolution bins.
        return_labels: also return the resolution-bin labels.
        method: correlation method ("spearman" or "pearson").
    """
    # Accept either a path or an already-loaded DataSet. Bug fix: use
    # isinstance (not ``type(...) is``) so DataSet subclasses are accepted.
    if isinstance(mtzpath, rs.dataset.DataSet):
        mtz = mtzpath
    else:
        mtz = rs.read_mtz(mtzpath)

    # Error handling -- make sure MTZ file is appropriate
    if "half" not in mtz.columns:
        raise ValueError("Please provide MTZs from careless crossvalidation")
    if "F(+)" not in mtz.columns:
        raise ValueError("Please provide MTZs merged with `--anomalous` in careless")

    # Acentric reflections with observations of both Friedel mates
    mtz = mtz.acentrics
    mtz = mtz.loc[(mtz["N(+)"] > 0) & (mtz["N(-)"] > 0)]

    # Bug fix: the ``bins`` argument was previously ignored.
    m, labels = make_halves_ccanom(mtz, bins)
    grouper = m.groupby(["bin", "repeat"])[["DF1", "DF2"]]
    result = (
        grouper.corr(method=method).unstack()[("DF1", "DF2")].to_frame().reset_index()
    )
    if return_labels:
        return result, labels
    return result
def run_analysis(args):
    """Compute CCanom for every input MTZ, then print/save and plot the result.

    ``args.output``, ``args.image`` and ``args.show`` are not declared in this
    module -- presumably contributed by BaseParser (verify against
    rsbooster.stats.parser).
    """
    results = []
    labels = None
    for m in args.mtz:
        result = analyze_ccanom_mtz(m, method=args.method)
        if result is None:
            continue
        else:
            # Tag each per-bin table with its source file for the plot legend.
            result[0]["filename"] = m
            results.append(result[0])
            labels = result[1]

    # Concatenate per-file tables without enforcing isomorphous cells.
    results = rs.concat(results, check_isomorphous=False)
    results = results.reset_index(drop=True)
    # Flatten the MultiIndex correlation column into a plain "CCanom" column.
    results["CCanom"] = results[("DF1", "DF2")]
    results.drop(columns=[("DF1", "DF2")], inplace=True)
    for k in ('bin', 'repeat'):
        results[k] = results[k].to_numpy('int32')

    if args.output is not None:
        results.to_csv(args.output)
    else:
        print(results.to_string())

    # print(results.info())

    sns.lineplot(
        data=results, x="bin", y="CCanom", hue="filename", errorbar="sd", palette="viridis"
    )
    # NOTE(review): range(10) assumes the default of 10 resolution bins.
    plt.xticks(range(10), labels, rotation=45, ha="right", rotation_mode="anchor")
    plt.ylabel(r"$CC_{anom}$ " + f"({args.method})")
    plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
    plt.grid()
    plt.tight_layout()

    if args.image is not None:
        plt.savefig(args.image)

    if args.show:
        plt.show()
def parse_arguments():
    """Build the CCanom command-line parser (returned unparsed so docs can render it)."""
    return ArgumentParser()


def main():
    """Entry point: parse the command line and run the CCanom analysis."""
    parser = parse_arguments()
    run_analysis(parser.parse_args())
import argparse
import numpy as np
import reciprocalspaceship as rs
import gemmi
import matplotlib.pyplot as plt
import seaborn as sns
from rsbooster.stats.parser import BaseParser
class ArgumentParser(BaseParser):
    """Command-line parser for the CCsym report.

    Adds CCsym-specific options on top of the shared ``BaseParser``
    (which presumably contributes ``--output``/``--image``/``--show`` used by
    ``run_analysis`` below -- verify against ``rsbooster.stats.parser``).
    """

    def __init__(self):
        super().__init__(
            description=__doc__
        )

        # Required arguments
        self.add_argument(
            "mtz",
            nargs="+",
            help="MTZs containing crossvalidation data from careless",
        )
        # BUGFIX: the concatenated help fragments were missing separating
        # spaces/periods, producing run-on --help output.
        self.add_argument(
            "--op",
            required=True,
            help=(
                "Symmetry operation to use to compute internal difference map. "
                "Can be given as ISYM if used with a `spacegroup` argument. "
                "Symops start counting at 0 (the identity), but CCsym for the identity are NaNs. "
                "Minus signs in symops are presently causing problems!"
            ),
        )

        # Optional arguments
        self.add_argument(
            "-sg",
            "--spacegroup",
            help=(
                "Spacegroup to use for symmetry operation "
                "(only necessary if `op` specifies an ISYM)."
            ),
        )
        self.add_argument(
            "-m",
            "--method",
            default="spearman",
            choices=["spearman", "pearson"],
            help=("Method for computing correlation coefficient (spearman or pearson)"),
        )
        self.add_argument(
            "--mod2",
            action="store_true",
            help=("Use id mod 2 to assign delays (use when employing spacegroup hack)"),
        )
def make_halves_ccsym(mtz, op, bins=10):
    """Construct half-datasets for computing CCsym.

    For each half (``half == 0`` / ``half == 1``) every reflection is paired
    with its symmetry mate under ``op`` (mapped back to the ASU) and the
    difference ``DF = F1 - F2`` is formed. The two halves' differences are
    then merged on (H, K, L, repeat) and binned by resolution.

    Returns (merged dataset with DF1/DF2 and a ``bin`` column, bin labels).
    """
    half1 = mtz.loc[mtz.half == 0].copy()
    half2 = mtz.loc[mtz.half == 1].copy()

    # Pair each reflection with its symmetry mate under `op`, re-indexed to
    # the ASU so the merge keys line up.
    temp1 = half1.merge(
        half1.apply_symop(op).hkl_to_asu(),
        on=["H", "K", "L", "repeat"],
        suffixes=("1", "2"),
    )
    temp2 = half2.merge(
        half2.apply_symop(op).hkl_to_asu(),
        on=["H", "K", "L", "repeat"],
        suffixes=("1", "2"),
    )

    # Internal difference within each half: original F minus its symmetry mate.
    temp1["DF"] = temp1["F1"] - temp1["F2"]
    temp2["DF"] = temp2["F1"] - temp2["F2"]

    temp = temp1[["DF", "repeat"]].merge(
        temp2[["DF", "repeat"]], on=["H", "K", "L", "repeat"], suffixes=("1", "2")
    )
    temp, labels = temp.assign_resolution_bins(bins)
    return temp, labels
def analyze_ccsym_mtz(
    mtzpath, op, bins=10, return_labels=True, method="spearman", mod2=False
):
    """Compute CCsym by resolution bin from 2-fold cross-validation data.

    :param mtzpath: path to a careless cross-validation MTZ, or a loaded
        ``rs.DataSet``. NOTE: the delay is parsed from the *filename*
        (``mtzpath[-5]``), so a DataSet input will fail at that step.
    :param op: ``gemmi.Op`` used to generate the symmetry mates.
    :param bins: number of resolution bins.
    :param return_labels: if True, also return the resolution-bin labels.
    :param method: correlation method, "spearman" or "pearson".
    :param mod2: assign delay as floor(id / 2) (spacegroup-hack layout).
    """
    # Accept either a filename or an in-memory DataSet.
    if isinstance(mtzpath, rs.dataset.DataSet):
        mtz = mtzpath
    else:
        mtz = rs.read_mtz(mtzpath)

    # BUGFIX: forward `bins` -- previously the argument was silently ignored
    # and make_halves_ccsym always used its own default of 10.
    m, labels = make_halves_ccsym(mtz, op, bins=bins)

    grouper = m.groupby(["bin", "repeat"])[["DF1", "DF2"]]
    result = (
        grouper.corr(method=method).unstack()[("DF1", "DF2")].to_frame().reset_index()
    )

    # Delay id is the character right before ".mtz" in the filename
    # (e.g. "run_3.mtz" -> 3). With mod2, consecutive ids are paired.
    if mod2:
        result["delay"] = np.floor(int(mtzpath[-5]) / 2)
    else:
        result["delay"] = int(mtzpath[-5])

    if return_labels:
        return result, labels
    else:
        return result
def run_analysis(args):
    """Resolve the symmetry operation, compute CCsym per MTZ, then report and plot."""
    # Get symmetry operation: `--op` is either an integer ISYM index into the
    # spacegroup's operator list, or a triplet string parsable by gemmi.Op.
    try:
        isym = int(args.op)
        sg = gemmi.SpaceGroup(args.spacegroup)
        op = sg.operations().sym_ops[isym]
    except ValueError:
        op = gemmi.Op(args.op)
    print(op)

    results = []
    labels = None
    for m in args.mtz:
        result = analyze_ccsym_mtz(m, op, method=args.method, mod2=args.mod2)
        if result is None:
            continue
        else:
            results.append(result[0])
            labels = result[1]

    # Concatenate per-file tables without enforcing isomorphous cells.
    results = rs.concat(results, check_isomorphous=False)
    results = results.reset_index(drop=True)
    # Flatten the MultiIndex correlation column into a plain "CCsym" column.
    results["CCsym"] = results[("DF1", "DF2")]
    results.drop(columns=[("DF1", "DF2")], inplace=True)
    for k in ('bin', 'repeat'):
        results[k] = results[k].to_numpy('int32')

    if args.output is not None:
        results.to_csv(args.output)
    else:
        print(results.to_string())
        results.info()

    sns.lineplot(
        data=results, x="bin", y="CCsym", hue="delay", errorbar="sd", palette="viridis"
    )
    # NOTE(review): range(10) assumes the default of 10 resolution bins.
    plt.xticks(range(10), labels, rotation=45, ha="right", rotation_mode="anchor")
    plt.ylabel(r"$CC_{sym}$ " + f"({args.method})")
    plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
    plt.grid()
    plt.tight_layout()

    if args.image is not None:
        plt.savefig(args.image)

    if args.show:
        plt.show()
def parse_arguments():
    """Build the CCsym command-line parser (returned unparsed so docs can render it)."""
    return ArgumentParser()


def main():
    """Entry point: parse the command line and run the CCsym analysis."""
    parsed = parse_arguments().parse_args()
    run_analysis(parsed)
import argparse
import numpy as np
import reciprocalspaceship as rs
import gemmi
import matplotlib.pyplot as plt
import seaborn as sns
from rsbooster.stats.parser import BaseParser
class ArgumentParser(BaseParser):
    """Command-line parser for the CCpred report.

    Adds CCpred-specific options on top of the shared ``BaseParser``
    (which presumably contributes ``--output``/``--image``/``--show`` used by
    ``run_analysis`` below -- verify against ``rsbooster.stats.parser``).
    """

    def __init__(self):
        super().__init__(
            description=__doc__
        )

        # Required arguments
        self.add_argument(
            "mtzs",
            nargs="+",
            help="MTZs containing prediction data from careless",
        )

        # Optional arguments
        self.add_argument(
            "-m",
            "--method",
            default="spearman",
            choices=["spearman", "pearson"],
            help=("Method for computing correlation coefficient (spearman or pearson)"),
        )
        self.add_argument(
            "--mod2",
            action="store_true",
            help=("Use (id mod 2) to assign delays (use when employing spacegroup hack)"),
        )
        self.add_argument(
            "--overall",
            action="store_true",
            default=False,
            help=("Whether to report a single value for the entire dataset"),
        )
def compute_ccpred(
    mtzpath, overall=False, bins=10, return_labels=True, method="spearman", mod2=False
):
    """Compute CCpred (correlation of Iobs vs Ipred) from careless predictions.

    :param mtzpath: path to a careless prediction MTZ. A filename (not a
        DataSet) is required because the run id and delay are parsed from it.
    :param overall: report one value per test flag instead of per resolution bin.
    :param bins: number of resolution bins (unused when ``overall``).
    :param return_labels: also return bin labels (only without ``overall``).
    :param method: correlation method, "spearman" or "pearson".
    :param mod2: assign delay as floor(id / 2) (spacegroup-hack layout).
    :raises ValueError: when a DataSet is passed instead of a path.
    """
    # BUGFIX: a DataSet input used to crash much later with an opaque
    # AttributeError at `mtzpath.split`; fail fast with a clear message.
    if isinstance(mtzpath, rs.dataset.DataSet):
        raise ValueError(
            "compute_ccpred requires a filename: the 'id' and 'delay' "
            "columns are parsed from the path"
        )
    mtz = rs.read_mtz(mtzpath)

    if overall:
        # Single correlation per test flag over the whole resolution range.
        grouper = mtz.groupby(["test"])[["Iobs", "Ipred"]]
    else:
        mtz, labels = mtz.assign_resolution_bins(bins)
        grouper = mtz.groupby(["bin", "test"])[["Iobs", "Ipred"]]

    result = (
        grouper.corr(method=method)
        .unstack()[("Iobs", "Ipred")]
        .to_frame()
        .reset_index()
    )

    # Run id: first path component; delay id: character right before ".mtz".
    result["id"] = mtzpath.split("/")[0]
    if mod2:
        result["delay"] = np.floor(int(mtzpath[-5]) / 2)
    else:
        result["delay"] = int(mtzpath[-5])
    result["spacegroup"] = mtz.spacegroup.xhm()

    if return_labels and not overall:
        return result, labels
    else:
        return result
def run_analysis(args):
    """Aggregate CCpred across all input MTZs, then print/save and plot.

    ``args.output``, ``args.image`` and ``args.show`` are not declared in this
    module -- presumably contributed by BaseParser (verify against
    rsbooster.stats.parser).
    """
    results = []
    labels = None
    for m in args.mtzs:
        result = compute_ccpred(m, overall=args.overall, method=args.method, mod2=args.mod2)
        # Without --overall, compute_ccpred returns (result, labels).
        if isinstance(result, tuple):
            results.append(result[0])
            labels = result[1]
        else:
            results.append(result)

    # Concatenate per-file tables without enforcing isomorphous cells.
    results = rs.concat(results, check_isomorphous=False)
    results = results.reset_index(drop=True)
    # Flatten the MultiIndex correlation column into a plain "CCpred" column.
    results["CCpred"] = results[("Iobs", "Ipred")]
    results.drop(columns=[("Iobs", "Ipred")], inplace=True)

    # BUGFIX: with --overall there is no "bin" column (compute_ccpred groups
    # by "test" only), so unconditionally casting 'bin' raised a KeyError.
    for k in ('bin', 'test'):
        if k in results.columns:
            results[k] = results[k].to_numpy('int32')

    if args.output is not None:
        results.to_csv(args.output)
    else:
        print(results.to_string())
        print(results.info())

    if args.overall:
        g = sns.relplot(
            data=results,
            x="id",
            y="CCpred",
            style="test",
            hue="delay",
            col="spacegroup",
            kind="line",
            palette="viridis",
        )
        for col_val, ax in g.axes_dict.items():
            ax.grid(True)
            ax.set_xticklabels(
                ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor"
            )
        plt.show()
    else:
        g = sns.relplot(
            data=results,
            x="bin",
            y="CCpred",
            style="test",
            hue="delay",
            col="spacegroup",
            kind="line",
            palette="viridis",
        )
        for col_val, ax in g.axes_dict.items():
            # NOTE(review): range(10) assumes the default of 10 resolution bins.
            ax.set_xticks(range(10))
            ax.set_xticklabels(labels, rotation=45, ha="right", rotation_mode="anchor")
            ax.grid(True)

        if args.image is not None:
            plt.savefig(args.image)

        if args.show:
            plt.show()
def parse_arguments():
    """Build the CCpred command-line parser (returned unparsed so docs can render it)."""
    return ArgumentParser()


def main():
    """Entry point: parse the command line and run the CCpred analysis."""
    run_analysis(parse_arguments().parse_args())
import argparse
import matplotlib.pyplot as plt
import reciprocalspaceship as rs
import seaborn as sns
from rsbooster.stats.parser import BaseParser
class ArgumentParser(BaseParser):
    """Command-line parser for the CChalf report.

    Adds CChalf-specific options on top of the shared ``BaseParser``
    (which presumably contributes ``--output``/``--image``/``--show`` used by
    ``run_analysis`` below -- verify against ``rsbooster.stats.parser``).
    """

    def __init__(self):
        super().__init__(
            description=__doc__
        )

        # Required arguments
        self.add_argument(
            "mtz",
            nargs="+",
            help="MTZs containing crossvalidation data from careless",
        )

        # Correlation estimator used for every CChalf value.
        self.add_argument(
            "-m",
            "--method",
            default="spearman",
            choices=["spearman", "pearson"],
            help=("Method for computing correlation coefficient (spearman or pearson)"),
        )
def make_halves_cchalf(mtz, bins=10):
    """Pair the two cross-validation halves' structure factors for CChalf.

    Anomalous datasets (those carrying an ``F(+)`` column) are first
    stacked so Friedel mates become separate rows with a single ``F``.
    The halves are merged on (H, K, L, repeat) and binned by resolution.

    Returns (merged dataset with F1/F2 and a ``bin`` column, bin labels).
    """
    halves = [mtz.loc[mtz.half == flag].copy() for flag in (0, 1)]

    # Support anomalous
    if "F(+)" in halves[0].columns:
        halves = [half.stack_anomalous() for half in halves]

    paired = halves[0][["F", "repeat"]].merge(
        halves[1][["F", "repeat"]], on=["H", "K", "L", "repeat"], suffixes=("1", "2")
    )
    paired, labels = paired.assign_resolution_bins(bins)
    return paired, labels
def analyze_cchalf_mtz(mtzpath, bins=10, return_labels=True, method="spearman"):
    """Compute CChalf by resolution bin from 2-fold cross-validation data.

    :param mtzpath: path to a careless cross-validation MTZ, or a loaded
        ``reciprocalspaceship`` DataSet.
    :param bins: number of resolution bins.
    :param return_labels: if True, also return the resolution-bin labels.
    :param method: correlation method, "spearman" or "pearson".
    :raises ValueError: if the MTZ lacks the careless cross-validation columns.
    """
    # Accept either a filename or an in-memory DataSet.
    if isinstance(mtzpath, rs.dataset.DataSet):
        mtz = mtzpath
    else:
        mtz = rs.read_mtz(mtzpath)

    # Error handling -- make sure MTZ file is appropriate
    if "half" not in mtz.columns:
        raise ValueError("Please provide MTZs from careless crossvalidation")

    # BUGFIX: forward `bins` -- previously the argument was silently ignored
    # and make_halves_cchalf always used its own default of 10.
    m, labels = make_halves_cchalf(mtz, bins=bins)
    grouper = m.groupby(["bin", "repeat"])[["F1", "F2"]]
    result = (
        grouper.corr(method=method).unstack()[("F1", "F2")].to_frame().reset_index()
    )

    if return_labels:
        return result, labels
    else:
        return result
def run_analysis(args):
    """Compute CChalf for every input MTZ, then print/save and plot the result.

    ``args.output``, ``args.image`` and ``args.show`` are not declared in this
    module -- presumably contributed by BaseParser (verify against
    rsbooster.stats.parser).
    """
    results = []
    labels = None
    for m in args.mtz:
        result = analyze_cchalf_mtz(m, method=args.method)
        if result is None:
            continue
        else:
            # Tag each per-bin table with its source file for the plot legend.
            result[0]["filename"] = m
            results.append(result[0])
            labels = result[1]

    # Concatenate per-file tables without enforcing isomorphous cells.
    results = rs.concat(results, check_isomorphous=False)
    results = results.reset_index(drop=True)
    # Flatten the MultiIndex correlation column into a plain "CChalf" column.
    results["CChalf"] = results[("F1", "F2")]
    results.drop(columns=[("F1", "F2")], inplace=True)
    for k in ('bin', 'repeat'):
        results[k] = results[k].to_numpy('int32')

    if args.output is not None:
        results.to_csv(args.output)
    else:
        print(results.to_string())
        print(results.info())

    sns.lineplot(
        data=results, x="bin", y="CChalf", hue="filename", errorbar="sd", palette="viridis"
    )
    # NOTE(review): range(10) assumes the default of 10 resolution bins.
    plt.xticks(range(10), labels, rotation=45, ha="right", rotation_mode="anchor")
    plt.ylabel(r"$CC_{1/2}$ " + f"({args.method})")
    plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
    plt.grid()
    plt.tight_layout()

    if args.image is not None:
        plt.savefig(args.image)

    if args.show:
        plt.show()
def parse_arguments():
    """Build the CChalf command-line parser (returned unparsed so docs can render it)."""
    return ArgumentParser()


def main():
    """Entry point: parse the command line and run the CChalf analysis."""
    cli_args = parse_arguments().parse_args()
    run_analysis(cli_args)
import argparse
import reciprocalspaceship as rs
def rfree(cell, sg, dmin, rfraction, seed):
    """Generate the reciprocal ASU for (cell, sg) out to dmin and add R-free flags.

    :param cell: unit-cell parameters (a, b, c, alpha, beta, gamma).
    :param sg: spacegroup.
    :param dmin: high-resolution cutoff.
    :param rfraction: fraction of reflections flagged as R-free.
    :param seed: RNG seed for reproducible flags (or None).
    :returns: DataSet indexed by (H, K, L) carrying the R-free flags.
    """
    hkls = rs.utils.generate_reciprocal_asu(cell, sg, dmin)
    dataset = rs.DataSet(
        {"H": hkls[:, 0], "K": hkls[:, 1], "L": hkls[:, 2]},
        cell=cell,
        spacegroup=sg,
    )
    dataset = dataset.infer_mtz_dtypes().set_index(["H", "K", "L"])
    return rs.utils.add_rfree(dataset, rfraction, seed=seed)
def parse_arguments():
    """Parse commandline arguments"""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description="Create an mtz containing rfree flags",
    )

    # Output file (optional, has a default)
    parser.add_argument(
        "-o", "--outfile", default="rfree.mtz", help="Output MTZ filename"
    )

    # Required crystallographic parameters
    parser.add_argument(
        "-c",
        "--cell",
        nargs=6,
        metavar=("a", "b", "c", "alpha", "beta", "gamma"),
        type=float,
        required=True,
        help=(
            "Cell for output mtz file containing rfree flags. Specified as (a, b, c, alpha, beta, gamma)"
        ),
    )
    parser.add_argument(
        "-sg",
        "--spacegroup",
        required=True,
        type=int,
        help=("Spacegroup for output mtz file containing rfree flags"),
    )
    parser.add_argument(
        "-d",
        "--dmin",
        required=True,
        type=float,
        help=("Maximum resolution of reflections to be included"),
    )
    parser.add_argument(
        "-r",
        "--rfraction",
        required=True,
        type=float,
        help=("Fraction of reflections to be flagged as Rfree"),
    )
    parser.add_argument(
        "-s",
        "--seed",
        default=None,
        type=int,
        help=("Seed to random number generator for reproducible Rfree flags"),
    )

    return parser#.parse_args() # making docs works best when a function returns just the parser
def main():
    """Entry point: generate R-free flags from the CLI options and write the MTZ."""
    args = parse_arguments().parse_args()
    flags = rfree(args.cell, args.spacegroup, args.dmin, args.rfraction, args.seed)
    flags.write_mtz(args.outfile)


if __name__ == "__main__":
    main()
import argparse
import numpy as np
import reciprocalspaceship as rs
def parse_arguments():
    """Parse commandline arguments"""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter, description=__doc__
    )

    # Required arguments
    parser.add_argument(
        "-on",
        "--onmtz",
        nargs=3,
        metavar=("mtz", "f_col", "sig_col"),
        required=True,
        help="MTZ to be used as `on` data. Specified as (filename, F, SigF)",
    )
    parser.add_argument(
        "-off",
        "--offmtz",
        nargs=3,
        metavar=("mtz", "data_col", "sig_col"),
        required=True,
        help=("MTZ to be used as `off` data. Specified as (filename, F, SigF)"),
    )

    # at most one of these must be provided (mutual exclusion is enforced in main())
    parser.add_argument(
        "-calc",
        "--calcmtz",
        nargs=2,
        metavar=("mtz", "data_col"),
        help=("MTZ to be used as `calc` data. Specified as (filename, F). At most one of `-calc` and `-ref` can be specified."),
    )
    parser.add_argument(
        "-ref",
        "--refmtz",
        nargs=3,
        metavar=("mtz", "data_col", "sig_col"),
        help=("MTZ to be used as `ref` data. Specified as (filename, F, SigF). At most one of `-calc` and `-ref` can be specified."),
    )

    # Optional arguments
    parser.add_argument(
        "-f", "--factor", type=float, default=10.0, help="Extrapolation factor"
    )
    parser.add_argument(
        "-o", "--outfile", default="esf.mtz", help="Output MTZ filename"
    )

    return parser#.parse_args()
def main():
    """Compute extrapolated structure factors F_esf = factor*(F_on - F_off) + F_calc
    with propagated uncertainties, and write them to an MTZ.
    """
    # Parse commandline arguments
    args = parse_arguments().parse_args()

    on, f_on, sigf_on = args.onmtz
    off, f_off, sigf_off = args.offmtz
    # Pick the `calc` source: explicit -calc, explicit -ref (with sigmas),
    # or fall back to the off data when neither is given.
    if args.calcmtz and args.refmtz:
        raise ValueError("Only specify `-calc` or `-ref`, not both.")
    elif args.calcmtz:
        calc, f_calc = args.calcmtz
        sigf_calc = None
    elif args.refmtz:
        calc, f_calc, sigf_calc = args.refmtz
    else:
        print("No `-calc` or `-ref` is specified. `-calc` is set to `-off`.")
        calc, f_calc, _ = args.offmtz
        sigf_calc = None

    # Read MTZ files
    on = rs.read_mtz(on)
    off = rs.read_mtz(off)
    calc = rs.read_mtz(calc)

    # Canonicalize column names
    on.rename(columns={f_on: "F", sigf_on: "SigF"}, inplace=True)
    off.rename(columns={f_off: "F", sigf_off: "SigF"}, inplace=True)
    calc.rename(columns={f_calc: "F_calc"}, inplace=True)
    if sigf_calc:
        calc.rename(columns={sigf_calc: "SigF_calc"}, inplace=True)
        calc = calc[["F_calc", "SigF_calc"]]
    else:
        calc = calc[["F_calc"]]

    # Subset DataSet objects to relevant columns
    on = on[["F", "SigF"]]
    off = off[["F", "SigF"]]

    # Merge into common DataSet, keeping cell/spacegroup from on data
    joined = on.merge(off, on=["H", "K", "L"], suffixes=("_on", "_off"))
    joined = joined.merge(calc, on=["H", "K", "L"], suffixes=(None, "_calc"))

    # Compute F_esf and SigF_esf
    factor = args.factor
    joined["F_esf"] = factor * (joined["F_on"] - joined["F_off"]) + joined["F_calc"]
    if np.array_equal(joined["F_off"].to_numpy(), joined["F_calc"].to_numpy()):
        # F_off and F_calc are the same data, so their errors are fully
        # correlated: the combined coefficient on SigF_off is (factor - 1).
        print("F_off == F_calc... changing error propagation accordingly.")
        joined["SigF_esf"] = np.sqrt(
            ((factor ** 2) * (joined["SigF_on"] ** 2))
            + (((factor - 1) ** 2) * (joined["SigF_off"] ** 2))
        )
    else:
        # Independent errors: add in quadrature with coefficient `factor`.
        joined["SigF_esf"] = np.sqrt(
            ((factor ** 2) * (joined["SigF_on"] ** 2))
            + ((factor ** 2) * (joined["SigF_off"] ** 2))
        )

    # Fold in the reference sigmas when -ref supplied them.
    if sigf_calc:
        joined["SigF_esf"] = np.sqrt(
            (joined["SigF_esf"] ** 2) + (joined["SigF_calc"] ** 2)
        )

    # Handle any negative values of |F_esf|
    joined["F_esf"] = np.abs(joined["F_esf"])

    joined.infer_mtz_dtypes(inplace=True)
    joined.write_mtz(args.outfile)


if __name__ == "__main__":
    main()
import os
from os import rename
from os.path import join
import datatable as dt
import pandas as pd
from rs_datasets.data_loader import download_dataset, download_url
from rs_datasets.generic_dataset import Dataset, safe
class MillionSongDataset(Dataset):
    """Loader for the Million Song Dataset (Echo Nest Taste Profile / MSD Challenge).

    Exposes ``train`` plus either merged ``val``/``test`` splits or the raw
    Kaggle visible/hidden splits, as pandas DataFrames of
    (user_id, item_id, play_count) triplets. Data is downloaded on first use.
    """

    def __init__(
        self,
        merge_kaggle_splits: bool = True,
        drop_mismatches: bool = True,
        path: str = None
    ):
        """
        :param merge_kaggle_splits:
            In MSD Challenge on [Kaggle](https://www.kaggle.com/c/msdchallenge) there were
            public and private parts. By default they are merged together. You can change this, setting
            `merge_kaggle_splits` to `False`.
        :param drop_mismatches:
            There is a [matching error](http://millionsongdataset.com/blog/12-2-12-fixing-matching-errors/)
            between track ids and song ids in MSD. It shouldn't matter if you don't use audio features, but
            by default these items are removed.
        :param path: where to read dataset from or where to download to.
        """
        super().__init__(path)
        folder = join(self.data_folder, 'msd')
        if not os.path.exists(folder):
            self._download(folder)

        # The cleaned cache is only valid for the default preprocessing flags.
        try_cache = merge_kaggle_splits and drop_mismatches
        processed = join(folder, 'clean')
        if try_cache and os.path.exists(processed):
            # Fast path: reuse the previously cleaned CSVs.
            self.train = dt.fread(join(processed, 'train.csv')).to_pandas()
            self.val = dt.fread(join(processed, 'val.csv')).to_pandas()
            self.test = dt.fread(join(processed, 'test.csv')).to_pandas()
        else:
            eval_folder = join(folder, 'evaluation')
            self.train = self._read_triplets(join(folder,
                                                  'train_triplets.txt'))
            val_vis = self._read_triplets(join(eval_folder,
                                               'year1_valid_triplets_visible.txt'))
            val_hid = self._read_triplets(join(eval_folder,
                                               'year1_valid_triplets_hidden.txt'))
            test_vis = self._read_triplets(join(eval_folder,
                                                'year1_test_triplets_visible.txt'))
            test_hid = self._read_triplets(join(eval_folder,
                                                'year1_test_triplets_hidden.txt'))

            if drop_mismatches:
                # Remove songs whose song-id/track-id pairing is known to be bad.
                mismatches = self._read_mismatches(folder)
                mismatches = set(mismatches.item_id)
                self.train = self._drop_mismatches(self.train, mismatches)
                val_vis = self._drop_mismatches(val_vis, mismatches)
                val_hid = self._drop_mismatches(val_hid, mismatches)
                test_vis = self._drop_mismatches(test_vis, mismatches)
                test_hid = self._drop_mismatches(test_hid, mismatches)

            if merge_kaggle_splits:
                self.val = pd.concat([val_vis, val_hid], ignore_index=True)
                self.test = pd.concat([test_vis, test_hid], ignore_index=True)
            else:
                self.val_visible = val_vis
                self.val_hidden = val_hid
                self.test_visible = test_vis
                self.test_hidden = test_hid

            if try_cache and not os.path.exists(processed):
                # Cache the cleaned splits for the fast path above.
                os.mkdir(processed)
                self.train.to_csv(join(processed, 'train.csv'), index=False)
                self.val.to_csv(join(processed, 'val.csv'), index=False)
                self.test.to_csv(join(processed, 'test.csv'), index=False)

    @staticmethod
    def _read_triplets(path):
        # (user, song, play count) triplets; drop malformed/missing rows.
        return dt.fread(
            path,
            columns=['user_id', 'item_id', 'play_count']
        ).to_pandas().dropna()

    @staticmethod
    def _read_mismatches(path):
        """Parse sid_mismatches.txt into a (item_id, track_id) DataFrame.

        Each line embeds the pair between angle brackets: `... <song_id track_id> ...`.
        """
        name = 'sid_mismatches.txt'
        file = join(path, name)
        mismatches = []
        with open(file) as f:
            for line in f.readlines():
                song, track = line[
                    line.find('<') + 1: line.find('>')].split(' ')
                mismatches.append([song, track])
        return pd.DataFrame(mismatches, columns=['item_id', 'track_id'])

    @staticmethod
    def _drop_mismatches(df, mismatches):
        # Keep only rows whose item_id is not in the mismatch set.
        return df[~df.item_id.isin(mismatches)]

    @safe
    def _download(self, path):
        """
        Downloads train triplets, MSD Challenge Kaggle data
        (http://millionsongdataset.com/challenge/)
        and a list of matching errors
        http://millionsongdataset.com/blog/12-2-12-fixing-matching-errors/
        :param path: path to save
        :return: None
        """
        self.logger.info('Getting Million Song Dataset...')
        self.logger.info('Downloading Echo Nest Taste Subprofile train data...')
        base_url = 'http://millionsongdataset.com/sites/default/files/challenge/'
        download_dataset(
            base_url + 'train_triplets.txt.zip',
            join(self.data_folder, 'train.zip')
        )
        # Presumably download_dataset extracts next to the zip; move the
        # extracted `train` folder into place -- verify against data_loader.
        rename(join(self.data_folder, 'train'), path)
        self.logger.info('Downloading evaluation data for MSD Challenge...')
        download_dataset(
            base_url + 'EvalDataYear1MSDWebsite.zip',
            join(path, 'eval.zip')
        )
        rename(
            join(path, 'EvalDataYear1MSDWebsite'),
            join(path, 'evaluation')
        )
        self.logger.info('Downloading list of matching errors...')
        url = 'http://millionsongdataset.com/sites/default/files/tasteprofile/sid_mismatches.txt'
        download_url(url, join(path, 'sid_mismatches.txt'))
import os
from os import rename
from os.path import join
from typing import Tuple
import datatable as dt
from datatable import Frame
from rs_datasets.data_loader import download_dataset
from rs_datasets.generic_dataset import Dataset, safe
# Column-name schemas for the raw MovieLens files; which schema applies
# depends on the release family (100k / 1m / 10m / modern csv releases).
rating_cols = ['user_id', 'item_id', 'rating', 'timestamp']
item_cols = ['item_id', 'title', 'genres']
tag_cols = ['user_id', 'item_id', 'tag', 'timestamp']
user_cols = ['user_id', 'gender', 'age', 'occupation', 'zip_code']
link_cols = ['item_id', 'imdb_id', 'tmdb_id']
tag_g_cols = ['tag_id', 'tag']
score_cols = ['movie_id', 'tag_id', 'rating']
genre_cols = ['item_id', 'title', 'release_date', 'video_release_date',
              'imdb_url', 'unknown', 'Action', 'Adventure', 'Animation',
              'Children\'s', 'Comedy', 'Crime', 'Documentary', 'Drama',
              'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery',
              'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
class MovieLens(Dataset):
    """Loader for the GroupLens MovieLens datasets.

    Downloads the requested release on first use and exposes the tables as
    pandas DataFrames: ``ratings``, ``items`` and -- depending on the
    release -- ``users``, ``tags``, ``links``, plus optionally
    ``genome_tags``/``genome_scores``.
    """

    def __init__(
        self,
        version: str = 'small',
        read_genome: bool = False,
        path: str = None
    ):
        """
        :param version: dataset version,
            one of {'100k', '1m', '10m', '20m', '25m', 'small', 'latest'}
        :param read_genome: whether to read genome tag dataset or not
            (available from version 20m and up).
            Are not loaded by default to save memory.
        :param path: where to read dataset from or where to download to.
        """
        super().__init__(path)
        options = {'100k', '1m', '10m', '20m', '25m', 'small', 'latest'}
        if version not in options:
            raise ValueError(
                f'{version} is not supported. Available options: {options}')

        if version == 'small':
            dataset = 'ml-latest-small'
        else:
            dataset = 'ml-' + version

        folder = join(self.data_folder, dataset)
        if not os.path.exists(folder):
            self._download(folder, dataset)

        # Each release family ships a different set of files and formats.
        if version == '100k':
            (self.ratings,
             self.users,
             self.items) = self._read_100k(folder)
        elif version == '1m':
            (self.ratings,
             self.users,
             self.items) = self._read_1m(folder)
        elif version == '10m':
            (self.ratings,
             self.items,
             self.tags) = self._read_10m(folder)
        else:
            (self.ratings,
             self.items,
             self.tags,
             self.links) = self._read_modern(folder)
            if read_genome:
                (self.genome_tags,
                 self.genome_scores) = self._read_genome(folder)

    @staticmethod
    def _read_modern(folder: str) -> Tuple[Frame, Frame, Frame, Frame]:
        # Modern releases (20m/25m/small/latest) ship plain CSV files.
        ratings = dt.fread(join(folder, 'ratings.csv'), columns=rating_cols).to_pandas()
        items = dt.fread(join(folder, 'movies.csv'), columns=item_cols).to_pandas()
        tags = dt.fread(join(folder, 'tags.csv'), columns=tag_cols).to_pandas()
        links = dt.fread(join(folder, 'links.csv'), columns=link_cols).to_pandas()
        return ratings, items, tags, links

    @staticmethod
    def _read_genome(folder: str) -> Tuple[Frame, Frame]:
        # Tag-genome relevance data (only shipped with 20m and newer).
        genome_tags = dt.fread(join(folder, 'genome-tags.csv'), columns=tag_g_cols).to_pandas()
        genome_scores = dt.fread(join(folder, 'genome-scores.csv'), columns=score_cols).to_pandas()
        return genome_tags, genome_scores

    @staticmethod
    def _read_10m(folder: str) -> Tuple[Frame, Frame, Frame]:
        # .dat files; separators were normalized to tabs in _download.
        ratings = dt.fread(join(folder, 'ratings.dat'), columns=rating_cols).to_pandas()
        items = dt.fread(join(folder, 'movies.dat'), columns=item_cols, quotechar="").to_pandas()
        tags = dt.fread(join(folder, 'tags.dat'), columns=tag_cols, quotechar="").to_pandas()
        return ratings, items, tags

    @staticmethod
    def _read_1m(folder: str) -> Tuple[Frame, Frame, Frame]:
        # .dat files; separators were normalized to tabs in _download.
        ratings = dt.fread(join(folder, 'ratings.dat'), columns=rating_cols).to_pandas()
        users = dt.fread(join(folder, 'users.dat'), columns=user_cols).to_pandas()
        items = dt.fread(join(folder, 'movies.dat'), columns=item_cols).to_pandas()
        return ratings, users, items

    @staticmethod
    def _read_100k(folder: str) -> Tuple[Frame, Frame, Frame]:
        ratings = dt.fread(join(folder, 'u.data'), columns=rating_cols).to_pandas()
        users = dt.fread(join(folder, 'u.user'), columns=user_cols).to_pandas()
        items = dt.fread(join(folder, 'u.item'), columns=genre_cols)
        # video_release_date is dropped before conversion (unused column).
        del items[:, 'video_release_date']
        items = items.to_pandas()
        return ratings, users, items

    @safe
    def _download(self, path, dataset):
        """
        Download data from https://grouplens.org/datasets/movielens/
        Available options: ml-20m, ml-latest-small, ml-latest and other,
        can be checked on ml site.
        :param path: where to save
        :param dataset: dataset version
        :return: None
        """
        self.logger.info('Downloading %s from grouplens...', dataset)
        archive = dataset + '.zip'
        url = f'http://files.grouplens.org/datasets/movielens/{archive}'
        download_dataset(url, path + '.zip')
        # The 1m/10m releases use '::' separators, which dt.fread cannot
        # parse -- normalize them to tabs once, right after download.
        if dataset == 'ml-10m':
            rename(join(self.data_folder, 'ml-10M100K'), path)
            self.replace_separator(join(path, 'movies.dat'), '::', '\t')
            self.replace_separator(join(path, 'ratings.dat'), '::', '\t')
            self.replace_separator(join(path, 'tags.dat'), '::', '\t')
        elif dataset == 'ml-1m':
            self.replace_separator(join(path, 'movies.dat'), '::', '\t', 'ISO-8859-1')
            self.replace_separator(join(path, 'ratings.dat'), '::', '\t')
            self.replace_separator(join(path, 'users.dat'), '::', '\t')

    @staticmethod
    def replace_separator(filepath: str, old: str, new: str, encoding: str = 'utf8'):
        """Rewrite *filepath* in place, replacing separator *old* with *new*.

        The file is read with *encoding* and always rewritten as UTF-8.
        BUGFIX: the output previously used the platform default encoding,
        which could garble non-ASCII titles (e.g. the ISO-8859-1 ml-1m
        movies file) on systems whose locale encoding is not UTF-8.
        """
        with open(filepath, 'r', encoding=encoding) as f:
            newlines = [line.replace(old, new) for line in f]
        with open(filepath, 'w', encoding='utf8') as f:
            f.writelines(newlines)
import os
import tarfile
from os.path import splitext
from tarfile import TarFile
from typing import Union
from zipfile import ZipFile
from py7zr import SevenZipFile
def extract(archive_name: str, manage_folder: bool = True) -> None:
    """
    Extract `archive_name` and put it inside a folder
    if there are multiple files inside.

    :param archive_name: path to archive
    :param manage_folder: check if there is root folder in archive:
        if there is one, do not create extra folder,
        if there are just files inside, put them into folder.
        If param is set to `False`, extract "as is".
    :return: None
    :raises NotImplementedError: if the archive format is not supported.
    """
    if archive_name.endswith('.zip'):
        archive = ZipFile(archive_name)
    elif archive_name.endswith('.7z'):
        archive = SevenZipFile(archive_name)
    else:
        try:
            archive = tarfile.open(archive_name)
        except Exception as err:
            # Chain the original error so the real cause is not lost.
            raise NotImplementedError(f'Can\'t extract {archive_name}') from err

    try:
        name = os.path.dirname(archive_name)
        if manage_folder and not contains_dir(archive):
            # No root folder inside: create one named after the archive.
            name = remove_extension(archive_name)
            os.mkdir(name)
        archive.extractall(path=name)
    finally:
        # BUGFIX: close the archive even when extraction fails; previously
        # the handle leaked on any extractall() error.
        archive.close()
def rm_if_exists(filepath: str) -> None:
    """
    Remove file if it exists, else do nothing.

    Uses EAFP rather than the racy exists()+remove() pair: the file could
    disappear between the check and the removal.

    :param filepath: path to file
    :return: None
    """
    try:
        os.remove(filepath)
    except FileNotFoundError:
        pass
def contains_dir(archive: Union[ZipFile, TarFile]) -> bool:
    """
    Check if archive contains a root folder or just files.

    :param archive: archive file (ZipFile, TarFile or py7zr.SevenZipFile)
    :return: `True` if first element of archive is folder
    :raises TypeError: for unsupported archive objects
    """
    if isinstance(archive, ZipFile):
        # ZipInfo.is_dir() checks for a trailing '/' in the member name.
        return archive.infolist()[0].is_dir()
    if isinstance(archive, TarFile):
        return archive.getmembers()[0].isdir()
    if isinstance(archive, SevenZipFile):
        # BUGFIX: the old code called os.path.isdir() on the member *name*,
        # which inspected the local filesystem instead of the archive.
        # py7zr's list() returns FileInfo entries with an `is_directory` flag.
        return archive.list()[0].is_directory
    raise TypeError(f'Unknown archive type: {type(archive)}')
def remove_extension(file: str) -> str:
    """
    Strip only the final extension from a file name.

    :param file: string
    :return: archive.tar.gz -> archive.tar
    """
    stem, _ext = splitext(file)
    return stem
import sys
import platform
PYTHON_VERSION_INFO = sys.version_info
PY2 = sys.version_info[0] == 2
# Infos about python passed to the trace agent through the header
PYTHON_VERSION = platform.python_version()
PYTHON_INTERPRETER = platform.python_implementation()
stringify = str
if PY2:
from urllib import urlencode
import httplib
stringify = unicode
from Queue import Queue
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
else:
from queue import Queue
from urllib.parse import urlencode
import http.client as httplib
from io import StringIO
try:
import urlparse as parse
except ImportError:
from urllib import parse
try:
from asyncio import iscoroutinefunction
from .compat_async import _make_async_decorator as make_async_decorator
except ImportError:
# asyncio is missing so we can't have coroutines; these
# functions are used only to ensure code executions in case
# of an unexpected behavior
def iscoroutinefunction(fn):
return False
def make_async_decorator(tracer, fn, *params, **kw_params):
return fn
def iteritems(obj, **kwargs):
    """Iterate (key, value) pairs on either Python 2 or 3 mappings.

    Prefers the object's `iteritems` (Python 2 dicts) and falls back to
    `items` (Python 3 dicts and anything mapping-like).
    """
    iterate = getattr(obj, "iteritems", None) or obj.items
    return iterate(**kwargs)
def to_unicode(s):
    """ Return a unicode string for the given bytes or string instance. """
    # Already the text type we expect (`str` on Python 3, `unicode` on 2);
    # double-decoding a `unicode` can raise UnicodeEncodeError, e.g.
    # '\xc3\xbf'.decode('utf-8').decode('utf-8') on Python 2.
    if isinstance(s, stringify):
        return s

    # Byte-like objects (Python 2 `str`, `bytearray`, ...) expose `decode`.
    decode = getattr(s, 'decode', None)
    if decode is not None:
        return decode('utf-8')

    # Coerce anything else, e.g. to_unicode(1) or to_unicode({'key': 'value'}).
    return stringify(s)
# Cross-version type aliases (Python 2 vs 3): the text base type, the wire
# type produced by the msgpack encoder, and the accepted numeric types.
if PY2:
    string_type = basestring
    msgpack_type = basestring
    numeric_types = (int, long, float)
else:
    string_type = str
    msgpack_type = bytes
    numeric_types = (int, float)
if PY2:
    # avoids Python 3 `SyntaxError`
    # this block will be replaced with the `six` library
    from .utils.reraise import _reraise as reraise
else:
    def reraise(tp, value, tb=None):
        """Python 3 re-raise function. This function is internal and
        will be replaced entirely with the `six` library.
        """
        try:
            if value is None:
                value = tp()
            if value.__traceback__ is not tb:
                raise value.with_traceback(tb)
            raise value
        finally:
            # Drop local references so the frame does not keep the exception
            # and traceback alive (avoids a reference cycle).
            value = None
            tb = None


# Public surface of this compatibility module.
__all__ = [
    'httplib',
    'iteritems',
    'PY2',
    'Queue',
    'stringify',
    'StringIO',
    'urlencode',
    'parse',
    'reraise',
]
import json
import logging
# check msgpack CPP implementation; if the import fails, we're using the
# pure Python implementation that is really slow, so the ``Encoder`` should use
# a different encoding format.
try:
import msgpack
from msgpack._packer import Packer # noqa
from msgpack._unpacker import unpack, unpackb, Unpacker # noqa
from msgpack._version import version
# use_bin_type kwarg only exists since msgpack-python v0.4.0
MSGPACK_PARAMS = { 'use_bin_type': True } if version >= (0, 4, 0) else {}
MSGPACK_ENCODING = True
except ImportError:
# fallback to JSON
MSGPACK_PARAMS = {}
MSGPACK_ENCODING = False
log = logging.getLogger(__name__)
class Encoder(object):
    """Base class describing how traces and services are serialized.

    Subclasses implement ``_encode`` and set ``content_type`` so the API
    transport can tell the agent which decoding format to use without
    knowing the concrete encoder.
    """

    def __init__(self):
        # Concrete encoders override this with the MIME type matching
        # their wire format.
        self.content_type = ''

    def encode_traces(self, traces):
        """Serialize a list of traces (each trace being a list of spans).

        Every span is normalized via ``to_dict()`` before encoding; the
        nesting of traces is preserved.

        :param traces: A list of traces that should be serialized
        """
        return self._encode(
            [[span.to_dict() for span in trace] for trace in traces]
        )

    def encode_services(self, services):
        """Serialize a dictionary containing one or more services.

        :param services: A dictionary that contains one or more services
        """
        return self._encode(services)

    def _encode(self, obj):
        """Serialize ``obj`` in the concrete wire format (subclass hook)."""
        raise NotImplementedError
class JSONEncoder(Encoder):
    """Encoder backed by the built-in :mod:`json` module (slow fallback)."""
    def __init__(self):
        # TODO[manu]: add instructions about how users can switch to Msgpack
        log.debug('using JSON encoder; application performance may be degraded')
        self.content_type = 'application/json'
    def _encode(self, obj):
        # plain JSON dump; obj is already normalized to builtin types
        return json.dumps(obj)
class MsgpackEncoder(Encoder):
    """Encoder backed by the msgpack C extension (preferred transport)."""
    def __init__(self):
        log.debug('using Msgpack encoder')
        self.content_type = 'application/msgpack'
    def _encode(self, obj):
        # MSGPACK_PARAMS carries the version-dependent ``use_bin_type`` flag
        return msgpack.packb(obj, **MSGPACK_PARAMS)
def get_encoder():
    """
    Choose the best encoder for the API transport: Msgpack when its CPP
    implementation is installed, otherwise the built-in JSON encoder.
    """
    return MsgpackEncoder() if MSGPACK_ENCODING else JSONEncoder()
import logging
from threading import Lock
from .compat import iteritems
log = logging.getLogger(__name__)
# trace ids are 64-bit unsigned integers; used as the modulus in sampling
MAX_TRACE_ID = 2 ** 64
# Has to be the same factor and key as the Agent to allow chained sampling
KNUTH_FACTOR = 1111111111111111111
class AllSampler(object):
    """Sampler that keeps every trace."""
    def sample(self, span):
        # unconditional keep; ``span`` is intentionally ignored
        return True
class RateSampler(object):
    """Sampler based on a rate

    Keep (100 * `sample_rate`)% of the traces.
    It samples randomly, its main purpose is to reduce the instrumentation footprint.
    """
    def __init__(self, sample_rate=1):
        # clamp the configured rate into (0, 1]; non-positive values are a
        # configuration error and fall back to sampling everything
        if sample_rate <= 0:
            log.error("sample_rate is negative or null, disable the Sampler")
            sample_rate = 1
        elif sample_rate > 1:
            sample_rate = 1
        self.set_sample_rate(sample_rate)
        log.info("initialized RateSampler, sample %s%% of traces", 100 * sample_rate)
    def set_sample_rate(self, sample_rate):
        self.sample_rate = sample_rate
        # precompute the acceptance threshold over the trace-id space
        self.sampling_id_threshold = sample_rate * MAX_TRACE_ID
    def sample(self, span):
        # deterministic decision from the trace id via multiplicative
        # hashing, matching the Agent so chained sampling stays consistent
        return ((span.trace_id * KNUTH_FACTOR) % MAX_TRACE_ID) <= self.sampling_id_threshold
def _key(service=None, env=None):
service = service or ""
env = env or ""
return "service:" + service + ",env:" + env
# fallback key used when no service/env specific rate has been configured
_default_key = _key()
class RateByServiceSampler(object):
    """Sampler based on a rate, by service

    Keep (100 * `sample_rate`)% of the traces.
    The sample rate is kept independently for each service/env tuple.
    """
    def __init__(self, sample_rate=1):
        # the lock protects ``_by_service_samplers``, which is read by
        # ``sample()`` while agent-driven rate updates rewrite it
        self._lock = Lock()
        self._by_service_samplers = {}
        # default sampler used for unknown service/env pairs
        self._by_service_samplers[_default_key] = RateSampler(sample_rate)
    def _set_sample_rate_by_key(self, sample_rate, key):
        # update an existing sampler in place, or lazily create one
        with self._lock:
            if key in self._by_service_samplers:
                self._by_service_samplers[key].set_sample_rate(sample_rate)
            else:
                self._by_service_samplers[key] = RateSampler(sample_rate)
    def set_sample_rate(self, sample_rate, service="", env=""):
        self._set_sample_rate_by_key(sample_rate, _key(service, env))
    def sample(self, span):
        # NOTE(review): assumes ``span.tracer()`` returns a tracer exposing a
        # ``tags`` dict -- confirm against the Span implementation
        tags = span.tracer().tags
        env = tags['env'] if 'env' in tags else None
        key = _key(span.service, env)
        with self._lock:
            if key in self._by_service_samplers:
                return self._by_service_samplers[key].sample(span)
            # unknown service/env pair: fall back to the default sampler
            return self._by_service_samplers[_default_key].sample(span)
    def set_sample_rate_by_service(self, rate_by_service):
        # apply the rates pushed by the agent, then drop any stale keys
        # (except the default) that the agent no longer reports
        for key, sample_rate in iteritems(rate_by_service):
            self._set_sample_rate_by_key(sample_rate, key)
        with self._lock:
            for key in list(self._by_service_samplers):
                if key not in rate_by_service and key != _default_key:
                    del self._by_service_samplers[key]
import logging
import ddtrace
from ddtrace import config
from .constants import DEFAULT_SERVICE
from ...ext import http
from ...compat import parse
from ...propagation.http import HTTPPropagator
log = logging.getLogger(__name__)
def _extract_service_name(session, span, netloc=None):
    """Extracts the right service name based on the following logic:
    - `requests` is the default service name
    - users can change it via `session.service_name = 'clients'`
    - if the Span doesn't have a parent, use the set service name
      or fallback to the default
    - if the Span has a parent, use the set service name or the
      parent service value if the set service name is the default
    - if `split_by_domain` is used, always override users settings
      and use the network location as a service name

    The priority can be represented as:
    Updated service name > parent service name > default to `requests`.
    """
    cfg = config.get_from(session)
    # domain splitting always wins when a network location is available
    if cfg['split_by_domain'] and netloc:
        return netloc
    service_name = cfg['service_name']
    # inherit the parent's service only when the user kept the default name
    parent = span._parent
    if service_name == DEFAULT_SERVICE and parent is not None and parent.service is not None:
        return parent.service
    return service_name
def _wrap_request(func, instance, args, kwargs):
    """Trace the `Session.request` instance method.

    ``wrapt``-style wrapper: ``func`` is the original bound method,
    ``instance`` the ``Session``, and ``args``/``kwargs`` the call arguments.
    Returns whatever the wrapped request returns.
    """
    # TODO[manu]: we already offer a way to provide the Global Tracer
    # and is ddtrace.tracer; it's used only inside our tests and can
    # be easily changed by providing a TracingTestCase that sets common
    # tracing functionalities.
    tracer = getattr(instance, 'datadog_tracer', ddtrace.tracer)
    # skip if tracing is not enabled
    if not tracer.enabled:
        return func(*args, **kwargs)
    # positional fallbacks mirror Session.request(method, url, ...)
    method = kwargs.get('method') or args[0]
    url = kwargs.get('url') or args[1]
    headers = kwargs.get('headers', {})
    parsed_uri = parse.urlparse(url)
    with tracer.trace("requests.request", span_type=http.TYPE) as span:
        # update the span service name before doing any action
        span.service = _extract_service_name(instance, span, netloc=parsed_uri.netloc)
        # propagate distributed tracing headers
        if config.get_from(instance).get('distributed_tracing'):
            propagator = HTTPPropagator()
            propagator.inject(span.context, headers)
            kwargs['headers'] = headers
        response = None
        try:
            response = func(*args, **kwargs)
            return response
        finally:
            # tags are set in ``finally`` so they are recorded even when the
            # request raises; tagging errors are logged, never propagated
            try:
                span.set_tag(http.METHOD, method.upper())
                span.set_tag(http.URL, url)
                if response is not None:
                    span.set_tag(http.STATUS_CODE, response.status_code)
                    # `span.error` must be an integer
                    span.error = int(500 <= response.status_code)
            except Exception:
                log.debug("requests: error adding tags", exc_info=True)
import asyncio
import ddtrace
from asyncio.base_events import BaseEventLoop
from .provider import CONTEXT_ATTR
from ...context import Context
# keep a reference to the unpatched ``create_task`` so it can be restored
_orig_create_task = BaseEventLoop.create_task
def set_call_context(task, ctx):
    """
    Attach the given ``Context`` to ``task`` so a trace can be shared among
    different tasks.

    Kept for backward-compatibility; prefer the ``AsyncioContextProvider``
    API to set the currently active ``Context``.
    """
    setattr(task, CONTEXT_ATTR, ctx)
def ensure_future(coro_or_future, *, loop=None, tracer=None):  # noqa: E999
    """
    Wrapper around ``asyncio.ensure_future()`` that attaches the currently
    active ``Context`` to the newly created Task, so the Trace list is
    preserved across tasks.
    """
    active_tracer = tracer or ddtrace.tracer
    ctx = active_tracer.get_call_context()
    task = asyncio.ensure_future(coro_or_future, loop=loop)
    set_call_context(task, ctx)
    return task
def run_in_executor(loop, executor, func, *args, tracer=None):
    """
    Wrapper for the loop.run_in_executor() function that
    sets a context to the newly created Thread. If the current
    task has a Context, it will be attached as an empty Context
    with the current_span activated to inherit the ``trace_id``
    and the ``parent_id``.

    Because the Executor can run the Thread immediately or after the
    coroutine is executed, we may have two different scenarios:
    * the Context is copied in the new Thread and the trace is sent twice
    * the coroutine flushes the Context and when the Thread copies the
      Context it is already empty (so it will be a root Span)

    To support both situations, we create a new Context that knows only what was
    the latest active Span when the new thread was created. In this new thread,
    we fallback to the thread-local ``Context`` storage.
    """
    tracer = tracer or ddtrace.tracer
    # fresh Context seeded only with the currently active span; see the
    # docstring for why copying the whole current Context is unsafe here
    ctx = Context()
    current_ctx = tracer.get_call_context()
    ctx._current_span = current_ctx._current_span
    # prepare the future using an executor wrapper
    future = loop.run_in_executor(executor, _wrap_executor, func, args, tracer, ctx)
    return future
def _wrap_executor(fn, args, tracer, ctx):
"""
This function is executed in the newly created Thread so the right
``Context`` can be set in the thread-local storage. This operation
is safe because the ``Context`` class is thread-safe and can be
updated concurrently.
"""
# the AsyncioContextProvider knows that this is a new thread
# so it is legit to pass the Context in the thread-local storage;
# fn() will be executed outside the asyncio loop as a synchronous code
tracer.context_provider.activate(ctx)
return fn(*args)
def create_task(*args, **kwargs):
    """Spawn a task whose ``Context`` inherits the `trace_id` and the
    `parent_id` from the currently active one, when available.
    """
    event_loop = asyncio.get_event_loop()
    return _wrapped_create_task(event_loop.create_task, None, args, kwargs)
def _wrapped_create_task(wrapped, instance, args, kwargs):
    """Wrapper for ``create_task(coro)`` that propagates the current active
    ``Context`` to the new ``Task``. This function is useful to connect traces
    of detached executions.

    Note: we can't just link the task contexts due to the following scenario:
    * begin task A
    * task A starts task B1..B10
    * finish task B1-B9 (B10 still on trace stack)
    * task A starts task C
    * now task C gets parented to task B10 since it's still on the stack,
      however was not actually triggered by B10
    """
    new_task = wrapped(*args, **kwargs)
    # NOTE(review): ``asyncio.Task.current_task()`` is deprecated since
    # Python 3.7 and removed in 3.12 -- presumably this targets older
    # runtimes; confirm before upgrading the interpreter.
    current_task = asyncio.Task.current_task()
    ctx = getattr(current_task, CONTEXT_ATTR, None)
    if ctx:
        # current task has a context, so parent a new context to the base context
        new_ctx = Context(
            trace_id=ctx.trace_id,
            span_id=ctx.span_id,
            sampling_priority=ctx.sampling_priority,
        )
        set_call_context(new_task, new_ctx)
    return new_task
import wrapt
import inspect
from .deprecation import deprecated
def unwrap(obj, attr):
    """Restore ``obj.attr`` to its original, unpatched value.

    No-op when the attribute is missing, falsy, or not a ``wrapt`` proxy.
    """
    wrapped = getattr(obj, attr, None)
    if wrapped and isinstance(wrapped, wrapt.ObjectProxy) and hasattr(wrapped, '__wrapped__'):
        setattr(obj, attr, wrapped.__wrapped__)
@deprecated('`wrapt` library is used instead', version='1.0.0')
def safe_patch(patchable, key, patch_func, service, meta, tracer):
    """ takes patch_func (signature: takes the orig_method that is
    wrapped in the monkey patch == UNBOUND + service and meta) and
    attach the patched result to patchable at patchable.key

    - if this is the module/class we can rely on methods being unbound, and just have to
      update the __dict__
    - if this is an instance, we have to unbind the current and rebind our
      patched method
    - If patchable is an instance and if we've already patched at the module/class level
      then patchable[key] contains an already patched command!
      To workaround this, check if patchable or patchable.__class__ are _dogtraced
      If is isn't, nothing to worry about, patch the key as usual
      But if it is, search for a "__dd_orig_{key}" method on the class, which is
      the original unpatched method we wish to trace.
    """
    def _get_original_method(thing, key):
        # resolve the genuinely-unpatched callable, stashing it under
        # ``__dd_orig_<key>`` the first time so double-patching is avoided
        orig = None
        if hasattr(thing, '_dogtraced'):
            # Search for original method
            orig = getattr(thing, "__dd_orig_{}".format(key), None)
        else:
            orig = getattr(thing, key)
            # Set it for the next time we attempt to patch `thing`
            setattr(thing, "__dd_orig_{}".format(key), orig)
        return orig
    if inspect.isclass(patchable) or inspect.ismodule(patchable):
        orig = _get_original_method(patchable, key)
        if not orig:
            # Should never happen
            return
    elif hasattr(patchable, '__class__'):
        # instance: look the original up on its class
        orig = _get_original_method(patchable.__class__, key)
        if not orig:
            # Should never happen
            return
    else:
        return
    dest = patch_func(orig, service, meta, tracer)
    if inspect.isclass(patchable) or inspect.ismodule(patchable):
        setattr(patchable, key, dest)
    elif hasattr(patchable, '__class__'):
        # rebind the patched function onto the instance so it shadows the
        # (possibly already patched) class attribute
        setattr(patchable, key, dest.__get__(patchable, patchable.__class__))
import warnings
from functools import wraps
class RemovedInDDTrace10Warning(DeprecationWarning):
    """Warning category for APIs scheduled for removal in ddtrace 1.0."""
    pass
def format_message(name, message, version):
    """Message formatter to create `DeprecationWarning` messages
    such as:

        'fn' is deprecated and will be removed in future versions (1.0). use g

    :param name: name of the deprecated function
    :param message: extra guidance appended to the warning
    :param version: optional version in which the removal happens
    """
    # fixed grammar in the emitted message: "will be remove" -> "will be removed"
    return "'{}' is deprecated and will be removed in future versions{}. {}".format(
        name,
        ' ({})'.format(version) if version else '',
        message,
    )
def warn(message, stacklevel=2):
    """Emit ``message`` as a ``RemovedInDDTrace10Warning`` deprecation."""
    warnings.warn(message, RemovedInDDTrace10Warning, stacklevel=stacklevel)
def deprecation(name='', message='', version=None):
    """Report a ``DeprecationWarning`` for *name*.

    Bear in mind that ``DeprecationWarning`` is ignored by default, so it is
    not visible in user logs unless the application is launched with::

        $ python -Wall script.py

    This approach is used by most of the frameworks, including Django
    (ref: https://docs.djangoproject.com/en/2.0/howto/upgrade-version/#resolving-deprecation-warnings)
    """
    warn(format_message(name, message, version), stacklevel=4)
def deprecated(message='', version=None):
    """Decorator reporting a ``DeprecationWarning`` on every call.

    Bear in mind that ``DeprecationWarning`` is ignored by default, so it is
    not visible in user logs unless the application is launched with::

        $ python -Wall script.py

    This approach is used by most of the frameworks, including Django
    (ref: https://docs.djangoproject.com/en/2.0/howto/upgrade-version/#resolving-deprecation-warnings)
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # warn on each invocation, pointing at the caller's frame
            warn(format_message(func.__name__, message, version), stacklevel=3)
            return func(*args, **kwargs)
        return wrapper
    return decorator
import numpy
import numpy as np
import pandas as pd
import pkg_resources
from pyproj import Geod
g = Geod(ellps="WGS84")
def load_test_data():
    """Return a dataframe with a test ascent.

    Contains all the necessary data.
    """
    # pkg_resources yields a stream-like object which pandas can read
    # directly; call stream.read() if the raw bytes are needed instead
    data_stream = pkg_resources.resource_stream(__name__, 'data/test_data.csv')
    return pd.read_csv(data_stream, encoding='latin-1')
def calc_height(t, p):
'''
t
isotherm height formula
z = -R*t0/g * ln(p/p0)
z = -287.053*t0/9.80665 * ln(p/p0)
polytrop height forumula
z = t0/L * ((p/p0)**(-L*R/g) -1)
L = −0.0065 K/m
R = 287.053 J/(kg K)
g = 9.80665 m/s2
z = t0/−0.0065 * ((p/p0)**(0.0065*287.053/9.80665) -1)
international height formula
z = 288.15/0.0065 * (1- (p/1013.25)**(1/5.255))
'''
# from: https://www.cesm.ucar.edu/models/cesm1.1/cesm/cesmBbrowser/html_code/cam/tropopause.F90.html
# https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2003GL018240
SHR_CONST_AVOGAD = 6.02214e26
SHR_CONST_BOLTZ = 1.38065e-23
SHR_CONST_MWDAIR = 28.966
SHR_CONST_RGAS = SHR_CONST_AVOGAD*SHR_CONST_BOLTZ
SHR_CONST_RDAIR = SHR_CONST_RGAS/SHR_CONST_MWDAIR
rair = SHR_CONST_RDAIR
SHR_CONST_G = 9.80616
gravit = SHR_CONST_G
SHR_CONST_CPDAIR = 1.00464e3
cappa = (SHR_CONST_RGAS/SHR_CONST_MWDAIR)/SHR_CONST_CPDAIR
cnst_kap = cappa
cnst_faktor = -gravit/rair
cnst_ka1 = cnst_kap - 1.
z = []
for i in range(len(t)):
if i == 0:
L = -0.0065
height = t[i]/L * ((p[i]/101325)**(-L*287.053/9.80665) -1)
z.append(height)
else:
# dt/dz
pmk= .5 * (p[i-1]**cnst_kap+p[i]**cnst_kap)
pm = pmk**(1/cnst_kap)
a = (t[i-1]-t[i])/(p[i-1]**cnst_kap-p[i]**cnst_kap)
b = t[i]-(a*p[i]**cnst_kap)
tm = a * pmk + b
dtdp = a * cnst_kap * (pm**cnst_ka1)
L = cnst_faktor*dtdp*pm/tm # dtdz
if L == 0:
L = -0.001
height = t[i-1]/L * ((p[i]/p[i-1])**(-L*287.053/9.80665) -1)
if np.isnan(height):
z.append(z[-1])
else:
z.append(z[-1] + height)
return z
def haversine(lon1, lat1, lon2, lat2):
    """
    Great circle distance in kilometers between two points on the earth
    (coordinates given in decimal degrees).

    lat1/lon1  starting point [°] [float]
    lat2/lon2  end point [°] [float]

    returns:
        distance [km]
    """
    # convert decimal degrees to radians
    lon1, lat1, lon2, lat2 = (np.radians(value) for value in (lon1, lat1, lon2, lat2))
    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
    c = 2 * np.arcsin(np.sqrt(a))
    # mean earth radius in kilometers; use 3956 for miles
    return c * 6371
def inverse_haversine(lat, lon, distance, direction):
    '''
    Move a point a given distance due north or east on a spherical earth
    (inverse haversine calculation - point and distance to new point).

    lat        actual latitude [°] [float]
    lon        actual longitude [°] [float]
    distance   distance to move [km] [float]
    direction  direction to move ['NORTH', 'EAST']

    returns:
        new_latitude, new_longitude

    raises:
        ValueError for any direction other than 'NORTH' or 'EAST'
    '''
    # validate the direction up front; the old behavior of returning an
    # error *string* silently broke callers expecting a (lat, lon) tuple
    if direction == "NORTH":
        brng = numpy.radians(0)
    elif direction == "EAST":
        brng = numpy.radians(90)
    else:
        raise ValueError("not a valid direction: {!r}".format(direction))
    lat = numpy.radians(lat)
    lon = numpy.radians(lon)
    d = numpy.array(distance)
    r = 6371  # mean earth radius [km]
    return_lat = numpy.arcsin(numpy.sin(lat) * numpy.cos(d / r) + numpy.cos(lat) * numpy.sin(d / r) * numpy.cos(brng))
    return_lon = lon + numpy.arctan2(numpy.sin(brng) * numpy.sin(d / r) * numpy.cos(lat), numpy.cos(d / r) - numpy.sin(lat) * numpy.sin(return_lat))
    return numpy.degrees(return_lat), numpy.degrees(return_lon)
def transport(lat, lon, u_dist, v_dist, transport_type):
    """Dispatch to the spherical or WGS84-geodesic transport implementation."""
    mover = transport_sphere if transport_type == 'sphere' else transport_geod
    return mover(lat, lon, u_dist, v_dist)
def transport_sphere(lat, lon, u_dist, v_dist):
    '''
    Shift a position by two successive great-circle moves on a spherical earth.

    lat     actual latitude [°] [float]
    lon     actual longitude [°] [float]
    u_dist  longitudinal distance added to position [km] [float]
    v_dist  meridional distance added to position [km] [float]

    returns:
        new_latitude, new_longitude
    '''
    # eastward move first, then northward from the intermediate point
    lat_east, lon_east = inverse_haversine(lat, lon, u_dist, "EAST")
    return inverse_haversine(lat_east, lon_east, v_dist, "NORTH")
def transport_geod(lat, lon, u_dist, v_dist):
    '''
    Shift a position by two successive geodesic moves on the WGS84 ellipsoid.

    lat     actual latitude [°] [float]
    lon     actual longitude [°] [float]
    u_dist  longitudinal distance added to position [km] [float]
    v_dist  meridional distance added to position [km] [float]

    returns:
        new_latitude, new_longitude
    '''
    # pyproj works in meters; azimuth 90 = east, 0 = north
    lon_east, lat_east, _ = g.fwd(lons=lon, lats=lat, az=90., dist=u_dist*1000.0)
    lon_final, lat_final, _ = g.fwd(lons=lon_east, lats=lat_east, az=0., dist=v_dist*1000.0)
    return lat_final, lon_final
def trajectory(lat, lon, u, v, pressure, temperature, w_rs = 5.0, wind = 'mean', output='degree', transport_type='ellipsoid'):
    '''
    Main function to calculate radiosonde drift trajectories.

    lat             station latitude [°] [int]
    lon             station longitude [°] [int]
    u               eastward wind speed [m/s] [array - float]
    v               northward wind speed [m/s] [array - float]
    pressure        pressure for given levels, ascending, [Pa] [array - float]
    temperature     temperature [K] [array - float]
    w_rs            radio sonde rising speed [m/s] [float]
    wind            wind calculation option ['mean', 'upper', 'lower']
    output          displacement output unit ['degree', 'km']
    transport_type  distance calculation ['sphere', 'ellipsoid']

    returns:
        latitude displacement [output - array], longitude displacement [output - array],
        seconds since launch [s - array]

    NOTE(review): with output='km' the appended values appear to be
    per-layer displacements (new_lat/new_lon are overwritten each step),
    while output='degree' accumulates the position -- confirm this
    asymmetry is intended.
    '''
    # check if sorted correctly
    if pressure[0] < pressure[-1]:
        print("Please resort the input data - ascending order is necessary!")
        return None, None, None #, None, None
    z = calc_height(temperature, pressure) # m from K and Pa
    new_lat = lat
    new_lon = lon
    lat_displacement = [0.]
    lon_displacement = [0.]
    u_shear=[0.]
    v_shear=[0.]
    rts = [0]
    # walk the profile level by level, advecting the sonde with the chosen
    # wind estimate over the time needed to rise through each layer
    for i in range(len(z)):
        if i != 0:
            rising_time = (z[i]-z[i-1]) / w_rs
            rts.append(rts[-1] + rising_time)
            u_shear.append(u[i]-u[i-1])
            v_shear.append(v[i]-v[i-1])
            if wind == 'mean':
                if output == 'degree':
                    new_lat, new_lon = transport(new_lat, new_lon, (np.mean([u[i],u[i-1]]) * rising_time)/1000. , (np.mean([v[i],v[i-1]]) * rising_time)/1000., transport_type)
                elif output == 'km':
                    new_lon = (np.mean([u[i],u[i-1]]) * rising_time)/1000.
                    new_lat = (np.mean([v[i],v[i-1]]) * rising_time)/1000.
            elif wind == 'upper':
                new_lat, new_lon = transport(new_lat, new_lon, (u[i] * rising_time)/1000., (v[i] * rising_time)/1000., transport_type)
            elif wind == 'lower':
                new_lat, new_lon = transport(new_lat, new_lon, (u[i-1] * rising_time)/1000., (v[i-1] * rising_time)/1000., transport_type)
            else:
                # invalid option: position is left unchanged for this layer
                print('error: not a valid wind request')
            if output == 'degree':
                lat_displacement.append(new_lat - lat)
                lon_displacement.append(new_lon - lon)
            elif output == 'km':
                lat_displacement.append(new_lat)
                lon_displacement.append(new_lon)
    return lat_displacement, lon_displacement, rts # , np.array(u_shear), np.array(v_shear),
import numpy
import numpy as np
import pandas as pd
import pkg_resources
from pyproj import Geod
g = Geod(ellps="WGS84")
def load_test_data():
    """Return a dataframe with a test ascent.

    Contains all the necessary data.
    """
    # pkg_resources yields a stream-like object which pandas can read
    # directly; call stream.read() if the raw bytes are needed instead
    data_stream = pkg_resources.resource_stream(__name__, 'data/test_data.csv')
    return pd.read_csv(data_stream, encoding='latin-1')
def calc_height(t, p):
'''
t
isotherm height formula
z = -R*t0/g * ln(p/p0)
z = -287.053*t0/9.80665 * ln(p/p0)
polytrop height forumula
z = t0/L * ((p/p0)**(-L*R/g) -1)
L = −0.0065 K/m
R = 287.053 J/(kg K)
g = 9.80665 m/s2
z = t0/−0.0065 * ((p/p0)**(0.0065*287.053/9.80665) -1)
international height formula
z = 288.15/0.0065 * (1- (p/1013.25)**(1/5.255))
'''
# from: https://www.cesm.ucar.edu/models/cesm1.1/cesm/cesmBbrowser/html_code/cam/tropopause.F90.html
# https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2003GL018240
SHR_CONST_AVOGAD = 6.02214e26
SHR_CONST_BOLTZ = 1.38065e-23
SHR_CONST_MWDAIR = 28.966
SHR_CONST_RGAS = SHR_CONST_AVOGAD*SHR_CONST_BOLTZ
SHR_CONST_RDAIR = SHR_CONST_RGAS/SHR_CONST_MWDAIR
rair = SHR_CONST_RDAIR
SHR_CONST_G = 9.80616
gravit = SHR_CONST_G
SHR_CONST_CPDAIR = 1.00464e3
cappa = (SHR_CONST_RGAS/SHR_CONST_MWDAIR)/SHR_CONST_CPDAIR
cnst_kap = cappa
cnst_faktor = -gravit/rair
cnst_ka1 = cnst_kap - 1.
z = []
for i in range(len(t)):
if i == 0:
L = -0.0065
height = t[i]/L * ((p[i]/101325)**(-L*287.053/9.80665) -1)
z.append(height)
else:
# dt/dz
pmk= .5 * (p[i-1]**cnst_kap+p[i]**cnst_kap)
pm = pmk**(1/cnst_kap)
a = (t[i-1]-t[i])/(p[i-1]**cnst_kap-p[i]**cnst_kap)
b = t[i]-(a*p[i]**cnst_kap)
tm = a * pmk + b
dtdp = a * cnst_kap * (pm**cnst_ka1)
L = cnst_faktor*dtdp*pm/tm # dtdz
if L == 0:
L = -0.001
height = t[i-1]/L * ((p[i]/p[i-1])**(-L*287.053/9.80665) -1)
if np.isnan(height):
z.append(z[-1])
else:
z.append(z[-1] + height)
return z
def haversine(lon1, lat1, lon2, lat2):
    """
    Great circle distance in kilometers between two points on the earth
    (coordinates given in decimal degrees).

    lat1/lon1  starting point [°] [float]
    lat2/lon2  end point [°] [float]

    returns:
        distance [km]
    """
    # convert decimal degrees to radians
    lon1, lat1, lon2, lat2 = (np.radians(value) for value in (lon1, lat1, lon2, lat2))
    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
    c = 2 * np.arcsin(np.sqrt(a))
    # mean earth radius in kilometers; use 3956 for miles
    return c * 6371
def inverse_haversine(lat, lon, distance, direction):
    '''
    Move a point a given distance due north or east on a spherical earth
    (inverse haversine calculation - point and distance to new point).

    lat        actual latitude [°] [float]
    lon        actual longitude [°] [float]
    distance   distance to move [km] [float]
    direction  direction to move ['NORTH', 'EAST']

    returns:
        new_latitude, new_longitude

    raises:
        ValueError for any direction other than 'NORTH' or 'EAST'
    '''
    # validate the direction up front; the old behavior of returning an
    # error *string* silently broke callers expecting a (lat, lon) tuple
    if direction == "NORTH":
        brng = numpy.radians(0)
    elif direction == "EAST":
        brng = numpy.radians(90)
    else:
        raise ValueError("not a valid direction: {!r}".format(direction))
    lat = numpy.radians(lat)
    lon = numpy.radians(lon)
    d = numpy.array(distance)
    r = 6371  # mean earth radius [km]
    return_lat = numpy.arcsin(numpy.sin(lat) * numpy.cos(d / r) + numpy.cos(lat) * numpy.sin(d / r) * numpy.cos(brng))
    return_lon = lon + numpy.arctan2(numpy.sin(brng) * numpy.sin(d / r) * numpy.cos(lat), numpy.cos(d / r) - numpy.sin(lat) * numpy.sin(return_lat))
    return numpy.degrees(return_lat), numpy.degrees(return_lon)
def transport(lat, lon, u_dist, v_dist, transport_type):
    """Dispatch to the spherical or WGS84-geodesic transport implementation."""
    mover = transport_sphere if transport_type == 'sphere' else transport_geod
    return mover(lat, lon, u_dist, v_dist)
def transport_sphere(lat, lon, u_dist, v_dist):
    '''
    Shift a position by two successive great-circle moves on a spherical earth.

    lat     actual latitude [°] [float]
    lon     actual longitude [°] [float]
    u_dist  longitudinal distance added to position [km] [float]
    v_dist  meridional distance added to position [km] [float]

    returns:
        new_latitude, new_longitude
    '''
    # eastward move first, then northward from the intermediate point
    lat_east, lon_east = inverse_haversine(lat, lon, u_dist, "EAST")
    return inverse_haversine(lat_east, lon_east, v_dist, "NORTH")
def transport_geod(lat, lon, u_dist, v_dist):
    '''
    Shift a position by two successive geodesic moves on the WGS84 ellipsoid.

    lat     actual latitude [°] [float]
    lon     actual longitude [°] [float]
    u_dist  longitudinal distance added to position [km] [float]
    v_dist  meridional distance added to position [km] [float]

    returns:
        new_latitude, new_longitude
    '''
    # pyproj works in meters; azimuth 90 = east, 0 = north
    lon_east, lat_east, _ = g.fwd(lons=lon, lats=lat, az=90., dist=u_dist*1000.0)
    lon_final, lat_final, _ = g.fwd(lons=lon_east, lats=lat_east, az=0., dist=v_dist*1000.0)
    return lat_final, lon_final
def trajectory(lat, lon, u, v, pressure, temperature, w_rs = 5.0, wind = 'mean', output='degree', transport_type='ellipsoid'):
    '''
    Main function to calculate radiosonde drift trajectories.

    lat             station latitude [°] [int]
    lon             station longitude [°] [int]
    u               eastward wind speed [m/s] [array - float]
    v               northward wind speed [m/s] [array - float]
    pressure        pressure for given levels, ascending, [Pa] [array - float]
    temperature     temperature [K] [array - float]
    w_rs            radio sonde rising speed [m/s] [float]
    wind            wind calculation option ['mean', 'upper', 'lower']
    output          displacement output unit ['degree', 'km']
    transport_type  distance calculation ['sphere', 'ellipsoid']

    returns:
        latitude displacement [output - array], longitude displacement [output - array],
        seconds since launch [s - array]

    NOTE(review): with output='km' the appended values appear to be
    per-layer displacements (new_lat/new_lon are overwritten each step),
    while output='degree' accumulates the position -- confirm this
    asymmetry is intended.
    '''
    # check if sorted correctly
    if pressure[0] < pressure[-1]:
        print("Please resort the input data - ascending order is necessary!")
        return None, None, None #, None, None
    z = calc_height(temperature, pressure) # m from K and Pa
    new_lat = lat
    new_lon = lon
    lat_displacement = [0.]
    lon_displacement = [0.]
    u_shear=[0.]
    v_shear=[0.]
    rts = [0]
    # walk the profile level by level, advecting the sonde with the chosen
    # wind estimate over the time needed to rise through each layer
    for i in range(len(z)):
        if i != 0:
            rising_time = (z[i]-z[i-1]) / w_rs
            rts.append(rts[-1] + rising_time)
            u_shear.append(u[i]-u[i-1])
            v_shear.append(v[i]-v[i-1])
            if wind == 'mean':
                if output == 'degree':
                    new_lat, new_lon = transport(new_lat, new_lon, (np.mean([u[i],u[i-1]]) * rising_time)/1000. , (np.mean([v[i],v[i-1]]) * rising_time)/1000., transport_type)
                elif output == 'km':
                    new_lon = (np.mean([u[i],u[i-1]]) * rising_time)/1000.
                    new_lat = (np.mean([v[i],v[i-1]]) * rising_time)/1000.
            elif wind == 'upper':
                new_lat, new_lon = transport(new_lat, new_lon, (u[i] * rising_time)/1000., (v[i] * rising_time)/1000., transport_type)
            elif wind == 'lower':
                new_lat, new_lon = transport(new_lat, new_lon, (u[i-1] * rising_time)/1000., (v[i-1] * rising_time)/1000., transport_type)
            else:
                # invalid option: position is left unchanged for this layer
                print('error: not a valid wind request')
            if output == 'degree':
                lat_displacement.append(new_lat - lat)
                lon_displacement.append(new_lon - lon)
            elif output == 'km':
                lat_displacement.append(new_lat)
                lon_displacement.append(new_lon)
    return lat_displacement, lon_displacement, rts # , np.array(u_shear), np.array(v_shear),
from __future__ import annotations
import resource
import time
from collections.abc import Callable
from typing import Any
from fastapi import FastAPI
from starlette.middleware.base import RequestResponseEndpoint
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Match, Mount
from starlette.types import Scope
TIMER_ATTRIBUTE = '__fastapi_utils_timer__'
def add_timing_middleware(
    app: FastAPI, record: Callable[[str], None] | None = None, prefix: str = '', exclude: str | None = None
) -> None:
    """
    Install an HTTP middleware on ``app`` that records per-request timing
    metrics through ``record`` (typically ``logger.info``; defaults to print).

    ``prefix`` is used when generating route names. If ``exclude`` is given,
    any generated metric name containing it as an exact substring is not
    logged -- an easy way to disable logging for specific routes.
    (The ``exclude`` check will probably be replaced by a regex match at
    some point in the future. PR welcome!)
    """
    namer = _MetricNamer(prefix=prefix, app=app)

    @app.middleware('http')
    async def timing_middleware(request: Request, call_next: RequestResponseEndpoint) -> Response:
        with _TimingStats(namer(request.scope), record=record, exclude=exclude) as timer:
            # expose the timer so record_timing() can emit intermediate splits
            setattr(request.state, TIMER_ATTRIBUTE, timer)
            return await call_next(request)
def record_timing(request: Request, note: str | None = None) -> None:
    """
    Emit the elapsed time of the in-flight request, optionally labelled with
    ``note`` -- useful to locate which part of a request is the bottleneck.

    The request must come from a FastAPI app instrumented with
    ``rs_fastapi_utils.timing.add_timing_middleware``; otherwise a
    ``ValueError`` is raised.
    """
    timer = getattr(request.state, TIMER_ATTRIBUTE, None)
    if timer is None:
        raise ValueError('No timer present on request')
    assert isinstance(timer, _TimingStats)
    timer.emit(note)
class _TimingStats:
    """
    This class tracks and records endpoint timing data.
    Should be used as a context manager; on exit, timing stats will be emitted.

    name:
        The name to include with the recorded timing data
    record:
        The callable to call on generated messages. Defaults to `print`, but typically
        something like `logger.info` for a `logging.Logger` instance would be preferable.
    exclude:
        An optional string; if it is not None and occurs inside `name`, no stats will be emitted
    """
    def __init__(
        self, name: str | None = None, record: Callable[[str], None] | None = None, exclude: str | None = None
    ) -> None:
        self.name = name
        self.record = record or print
        self.start_time: float = 0
        self.start_cpu_time: float = 0
        self.end_cpu_time: float = 0
        self.end_time: float = 0
        self.silent: bool = False
        # suppress all output when the metric name matches `exclude`
        if self.name is not None and exclude is not None and (exclude in self.name):
            self.silent = True
    def start(self) -> None:
        # capture wall-clock and CPU baselines
        self.start_time = time.time()
        self.start_cpu_time = _get_cpu_time()
    def take_split(self) -> None:
        # refresh the end markers; elapsed properties are end - start
        self.end_time = time.time()
        self.end_cpu_time = _get_cpu_time()
    @property
    def time(self) -> float:
        # wall-clock seconds between start() and the last take_split()
        return self.end_time - self.start_time
    @property
    def cpu_time(self) -> float:
        # CPU seconds between start() and the last take_split()
        return self.end_cpu_time - self.start_cpu_time
    def __enter__(self) -> _TimingStats:
        self.start()
        return self
    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        # emit on exit regardless of whether the body raised
        self.emit()
    def emit(self, note: str | None = None) -> None:
        """
        Emit timing information, optionally including a specified note
        """
        if not self.silent:
            # take a fresh split so repeated emits show cumulative elapsed time
            self.take_split()
            cpu_ms = 1000 * self.cpu_time
            wall_ms = 1000 * self.time
            message = f'TIMING: Wall: {wall_ms:6.1f}ms | CPU: {cpu_ms:6.1f}ms | {self.name}'
            if note is not None:
                message += f' ({note})'
            self.record(message)
class _MetricNamer:
    """
    Generates the route "name" used when logging timing records.

    If the matched route has `endpoint` and `name` attributes, the endpoint's
    module and the route's name are combined (with an optional prefix, e.g. to
    distinguish between multiple mounted ASGI apps). By default FastAPI names a
    route after the `__name__` of its function (or type, for a callable class
    instance). For example, with prefix == "custom", a function `read_item` in
    module `app.crud` gets the name `custom.app.crud.read_item`; with an empty
    prefix the result is just "app.crud.read_item".

    For starlette.routing.Mount instances, the type name of `route.app` is used
    in a slightly different format. Any other route (or no match at all) falls
    back to the raw request path.
    """

    def __init__(self, prefix: str, app: FastAPI):
        self.prefix = f'{prefix}.' if prefix else prefix
        self.app = app

    def __call__(self, scope: Scope) -> str:
        """
        Generates the actual name to use when logging timing metrics for a specified ASGI Scope
        """
        matched = next(
            (r for r in self.app.router.routes if r.matches(scope)[0] == Match.FULL),
            None,
        )
        if hasattr(matched, 'endpoint') and hasattr(matched, 'name'):
            return f'{self.prefix}{matched.endpoint.__module__}.{matched.name}'  # type: ignore
        if isinstance(matched, Mount):
            return f'{type(matched.app).__name__}<{matched.name!r}>'
        return str(f"<Path: {scope['path']}>")
def _get_cpu_time() -> float:
    """
    Generates the cpu time to report. Adds the user and system time, following the implementation from timing-asgi
    """
    # getrusage returns a struct whose first two fields are ru_utime (user
    # time) and ru_stime (system time), both expressed in seconds.
    resources = resource.getrusage(resource.RUSAGE_SELF)
    # add up user time (ru_utime) and system time (ru_stime)
    return resources[0] + resources[1] | /rs_fastapi_utils-0.4.0-py3-none-any.whl/rs_fastapi_utils/timing.py | 0.923644 | 0.29 | timing.py | pypi |
from __future__ import annotations
from functools import lru_cache
from typing import Any
from pydantic import BaseSettings
class APISettings(BaseSettings):
    """
    Environment-driven configuration for a FastAPI instance.

    Every attribute may be overridden either by passing a value to the
    initializer or by setting the corresponding environment variable:
    attribute `xxx_yyy` maps to `API_XXX_YYY`, so e.g. `API_OPENAPI_PREFIX`
    overrides `openapi_prefix`. Assignments after construction are validated
    too, so runtime modifications keep the declared types.
    """

    # fastapi.applications.FastAPI initializer kwargs
    debug: bool = False
    docs_url: str = '/docs'
    openapi_prefix: str = ''
    openapi_url: str = '/openapi.json'
    redoc_url: str = '/redoc'
    title: str = 'FastAPI'
    version: str = '0.1.0'
    # Custom settings
    disable_docs: bool = False

    @property
    def fastapi_kwargs(self) -> dict[str, Any]:
        """
        Keyword arguments commonly used to initialize a FastAPI instance.

        When `disable_docs` is True, the docs-related URLs are nulled out,
        which prevents the spec and docs pages from being published.
        """
        kwargs: dict[str, Any] = {
            'debug': self.debug,
            'docs_url': self.docs_url,
            'openapi_prefix': self.openapi_prefix,
            'openapi_url': self.openapi_url,
            'redoc_url': self.redoc_url,
            'title': self.title,
            'version': self.version,
        }
        if self.disable_docs:
            for key in ('docs_url', 'openapi_url', 'redoc_url'):
                kwargs[key] = None
        return kwargs

    class Config:
        env_prefix = 'api_'
        validate_assignment = True
@lru_cache()
def get_api_settings() -> APISettings:
    """
    This function returns a cached instance of the APISettings object.
    Caching is used to prevent re-reading the environment every time the API settings are used in an endpoint.
    If you want to change an environment variable and reset the cache (e.g., during testing), this can be done
    using the `lru_cache` instance method `get_api_settings.cache_clear()`.
    """
    # The environment is read exactly once; later calls return the memoized instance.
    return APISettings() | /rs_fastapi_utils-0.4.0-py3-none-any.whl/rs_fastapi_utils/api_settings.py | 0.891434 | 0.321353 | api_settings.py | pypi |
from __future__ import annotations
from collections.abc import Iterator
from contextlib import contextmanager
import sqlalchemy as sa
from sqlalchemy.orm import Session
class FastAPISessionMaker:
    """
    Manages a lazily-created, cached sqlalchemy engine and sessionmaker.

    Intended for creating ORM sessions that FastAPI injects into endpoint
    functions via dependency injection.
    """

    def __init__(self, database_uri: str):
        """
        `database_uri` should be any sqlalchemy-compatible database URI.
        In particular, `sqlalchemy.create_engine(database_uri)` should work to create an engine.
        Typically this looks like "<scheme>://<user>:<password>@<host>:<port>/<database>",
        e.g. "postgresql://db_user:password@db:5432/app".
        """
        self.database_uri = database_uri
        self._cached_engine: sa.engine.Engine | None = None
        self._cached_sessionmaker: sa.orm.sessionmaker | None = None

    @property
    def cached_engine(self) -> sa.engine.Engine:
        """Lazily-created, cached engine for this instance's database_uri."""
        if self._cached_engine is None:
            self._cached_engine = self.get_new_engine()
        return self._cached_engine

    @property
    def cached_sessionmaker(self) -> sa.orm.sessionmaker:
        """Lazily-created, cached sessionmaker bound to the cached engine."""
        if self._cached_sessionmaker is None:
            self._cached_sessionmaker = self.get_new_sessionmaker(self.cached_engine)
        return self._cached_sessionmaker

    def get_new_engine(self) -> sa.engine.Engine:
        """Create a brand-new engine for this instance's database_uri (no caching)."""
        return get_engine(self.database_uri)

    def get_new_sessionmaker(self, engine: sa.engine.Engine | None) -> sa.orm.sessionmaker:
        """
        Create a new sessionmaker for `engine`, falling back to the
        (lazily-cached) instance engine when none is provided.
        """
        return get_sessionmaker_for_engine(engine or self.cached_engine)

    def get_db(self) -> Iterator[Session]:
        """
        Yield a sqlalchemy ORM session and clean it up once resumed after yielding.
        Can be used directly as a context-manager FastAPI dependency, or yielded
        from inside a separate dependency.
        """
        yield from _get_db(self.cached_sessionmaker)

    @contextmanager
    def context_session(self) -> Iterator[Session]:
        """
        Context-manager wrapped version of `get_db`, for obtaining a managed ORM
        session without relying on FastAPI's dependency injection:

            session_maker = FastAPISessionMaker(database_uri)
            with session_maker.context_session() as session:
                session.query(...)
                ...
        """
        yield from self.get_db()

    def reset_cache(self) -> None:
        """
        Drop the cached engine and sessionmaker; the next access to either
        cached property creates fresh ones.
        """
        self._cached_engine = None
        self._cached_sessionmaker = None
def get_engine(uri: str) -> sa.engine.Engine:
    """
    Create a sqlalchemy engine for `uri` with pool_pre_ping enabled.
    This function may be updated over time to reflect recommended engine
    configuration for use with FastAPI.
    """
    engine = sa.create_engine(uri, pool_pre_ping=True)
    return engine
def get_sessionmaker_for_engine(engine: sa.engine.Engine) -> sa.orm.sessionmaker:
    """
    Build a sessionmaker bound to `engine` with the recommended settings
    (no autocommit, no autoflush). May be updated over time to reflect
    recommended sessionmaker configuration for use with FastAPI.
    """
    return sa.orm.sessionmaker(bind=engine, autocommit=False, autoflush=False)
@contextmanager
def context_session(engine: sa.engine.Engine) -> Iterator[Session]:
    """
    Yield a managed session for the provided engine (commit on success,
    rollback on error, always closed).

    Usage mirrors `FastAPISessionMaker.context_session`, except the engine must
    be supplied. A fresh sessionmaker is created per call, so the method on
    FastAPISessionMaker may be preferable in performance-sensitive contexts.
    """
    yield from _get_db(get_sessionmaker_for_engine(engine))
def _get_db(sessionmaker: sa.orm.sessionmaker) -> Iterator[Session]:
    """
    A generator function that yields an ORM session using the provided sessionmaker, and cleans it up when resumed.
    """
    session = sessionmaker()
    try:
        yield session
        # Commit only when the caller's block completed without raising.
        session.commit()
    except Exception as exc:
        session.rollback()
        # NOTE(review): a bare `raise` would preserve the original traceback
        # exactly; `raise exc` is equivalent here but slightly less idiomatic.
        raise exc
    finally:
        # The session is always closed, whether the work committed or rolled back.
        session.close() | /rs_fastapi_utils-0.4.0-py3-none-any.whl/rs_fastapi_utils/session.py | 0.887951 | 0.240613 | session.py | pypi |
from __future__ import annotations
import inspect
from collections.abc import Callable
from typing import Any, TypeVar, get_type_hints
from fastapi import APIRouter, Depends
from pydantic.typing import is_classvar
from starlette.routing import Route, WebSocketRoute
# Generic type variable for the class passed through the `cbv` decorator.
T = TypeVar('T')
# Sentinel attribute marking classes already converted to class-based views.
CBV_CLASS_KEY = '__cbv_class__'
def cbv(router: APIRouter) -> Callable[[type[T]], type[T]]:
    """
    Return a decorator that converts the decorated class into a class-based
    view for the provided router.

    Any methods of the decorated class that were registered as endpoints on
    `router` become endpoints of the router; the first positional argument
    (typically `self`) is populated with an instance created through FastAPI's
    dependency injection.

    For more detail, review the documentation at
    https://rs-fastapi-utils.docs.rocshers.com/user-guide/class-based-views/#the-cbv-decorator
    """
    def wrap(cls: type[T]) -> type[T]:
        return _cbv(router, cls)
    return wrap
def _cbv(router: APIRouter, cls: type[T]) -> type[T]:
    """
    Re-register every route of `router` whose endpoint is a method of `cls`
    so that calling it properly injects an instance of `cls`.
    """
    _init_cbv(cls)
    methods = {func for _, func in inspect.getmembers(cls, inspect.isfunction)}
    cbv_router = APIRouter()
    # Iterate over a snapshot so removal during the loop is safe.
    for route in list(router.routes):
        if not isinstance(route, (Route, WebSocketRoute)):
            continue
        if route.endpoint not in methods:
            continue
        router.routes.remove(route)
        _update_cbv_route_endpoint_signature(cls, route)
        cbv_router.routes.append(route)
    router.include_router(cbv_router)
    return cls
def _init_cbv(cls: type[Any]) -> None:
    """
    Idempotently modify `cls` so it can act as a class-based view:

    * `__init__` is replaced with a version that stores class-annotated
      dependencies as instance attributes before delegating to the original.
    * `__signature__` is rewritten so FastAPI knows which arguments to inject.
    """
    if getattr(cls, CBV_CLASS_KEY, False):  # pragma: no cover
        return  # Already initialized
    original_init: Callable[..., Any] = cls.__init__
    signature = inspect.signature(original_init)
    # Drop `self` and any *args/**kwargs — FastAPI cannot inject variadics.
    parameters = [
        param
        for param in list(signature.parameters.values())[1:]
        if param.kind not in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
    ]
    dependency_names: list[str] = []
    for attr_name, hint in get_type_hints(cls).items():
        if is_classvar(hint):
            continue
        dependency_names.append(attr_name)
        # A class-level value becomes the default; otherwise the field is required.
        parameters.append(
            inspect.Parameter(
                name=attr_name,
                kind=inspect.Parameter.KEYWORD_ONLY,
                annotation=hint,
                default=getattr(cls, attr_name, Ellipsis),
            )
        )

    def new_init(self: Any, *args: Any, **kwargs: Any) -> None:
        # Pop injected dependencies onto the instance, then run the old initializer.
        for dep_name in dependency_names:
            setattr(self, dep_name, kwargs.pop(dep_name))
        original_init(self, *args, **kwargs)

    setattr(cls, '__signature__', signature.replace(parameters=parameters))
    setattr(cls, '__init__', new_init)
    setattr(cls, CBV_CLASS_KEY, True)
def _update_cbv_route_endpoint_signature(cls: type[Any], route: Route | WebSocketRoute) -> None:
    """
    Fixes the endpoint signature for a cbv route to ensure FastAPI performs dependency injection properly.
    """
    old_endpoint = route.endpoint
    old_signature = inspect.signature(old_endpoint)
    old_parameters: list[inspect.Parameter] = list(old_signature.parameters.values())
    # The first parameter (conventionally `self`) is defaulted to Depends(cls)
    # so FastAPI constructs and injects an instance of the view class.
    old_first_parameter = old_parameters[0]
    new_first_parameter = old_first_parameter.replace(default=Depends(cls))
    # Remaining parameters become keyword-only so FastAPI maps them by name.
    new_parameters = [new_first_parameter] + [
        parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY) for parameter in old_parameters[1:]
    ]
    new_signature = old_signature.replace(parameters=new_parameters)
    setattr(route.endpoint, '__signature__', new_signature) | /rs_fastapi_utils-0.4.0-py3-none-any.whl/rs_fastapi_utils/cbv.py | 0.850205 | 0.241176 | cbv.py | pypi |
from __future__ import annotations
import asyncio
import logging
from asyncio import ensure_future
from functools import wraps
from traceback import format_exception
from typing import Any, Callable, Coroutine, Union
from starlette.concurrency import run_in_threadpool
# Type aliases for the zero-argument callables accepted by `repeat_every`.
NoArgsNoReturnFuncT = Callable[[], None]
NoArgsNoReturnAsyncFuncT = Callable[[], Coroutine[Any, Any, None]]
# A decorator accepting either a sync or async no-arg function and returning the async wrapper.
NoArgsNoReturnDecorator = Callable[[Union[NoArgsNoReturnFuncT, NoArgsNoReturnAsyncFuncT]], NoArgsNoReturnAsyncFuncT]
def repeat_every(
    *,
    seconds: float,
    wait_first: bool = False,
    logger: logging.Logger | None = None,
    raise_exceptions: bool = False,
    max_repetitions: int | None = None,
) -> NoArgsNoReturnDecorator:
    """
    This function returns a decorator that modifies a function so it is periodically re-executed after its first call.
    The function it decorates should accept no arguments and return nothing. If necessary, this can be accomplished
    by using `functools.partial` or otherwise wrapping the target function prior to decoration.
    Parameters
    ----------
    seconds: float
        The number of seconds to wait between repeated calls
    wait_first: bool (default False)
        If True, the function will wait for a single period before the first call
    logger: Optional[logging.Logger] (default None)
        The logger to use to log any exceptions raised by calls to the decorated function.
        If not provided, exceptions will not be logged by this function (though they may be handled by the event loop).
    raise_exceptions: bool (default False)
        If True, errors raised by the decorated function will be raised to the event loop's exception handler.
        Note that if an error is raised, the repeated execution will stop.
        Otherwise, exceptions are just logged and the execution continues to repeat.
        See https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.set_exception_handler for more info.
    max_repetitions: Optional[int] (default None)
        The maximum number of times to call the repeated function. If `None`, the function is repeated forever.
    """
    def decorator(func: NoArgsNoReturnAsyncFuncT | NoArgsNoReturnFuncT) -> NoArgsNoReturnAsyncFuncT:
        """
        Converts the decorated function into a repeated, periodically-called version of itself.
        """
        # Detected once at decoration time; decides how the call is awaited below.
        is_coroutine = asyncio.iscoroutinefunction(func)

        @wraps(func)
        async def wrapped() -> None:
            repetitions = 0

            async def loop() -> None:
                nonlocal repetitions
                if wait_first:
                    await asyncio.sleep(seconds)
                while max_repetitions is None or repetitions < max_repetitions:
                    try:
                        if is_coroutine:
                            await func()  # type: ignore
                        else:
                            # Sync functions run in the threadpool so the event loop is not blocked.
                            await run_in_threadpool(func)
                        repetitions += 1
                    except Exception as exc:
                        if logger is not None:
                            formatted_exception = ''.join(format_exception(type(exc), exc, exc.__traceback__))
                            logger.error(formatted_exception)
                        if raise_exceptions:
                            # Re-raising stops the repetition; the event loop's handler sees the error.
                            raise exc
                    await asyncio.sleep(seconds)

            # Fire-and-forget: schedule the loop on the running event loop and return immediately.
            ensure_future(loop())

        return wrapped

    return decorator | /rs_fastapi_utils-0.4.0-py3-none-any.whl/rs_fastapi_utils/tasks.py | 0.916009 | 0.176069 | tasks.py | pypi |
import pyvisa as visa
import numpy as np
import pandas as pd
from warnings import warn
# Emitted at import time so downstream users see the deprecation immediately.
warn("rs_fsl is deprecated. Use pymeasure instead.", DeprecationWarning)
def read_csv(filename):
    """Read the x and y values of a spectrum-analyzer CSV export.

    The first 28 rows are skipped (instrument header metadata — TODO confirm
    against an actual export), columns are separated by semicolons, and only
    the first two columns are read.
    Returns a tuple of numpy arrays (x, y).
    """
    frame = pd.read_csv(
        filename, delimiter=";", skiprows=28, usecols=[0, 1], names=["x", "y"]
    )
    return frame.x.values, frame.y.values
def _to_numeric(string):
    """Convert a comma-separated instrument reply into a float or 1-D array.

    Single readings come back as a plain float; multi-point data as an array.
    """
    values = np.fromstring(string.rstrip(), sep=",")
    return values if len(values) > 1 else values[0]
class FSL:
def __init__(self, ip, announce_connection=False):
"""
Communication with the Rohde&Schwarz FSL spectrum analyzer via pyvisa. The IP
address can be found from the R&S FSL by pressing Setup, General Setup, Network
Address, IP Address and can be directly be specified with the `ip`
keyword.
"""
addr = "TCPIP0::{}::inst0::INSTR".format(ip)
rm = visa.ResourceManager()
self.instr = rm.open_resource(addr)
if announce_connection:
print("Successfully connected to {}".format(self.idn()))
# basic communication with device --------------------------------------------------
def idn(self):
"""Identification of instrument."""
return self.instr.query("*IDN?").rstrip()
def clear(self):
"""Reset status register"""
self.instr.write("*CLS")
def reset(self):
"""Reset instrument"""
self.instr.write("*RST")
# Set and get basic instrument settings --------------------------------------------
@property
def freq_span(self):
"""Frequency span in Hertz"""
return _to_numeric(self.instr.query("FREQ:SPAN?"))
@freq_span.setter
def freq_span(self, value):
self.instr.write("FREQ:SPAN {}".format(value))
@property
def freq_center(self):
"""Center frequency"""
return _to_numeric(self.instr.query("FREQ:CENT?"))
@freq_center.setter
def freq_center(self, value):
self.instr.write("FREQ:CENT {}".format(value))
@property
def freq_start(self):
"""Start frequency in Hertz"""
return _to_numeric(self.instr.query("FREQ:STAR?"))
@freq_start.setter
def freq_start(self, value):
self.instr.write("FREQ:STAR {}".format(value))
@property
def freq_stop(self):
"""Start frequency Hertz"""
return _to_numeric(self.instr.query("FREQ:STOP?"))
@freq_stop.setter
def freq_stop(self, value):
self.instr.write("FREQ:STOP {}".format(value))
@property
def attenuation(self):
"""Attenuation in dB"""
return _to_numeric(self.instr.query("INP:ATT?"))
@attenuation.setter
def attenuation(self, value):
self.instr.write("INP:ATT {}".format(value))
@property
def rbw(self):
"""resolution bandwidth in Hertz"""
return _to_numeric(self.instr.query("BAND:RES?"))
@rbw.setter
def rbw(self, value):
if type(value) is str and value.upper() == "AUTO":
self.instr.write("BAND:AUTO ON")
else:
self.instr.write("BAND:RES {}".format(value))
@property
def vbw(self):
"""video bandwidth in Hertz"""
return _to_numeric(self.instr.query("BAND:VID?"))
@vbw.setter
def vbw(self, value):
if type(value) is str and value.upper() == "AUTO":
self.instr.write("BAND:VID:AUTO ON")
else:
self.instr.write("BAND:VID {}".format(value))
@property
def sweep_time(self):
"""Sweep time in seconds"""
return _to_numeric(self.instr.query("SWE:TIME?"))
@sweep_time.setter
def sweep_time(self, value):
if type(value) is str and value.upper() == "AUTO":
self.instr.write("SWE:TIME:AUTO ON")
else:
self.instr.write("BAND:RES {}".format(value))
self.instr.write("SWE:TIME {}".format(value))
# Sweeping -------------------------------------------------------------------------
@property
def continuous_sweep(self):
"""Continuous (True) or single sweep (False)"""
return bool(_to_numeric(self.instr.query("INIT:CONT?")))
@continuous_sweep.setter
def continuous_sweep(self, on):
if on:
self.instr.write("INIT:CONT ON")
else:
self.instr.write("INIT:CONT OFF")
def single_sweep(self):
"""Performas a sweep with sync."""
self.instr.write("INIT; *WAI")
def continue_single_sweep(self):
"""Continues with single sweep with sync."""
self.instr.write("INIT:CONM; *WAI")
# Traces ---------------------------------------------------------------------------
def read_trace(self):
"""Read trace data, returns x (frequency) and y (level)"""
y = _to_numeric(self.instr.query("TRAC1? TRACE1"))
n = len(y) # numer of trace points
x = np.linspace(self.freq_start, self.freq_stop, n)
return x, y
@property
def trace_mode(self):
return self.instr.query("DISP:TRAC:MODE?").rstrip()
@trace_mode.setter
def trace_mode(self, mode):
_modes = ["WRIT", "MAXH", "MINH", "AVER", "VIEW"]
if mode.upper() not in _modes:
raise KeyError("mode has to be in {}".format(_modes))
else:
self.instr.write("DISP:TRAC:MODE {}".format(mode))
# Markers --------------------------------------------------------------------------
def create_marker(self, num=1, is_delta_marker=False):
"""
The number of the marker (default 1) and a bool to define whether the
marker is a delta marker (default False).
"""
return self.Marker(self, num, is_delta_marker)
class Marker:
def __init__(self, device, num, is_delta_marker):
"""
Marker and Delte Marker class. Specify device (instance of FSL), the marker
number (int) and whether the marker is a delta marker (bool).
"""
self.instr = device.instr
self.is_delta_marker = is_delta_marker
# building the marker name for the commands
if self.is_delta_marker:
# smallest delta marker number is 2
self.name = "DELT" + str(max(2, num))
else:
self.name = "MARK"
if num > 1:
# marker 1 doesn't get a number
self.name = self.name + str(num)
self.activate()
def activate(self):
"""Activate a marker"""
self.instr.write("CALC:{}:STAT ON".format(self.name))
def disable(self):
"""Disable a marker"""
self.instr.write("CALC:{}:STAT OFF".format(self.name))
def to_trace(self, n_trace=1):
"""Set marker to trace (default 1)"""
self.instr.write("CALC:{}:TRAC {}".format(self.name, n_trace))
@property
def peak_excursion(self):
"""Peak excursion in dB"""
return _to_numeric(self.instr.query("CALC:{}:PEXC?".format(self.name)))
@peak_excursion.setter
def peak_excursion(self, value):
self.instr.write("CALC:{}:PEXC {}".format(self.name, value))
def to_peak(self):
"""Set marker to peak"""
self.instr.write("CALC:{}:MAX".format(self.name))
def to_next_peak(self, relative="right"):
"""Set marker to next peak (left or right of current position)"""
self.instr.write("CALC:{}:MAX:{}".format(self.name, relative))
@property
def x(self):
"""Frequency in hertz"""
return _to_numeric(self.instr.query("CALC:{}:X?".format(self.name)))
@x.setter
def x(self, value):
self.instr.write("CALC:{}:X {}".format(self.name, value))
@property
def y(self):
"""Amplitude of the marker"""
return _to_numeric(self.instr.query("CALC:{}:Y?".format(self.name)))
@y.setter
def y(self, value):
self.instr.write("CALC:{}:Y {}".format(self.name, value))
def zoom(self, value):
"""Zoom in two a frequency span or by a factor"""
self.instr.write("CALC:{}:FUNC:ZOOM {}; *WAI".format(self.name, value)) | /rs_fsl-0.2.tar.gz/rs_fsl-0.2/rs_fsl/rs_fsl.py | 0.77343 | 0.53692 | rs_fsl.py | pypi |
import math
def ray(io, eo, z, col, row):
    """Project image pixel (col, row) onto the horizontal plane at height z.

    io: interior orientation [x_ppa, y_ppa, focal_length, pixel_size,
        image_extent_x, image_extent_y] — extents appear to be stored as
        negative half-sizes in mm (hence the `* -1` below); confirm with caller.
    eo: exterior orientation [x0, y0, z0, omega, phi, kappa], angles in degrees.
    Returns (x, y): world coordinates where the viewing ray crosses height z.
    """
    x_ppa, y_ppa, focal_length, pixel_size, image_extent_x, image_extent_y = io[:6]
    x0, y0, z0 = eo[0], eo[1], eo[2]
    o = math.radians(eo[3])
    p = math.radians(eo[4])
    k = math.radians(eo[5])
    so, co = math.sin(o), math.cos(o)
    sp, cp = math.sin(p), math.cos(p)
    sk, ck = math.sin(k), math.cos(k)
    # Rotation matrix for the omega-phi-kappa parameterization.
    d11 = cp * ck
    d12 = -cp * sk
    d13 = sp
    d21 = co * sk + so * sp * ck
    d22 = co * ck - so * sp * sk
    d23 = -so * cp
    d31 = so * sk - co * sp * ck
    d32 = so * ck + co * sp * sk
    d33 = co * cp
    # Pixel position relative to the principal point, in mm.
    x_dot = ((col * pixel_size) - image_extent_x * -1) - x_ppa
    y_dot = ((row * pixel_size) - image_extent_y * -1) - y_ppa
    denom = d31 * x_dot + d32 * y_dot + d33 * focal_length
    kx = (d11 * x_dot + d12 * y_dot + d13 * focal_length) / denom
    ky = (d21 * x_dot + d22 * y_dot + d23 * focal_length) / denom
    return ((z - z0) * kx + x0, (z - z0) * ky + y0)
def rayverse(io, eo, x, y, z):
    """Inverse of `ray`: project world point (x, y, z) back into the image.

    io: interior orientation [x_ppa, y_ppa, focal_length, pixel_size,
        image_extent_x, image_extent_y] (same conventions as `ray`).
    eo: exterior orientation [x0, y0, z0, omega, phi, kappa], angles in degrees.
    Returns (col, row): the pixel coordinates of the projected point.
    """
    x_ppa, y_ppa, focal_length, pixel_size, image_extent_x, image_extent_y = io[:6]
    x0, y0, z0 = eo[0], eo[1], eo[2]
    o = math.radians(eo[3])
    p = math.radians(eo[4])
    k = math.radians(eo[5])
    so, co = math.sin(o), math.cos(o)
    sp, cp = math.sin(p), math.cos(p)
    sk, ck = math.sin(k), math.cos(k)
    # Rotation matrix for the omega-phi-kappa parameterization.
    d11 = cp * ck
    d12 = -cp * sk
    d13 = sp
    d21 = co * sk + so * sp * ck
    d22 = co * ck - so * sp * sk
    d23 = -so * cp
    d31 = so * sk - co * sp * ck
    d32 = so * ck + co * sp * sk
    d33 = co * cp
    dx, dy, dz = x - x0, y - y0, z - z0
    # Collinearity equations (rows of the transposed rotation matrix).
    w = d13 * dx + d23 * dy + d33 * dz
    x_dot = (-1) * focal_length * ((d11 * dx + d21 * dy + d31 * dz) / w)
    y_dot = (-1) * focal_length * ((d12 * dx + d22 * dy + d32 * dz) / w)
    col = ((x_dot - x_ppa) + (image_extent_x)) * (-1) / pixel_size
    row = ((y_dot - y_ppa) + (image_extent_y)) * (-1) / pixel_size
    return (col, row)
def create_sure(out_path, img_path, io, eo):
    """Write a SURE `.ori` orientation file (ORI_Ver_1.0) for one image.

    out_path: path of the orientation file to create.
    img_path: image path recorded in the $ImageID section.
    io: interior orientation [x_ppa, y_ppa, focal_length, pixel_size,
        image_extent_x, image_extent_y, ...] (extents as negative half-sizes in mm).
    eo: exterior orientation [x, y, z, omega, phi, kappa] (degrees).
    """
    x_ppa = io[0]
    y_ppa = io[1]
    # SURE expects the focal length with opposite sign.
    focal_length = io[2] * (-1)
    pixel_size = io[3]
    image_extent_x = int(io[4] * -2 / pixel_size)
    image_extent_y = int(io[5] * -2 / pixel_size)
    x = eo[0]
    y = eo[1]
    z = eo[2]
    omega = eo[3]
    phi = eo[4]
    kappa = eo[5]
    # Principal point expressed in pixel coordinates.
    pri_x_pix = (image_extent_x / 2) + ((x_ppa / pixel_size))
    pri_y_pix = (image_extent_y / 2) + ((y_ppa / pixel_size))
    r = math.pi / 180
    so = math.sin(omega * r)
    co = math.cos(omega * r)
    sp = math.sin(phi * r)
    cp = math.cos(phi * r)
    ck = math.cos(kappa * r)
    sk = math.sin(kappa * r)
    # World -> image rotation matrix, row-major.
    rot0 = cp * ck
    rot1 = co * sk + so * sp * ck
    rot2 = so * sk - co * sp * ck
    rot3 = cp * sk
    rot4 = -co * ck + so * sp * sk
    rot5 = -so * ck - co * sp * sk
    rot6 = -sp
    rot7 = so * cp
    rot8 = -co * cp
    cc_pix = focal_length / pixel_size
    # Every line except the last is terminated with " \n" (space then newline),
    # matching the format produced by the sequential-write version of this code.
    lines = [
        "$ImageID___________________________________________________(ORI_Ver_1.0)",
        f"\t{img_path}",
        "$IntOri_FocalLength_________________________________________________[mm]",
        f"\t{focal_length}",
        "$IntOri_PixelSize______(x|y)________________________________________[mm]",
        f"\t{pixel_size}\t {pixel_size}",
        "$IntOri_SensorSize_____(x|y)_____________________________________[pixel]",
        f"\t{image_extent_x}\t {image_extent_y}",
        "$IntOri_PrincipalPoint_(x|y)_____________________________________[pixel]",
        f"\t{pri_x_pix}\t {pri_y_pix}",
        "$IntOri_CameraMatrix_____________________________(ImageCoordinateSystem)",
        f"\t{cc_pix}\t 0.00000000\t {pri_x_pix}",
        f"\t0.00000000\t {cc_pix}\t {pri_y_pix}",
        "\t0.00000000\t 0.00000000\t 1.00000000",
        "$ExtOri_RotationMatrix____________________(World->ImageCoordinateSystem)",
        f"\t{rot0}\t {rot1}\t {rot2}",
        f"\t{rot3}\t {rot4}\t {rot5}",
        f"\t{rot6}\t {rot7}\t {rot8}",
        "$ExtOri_TranslationVector________________________(WorldCoordinateSystem)",
        f"\t{x}\t {y}\t {z}",
        "$IntOri_Distortion_____(Model|ParameterCount|(Parameters))______________",
        "\tnone 0",
    ]
    with open(out_path, "w") as text_file:
        text_file.write(" \n".join(lines))
def create_footprint(io, eo):
    """Compute the image footprint on the ground plane (z = 0).

    Projects the four image corners through `ray` and returns the corner
    coordinates plus the footprint polygon:
    (xy1, xy2, xy3, xy4, [xy1, xy2, xy3, xy4]).
    """
    pixel_size = io[3]
    # Sensor size in pixels (extents are stored as negative half-sizes in mm).
    cols = io[4] * -2 / pixel_size
    rows = io[5] * -2 / pixel_size
    xy1 = ray(io, eo, 0, 0, 0)
    xy2 = ray(io, eo, 0, cols, 0)
    xy3 = ray(io, eo, 0, cols, rows)
    xy4 = ray(io, eo, 0, 0, rows)
    return (xy1, xy2, xy3, xy4, [xy1, xy2, xy3, xy4])
def create_def(def_name, img_path, dem_path, ort_path, io, eo, poly, res):
    """Write a .def parameter file driving an orthophoto generation run.

    def_name: output path of the .def file.
    img_path / dem_path / ort_path: input image, DTM and output ortho paths
        recorded verbatim in the file.
    io: interior orientation [x_ppa, y_ppa, focal_length, pixel_size,
        image_extent_x, image_extent_y, mount_rotation].
    eo: exterior orientation [x, y, z, omega, phi, kappa] (degrees).
    poly: footprint polygon (sequence of (x, y)) used to size the ortho extent.
    res: ground resolution of the orthophoto.
    """
    x_ppa = io[0]
    y_ppa = io[1]
    focal_length = io[2]
    pixel_size = io[3]
    image_extent_x = io[4]
    image_extent_y = io[5]
    mount_rotation = io[6]
    # Calculate Orto Extents
    # The footprint bounding box is snapped to multiples of the output resolution.
    bbox = bounding_box(poly)
    tlx = int(round((bbox[0] / float(res)), 0) * float(res))
    tly = int(round((bbox[3] / float(res)), 0) * float(res))
    lrx = int(round((bbox[1] / float(res)), 0) * float(res))
    lry = int(round((bbox[2] / float(res)), 0) * float(res))
    szx = (lrx - tlx) / float(res)
    szt = (tly - lry) / float(res)
    # Adjust for camera rotation
    # Each mount rotation swaps/negates the pixel-spacing vectors (IL1/IL2),
    # the sensor-extent line (IL3) and the principal-point components.
    if mount_rotation == 0:
        il1 = "0.000 " + str(pixel_size)
        il2 = str(pixel_size) + " 0.000 "
        il3 = str(image_extent_x) + " " + str(image_extent_y)
        pri_y = str(x_ppa)
        pri_x = str(y_ppa)
    elif mount_rotation == 180:
        il1 = " 0.000 " + str(pixel_size)
        il2 = str(pixel_size) + " 0.000 "
        il3 = str(image_extent_y) + " " + str(image_extent_x * (-1))
        pri_y = str(x_ppa)
        pri_x = str(y_ppa)
    elif mount_rotation == 90:
        il1 = str(pixel_size) + " 0.000"
        il2 = "0.000 " + str(pixel_size * (-1))
        il3 = str(image_extent_y) + " " + str(image_extent_x*(-1))
        pri_y = str(y_ppa)
        pri_x = str(x_ppa)
    elif mount_rotation == 270:
        il1 = str(pixel_size) + " 0.000"
        il2 = "0.000 " + str(pixel_size * (-1))
        il3 = str(image_extent_x) + " " + str(image_extent_y*(-1))
        pri_y = str(y_ppa)
        pri_x = str(x_ppa)
    elif mount_rotation == 999:
        # NOTE(review): 999 appears to be a project-specific sentinel for a
        # special mount orientation — confirm its meaning with the data provider.
        il1 = str(pixel_size) + " 0.000"
        il2 = "0.000 " + str(pixel_size * (-1))
        il3 = str(image_extent_y*(-1)) + " " + str(image_extent_x)
        pri_y = str(y_ppa)
        pri_x = str(x_ppa)
    else:
        # NOTE(review): exiting the interpreter from a library function is harsh;
        # raising ValueError would be friendlier to callers. Left unchanged here.
        print ("Illegal Camera Rotation" + str(mount_rotation))
        exit()
    # Write DEF file
    with open(def_name, "w") as text_file:
        text_file.write("PRJ= nill.apr" + " \n")
        text_file.write("ORI= nill.txt" + " \n")
        text_file.write("RUN= 0" + " \n")
        text_file.write("DEL= NO" + " \n")
        text_file.write("IMG= " + img_path + " \n")
        text_file.write("DTM= " + dem_path + " \n")
        text_file.write("ORT= " + ort_path + " \n")
        text_file.write("TLX= " + str(tlx) + " \n")
        text_file.write("TLY= " + str(tly) + " \n")
        text_file.write("RES= " + str(res) + " \n")
        text_file.write("SZX= " + str(math.trunc(szx)) + " \n")
        text_file.write("SZY= " + str(math.trunc(szt)) + " \n")
        text_file.write("R34= NO" + " \n")
        text_file.write("INT= CUB -1" + " \n")
        text_file.write("CON= " + str(focal_length / 1000) + " \n") # 0.1005
        text_file.write("XDH= " + pri_x + " \n") #str(x_ppa) + " \n") # -0.18
        text_file.write("YDH= " + pri_y + " \n") #str(y_ppa) + " \n")
        text_file.write("IL1= " + il1 + " \n") #str(pixel_size) + " 0.000 \n")
        text_file.write("IL2= " + il2 + " \n") #"0.000 " + str(pixel_size * (-1)) + " \n")
        text_file.write("IL3= " + il3 + " \n") #str(image_extent_x*-pixel_size/2) + " " + str(image_extent_y*pixel_size/2) + " \n")
        text_file.write("X_0= " + str(eo[0]) + " \n")
        text_file.write("Y_0= " + str(eo[1]) + " \n")
        text_file.write("Z_0= " + str(eo[2]) + " \n")
        text_file.write("DRG= DEG" + " \n")
        text_file.write("OME= " + str(eo[3]) + " \n")
        text_file.write("PHI= " + str(eo[4]) + " \n")
        text_file.write("KAP= " + str(eo[5]) + " \n")
        text_file.write("MBF= 870" + " \n")
        text_file.write("BBF= 999999" + " \n")
        text_file.write("STR= NO" + " \n")
        text_file.close()
    return
def bounding_box(poly):
    """Return [min_x, max_x, min_y, max_y] for an iterable of (x, y) pairs.

    An empty polygon yields [inf, -inf, inf, -inf] (the untouched seed values).
    """
    min_x = min_y = float("inf")
    max_x = max_y = float("-inf")
    for x, y in poly:
        min_x = min(min_x, x)
        max_x = max(max_x, x)
        min_y = min(min_y, y)
        max_y = max(max_y, y)
    return [min_x, max_x, min_y, max_y]
def utm_to_lat_lng(zone, easting, northing, northernHemisphere=True):
    """Convert UTM (zone, easting, northing) to (latitude, longitude) in degrees.

    Closed-form inverse transverse-Mercator series using WGS84 constants.
    NOTE(review): the `(zone > 0) and ... or 3.0` central-meridian fallback and
    the longitude sign flip for zone > 29 look dataset-specific — confirm the
    intended zone/longitude conventions before reusing elsewhere.
    """
    if not northernHemisphere:
        # Southern-hemisphere northings are measured from a 10,000 km false origin.
        northing = 10000000 - northing
    a = 6378137  # WGS84 semi-major axis (m)
    e = 0.081819191  # first eccentricity
    e1sq = 0.006739497  # second eccentricity squared
    k0 = 0.9996  # UTM central-meridian scale factor
    arc = northing / k0
    mu = arc / (a * (1 - math.pow(e, 2) / 4.0-3 * math.pow(e, 4) / 64.0-5 * math.pow(e, 6) / 256.0))
    ei = (1 - math.pow((1 - e*e), (1 / 2.0))) / (1 + math.pow((1 - e*e), (1 / 2.0)))
    # Series coefficients for the footprint-latitude expansion.
    ca = 3 * ei / 2 - 27 * math.pow(ei, 3) / 32.0
    cb = 21 * math.pow(ei, 2) / 16 - 55 * math.pow(ei, 4) / 32
    cc = 151 * math.pow(ei, 3) / 96
    cd = 1097 * math.pow(ei, 4) / 512
    phi1 = mu + ca*math.sin(2 * mu) + cb*math.sin(4 * mu) + cc*math.sin(6 * mu) + cd*math.sin(8 * mu)  # footprint latitude (radians)
    n0 = a / math.pow((1 - math.pow((e * math.sin(phi1)), 2)), (1 / 2.0))
    r0 = a * (1 - e*e) / math.pow((1 - math.pow((e * math.sin(phi1)), 2)), (3 / 2.0))
    fact1 = n0 * math.tan(phi1) / r0
    a1 = 500000 - easting  # offset from the 500 km false easting
    dd0 = a1 / (n0 * k0)
    fact2 = dd0*dd0 / 2
    t0 = math.pow(math.tan(phi1), 2)
    q0 = e1sq * math.pow(math.cos(phi1), 2)
    fact3 = (5 + 3*t0 + 10*q0 - 4*q0*q0 - 9*e1sq) * math.pow(dd0, 4) / 24
    fact4 = (61 + 90*t0 + 298*q0 + 45*t0*t0 - 252*e1sq - 3*q0*q0) * math.pow(dd0, 6) / 720
    lof1 = a1 / (n0 * k0)
    lof2 = (1 + 2*t0 + q0) * math.pow(dd0, 3) / 6.0
    lof3 = (5 - 2*q0 + 28*t0 - 3*math.pow(q0, 2) + 8*e1sq + 24*math.pow(t0, 2)) * math.pow(dd0, 5) / 120
    a2 = (lof1 - lof2 + lof3) / math.cos(phi1)
    a3 = a2*180 / math.pi  # longitude offset from the central meridian (degrees)
    latitude = 180 * (phi1 - fact1 * (fact2 + fact3 + fact4)) / math.pi
    if not northernHemisphere:
        latitude = -latitude
    # Central meridian of the zone (6*zone - 183); falls back to 3.0 for zone 0.
    longitude = ((zone > 0) and (6*zone - 183.0) or 3.0) - a3
    if (zone > 29):
        longitude = longitude * (-1)
    return (latitude, longitude)
def sunAngle(self, datotiden, lati, longi):
    """Sun elevation angle in degrees above the horizon.

    Follows a NOAA-style solar-position approximation for a timestamp string
    ('YYYY:MM:DD HH:MM:SS[.fff]'; '-' separators are first normalized to ':')
    and latitude/longitude in degrees. The time zone is fixed at UTC.
    NOTE(review): `self` is unused — this looks like a method detached from its
    class. NOAA's fractional-year formula uses (dayOfYear - 1) with a
    minute-resolved hour fraction, and a time offset of eqtime + 4*long - 60*tz;
    this code uses (dayOfYear + 1), whole hours, and eqtime - 4*long + 60*tz —
    confirm whether those deviations are intentional.
    """
    import math
    import datetime
    datotiden = datotiden.replace('-', ':')
    # Accept timestamps with or without fractional seconds.
    patterndatetime1 = re.compile("[0-9]{4}:[0-9]{2}:[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,3}")
    if patterndatetime1.match(datotiden):
        DateTime = datetime.datetime.strptime(datotiden, '%Y:%m:%d %H:%M:%S.%f')
    else:
        DateTime = datetime.datetime.strptime(datotiden, '%Y:%m:%d %H:%M:%S')
    dayOfYear = DateTime.timetuple().tm_yday
    hour = DateTime.hour
    mins = DateTime.minute
    sec = DateTime.second
    timeZone = 0  # timestamps are assumed to be UTC
    gamma = (2 * math.pi / 365) * ((dayOfYear + 1) + (hour - 12) / 24)  # fractional year (radians)
    eqtime = 229.18 * (0.000075 + 0.001868 * math.cos(gamma) - 0.032077 * math.sin(gamma) - 0.014615 * math.cos(2 * gamma) - 0.040849 * math.sin(2 * gamma))  # equation of time (minutes)
    declin = 0.006918 - (0.399912 * math.cos(gamma)) + 0.070257 * math.sin(gamma) - 0.006758 * math.cos(2 * gamma) + 0.000907 * math.sin(2 * gamma) - 0.002697 * math.cos(3 * gamma) + 0.00148 * math.sin(3 * gamma)  # solar declination (radians)
    tOffset = eqtime - 4 * longi + 60 * timeZone  # time offset (minutes)
    tst = hour * 60 + mins + sec / 60 + tOffset  # true solar time (minutes)
    sh = (tst / 4) - 180  # solar hour angle (degrees)
    zenit = math.degrees(math.acos(((math.sin(math.radians(lati)) * math.sin(declin)) + (math.cos(math.radians(lati)) * math.cos(declin) * math.cos(math.radians(sh))))))
    sunVinkel = 90 - zenit  # elevation = 90 degrees minus zenith angle
    return sunVinkel
from functools import lru_cache
import numpy as np
import pandas as pd
from rs_metrics.helpers import flatten_list, pandas_to_dict, convert_pandas
from rs_metrics.parallel import user_mean, top_k, user_apply, user_mean_sub
from rs_metrics.statistics import item_pop
@convert_pandas
def ndcg(true, pred, k=10):
    """Measures ranking quality.

    Mean over users of `_ndcg_score`: the DCG of each user's recommendation
    list normalized by the ideal DCG at min(|relevant|, k). `true`/`pred` may
    be dicts keyed by user id or DataFrames (converted by @convert_pandas).
    """
    return user_mean(_ndcg_score, true, pred, k)
def _ndcg_score(true, pred, k):
true = set(true)
gain = [item in true for item in pred]
dcg = np.sum(gain / _discounts(len(gain)))
return dcg / _idcg(min(len(true), k))
@lru_cache(maxsize=128)
def _discounts(k):
return np.log2(np.arange(k) + 2)
@lru_cache(maxsize=128)
def _idcg(k):
return np.sum(np.ones(k) / np.log2(np.arange(k) + 2))
@convert_pandas
def hitrate(true, pred, k=10):
    """Shows what percentage of users has at least one relevant recommendation in their list.

    Mean over users of `_hitrate` (0/1 per user). Truncation to the top-k is
    assumed to happen inside `user_mean` — TODO confirm.
    """
    return user_mean(_hitrate, true, pred, k)
def _hitrate(true, pred, k):
return int(np.isin(pred, true).any())
@convert_pandas
def precision(true, pred, k=10):
    """Shows what percentage of items in recommendations are relevant, on average.

    Mean over users of precision@k (see `_precision`).
    """
    return user_mean(_precision, true, pred, k)
def _precision(true, pred, k):
return np.isin(pred, true).mean()
@convert_pandas
def recall(true, pred, k=10):
    """Shows what percentage of relevant items appeared in recommendations, on average.

    Mean over users of recall@k (see `_recall`).
    """
    return user_mean(_recall, true, pred, k)
def _recall(true, pred, k):
return np.isin(true, pred).mean()
@convert_pandas
def mrr(true, pred, k=10):
    """Shows inverted position of the first relevant item, on average.

    Mean over users of the reciprocal rank (see `_mrr`); users with no
    relevant recommendation contribute 0.
    """
    return user_mean(_mrr, true, pred, k)
def _mrr(true, pred, k):
entries = np.isin(pred, true)
if entries.any():
return 1 / (entries.argmax() + 1)
else:
return 0
@convert_pandas
def mapr(true, pred, k=10):
    """Mean average precision (MAP@k) over users (see `_map`)."""
    return user_mean(_map, true, pred, k)
def _map(true, pred, k):
rel = np.isin(pred, true)
if len(true) > 0:
return (rel.cumsum() / np.arange(1, len(pred) + 1) * rel).sum() / len(true)
else:
return 0.0
def coverage(items, recs, k=None, user_col='user_id', item_col='item_id'):
    """What percentage of items appears in recommendations?
    Args:
        items: list of unique item ids
        recs: dict of recommendations, or a pandas DataFrame with
            `user_col`/`item_col` columns
        k: top k items to use from recs
        user_col: column name for user ids (only used when recs is a DataFrame)
        item_col: column name for item ids (only used when recs is a DataFrame)
    Returns: float
    """
    # isinstance (not `type(...) is`) so DataFrame subclasses are accepted too.
    if isinstance(recs, pd.DataFrame):
        recs = pandas_to_dict(recs, user_col, item_col)
    # Unique items appearing anywhere in the (truncated) recommendation lists.
    topk = list(set(flatten_list(top_k(recs, k).values())))
    return np.isin(items, topk).mean()
def _popularity(df, pred, fill):
return np.mean([df.get(item, fill) for item in pred])
def popularity(log, pred, k=10, user_col='user_id', item_col='item_id'):
    """
    Mean popularity of recommendations.
    Args:
        log: pandas DataFrame with interactions
        pred: pandas DataFrame (or dict) with recommendations
        k: top k items to use from recs
        user_col: column name for user ids
        item_col: column name for item ids
    Returns:
        Mean over users of the average popularity of their recommended items;
        items absent from `log` contribute popularity 0.
    """
    # isinstance (not `type(...) is`) so DataFrame subclasses are accepted too.
    if isinstance(pred, pd.DataFrame):
        pred = pandas_to_dict(pred, user_col, item_col)
    scores = item_pop(log, user_col, item_col)
    return user_apply(_popularity, scores, pred, k, 0)
def surprisal(log, pred, k=10, user_col='user_id', item_col='item_id'):
    """Mean self-information (-log2 of popularity) of recommended items.

    Items absent from `log` receive the maximum surprisal log2(n_users),
    i.e. they are treated as if consumed by a single user. Arguments mirror
    `popularity`.
    """
    # isinstance (not `type(...) is`) so DataFrame subclasses are accepted too.
    if isinstance(pred, pd.DataFrame):
        pred = pandas_to_dict(pred, user_col, item_col)
    scores = -np.log2(item_pop(log, user_col, item_col))
    # Fill value for unseen items: maximally surprising (popularity 1/n_users).
    fill = np.log2(log[user_col].nunique())
    return user_apply(_popularity, scores, pred, k, fill)
def a_ndcg(true, pred, aspects, k=10, alpha=0.5, user_col='user_id', item_col='item_id'):
    """Measures redundancy-aware quality and diversity (alpha-NDCG).

    Args:
        true: relevant items per user (dict keyed by user or DataFrame)
        pred: recommendations per user (dict keyed by user or DataFrame)
        aspects: list of item collections, one per aspect/subtopic
        k: top k items to use
        alpha: redundancy penalty in [0, 1]; larger values punish repeated
            picks from the same aspect more strongly
        user_col: column name for user ids (DataFrame inputs only)
        item_col: column name for item ids (DataFrame inputs only)
    """
    # isinstance (not `type(...) is`) so DataFrame subclasses are accepted too.
    if isinstance(true, pd.DataFrame):
        true = pandas_to_dict(true, user_col, item_col)
    if isinstance(pred, pd.DataFrame):
        pred = pandas_to_dict(pred, user_col, item_col)
    return user_mean_sub(_a_ndcg, true, pred, aspects, k, alpha)
def _a_ndcg(true, pred, aspects, alpha):
    """alpha-NDCG for one user.

    Relevant recommendations are discounted by (1 - alpha) raised to the
    number of earlier recommendations drawn from the same aspect, so
    redundant picks earn progressively less. The ideal DCG assumes the
    len(pred) slots are spread as evenly as possible across aspects.
    """
    p = pd.Series(pred, dtype=object)
    penalty = 1 - alpha
    dcg = 0
    hits = p.isin(true).astype(int)  # 1 where the recommendation is relevant
    num_aspects = len(aspects)
    k = len(pred)
    fit = k // num_aspects   # items per aspect in an even split
    extra = k % num_aspects  # leftover slots that must repeat an aspect
    # Ideal gains: fit * num_aspects unpenalized hits, plus `extra` hits that
    # each revisit an aspect for the (fit + 1)-th time (gain penalty ** fit).
    idcg = np.append(np.ones(fit * num_aspects), [penalty ** fit] * extra)
    idcg /= np.log2(np.arange(k) + 2)  # positional log2 discount
    idcg = idcg.sum()
    for aspect in aspects:
        items_from_aspects = p.isin(aspect)
        if items_from_aspects.any():
            # 0-based rank of each recommendation *within this aspect*;
            # NaN for recommendations outside the aspect.
            aspect_positions = pd.Series(np.NaN, index=p.index, dtype=float)
            aspect_positions[items_from_aspects] = range(items_from_aspects.sum())
            # Relevant items are penalized by how many same-aspect items precede them.
            gain = hits * (penalty ** aspect_positions).fillna(0)
            discounts = np.log2(np.arange(len(gain)) + 2)
            dcg += np.sum(gain / discounts)
    return dcg / idcg
from __future__ import annotations
from typing import List, Tuple
import matplotlib.pyplot as plt # type: ignore[import]
import numpy as np
from . import planner, primitives
# List of path endpoints to visualize Reeds-Shepp paths for with matplotlib.
# Every demo path starts at the origin with zero yaw.
# Format: (end x, end y, end yaw in radians, turn radius, runway length)
_END_POSES: List[Tuple[float, float, float, int, float]] = [
    (5, 6, np.pi, 1, 0),
    (15, 3, np.pi / 2.0, 2, 6),
    (-2, -4, np.pi, 4, 3),
    (-7, 2, np.pi, 4, 0),
    (-7, -7, 0.0, 6, 1),
    (0.7, 1.8, 1, 1, 1),
    (-5, 6, np.pi / 3.0, 2, 1),
    (7, 2, 0.0, 6, 3),
    # Same target pose at four different turn radii, for comparison.
    (-4, -1, -np.pi / 2.0, 1, 3),
    (-4, -1, -np.pi / 2.0, 2, 3),
    (-4, -1, -np.pi / 2.0, 4, 3),
    (-4, -1, -np.pi / 2.0, 6, 3),
    (1.41513, 5.670786, 1.08317, 1, 3),
]
def _plot_arrow(
    x: float,
    y: float,
    yaw: float,
    length: float = 0.3,
    width: float = 0.2,
    label: str = "",
) -> None:
    """Draw a pose marker: an arrow of the given length pointing along `yaw`,
    plus a square marker at (x, y)."""
    dx = length * np.cos(yaw)
    dy = length * np.sin(yaw)
    plt.arrow(x, y, dx, dy, head_width=width, head_length=width)
    plt.plot(x, y, marker="s", label=label)
def _viz_path(rs_path: primitives.Path, path_num: int) -> None:
    """Visualizes the given path in the plot.

    Plots the path's (x, y) trace on a shared subplot and rebuilds the
    legend below the axes so many paths can share one figure.
    """
    x_coords, y_coords, _ = rs_path.coordinates_tuple()
    path_plt = plt.subplot(111)
    path_plt.plot(x_coords, y_coords, label=(f"Path ix: {path_num}"))
    # Shrink current axis's height by 5% on the bottom to make room for the legend
    box = path_plt.get_position()
    path_plt.set_position(
        [box.x0, box.y0 + box.height * 0.05, box.width, box.height * 0.95]
    )
    # Put a legend below current axis
    path_plt.legend(
        loc="upper center", bbox_to_anchor=(0.5, -0.1), fancybox=True, ncol=3
    )
    # Non-blocking so the caller can keep adding paths before the final show().
    plt.show(block=False)
def _demo_scene() -> None:
    """Generate and visualize the set of pre-defined paths solved by the path planner."""
    plt.cla()
    plt.legend()
    plt.grid(True)
    plt.axis("equal")
    start = (0.0, 0.0, 0.0)
    step_size = 0.05  # loop-invariant; hoisted out of the loop
    # enumerate() replaces the range(len(...)) indexing anti-pattern; path
    # numbering is 1-based, matching the old `i + 1`.
    for path_num, (x, y, yaw, turn_radius, runway_length) in enumerate(
        _END_POSES, start=1
    ):
        # Start-of-path arrow (identical for all paths, which begin at the origin).
        _plot_arrow(*start)
        # Yaw angles are passed in radians.
        rs_path = planner.path(
            start, (x, y, yaw), turn_radius, runway_length, step_size
        )
        _viz_path(rs_path, path_num)
        _plot_arrow(x, y, yaw)  # End-of-path arrow shows the final heading
    plt.show()
if __name__ == "__main__":
_demo_scene() | /rs-path-1.0.0.tar.gz/rs-path-1.0.0/rs/demo.py | 0.890002 | 0.68125 | demo.py | pypi |
from __future__ import annotations
import dataclasses
import functools
from typing import Any, List, Literal, Optional, Tuple
import numpy as np
from . import helpers
@dataclasses.dataclass
class Path:
    """Reeds-Shepp path represented as its start/end points, turn radius (in meters),
    and a list of Segments. Additionally contains a step size value (in meters) used to
    calculate the Waypoint representation of the path.
    """

    start_pt: Tuple[float, float, float]
    end_pt: Tuple[float, float, float]
    segments: List[Segment]
    turn_radius: float
    step_size: float

    @property
    def start(self) -> Waypoint:
        """First waypoint of the discretized path."""
        return self.waypoints()[0]

    @property
    def end(self) -> Waypoint:
        """Last waypoint of the discretized path."""
        return self.waypoints()[-1]

    @property
    def _has_runway(self) -> bool:
        # A path ends with a runway iff its final segment is straight.
        return self.segments[-1].is_straight

    @functools.cached_property
    def total_length(self) -> float:
        """Sum of the absolute lengths of all segments."""
        return sum(abs(segment.length) for segment in self.segments)

    @functools.cached_property
    def runway_length(self) -> float:
        """Length of the final straight (runway) segment, or 0.0 if none."""
        if self._has_runway:
            return abs(self.segments[-1].length)
        return 0.0  # Path does not have runway

    @functools.cached_property
    def number_of_cusp_points(self) -> int:
        """Number of waypoints at which the driving direction reverses."""
        pts = self.waypoints()
        count = 0
        for p0, p1 in zip(pts[:-1], pts[1:]):
            if p0.driving_direction != p1.driving_direction:
                count += 1
        return count

    def prune(self, increment: int) -> List[Waypoint]:
        """Returns a pruned list of waypoints that occur at regularly spaced distances."""
        pts = self.waypoints()
        return [pts[ix] for ix in range(0, len(pts), increment)]

    def coordinates_tuple(self) -> Tuple[List[float], List[float], List[float]]:
        """Convenience function for decomposing the path points into their components
        (x, y, yaw).
        """
        x_coords, y_coords, yaw_coords = [], [], []
        for pt in self.waypoints():
            x_coords.append(pt.x)
            y_coords.append(pt.y)
            yaw_coords.append(pt.yaw)
        return x_coords, y_coords, yaw_coords

    def waypoints(self) -> List[Waypoint]:
        """Interpolate the path's segments into a list of Waypoints. First compute the
        pure segment points, then stitch to path list of points. For negative segments,
        we find the segment motion in positive discretization, then we adjust the sign
        of the motion in the equations.

        The result is memoized on the instance. (The previous implementation
        used ``functools.lru_cache`` on this method, which keys the global
        cache on ``self`` and therefore keeps every Path alive for the cache's
        lifetime — ruff rule B019.)
        """
        cached = getattr(self, "_waypoints_cache", None)
        if cached is not None:
            return cached
        x0, y0, yaw0 = self.start_pt
        path_points: List[Tuple[float, float, float, float, Literal[-1, 1], bool]] = []
        # Calculate list of Waypoint parameter tuples segment by segment.
        for ix, segment in enumerate(self.segments):
            if self._has_runway and ix == len(self.segments) - 1:  # Runway segment
                seg_points = segment.calc_waypoints(
                    (x0, y0, yaw0), self.step_size, True, end_pt=self.end_pt
                )
                # Remove duplicated runway starting point
                if Waypoint(*path_points[-1]).is_close(Waypoint(*seg_points[0])):
                    seg_points.pop(0)
            else:  # Non-runway segment
                seg_points = segment.calc_waypoints(
                    (x0, y0, yaw0), self.step_size, False
                )
            path_points.extend(seg_points)  # Add segment pts to list of path pts
            # For next segment, set first point to last pt of this segment
            x0, y0, yaw0 = seg_points[-1][0], seg_points[-1][1], seg_points[-1][2]
        # Ensure the path's last pt equals the provided end pt: discretization
        # with a non-ideal step size can leave the final sample slightly off.
        end_pt_to_add = self._end_pt_to_add(path_points[-1])
        if end_pt_to_add is not None:
            path_points.append(end_pt_to_add)
        result = [Waypoint(*point) for point in path_points]
        self._waypoints_cache = result
        return result

    def _end_pt_to_add(
        self, last_path_pt: Tuple[float, float, float, float, Literal[-1, 1], bool]
    ) -> Optional[Tuple[float, float, float, float, Literal[-1, 1], bool]]:
        """Checks if the last path point equals the provided Path end point. It's
        possible for end points to be slightly off the target end pose due to path
        discretization with a non-ideal step size.

        Returns the end pose (with the last point's curvature/direction/runway
        parameters) when it needs to be appended, else None.
        """
        end_pt_with_params = (*self.end_pt, *last_path_pt[3:])
        if not Waypoint(*last_path_pt).is_close(Waypoint(*end_pt_with_params)):
            # Point to append is end point with last 3 parameters from final path point
            return end_pt_with_params
        return None

    def __hash__(self) -> int:
        # Segments are unhashable dataclasses, so hash their field tuples.
        segment_tuple = tuple(
            (segment.type, segment.direction, segment.length)
            for segment in self.segments
        )
        return hash(
            (
                self.start_pt,
                self.end_pt,
                self.turn_radius,
                self.step_size,
                segment_tuple,
            )
        )
@dataclasses.dataclass
class Waypoint:
    """A single pose sampled along a Reeds-Shepp path.

    Holds position (x, y) and heading (yaw), together with the curvature and
    driving direction of the segment the point lies on, and whether the point
    belongs to the final runway segment.
    """

    x: float
    y: float
    yaw: float
    curvature: float
    driving_direction: Literal[-1, 1]
    is_runway: bool

    @property
    def turn_direction(self) -> Literal[-1, 0, 1]:
        """Sign of the curvature, per the right hand rule: left is negative,
        right is positive, straight is zero."""
        return helpers.sign(self.curvature)

    @property
    def pose_2d_tuple(self) -> Tuple[float, float, float]:
        """The (x, y, yaw) pose as a plain tuple."""
        return (self.x, self.y, self.yaw)

    def transform_to(self, end: Waypoint) -> Tuple[float, float, float]:
        """Translation (dx, dy) and rotation (in degrees) that carry this pose
        onto `end`."""
        dx = end.x - self.x
        dy = end.y - self.y
        dyaw_degrees = np.rad2deg(end.yaw - self.yaw)
        return dx, dy, dyaw_degrees

    def is_close(self, p2: Waypoint) -> bool:
        """True when x, y and yaw each match `p2` to five decimal places."""
        mine = (round(self.x, 5), round(self.y, 5), round(self.yaw, 5))
        theirs = (round(p2.x, 5), round(p2.y, 5), round(p2.yaw, 5))
        return mine == theirs

    def __hash__(self) -> int:
        return hash(
            (
                self.x,
                self.y,
                self.yaw,
                self.curvature,
                self.driving_direction,
                self.is_runway,
            )
        )
@dataclasses.dataclass
class Segment:
    """A single segment within a Reeds Shepp path. A segment is described by its type
    (left, right, or straight), and direction. The direction represents whether the car
    is following the curve type forwards or backwards (see diagram in curves.py).
    The length represents the length of the segment (straight segment's length
    is the straight length and curved segment's length is the arc length calculated by
    turn radius * turn angle).
    """
    # Geometry descriptors; `direction` is +1 forwards, -1 backwards.
    type: Literal["left", "right", "straight"]
    direction: Literal[-1, 1]
    length: float
    turn_radius: float
    @property
    def is_straight(self) -> bool:
        """True for straight segments (runway candidates)."""
        return self.type == "straight"
    def calc_waypoints(
        self,
        start_pt: Tuple[float, float, float],
        step_size: float,
        is_runway: bool,
        end_pt: Tuple[float, float, float] = (0.0, 0.0, 0.0),
    ) -> List[Tuple[float, float, float, float, Literal[-1, 1], bool]]:
        """Calculate the parameters needed (x, y, yaw coordinates, and list of segment
        length and curvature) to calculate the list of points used to represent this
        segment.
        Returns one (x, y, yaw, curvature, direction, is_runway) tuple per point.
        NOTE(review): (0, 0, 0) doubles as the "no end point" sentinel, so a
        runway that genuinely ends at the origin would be mis-detected here.
        """
        if self.is_straight and end_pt != (0.0, 0.0, 0.0):
            # Runway segment with end point passed in for accuracy
            xs, ys, yaws = self._straight_runway_pts(start_pt, end_pt, step_size)
        else:
            # Non-runway segment
            segment_points = self._interpolated(step_size)
            xs, ys, yaws = self._get_segment_coords(start_pt, segment_points)
        return [
            (xs[i], ys[i], yaws[i], self._curvature(), self.direction, is_runway)
            for i in range(len(xs))
        ]
    def _curvature(self) -> float:
        """Signed curvature (1 / turn radius) of the segment.
        Sign convention matches Waypoint.turn_direction: left is negative,
        right is positive, straight is zero.
        """
        if self.type == "left":
            return -1.0 / self.turn_radius
        elif self.type == "right":
            return 1.0 / self.turn_radius
        return 0.0
    def _straight_runway_pts(
        self,
        start: Tuple[float, float, float],
        end: Tuple[float, float, float],
        step_size: float,
    ) -> Tuple[list[float], list[float], list[float]]:
        """Calculate a straight line of coordinates from the runway start point to the
        runway end point using the yaw angle of the runway end point to ensure the
        runway coordinates are accurate.
        """
        # +2 so both endpoints are always included alongside the interior steps.
        num_coords = int((self.length / step_size) + 2)
        x_coords = (np.linspace(start[0], end[0], num=num_coords, dtype=float)).tolist()
        y_coords = (np.linspace(start[1], end[1], num=num_coords, dtype=float)).tolist()
        # Heading is constant along a straight runway: take it from the end pose.
        yaw_coords = (np.ones(num_coords) * end[2]).tolist()
        return x_coords, y_coords, yaw_coords
    def _get_segment_coords(
        self,
        start: Tuple[float, float, float],
        segment_points: np.ndarray[Any, np.dtype[np.floating[Any]]],
    ) -> Tuple[List[float], List[float], List[float]]:
        """Generates the segment's x, y, yaw coordinate lists for each point in the
        interpolated list) using the segment type, turn radius, and start point.
        Coordinates are first generated in the segment's local frame (origin at
        the segment start, x along the initial heading), then rotated and
        translated into the world frame.
        """
        if self.type == "left":
            xs = self.direction * self.turn_radius * np.sin(segment_points)
            ys = self.turn_radius * (1 - np.cos(segment_points))
            yaws = self.direction * segment_points
        elif self.type == "right":
            xs = self.direction * self.turn_radius * np.sin(segment_points)
            ys = -self.turn_radius * (1 - np.cos(segment_points))
            yaws = -self.direction * segment_points
        elif self.type == "straight":
            xs = self.direction * segment_points
            ys = np.zeros(xs.shape[0])
            yaws = np.zeros(xs.shape[0])
        x0, y0, yaw0 = start
        # Rotate generic coordinates w.r.t segment start orientation
        yaw_coords = (yaws + yaw0).tolist()
        xs, ys = helpers.rotate(xs, ys, yaw0) if yaw0 != 0 else (xs, ys)
        # Add segment start position (x0 and y0) values to x and y coordinates
        x_coords = (xs + x0).tolist()
        y_coords = (ys + y0).tolist()
        return x_coords, y_coords, yaw_coords
    def _interpolated(
        self, step_size: float
    ) -> np.ndarray[Any, np.dtype[np.floating[Any]]]:
        """Discretizes the segment into a list of equidistant points (starting from 0,
        not actual segment starting point).
        For curved segments the samples are angles (arc length / radius);
        for straight segments they are linear distances.
        """
        magnitude = (
            abs(self.length)
            if self.is_straight
            else abs(self.length) / self.turn_radius
        )
        # step is the distance between points along the segment: dl (linear distance for
        # straight segments) and dtheta (step size / turn radius) for curved segments.
        step = step_size if self.is_straight else step_size / self.turn_radius
        seg_pts = np.arange(0, magnitude, step)
        # Add segment endpoint if the list of segment points is not empty
        seg_pts = np.append(seg_pts, [magnitude]) if seg_pts.any() else np.array([0.0])
        return seg_pts
import pickle
import re
import shutil
import os
class FESReporter(object):
def __init__(self,file,reportInterval,metaD_wrapper,CV_list,
potentialEnergy=False, kineticEnergy=False,
totalEnergy=False, temperature=False):
self._reportInterval = reportInterval
self.reporter_metaD = metaD_wrapper
self.CV_list = CV_list
self._out = file
self._hasInitialized = False
self._needsPositions = True
self._needsVelocities = False
self._needsForces = False
self._needEnergy = potentialEnergy or kineticEnergy or totalEnergy or temperature
def _initialize(self, simulation):
"""Deferred initialization of the reporter, which happens before
processing the first report."""
self.CV_values = []
self.free_energy = []
self._hasInitialized = True
def report(self, simulation, state):
"""Generate a report.
Parameters
----------
simulation : simtk.openmm.app.Simulation
The Simulation to generate a report for
"""
if not self._hasInitialized:
self._initialize(simulation)
self._hasInitialized = True
self.CV_values.append(self.reporter_metaD.getCollectiveVariables(simulation))
self.free_energy.append(self.reporter_metaD.getFreeEnergy())
# Save free energy and CV files periodically
with open(os.path.join(self._out,'CV_values.pickle'), 'wb') as cv_pickle:
pickle.dump(self.CV_values, cv_pickle)
with open(os.path.join(self._out,'fes.pickle'), 'wb') as fes_pickle:
pickle.dump(self.free_energy, fes_pickle)
def describeNextReport(self, simulation):
"""Get information about the next report this object will generate.
Parameters
----------
simulation : simtk.openmm.app.Simulation
The Simulation to generate a report for
Returns
-------
report_description : tuple
A five element tuple. The first element is the number of steps
until the next report. The remaining elements specify whether
that report will require positions, velocities, forces, and
energies respectively.
"""
steps = self._reportInterval - simulation.currentStep % self._reportInterval
return (steps, self._needsPositions, self._needsVelocities, self._needsForces, self._needEnergy) | /rs-simtools-0.0.4.tar.gz/rs-simtools-0.0.4/rs_simtools/custom_reporters.py | 0.793066 | 0.187003 | custom_reporters.py | pypi |
from . import tabler
from typing import Iterable, Any
def from_plaintext(text: Iterable[Iterable[str]]) -> tabler.table.Table:
    """
    Create a table from some plain text.

    Each inner iterable becomes one row; every string becomes a plain cell.
    """
    # Bug fix: the constructed Table was previously built and discarded; it
    # must be returned to satisfy the annotated return type.
    return tabler.table.Table(
        [
            [tabler.table.Cell(tabler.table.Content(s)) for s in row]
            for row in text
        ]
    )
def create_from_plaintext(
    header: Iterable[str], content: Iterable[Iterable[str]], splitter: bool
) -> tabler.table.Table:
    """
    Create a table from header and content.

    Header strings become header cells; each inner iterable of `content`
    becomes one body row. `splitter` is forwarded to `Table.create`.
    """
    # Bug fix: the constructed Table was previously built and discarded; it
    # must be returned to satisfy the annotated return type.
    return tabler.table.Table.create(
        header=[tabler.table.Cell(tabler.table.Content(cell)) for cell in header],
        content=[
            [tabler.table.Cell(tabler.table.Content(s)) for s in row]
            for row in content
        ],
        splitter=splitter,
    )
def auto_cell(v: Any) -> tabler.table.Cell:
    """
    Automatically create a new table cell.

    `None` becomes an empty cell, existing cells pass through unchanged,
    contents get wrapped, strings are wrapped as content, and anything else
    is stringified first.
    """
    if v is None:
        return tabler.table.Cell(tabler.table.Content.Empty)
    if isinstance(v, tabler.table.Cell):
        return v
    if isinstance(v, tabler.table.Content):
        return tabler.table.Cell(v)
    if isinstance(v, str):
        return tabler.table.Cell(tabler.table.Content(v))
    return tabler.table.Cell(tabler.table.Content(str(v)))
def auto_table_from(v: Iterable[Iterable[Any]]) -> tabler.table.Table:
    """
    Create a table with automatic cells.

    Every element of each row is converted with `auto_cell`.
    """
    # Comprehensions replace the redundant `map(lambda cell: auto_cell(cell))`
    # wrappers of the original.
    return tabler.table.Table([[auto_cell(cell) for cell in row] for row in v])
def auto_table_create(
    header: Iterable[Any], content: Iterable[Iterable[Any]], splitter: bool
) -> tabler.table.Table:
    """
    Create a table with automatic cells.

    Header elements and every element of each content row are converted with
    `auto_cell`; `splitter` is forwarded to `Table.create`.
    """
    # Annotation fix: header elements are mapped individually through
    # auto_cell (cf. create_from_plaintext), so header is Iterable[Any],
    # not Iterable[Iterable[Any]]. Comprehensions replace the redundant
    # `lambda cell: auto_cell(cell)` wrappers.
    return tabler.table.Table.create(
        header=[auto_cell(cell) for cell in header],
        content=[[auto_cell(cell) for cell in row] for row in content],
        splitter=splitter,
    )
from implicit.als import AlternatingLeastSquares
import numpy as np
from rs_tools.utils import encode, to_csc, dict_to_pandas
class Wrapper:
    """Adapter giving `implicit` models a pandas-DataFrame fit/predict API.

    NOTE(review): both methods label-encode the user/item columns of the
    caller's DataFrame in place and restore the original ids before
    returning, so the input df is temporarily mutated.
    """
    def fit(
        self,
        df,
        show_progress=True,
        user_col='user_id',
        item_col='item_id',
        rating_col='rating',
    ):
        """Fit the wrapped implicit model on an interactions DataFrame."""
        # encode() presumably returns df with label-encoded user/item columns
        # plus the fitted encoders — TODO confirm against rs_tools.utils.encode.
        df, ue, ie = encode(df, user_col, item_col)
        self.ue, self.ie = ue, ie
        # implicit's fit expects an item x user matrix, hence the transpose.
        item_users = to_csc(df, user_col, item_col, rating_col).T
        self.model.fit(item_users, show_progress)
        # Restore the original ids in the shared DataFrame.
        df.loc[:, user_col] = ue.inverse_transform(df[user_col])
        df.loc[:, item_col] = ie.inverse_transform(df[item_col])
    def predict(
        self,
        df,
        k,
        filter_already_liked_items=True,
        filter_items=None,
        recalculate_user=False,
        user_col='user_id',
        item_col='item_id',
        rating_col='rating',
    ):
        """Return top-k recommendations per user as a DataFrame with a score column."""
        # Encode ids with the encoders fitted in `fit`, build the user x item
        # matrix, then restore the original ids.
        df.loc[:, user_col] = self.ue.transform(df[user_col])
        df.loc[:, item_col] = self.ie.transform(df[item_col])
        user_items = to_csc(df, user_col, item_col, rating_col)
        df.loc[:, user_col] = self.ue.inverse_transform(df[user_col])
        df.loc[:, item_col] = self.ie.inverse_transform(df[item_col])
        pred = self.model.recommend_all(
            user_items, k, recalculate_user, filter_already_liked_items, filter_items
        )
        # Dense score matrix (n_users x n_items) — NOTE(review): O(users*items)
        # memory; may be prohibitive for large catalogs.
        p = self.model.user_factors.dot(self.model.item_factors.T)
        scores = [p[row][vals].tolist() for row, vals in enumerate(pred)]
        pred = dict_to_pandas(
            {user: items for user, items in enumerate(pred)}, user_col, item_col
        )
        scores = dict_to_pandas(
            {user: score for user, score in enumerate(scores)},
            user_col,
            val_col=rating_col,
        )
        pred[rating_col] = scores[rating_col]
        # Decode item ids back to their original labels.
        pred.loc[:, item_col] = self.ie.inverse_transform(pred[item_col].astype(int))
        return pred
class ALS(Wrapper):
    """Thin configuration wrapper around implicit's AlternatingLeastSquares
    exposing the DataFrame fit/predict interface of `Wrapper`."""

    def __init__(
        self,
        factors=100,
        regularization=0.01,
        dtype=np.float32,
        use_native=True,
        use_cg=True,
        use_gpu=False,
        iterations=15,
        calculate_training_loss=False,
        num_threads=0,
    ):
        # Collect the configuration once, then hand it to implicit unchanged.
        model_kwargs = dict(
            factors=factors,
            regularization=regularization,
            dtype=dtype,
            use_native=use_native,
            use_cg=use_cg,
            use_gpu=use_gpu,
            iterations=iterations,
            calculate_training_loss=calculate_training_loss,
            num_threads=num_threads,
        )
        self.model = AlternatingLeastSquares(**model_kwargs)
# *V*ae *A*ssisted *L*igand *D*isc*O*very (Valdo)
[](https://pypi.org/project/rs-valdo/)
Using variational autoencoders to improve the signal-to-noise ratio of drug
fragment screens
- [*V*ae *A*ssisted *L*igand *D*isc*O*very (Valdo)](#vae-assisted-ligand-discovery-valdo)
- [Installation](#installation)
- [Repository Organization](#repository-organization)
- [*VALDO* Usage](#valdo-usage)
- [Step 1: Diffraction Data](#step-1-diffraction-data)
- [Step 2: Reindexing \& Scaling](#step-2-reindexing--scaling)
- [Step 3: Normalization](#step-3-normalization)
- [Step 4: VAE Training](#step-4-vae-training)
- [Steps 5 \& 6: Reconstruction of "Apo" Data \& Calculating Difference Maps](#steps-5--6-reconstruction-of-apo-data--calculating-difference-maps)
- [Steps 7 \& 8: Gaussian Blurring \& Searching for Blobs](#steps-7--8-gaussian-blurring--searching-for-blobs)
- [Step 9: Identifying Events](#step-9-identifying-events)
## Installation
1. Create a environment conda or mamba
```
mamba create -n valdo python=3.10
```
2. Install [pytorch](https://pytorch.org/get-started/locally/)
3. Install the package
```
pip install rs-valdo
```
If you want the codes for further developing, install by:
```
git clone https://github.com/Hekstra-Lab/drug-screening.git
cd drug-screening/
pip install -e .
```
## Repository Organization
The `drug-screening` repository is organized into the following directories:
1. `notebooks/`
This directory contains several Jupyter notebooks and other related files for different stages of the drug screening method.
- `pipeline.ipynb`: This notebook provides a comprehensive walkthrough of our entire drug screening method, encompassing all necessary steps. The analysis is performed on the PTP1B dataset published by Keedy et al., found [here](https://zenodo.org/record/1044103). `scaler.ipynb` and `vae_training.ipynb` demonstrate uses of functions that are also included in `pipeline.ipynb`.
- `scaler.ipynb`: This notebook demonstrates the usage of `valdo`'s scaling functions.
- `vae_training.ipynb`: This notebook demonstrates the usage of `valdo`'s VAE training functions.
- `vae_metric_heavy_atom_peak_value.ipynb`: This standalone notebook allows users to calculate the average peak value of electron density maps at locations of ligands' heavy atom. It requires bound-state models to identify the position of the ligands' heavy atoms. This metric is useful for evaluating the signal-to-noise ratio of the drug screening method.
- `lig_heavy_atoms.pkl`: This file is required by `vae_metric_heavy_atom_peak_value.ipynb`, as it contains essential information for each sample, such as which samples have ligands with heavy atoms and which samples are bound.
- `refine_drug.eff`: This file assists in the command-line tool for automatic refinement.
2. `valdo/`
The `valdo/` directory contains the source code for the `rs-valdo` package.
## *VALDO* Usage
The full flow chart is shown below, followed by a discussion of each of the steps.

<br/>
### Step 1: Diffraction Data
The first step involves acquiring diffraction datasets in the `mtz` format. These datasets should follow a specific naming convention, where each file is named with a number followed by the `.mtz` extension (e.g., `01.mtz`, `02.mtz`, etc.).
**Usage:**
1. Ensure that you have collected diffraction datasets in the `mtz` format.
2. Organize the datasets with sequential numerical names (e.g., `01.mtz`, `02.mtz`, etc.).
Following this naming convention will allow datasets to be ready for further processing.
<br/>
### Step 2: Reindexing & Scaling
This step focuses on reindexing and scaling a list of input MTZ files to a reference MTZ file using gemmi.
**Reindexing:** The datasets provided may include samples from different space groups that describe the same physical crystal structure. To ensure comparability, we reindex each sample to a common indexing scheme by applying reindexing operators.
**Scaling:** The samples are scaled to a reference dataset using a global anisotropic scale factor determined by an analytical scaling method based on the Debye-Waller factor. The scaling process ensures that structure factor amplitudes are comparable across different datasets, accounting for sources of variability such as differences in lattice orientations.
**Usage:**
1. Import the required library, `valdo`.
2. Call the `reindex_files()` function from `valdo.reindex`. The `reindex_files()` function will enumerate possible reindexing operations for any space group and apply them to each input MTZ file. It will select the operation with the highest correlation with the reference dataset. The reindexed files will be saved in the specified output folder, following the same `##.mtz` naming convention.
This function can be called with the following parameters:
- `input_files`: List of paths to input MTZ files to be reindexed.
- `reference_file`: Path to the reference MTZ file.
- `output_folder`: Path to the folder where the reindexed MTZ files will be saved.
   - `columns`: A list containing the names of the amplitude column and the error column in the dataset.
3. Create a `Scaler` object by providing the path to the reference MTZ file.
4. Call the `batch_scaling()` method of the `Scaler` object. The `batch_scaling()` method will apply the scaling process to each input MTZ file and save the scaled MTZ files in the specified output folder. Scaling metrics, such as least squares values and correlations, will be saved in the report file.
This function can be called with the following parameters:
- `mtz_path_list`: List of paths to input MTZ files to be scaled.
- `outputmtz_path`: Path to the folder where the scaled MTZ files will be saved (optional, default `./scaled_mtzs/`).
- `reportfile`: Path to the file where scaling metrics will be saved (optional, default `./scaling_data.json`).
- `verbose`: Whether to display verbose information during scaling (optional, default `True`).
- `n_iter`: Number of iterations for the analytical scaling method (optional, default `5`).
<details>
<summary>Code Example:</summary>
```python
import valdo
file_list = [list of input MTZ file paths]
reindexed_path = "path/to/output/folder"
scaled_path = "path/to/output/folder"
amplitude_col = "name_of_column_with_amplitudes"
error_col = "name_of_column_with_errors"
valdo.reindex.reindex_files(input_files=file_list,
reference_file=file_list[0],
output_folder=reindexed_path,
columns=[amplitude_col, error_col])
file_list = [list of reindexed MTZ file paths]
scaler = valdo.Scaler(reference_mtz=file_list[0])
metrics = scaler.batch_scaling(mtz_path_list=file_list,
outputmtz_path=scaled_path,
verbose=False)
```
</details><br/>
### Step 3: Normalization
This step involves normalizing the scaled structure factor amplitudes obtained in the previous step. The input is restricted to only those Miller indices present in the intersection of all datasets, and the VAE predicts structure factor amplitudes for all Miller indices in the union of all datasets.
Additionally, we standardize all the input data, such that the structure factor amplitudes for each Miller index in the union of all datasets have a mean of zero and a unit variance across datasets.
**Usage:**
1. Import the required library, `valdo.preprocessing`.
2. Find the intersection and union of the scaled datasets using the following functions:
- `find_intersection()`: Finds the intersection of `amplitude_col` from multiple input MTZ files and saves the result to the specified output pickle file. Arguments include the following:
- `input_files`: List of input MTZ file paths.
- `output_path`: Path to save the output pickle file containing the intersection data.
- `amplitude_col`: Name of the column in the dataset that represents the scaled amplitude (default 'F-obs-scaled').
- `find_union()`: Finds the union of `amplitude_col` from multiple input MTZ files and saves the result to the specified output pickle file. Arguments are the same as `find_intersection()`.
3. Generate the VAE input and output data using the `generate_vae_io()` function. This standardizes the intersection dataset using mean and standard deviation calculated from the union dataset. The standardized intersection becomes the VAE input, while the standardized union becomes the VAE output. Both the VAE input and output are saved to the specified folder.
This function can be called with the following parameters:
- `intersection_path`: Path to the intersection dataset pickle file.
- `union_path`: Path to the union dataset pickle file.
- `io_folder`: Path to the output folder where the VAE input and output will be saved. Mean and standard deviation data calculated from the union dataset will also be saved in this folder as `union_mean.pkl` and `union_sd.pkl`.
<details>
<summary>Code Example:</summary>
```python
import valdo.preprocessing
file_list = [list of input MTZ file paths]
amplitude_scaled_col = "name_of_column_with_scaled_amplitudes"
intersection_path = "path/to/intersection_data.pkl"
union_path = "path/to/union_data.pkl"
vae_folder = "path/to/vae_input_output_folder"
valdo.preprocessing.find_intersection(input_files=file_list,
output_path=intersection_path,
amplitude_col=amplitude_scaled_col)
valdo.preprocessing.find_union(input_files=file_list,
output_path=union_path,
amplitude_col=amplitude_scaled_col)
valdo.preprocessing.generate_vae_io(intersection_path=intersection_path,
union_path=union_path,
io_folder=vae_folder)
```
</details><br/>
### Step 4: VAE Training
In this step, we train the VAE model using the provided VAE class.
**Usage:**
1. Load the VAE input and output data that was generated in the previous step.
2. Initialize the VAE model with the desired hyperparameters. Tune-able hyperparameters include the following:
- `n_dim_latent`: Number of dimensions in the latent space (optional, default `1`).
- `n_hidden_layers`: Number of hidden layers in the encoder and decoder. If an int is given, it will be applied to both the encoder and decoder; if a list of length 2 is given, the first int will be used for the encoder and the second for the decoder.
- `n_hidden_size`: Number of units in the hidden layers. If an int is given, it will be applied to all hidden layers in both the encoder and decoder; otherwise, an array with length equal to the number of hidden layers can be given, and the number of units will be assigned accordingly.
- `activation` : Activation function for the hidden layers (optional, default `tanh`).
3. Split the data into training and validation sets. Randomly select a subset of indices for training and use the rest for validation.
4. Convert the data into PyTorch tensors.
5. Set up the optimizer for training.
6. Train the VAE model using the `train()` method. The training process involves minimizing the ELBO (Evidence Lower Bound) loss function, which consists of a Negative Log-Likelihood (NLL) term and a Kullback-Leibler (KL) divergence term. Arguments used in this function include:
- `x_train`: Input data for training the VAE, a PyTorch tensor representing the VAE input data.
- `y_train`: Output data for training the VAE, a PyTorch tensor representing the VAE output data.
- `optim`: The optimizer used for training the VAE, a PyTorch optimizer object, such as `torch.optim.Adam`, that specifies the optimization algorithm and its hyperparameters, including the learning rate (`lr`).
- `x_val`: Input data for validation during training. (optional, default `None`).
- `y_val`: Output data for validation during training. (optional, default `None`).
- `epochs`: The number of training epochs (epoch: a single pass through the data).
- `batch_size`: The batch size used during training. If an integer is provided, the same batch size will be used for all epochs. If a list of integers is provided, it should have the same length as the number of epochs, and each value in the list will be used as the batch size for the corresponding epoch (optional, default `256`).
- `w_kl`: The weight of the Kullback-Leibler (KL) divergence term in the ELBO loss function. The KL divergence term encourages the latent distribution to be close to a prior distribution (usually a standard normal distribution). A higher value of `w_kl` will increase the regularization strength on the latent space (optional, default `1.0`).
**Note:** The VAE class internally keeps track of the training loss (`loss_train`) and its components (NLL and KL divergence) during each batch of training. These values can be accessed after training to monitor the training progress and performance. The `loss_train` attribute of the VAE object will be a list containing the training loss values for each batch during training. The `loss_names` attribute contains the names of the loss components: "Loss", "NLL", and "KL_div". These attributes are updated during training and can be used for analysis or visualization.
7. Save the trained VAE model for future use (optional).
<details>
<summary>Code Example with Pre-selected Hyperparameters:</summary>
```python
vae_input = np.load('path/to/vae_input.npy')
vae_output = np.load('path/to/vae_output.npy')
vae = valdo.VAE(n_dim_i=vae_input.shape[1],
n_dim_o=vae_output.shape[1],
n_dim_latent=3,
n_hidden_layers=[3, 6],
n_hidden_size=100,
activation=torch.relu)
# Randomly select 1300 indices for training
choice = np.random.choice(vae_input.shape[0], 1300, replace=False)
train_ind = np.zeros(vae_input.shape[0], dtype=bool)
train_ind[choice] = True
test_ind = ~train_ind
# Split the input and output data into training and validation sets
x_train, x_val = vae_input[train_ind], vae_input[test_ind]
y_train, y_val = vae_output[train_ind], vae_output[test_ind]
# Convert the data to torch tensors
x_train, x_val, y_train, y_val = torch.tensor(x_train), torch.tensor(x_val), torch.tensor(y_train), torch.tensor(y_val)
# Set up the optimizer and train the VAE
optimizer = torch.optim.Adam(vae.parameters(), lr=0.001)
vae.train(x_train, y_train, optimizer, x_val, y_val, epochs=300, batch_size=100, w_kl=1.0)
# Save the trained VAE model
vae.save('path/to/trained_vae.pkl')
```
</details><br/>
### Steps 5 & 6: Reconstruction of "Apo" Data & Calculating Difference Maps
In this step, VAE outputs are re-scaled accordingly to recover the original scale, and differences in amplitudes between the original and reconstructed data are calculated. A `recons` and a `diff` column will be created for all datasets.
**Usage:**
To perform the reconstruction, or re-scaling, the `rescale()` function can be called, providing the necessary arguments:
- `recons_path`: Path to the reconstructed output of the VAE in NumPy format.
- `intersection_path`: Path to the pickle file containing the intersection of all scaled datasets.
- `union_path`: Path to the pickle file containing the union data of all scaled datasets.
- `input_files`: List of input file paths. This list should be in the same order as in `vae_input.npy` or `intersection.mtz`.
- `info_folder`: Path to the folder containing files with the mean and SD used for standardization previously.
- `output_folder`: Path to the folder where the reconstructed data will be saved.
- `amplitude_col`: Column in the MTZ file that contains structure factor amplitudes to calculate the difference column.
<details>
<summary>Code Example:</summary>
```python
recons = vae.reconstruct(tensor object of vae_input)
recons = recons.detach().cpu().numpy()
np.save("path/to/reconstructed_vae.npy", recons)
valdo.preprocessing.rescale(recons_path="path/to/reconstructed_vae.npy",
intersection_path="path/to/intersection.pkl",
union_path="path/to/union.pkl",
input_files=["path/to/data/01.mtz", "path/to/data/02.mtz", ...],
info_folder="path/to/info_folder",
output_folder="path/to/output_folder",
amplitude_col="name_of_column_with_scaled_amplitudes")
```
</details><br/>
### Steps 7 & 8: Gaussian Blurring & Searching for Blobs
**Note Regarding Phases:** In this section, phases are required for each dataset. You can obtain phases by completing refinement via PHENIX for each dataset, and utilizing those phases.
**Note Regarding Models:** In this section, models are also required for each dataset. These can also be obtained by refinement via PHENIX for each dataset, and they should be stored in a single folder, with the same naming convention (i.e. `##.mtz`).
We offer a command-line tool for automatic refinement using PHENIX. Based on our tests, starting with a single apo model yields satisfactory phases and models for the following real-space maps. You can find an example `refine_drug.eff` file in the `notebook/` directory. You can modify the `eff` configuration file based on your needs.
Make sure you have installed [PHENIX](https://phenix-online.org/) before using the following CLI tool!
<details>
<summary>Code Example:</summary>
```shell
valdo.refine --pdbpath "xxx/xxx_apo.pdb" --mtzpath "xxx/*.mtz" --output "yyy/" --eff "xxx/refine_drug.eff"
```
</details><br/>
In this step, we aim to identify significant changes in electron density caused by ligand binding to a protein. By taking the absolute value of the electron density difference maps and applying Gaussian blurring, a new map is created with merged positive electron density blobs. The blurring process attempts to reduce noise. Blobs are then identified and characterized above a specified contour level and volume threshold.
**Usage:**
To generate blobs from electron density maps, call the `generate_blobs()` function, which takes electron density map files and corresponding refined protein models as inputs. The function preprocesses the maps and identifies blobs above a specified contour level and volume threshold (the volume threshold is the default set by `gemmi`). The output is a DataFrame containing statistics for each identified blob, including peak value, score, centroid coordinates, volume, and radius.
This function can be called with the following arguments:
- `input_files`: List of input file paths.
- `model_folder`: Path to the folder containing the refined models for each dataset (pdb format).
- `diff_col`: Name of the column representing diffraction values in the input MTZ files.
- `phase_col`: Name of the column representing phase values in the input MTZ files.
- `output_folder`: Path to the output folder where the blob statistics DataFrame will be saved.
- `cutoff`: Blob cutoff value. Blobs with values below this cutoff will be ignored (optional, default `5`).
- `negate`: Whether to negate the blob statistics (optional, default `False`). Use True if there is interest in both positive and negative peaks, which is not typically of interest here due to the absolute value function applied to the map.
- `sample_rate`: Sample rate for generating the grid in the FFT process (optional, default `3`).
<details>
<summary>Code Example (3.5 Selected as Desired Cutoff):</summary>
```python
valdo.blobs.generate_blobs(input_files=["path/to/data/01.mtz", "path/to/data/02.mtz", ...],
model_folder="path/to/refined_models_folder",
diff_col="difference_column_name",
phase_col="phase_column_name",
output_folder="path/to/output_folder",
cutoff=3.5)
```
</details><br/>
### Step 9: Identifying Events
In this final step, the highest scoring blobs returned in the previous step can be analyzed individually. If the blob is plausibly a ligand, refinement with a ligand may be completed to determine whether or not the blob can be considered a "hit."
Blobs that are returned can be related to various other events, not just ligand binding. Examples may include ligand-induced conformational change (which would still indicate the presence of a ligand) or various other unrelated conformational changes, such as radiation damage or cysteine oxidation (as is seen in `pipeline.ipynb`).
| /rs-valdo-0.0.4.tar.gz/rs-valdo-0.0.4/README.md | 0.954489 | 0.910704 | README.md | pypi |
import numpy as np
import reciprocalspaceship as rs
from tqdm import tqdm
import pandas as pd
import re
import glob
from scipy.ndimage import gaussian_filter
import os
import gemmi
def generate_blobs(input_files, model_folder, diff_col, phase_col, output_folder, cutoff=5, negate=False, sample_rate=3):
    """
    Generates blobs from preprocessed electron density difference maps and saves the blob statistics to a DataFrame.

    The pre-processing step takes the absolute value of the difference map and applies a Gaussian blur with radius
    5 Angstroms. Blobs above the given contour level are then identified with gemmi's find_blobs_by_flood_fill
    method and characterized by metrics such as volume (proportional to the number of voxels in the region),
    score (sum of values at every voxel in the region), peak value (highest sigma value in the region),
    centroid coordinates, and an effective spherical radius.

    Args:
        input_files (list): List of input MTZ file paths.
        model_folder (str): Path to the folder containing the refined models for each dataset
            (PDB format, named `<sample_id>.pdb` to match each MTZ basename).
        diff_col (str): Name of the column holding the difference-map amplitudes.
        phase_col (str): Name of the column holding the phase values.
        output_folder (str): Output folder for `blob_stats.pkl` and `error_log.txt`.
        cutoff (int, optional): Blob contour cutoff; blobs below it are ignored. Default is 5.
        negate (bool, optional): Whether to negate the peak/score statistics. Default is False.
            Use True if there is interest in both positive and negative peaks.
        sample_rate (int, optional): Sample rate for generating the grid in the FFT process. Default is 3.

    Returns:
        None

    Example:
        input_files = ['./data/file1.mtz', './data/file2.mtz']
        model_folder = './models'
        diff_col = 'diff'
        phase_col = 'refine_PH2FOFCWT'
        output_folder = './output'
        generate_blobs(input_files, model_folder, diff_col, phase_col, output_folder)
    """
    def preprocess(grid, radius_in_A=5):
        """
        Return the absolute value of the map blurred with a Gaussian of roughly radius_in_A Angstroms.

        Args:
            grid (gemmi.FloatGrid): Input map grid to be preprocessed.
            radius_in_A (int, optional): Blur radius in Angstroms. Default is 5.

        Returns:
            numpy.ndarray: Preprocessed map values.
        """
        grid_spacing = np.min(grid.spacing)
        matrix = np.absolute(grid)
        radius_in_voxels = int(radius_in_A / grid_spacing)
        sigma = int(radius_in_voxels / 3)
        return gaussian_filter(matrix, sigma=sigma, radius=radius_in_voxels)

    # Create the output folder up front; previously the error log (opened below)
    # could not be written when the folder did not yet exist.
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    blob_stats = []
    error_file = os.path.join(output_folder, 'error_log.txt')  # Path to the error log file
    for file in tqdm(input_files):
        sample = rs.read_mtz(file)[[diff_col, phase_col]].dropna()
        sample_id = os.path.splitext(os.path.basename(file))[0]
        try:
            # The model is required for each dataset; reading it validates its presence.
            structure = gemmi.read_pdb(f'{model_folder}/{sample_id}.pdb')
        except Exception as e:
            error_message = f'Could not identify the model file for sample {sample_id}: {str(e)}.\n'
            with open(error_file, 'a') as f:
                f.write(error_message)
            continue
        sample_gemmi = sample.to_gemmi()
        grid = sample_gemmi.transform_f_phi_to_map(diff_col, phase_col, sample_rate=sample_rate)
        grid.normalize()
        blurred_grid = preprocess(grid)
        grid.set_subarray(blurred_grid, [0, 0, 0])
        grid.normalize()
        mean, sigma = np.mean(np.array(grid)), np.std(np.array(grid))
        blobs = gemmi.find_blobs_by_flood_fill(grid, cutoff=cutoff, negate=negate)
        for blob in blobs:
            blob_stat = {
                "sample" : sample_id,
                "peakz" : (blob.peak_value - mean) / sigma,  # peak height in sigma units of the blurred map
                "peak" : blob.peak_value,
                "score" : blob.score,
                "cenx" : blob.centroid.x,
                "ceny" : blob.centroid.y,
                "cenz" : blob.centroid.z,
                "volume" : blob.volume,
                # Radius of a sphere with the same volume as the blob.
                "radius" : (blob.volume / (4/3 * np.pi)) ** (1/3)
            }
            if negate:
                # Only negate keys that actually exist; the previous list also
                # contained 'scorez', which raised KeyError when negate=True.
                for k in ('peak', 'peakz', 'score'):
                    blob_stat[k] = -blob_stat[k]
            blob_stats.append(blob_stat)
    blob_stats_df = pd.DataFrame(blob_stats)
    blob_stats_df.to_pickle(os.path.join(output_folder, 'blob_stats.pkl'))
import pandas as pd
import reciprocalspaceship as rs
import numpy as np
import os
from tqdm import tqdm
def find_intersection(input_files, output_path, amplitude_col='F-obs-scaled'):
    """
    Finds the intersection of `amplitude_col` from multiple input MTZ files.

    Each readable file contributes one column (named after the file basename);
    only Miller indices present in every contributing file are kept. Files that
    cannot be read or that lack `amplitude_col` are skipped.

    Args:
        input_files (list): List of input MTZ file paths.
        output_path (str): Path to save the output pickle file containing the intersection data.
        amplitude_col (str, optional): Name of the amplitude column to extract. Default 'F-obs-scaled'.
    """
    df_list = []
    for file in tqdm(input_files):
        try:
            df = rs.read_mtz(file)[[amplitude_col]]
        except Exception:
            # Skip unreadable files or files missing the requested column rather
            # than aborting the batch. (A bare `except:` here previously also
            # swallowed KeyboardInterrupt/SystemExit.)
            continue
        df = df.rename(columns={amplitude_col: os.path.basename(file)})
        df_list.append(df)
    result = pd.concat(df_list, axis=1, join='inner')
    result.to_pickle(output_path)
def find_union(input_files, output_path, amplitude_col='F-obs-scaled'):
    """
    Finds the union of `amplitude_col` from multiple input MTZ files.

    Each readable file contributes one column (named after the file basename);
    all Miller indices appearing in any contributing file are kept, with NaN
    where a file lacks that index. Files that cannot be read or that lack
    `amplitude_col` are skipped.

    Args:
        input_files (list): List of input MTZ file paths.
        output_path (str): Path to save the output pickle file containing the union data.
        amplitude_col (str, optional): Name of the amplitude column to extract. Default 'F-obs-scaled'.
    """
    df_list = []
    for file in tqdm(input_files):
        try:
            df = rs.read_mtz(file)[[amplitude_col]]
        except Exception:
            # Skip unreadable files or files missing the requested column rather
            # than aborting the batch. (A bare `except:` here previously also
            # swallowed KeyboardInterrupt/SystemExit.)
            continue
        df = df.rename(columns={amplitude_col: os.path.basename(file)})
        df_list.append(df)
    result = pd.concat(df_list, axis=1, join='outer')
    result.to_pickle(output_path)
def standardize(input_, output_folder):
"""
Used by `generate_vae_io`, this helper function standardizes the input data and saves the standardized data, mean, and standard deviation to the specified output folder.
Args:
input_ (numpy.ndarray): The input data to be standardized.
output_folder (str): The path to the output folder where the standardized data, mean, and standard deviation will be saved as pickle files.
Returns:
tuple: A tuple containing the standardized data (numpy.ndarray), mean (float), and standard deviation (float).
"""
mean = np.mean(input_)
sd = np.std(input_)
standard = (input_ - mean)/sd
if not os.path.exists(output_folder):
os.makedirs(output_folder)
standard.to_pickle(os.path.join(output_folder, 'union_standardized.pkl'))
mean.to_pickle(os.path.join(output_folder, 'union_mean.pkl'))
sd.to_pickle(os.path.join(output_folder, 'union_sd.pkl'))
return standard, mean, sd
def generate_vae_io(intersection_path, union_path, io_folder):
    """
    Build the VAE input/output arrays from the intersection and union datasets.

    The transposed union is standardized (saving its mean/SD pickles to
    `io_folder` via `standardize`) and becomes the VAE output. The transposed
    intersection is standardized with those same union statistics and becomes
    the VAE input. Both arrays are saved to `io_folder` as `vae_input.npy`
    and `vae_output.npy`.

    Args:
        intersection_path (str): The path to the intersection dataset pickle file.
        union_path (str): The path to the union dataset pickle file.
        io_folder (str): The output folder for the VAE input/output and the mean/SD pickles.
    """
    intersection = pd.read_pickle(intersection_path)
    union = pd.read_pickle(union_path)

    # VAE output: standardized union (datasets as rows after the transpose).
    standardized_union, union_mean, union_sd = standardize(union.T, io_folder)
    vae_output = standardized_union.values.astype(np.float32)

    # VAE input: intersection standardized with the union statistics so both
    # sides live on the same scale.
    transposed = intersection.T
    transposed = (transposed - union_mean[transposed.columns]) / union_sd[transposed.columns]
    vae_input = transposed.values.astype(np.float32)

    os.makedirs(io_folder, exist_ok=True)
    np.save(os.path.join(io_folder, "vae_input.npy"), vae_input)
    np.save(os.path.join(io_folder, "vae_output.npy"), vae_output)
def rescale(recons_path, intersection_path, union_path, input_files, info_folder, output_folder, amplitude_col='F-obs-scaled'):
    """
    Re-scales datasets to recover the VAE outputs in the original scale ('recons'
    column) and calculates the difference in amplitudes ('diff' column).

    Input files should be in the same order as the intersection & union rows.

    Args:
        recons_path (str): Path to the reconstructed output of the VAE in NumPy format.
        intersection_path (str): Path to the pickle file containing the intersection of all scaled datasets.
        union_path (str): Path to the pickle file containing the union data of all scaled datasets.
        input_files (list): List of input file paths.
        info_folder (str): Path to the folder containing the 'union_mean.pkl' / 'union_sd.pkl'
            files used to standardize previously.
        output_folder (str): Path to the folder where the augmented MTZ files will be saved.
        amplitude_col (str): Column in the MTZ file that contains the structure factor
            amplitudes used to calculate the difference column.

    Returns:
        None
    """
    reconstructed = np.load(recons_path)
    intersection = pd.read_pickle(intersection_path)
    union = pd.read_pickle(union_path)
    # Rows: Miller indices of the union; columns: dataset basenames (intersection order).
    recons_df = pd.DataFrame(reconstructed.T, index=union.index, columns=intersection.columns)
    union_mean = pd.read_pickle(os.path.join(info_folder, 'union_mean.pkl'))
    union_sd = pd.read_pickle(os.path.join(info_folder, 'union_sd.pkl'))
    for mtz_file in tqdm(input_files):
        dataset = rs.read_mtz(mtz_file)
        reflections = dataset.index
        sample_col = recons_df[os.path.basename(mtz_file)]
        # Invert the standardization (x = z * sd + mean), restricted to this
        # dataset's own reflections.
        restored = sample_col[reflections] * union_sd[reflections] + union_mean[reflections]
        dataset['recons'] = rs.DataSeries(restored, dtype="SFAmplitude")
        dataset['diff'] = dataset[amplitude_col] - dataset['recons']
        os.makedirs(output_folder, exist_ok=True)
        dataset.write_mtz(os.path.join(output_folder, os.path.basename(mtz_file)))
import os
import time

import numpy as np
import pandas as pd
import reciprocalspaceship as rs
def get_aniso_args_np(uaniso, reciprocal_cell_paras, hkl):
    """
    Compute the anisotropic Debye-Waller exponents, 2*pi^2 * h^T U h, per reflection.

    Args:
        uaniso: Iterable with the six anisotropic parameters (U11, U22, U33, U12, U13, U23).
        reciprocal_cell_paras: [a*, b*, c*, cos(alpha*), cos(beta*), cos(gamma*)].
        hkl: Array of Miller indices with shape (N, 3).

    Returns:
        numpy.ndarray: One exponent value per reflection.
    """
    u11, u22, u33, u12, u13, u23 = uaniso
    ar, br, cr, cos_alphar, cos_betar, cos_gammar = reciprocal_cell_paras
    h, k, l = hkl.T
    diagonal = u11 * (h * ar) ** 2 + u22 * (k * br) ** 2 + u33 * (l * cr) ** 2
    cross = 2.0 * (u12 * h * k * ar * br * cos_gammar
                   + u13 * h * l * ar * cr * cos_betar
                   + u23 * k * l * br * cr * cos_alphar)
    return 2.0 * np.pi ** 2 * (diagonal + cross)
class Scaler(object):
    """
    Anisotropic analytical scaler for structure factor amplitudes.

    Scales each target dataset onto a reference dataset by fitting a global
    scale factor ln(k) and an anisotropic Debye-Waller tensor U
    (Afonine, P. V., et al. Acta Cryst. D69 (2013): 625-634).

    Args:
        reference_mtz (str): Path to the MTZ file used as the reference dataset.
        columns (list, optional): Column names to load. The first name is used
            for scaling, while the remaining names will be saved as is without
            any alterations. Defaults to ['F-obs', 'SIGF-obs'].
    """
    def __init__(self, reference_mtz, columns=None):
        # Resolve the default here to avoid a shared mutable default argument.
        self.columns = ['F-obs', 'SIGF-obs'] if columns is None else columns
        self.base_mtz = rs.read_mtz(reference_mtz)[self.columns]
        self.base_mtz.dropna(inplace=True)
        # Record reciprocal space parameters used by the Debye-Waller exponent.
        reciprocal_cell = self.base_mtz.cell.reciprocal()
        self.reciprocal_cell_paras = [reciprocal_cell.a,
                                      reciprocal_cell.b,
                                      reciprocal_cell.c,
                                      np.cos(np.deg2rad(reciprocal_cell.alpha)),
                                      np.cos(np.deg2rad(reciprocal_cell.beta)),
                                      np.cos(np.deg2rad(reciprocal_cell.gamma))]

    def _get_ln_k(self, FA, FB, hkl, uaniso):
        """Closed-form update of ln(k) given the current anisotropic tensor."""
        args = get_aniso_args_np(uaniso, self.reciprocal_cell_paras, hkl)
        return np.mean(args + np.log(FA / FB))

    def _get_uaniso(self, FA, FB, hkl, ln_k):
        """Least-squares (normal equations) update of the six U parameters given ln(k)."""
        # Design matrix columns: h^2, k^2, l^2, 2hk, 2lh, 2kl.
        V = np.concatenate([hkl**2, 2 * hkl[:, [0, 2, 1]] * hkl[:, [1, 0, 2]]], axis=-1)
        Z = (np.log(FA / FB) - ln_k) / (2 * np.pi**2)
        M = V.T @ V
        b = -np.sum(Z * V.T, axis=-1)
        return np.linalg.inv(M) @ b

    def ana_getku(self, FA, FB, hkl, n_iter=5):
        """
        Use the analytical scaling method to get parameters k and uaniso, with purely numpy.

        Alternates closed-form updates of ln(k) and uaniso for `n_iter` rounds.
        Afonine, P. V., et al. Acta Crystallographica Section D: Biological Crystallography 69.4 (2013): 625-634.

        Returns:
            tuple: (ln_k, uaniso).

        TODO: opt_getku, use stepwise optimizer to further optimize the parameters, in pytorch
        """
        # Previously n_iter < 1 raised a confusing NameError on ln_k.
        if n_iter < 1:
            raise ValueError("n_iter must be at least 1")
        uaniso = np.zeros(6)  # initialize with a zero (isotropic) tensor
        for _ in range(n_iter):
            ln_k = self._get_ln_k(FA, FB, hkl, uaniso)
            uaniso = self._get_uaniso(FA, FB, hkl, ln_k)
        return ln_k, uaniso

    def scaleit(self, FB, ln_k, uaniso, hkl):
        """Apply the fitted scale: FB_scaled = exp(ln_k) * exp(-h^T U h) * FB."""
        args = get_aniso_args_np(uaniso, self.reciprocal_cell_paras, hkl)
        return np.exp(ln_k) * np.exp(-args) * FB

    def get_metric(self, FA, FB, uaniso, ln_k, hkl):
        """Return [LS_before, corr_before, LS_after, corr_after] for the fitted scale."""
        # Before scaling
        LS_i = np.sum((FA - FB)**2)
        corr_i = np.corrcoef(FA, FB)[0, 1]
        # After scaling
        FB_scaled = self.scaleit(FB, ln_k, uaniso, hkl)
        LS_f = np.sum((FA - FB_scaled)**2)
        corr_f = np.corrcoef(FA, FB_scaled)[0, 1]
        return [LS_i, corr_i, LS_f, corr_f]

    def batch_scaling(self, mtz_path_list, outputmtz_path='./scaled_mtzs/', reportfile='./scaling_data.json', verbose=True, n_iter=5):
        """
        Scale every dataset in `mtz_path_list` onto the reference dataset.

        For each file, the scale parameters are fitted on the reflections shared
        with the reference, then applied to the complete dataset; a
        '<amplitude>-scaled' column is added and the file is written to
        `outputmtz_path` under its original basename. Metrics are saved both as
        `metrics.pkl` in `outputmtz_path` and as JSON to `reportfile`.

        Args:
            mtz_path_list (list): Paths of the MTZ files to scale.
            outputmtz_path (str, optional): Output folder for the scaled MTZ files.
            reportfile (str, optional): Path of the JSON report with scaling metrics.
            verbose (bool, optional): Print per-file metrics while scaling.
            n_iter (int, optional): Iterations of the analytical scaling method.

        Returns:
            list: One [name, LS_before, corr_before, LS_after, corr_after] row per file.
        """
        # Create the output folder up front; previously write_mtz failed when
        # the default './scaled_mtzs/' did not exist.
        os.makedirs(outputmtz_path, exist_ok=True)
        metrics = []
        for path in mtz_path_list:
            start_time = time.time()
            concrete_filename = os.path.basename(path).replace(".mtz", "")  # e.g. "PTP1B_yxxx_idxs"
            temp_mtz = rs.read_mtz(path)[self.columns].dropna()
            # Fit the scale on the reflections shared with the reference dataset.
            merge = self.base_mtz.merge(temp_mtz, left_index=True, right_index=True,
                                        suffixes=('ref', 'target'), check_isomorphous=False)
            FA = merge[self.columns[0] + "ref"].to_numpy()
            FB = merge[self.columns[0] + "target"].to_numpy()
            hkl = merge.get_hkls()
            ln_k, uaniso = self.ana_getku(FA, FB, hkl, n_iter=n_iter)
            metric = self.get_metric(FA, FB, uaniso, ln_k, hkl)
            # Apply the fitted scale to the complete dataset (not just the overlap).
            FB_complete = temp_mtz[self.columns[0]].to_numpy()
            hkl_complete = temp_mtz.get_hkls()
            temp_mtz = temp_mtz.reset_index()
            temp_mtz[self.columns[0] + '-scaled'] = rs.DataSeries(
                self.scaleit(FB_complete, ln_k, uaniso, hkl_complete), dtype="SFAmplitude")
            temp_mtz = temp_mtz.set_index(['H', 'K', 'L'])
            # Save the scaled mtz file (path join instead of string concat, so a
            # missing trailing slash in outputmtz_path no longer breaks the name).
            temp_mtz.write_mtz(os.path.join(outputmtz_path, concrete_filename + ".mtz"))
            if verbose:
                print(f"LS before: {metric[0]:.1f}", f"LS after: {metric[2]:.0f}", flush=True)
                print(f"Corr before: {metric[1]:.3f}", f"Corr after: {metric[3]:.3f}", flush=True)
                print(f"Time: {time.time()-start_time:.3f}", flush=True)
                print("=" * 20)
            metrics.append([concrete_filename, *metric])
        report = pd.DataFrame(metrics)
        report.to_pickle(os.path.join(outputmtz_path, 'metrics.pkl'))
        # `reportfile` was previously accepted but never used; write the
        # documented JSON report as well.
        report.to_json(reportfile)
        return metrics
import torch
import torch.nn as nn
import numpy as np
import numbers
class DenseLayer(nn.Module):
    """A standard fully connected layer: linear -> activation -> dropout.

    Parameters
    ----------
    in_features : int
        Size of each input sample
    out_features : int
        Size of each output sample
    activation : None or torch function
        Nonlinear activation type; ``None`` means identity (no nonlinearity)
    dropout : float, 0 - 1, default None
        Probability of an element to be zeroed; ``None`` disables dropout
    """

    def __init__(self, in_features, out_features, activation=None, dropout=None):
        super(DenseLayer, self).__init__()
        self.linear = nn.Linear(in_features, out_features, bias=True)
        self.activation = nn.Identity() if activation is None else activation
        self.dropout = self._get_dropout(dropout)

    def _get_dropout(self, dropout):
        """Map the dropout argument to a module (Identity when disabled)."""
        if dropout is None:
            return nn.Identity()
        if isinstance(dropout, float):
            return nn.Dropout(dropout)
        raise ValueError("Need a valid dropout rate in float, between 0 and 1.")

    def forward(self, x):
        """Apply the linear transform, then the activation, then dropout."""
        return self.dropout(self.activation(self.linear(x)))
class DenseNet(nn.Module):
    """Generic dense trainable nonlinear transform

    Builds a dense feedforward network with nlayers-1 hidden layers of nhidden
    neurons and the specified activation function. The last layer is linear in
    order to access the full real number range and has output_size output neurons.

    Parameters
    ----------
    input_size : int
        number of input neurons
    output_size : int
        number of output neurons
    nlayers : int
        number of layers, including the linear output layer. nlayers=3 means two hidden layers with
        nonlinear activation and one linear output layer.
    nhidden : int or list of int
        number of neurons in each hidden layer, either a number or an array of length nlayers-1
        to specify the width of each hidden layer
    activation : torch element-wise function or None
        nonlinear activation function in hidden layers
    dropout : float or None
        dropout probability passed to every hidden DenseLayer
    init_outputs : None or float
        None means default initialization for the output layer; otherwise the
        output layer weights are zeroed and its bias set to this constant, so the
        network initially outputs init_outputs everywhere.
    **args : kwargs
        Additional keyword arguments passed to each layer
    """

    def __init__(
        self,
        input_size,
        output_size,
        nlayers=3,
        nhidden=100,
        activation=torch.relu,
        dropout=None,
        init_outputs=None,
        **args
    ):
        super().__init__()
        if isinstance(nhidden, numbers.Integral):
            nhidden = nhidden * np.ones(nlayers - 1, dtype=int)
        else:
            nhidden = np.array(nhidden)
            if nhidden.size != nlayers - 1:
                raise ValueError(
                    "Illegal size of nhidden. Expecting 1d array with nlayers-1 elements"
                )
        # Raise instead of assert: asserts are stripped under `python -O`.
        if nlayers <= 1:
            raise ValueError("nlayers should at least be 2!")
        layers = [
            DenseLayer(
                input_size, nhidden[0], activation=activation, dropout=dropout, **args
            )
        ]
        for i in range(nlayers - 2):
            layers.append(
                DenseLayer(
                    nhidden[i], nhidden[i + 1], activation=activation, dropout=dropout, **args
                )
            )
        # Build the final linear layer once (previously duplicated in both
        # branches of an if/else), then optionally pin its initial output.
        final_layer = nn.Linear(nhidden[-1], output_size, **args)
        if init_outputs is not None:
            nn.init.zeros_(final_layer.weight.data)
            nn.init.constant_(final_layer.bias.data, init_outputs)
        layers.append(final_layer)
        self._layers = nn.Sequential(*layers)

    def forward(self, x):
        """Run the input through all layers in sequence."""
        return self._layers(x)
def sampling(z_mean, z_log_var):
    """
    Reparameterization trick: draw z ~ N(z_mean, exp(z_log_var)).

    Standard normal noise is sampled and then shifted/scaled, so the draw stays
    differentiable with respect to the distribution parameters.
    """
    noise = torch.randn(z_mean.size(0), z_mean.size(1), device=z_mean.device)
    return z_mean + torch.exp(0.5 * z_log_var) * noise
def elbo(mu_z, logvar_z, y_train, y_recons, w_kl):
    """
    Negative evidence lower bound for a Gaussian VAE, ignoring NaN targets.

    The likelihood term is a unit-variance Gaussian evaluated only on the
    non-NaN entries of ``y_train`` and averaged over the batch. The KL term is
    the closed-form divergence between N(mu_z, exp(logvar_z)) and the standard
    normal prior, averaged over the batch.

    Fix: the KL formula previously subtracted ``mu_z.size(1)`` inside the
    per-dimension sum (total offset -0.5*d^2 instead of -0.5*d), which gave a
    wrong — even negative — KL for latent dimension > 1. The correct closed
    form subtracts 1 per dimension. The offset is constant w.r.t. parameters,
    so gradients (and the n_dim_latent=1 default) were unaffected.

    Returns:
        tuple: (total loss, negative log-likelihood, KL divergence), where
        total = NLL + w_kl * KL.
    """
    nan_bool = torch.isnan(y_train)
    eff_dim = torch.sum(~nan_bool)
    least_squares = torch.sum((y_train[~nan_bool] - y_recons[~nan_bool])**2)
    log_lkhd = (-0.5*eff_dim*np.log(2*np.pi) - 0.5*least_squares)/y_train.size(0)
    # KL divergence between N(mu, exp(logvar)) and N(0, I): 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2)
    kl_div = torch.mean(torch.sum(0.5*(torch.square(mu_z) + torch.exp(logvar_z) - 1 - logvar_z), dim=1))
    total_loss = -log_lkhd + w_kl*kl_div
    return total_loss, -log_lkhd, kl_div
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from .vae_basics import DenseNet, sampling, elbo
from .helper import try_gpu
from tqdm import tqdm
import pickle
class VAE(nn.Module):
    '''
    Variational autoencoder with fully-connected (DenseNet) encoder and decoder.

    The encoder maps an input vector to the mean and log-variance of a
    Gaussian posterior over the latent space; the decoder maps latent
    samples back to the output space.

    Parameters:
    -----------
    n_dim_i : int
        Input dimensionality
    n_dim_o : int
        Output dimensionality
    n_dim_latent : int, default 1
        Number of dimensionality in latent space
    n_hidden_layers : int, or [int, int], default 3
        Number of hidden layers in the encoder and decoder. If an int is given, it will applied to both encoder and decoder;
        If a length 2 list is given, first int will be used for encoder, the second will be used for decoder
    n_hidden_size : int, or [int, int], or array of int, default 100
        Number of units in hidden layers. If an int is given, it will be applied to all hidden layers in both encoder and decoder;
        If a length 2 array is given, first int will be used for all layers in the encoder, the second will be used for the decoder.
        Or an array with length equal to the number of hidden layers can be given, the number of units will be assigned accordingly.
    activation : callable, default torch.tanh
        activation function for the hidden layers
    device : torch.device
        Device the encoder/decoder are placed on.
        NOTE(review): the default ``try_gpu()`` is evaluated once at class
        definition time, not per instance — confirm this is intended.
    '''
    def __init__(self, n_dim_i, n_dim_o, n_dim_latent=1, n_hidden_layers=3, n_hidden_size=100, activation=torch.tanh, device=try_gpu()):
        super(VAE, self).__init__()
        # Resolve the number of hidden layers for encoder and decoder.
        if type(n_hidden_layers) is int:
            self.n_layer_encoder = n_hidden_layers
            self.n_layer_decoder = n_hidden_layers
        elif len(n_hidden_layers) == 2:
            self.n_layer_encoder = n_hidden_layers[0]
            self.n_layer_decoder = n_hidden_layers[1]
        else:
            raise ValueError("Please provide legal n_hidden_layers!")
        # List of hidden units in encoder and decoder.
        # NOTE(review): a length-2 n_hidden_size is always read as
        # (encoder size, decoder size), even when the total number of
        # hidden layers happens to be 2 — confirm this is the intent.
        if type(n_hidden_size) is int:
            self.n_size_encoder = [n_hidden_size]*self.n_layer_encoder
            self.n_size_decoder = [n_hidden_size]*self.n_layer_decoder
        elif len(n_hidden_size) == 2:
            self.n_size_encoder = [n_hidden_size[0]]*self.n_layer_encoder
            self.n_size_decoder = [n_hidden_size[1]]*self.n_layer_decoder
        elif len(n_hidden_size) == self.n_layer_encoder+self.n_layer_decoder:
            self.n_size_encoder = n_hidden_size[:self.n_layer_encoder]
            self.n_size_decoder = n_hidden_size[self.n_layer_encoder:]
        else:
            raise ValueError("Please provide legal n_hidden_size!")
        self.dim_x = n_dim_i
        self.dim_y = n_dim_o
        self.activation = activation
        self.dim_z = n_dim_latent
        self.device = device
        # Encoder outputs 2*dim_z values: (z_mean, z_log_var) concatenated.
        self.encoder = DenseNet(self.dim_x, self.dim_z * 2, self.n_layer_encoder+1, self.n_size_encoder, self.activation).to(device)
        self.decoder = DenseNet(self.dim_z, self.dim_y, self.n_layer_decoder+1, self.n_size_decoder, self.activation).to(device)
        self.loss_train = []  # per-step loss history, appended by train()
        self.loss_names = ["Loss", "NLL", "KL_div"]

    def sample(self, n_sample=1000, mu=0, sigma=1):
        """Draw n_sample latents z ~ N(mu, sigma^2) and return their decodings."""
        z = mu + sigma * torch.randn(n_sample, self.dim_z, device=self.device)
        x = self.decoder(z)
        return x

    def reconstruct(self, input_x):
        """Encode input_x, sample one latent z per row, and decode it."""
        encoding = self.encoder(input_x.to(self.device))
        # First dim_z columns are the posterior mean, the rest the log-variance.
        z_mean, z_log_var = encoding[:, :self.dim_z], encoding[:, self.dim_z:]
        z = sampling(z_mean, z_log_var)
        recons = self.decoder(z)
        return recons

    @classmethod
    def load(cls, filepath):
        """Load a VAE previously written by save().

        NOTE(review): pickle-based — only load files from trusted sources.
        The device is not stored, so the constructor default is used.
        """
        with open(filepath, 'rb') as f:
            D = pickle.load(f)
        vae = cls(D['n_dim_i'], D['n_dim_o'], D['n_dim_latent'], D['n_hidden_layers'], D['n_hidden_size'], D['activation'])
        vae.load_state_dict(D['state_dict'])
        vae.loss_train = D['loss_train']
        return vae

    def save(self, filepath):
        '''
        Customized save function using pickle: stores the constructor
        arguments, the training-loss history, and the model state dict so
        that load() can rebuild an equivalent instance.
        '''
        D = {}
        D['n_dim_i'] = self.dim_x
        D['n_dim_o'] = self.dim_y
        D['n_dim_latent'] = self.dim_z
        D['n_hidden_layers'] = [self.n_layer_encoder, self.n_layer_decoder]
        D['n_hidden_size'] = self.n_size_encoder + self.n_size_decoder
        D['activation'] = self.activation
        D['loss_train'] = self.loss_train
        D['state_dict'] = self.state_dict()
        with open(filepath, 'wb') as f:
            pickle.dump(D, f, pickle.HIGHEST_PROTOCOL)

    def train(self, x_train, y_train, optim, x_val=None, y_val=None, epochs=10, batch_size=256, w_kl=1.0):
        """Train the VAE by minimizing the negative ELBO.

        NOTE(review): this overrides nn.Module.train(), so the standard
        train()/eval() mode switch is shadowed on this class.

        Parameters
        ----------
        x_train, y_train : torch.Tensor
            Training inputs and targets.
        optim : torch.optim.Optimizer
            Optimizer over this model's parameters.
        x_val, y_val : torch.Tensor, optional
            When both are given, one random validation batch is scored
            after every training step.
        epochs : int, default 10
        batch_size : int or list of int, default 256
            A single size used for every epoch, or one size per epoch.
        w_kl : float, default 1.0
            Weight of the KL term passed to elbo().
        """
        # Normalize batch_size to one entry per epoch.
        if isinstance(batch_size, int):
            batch_size = [batch_size] * epochs
        elif isinstance(batch_size, list):
            assert len(batch_size) == epochs
        if x_val is not None and y_val is not None:
            dataset_val = TensorDataset(x_val, y_val)
            sampler = RandomSampler(dataset_val)
            valloader = DataLoader(dataset_val, batch_size=256, sampler=sampler)
        dataset_train = TensorDataset(x_train, y_train)
        for epoch in range(epochs):
            trainloader = DataLoader(dataset_train, batch_size=batch_size[epoch], shuffle=True)
            progress_bar = tqdm(trainloader, desc=f"Epoch {epoch+1}")
            for x_batch, y_batch in progress_bar:
                x_batch = x_batch.to(self.device)
                y_batch = y_batch.to(self.device)
                optim.zero_grad()
                # Forward pass: encode, reparameterize, decode.
                encoding = self.encoder(x_batch)
                z_mean, z_log_var = encoding[:, :self.dim_z], encoding[:, self.dim_z:]
                z = sampling(z_mean, z_log_var)
                recons_x = self.decoder(z)
                loss_train, nll_train, kl_train = elbo(z_mean, z_log_var, y_batch, recons_x, w_kl)
                loss_train.backward()
                optim.step()
                if x_val is not None and y_val is not None:
                    # Score one random validation batch per training step.
                    # NOTE(review): runs with gradients enabled (no
                    # torch.no_grad()) — wasteful but behavior-preserving.
                    x_batch_test, y_batch_test = next(iter(valloader))
                    x_batch_test = x_batch_test.to(self.device)
                    y_batch_test = y_batch_test.to(self.device)
                    encoding_test = self.encoder(x_batch_test)
                    z_mean_test, z_log_var_test = encoding_test[:, :self.dim_z], encoding_test[:, self.dim_z:]
                    z_test = sampling(z_mean_test, z_log_var_test)
                    recons_x_test = self.decoder(z_test)
                    loss_test, nll_test, kl_test = elbo(z_mean_test, z_log_var_test, y_batch_test, recons_x_test, w_kl)
                    loss_np, loss_test_np = loss_train.item(), loss_test.item()
                    # Memory shown in GB; assumes a CUDA build — TODO confirm on CPU-only runs.
                    progress_bar.set_postfix(Trainloss=loss_np, Testloss=loss_test_np, memory=torch.cuda.memory_allocated()/1e9)
                    self.loss_train.append([loss_np, nll_train.item(), kl_train.item(), loss_test_np, nll_test.item(), kl_test.item()])
                else:
                    loss_np = loss_train.item()
                    progress_bar.set_postfix(Trainloss=loss_np, memory=torch.cuda.memory_allocated()/1e9)
                    self.loss_train.append([loss_np, nll_train.item(), kl_train.item()])
from dataclasses import dataclass, field
from typing import Dict, List, Optional
import requests
from rs3_api.hiscores.exceptions import UserNotFoundException
from rs3_api.hiscores.seasonal_hiscore import UserSeasonalHiscore
from rs3_api.hiscores.types import Minigame, Skill, UserSeason
from rs3_api.utils.const import BASE_URL, AccountTypes, Minigames, Skills
@dataclass
class UserHiscore:
    """Hiscore stats for a single RuneScape player.

    Fetching happens eagerly in ``__post_init__``: the hiscore API is
    queried for the given account type (or every type, most restrictive
    first, when ``account_type`` is falsy) and the response is parsed
    into ``skills`` and ``minigames``.

    Raises
    ------
    UserNotFoundException
        When no hiscore entry exists for ``username``.
    """
    username: str
    account_type: Optional[AccountTypes]
    skills: Dict[Skills, Skill] = field(init=False)
    minigames: Dict[Minigames, Minigame] = field(init=False)
    __api_data: str = field(init=False, repr=False)
    __seasonal_hiscore: UserSeasonalHiscore = field(default_factory=UserSeasonalHiscore, repr=False)

    def __post_init__(self):
        if not self.account_type:
            user = self.__get_without_account_type()
        else:
            user = self.__get_user(self.account_type)
        if not user:
            raise UserNotFoundException(self.username)
        self.__process()

    def get_season(self, archived: bool = False) -> List[UserSeason]:
        """Return the player's seasonal hiscore entries."""
        return self.__seasonal_hiscore.get_season(self.username, archived)

    def __process(self):
        """Parse the raw API payload into the skills/minigames dicts."""
        # Payload is newline-separated rows of comma-separated numbers:
        # one row per skill (rank,level,xp) followed by one per minigame
        # (rank,total), in the declaration order of the enums.
        api_data = [row.split(',') for row in self.__api_data.split('\n')]
        # Bug fix: the original line ended with a stray trailing comma,
        # which made self.skills a 1-tuple wrapping the dict instead of
        # the Dict its annotation declares.
        self.skills = self.__get_skills(api_data)
        self.minigames = self.__get_minigames(api_data)
        return self

    def __get_user(self, account_type: AccountTypes) -> str:
        """Fetch (and cache) the raw hiscore payload for one account type.

        Returns the payload string, or '' when the player is not found.
        """
        # Bug fix: the original checked getattr(self, '__api_data', None),
        # which can never match because double-underscore attributes are
        # stored under their name-mangled form — the cache was dead and
        # every call re-fetched.
        try:
            if self.__api_data:
                return self.__api_data
        except AttributeError:
            pass  # nothing fetched yet
        TYPE_URL = {
            AccountTypes.NORMAL.value: '/m=hiscore',
            AccountTypes.IRONMAN.value: '/m=hiscore_ironman',
            AccountTypes.HARDCORE.value: '/m=hiscore_hardcore_ironman'
        }
        res = requests.get(
            f'{BASE_URL+TYPE_URL[account_type]}/index_lite.ws?player={self.username}'
        )
        if res.status_code == 200:
            self.__api_data = res.content.decode()
            self.account_type = account_type
            return self.__api_data
        return ''

    def __get_without_account_type(self) -> Optional[str]:
        """Try every account type, most restrictive first (hardcore -> normal)."""
        for acct_type in reversed(AccountTypes):  # renamed from `type` (shadowed builtin)
            user = self.__get_user(acct_type)
            if user:
                return user
        return None

    def __get_minigames(self, api_data: List[List[str]]) -> Dict[Minigames, Minigame]:
        """Build the minigame dict; minigame rows follow the skill rows."""
        starting_point = len(Skills)
        data = {}
        for i, minigame in enumerate(Minigames):
            pos = i + starting_point
            data[minigame.value] = Minigame(
                name=minigame.value,
                rank=int(api_data[pos][0]),
                total=int(api_data[pos][1])
            )
        return data

    def __get_skills(self, api_data: List[List[str]]) -> Dict[Skills, Skill]:
        """Build the skill dict from the first len(Skills) rows."""
        data = {}
        for i, skill in enumerate(Skills):
            data[skill.value] = Skill(
                name=skill.value,
                rank=int(api_data[i][0]),
                level=int(api_data[i][1]),
                experience=int(api_data[i][2]),
            )
        return data
from enum import Enum, unique
# Root of the Jagex hiscore API.
BASE_URL = "https://secure.runescape.com"


@unique
class Skills(str, Enum):
    """RuneScape skills, declared in the row order of the hiscore API payload.

    Fix: the original members each ended with a stray trailing comma,
    assigning a 1-tuple. Behavior happened to survive only because Enum
    unpacks tuple values through the ``str`` mixin; the commas are removed
    so the string values are what the code literally says.
    """
    OVERALL = "overall"
    ATTACK = "attack"
    DEFENCE = "defence"
    STRENGTH = "strength"
    CONSTITUTION = "constitution"
    RANGED = "ranged"
    PRAYER = "prayer"
    MAGIC = "magic"
    COOKING = "cooking"
    WOODCUTTING = "woodcutting"
    FLETCHING = "fletching"
    FISHING = "fishing"
    FIREMAKING = "firemaking"
    CRAFTING = "crafting"
    SMITHING = "smithing"
    MINING = "mining"
    HERBLORE = "herblore"
    AGILITY = "agility"
    THIEVING = "thieving"
    SLAYER = "slayer"
    FARMING = "farming"
    RUNECRAFTING = "runecrafting"
    HUNTER = "hunter"
    CONSTRUCTION = "construction"
    SUMMONING = "summoning"
    DUNGEONEERING = "dungeoneering"
    DIVINATION = "divination"
    INVENTION = "invention"
    ARCHEOLOGY = "archeology"
@unique
class Minigames(str, Enum):
    """RuneScape minigames/activities, declared in the row order in which
    they follow the skill rows in the hiscore API payload.

    Fix: the original members each ended with a stray trailing comma,
    assigning a 1-tuple. Behavior happened to survive only because Enum
    unpacks tuple values through the ``str`` mixin; the commas are removed
    so the string values are what the code literally says.
    NOTE: DOMINNION_TOWER keeps its historical misspelling — renaming the
    member would break existing callers.
    """
    BOUNTY_HUNTER = "Bounty Hunter"
    BH_ROGUES = "B.H. Rogues"
    DOMINNION_TOWER = "Dominion Tower"
    THE_CRUCIBLE = "The Crucible"
    CASTLE_WARS = "Castle Wars games"
    BA_ATTACKERS = "B.A. Attackers"
    BA_DEFENDERS = "B.A. Defenders"
    BA_COLLECTORS = "B.A. Collectors"
    BA_HEALERS = "B.A. Healers"
    DUEL_TOURNAMENT = "Duel Tournament"
    MOBILISING_ARMIES = "Mobilising Armies"
    CONQUEST = "Conquest"
    FIST_OF_GUTHIX = "Fist of Guthix"
    GG_ATHLETICS = "GG: Athletics"
    GG_RESOURCE_RACE = "GG: Resource Race"
    WE2_ARMADYL_LIFETIME = "WE2: Armadyl Lifetime Contribution"
    WE2_BANDOS_LIFETIME = "WE2: Bandos Lifetime Contribution"
    WE2_ARMADYL_PVP_KILLS = "WE2: Armadyl PvP kills"
    WE2_BANDOS_PVP_KILLS = "WE2: Bandos PvP kills"
    HEIST_GUARD_LEVEL = "Heist Guard Level"
    HEIST_ROBBER_LEVEL = "Heist Robber Level"
    CFP_5_GAME_AVG = "CFP: 5 game average"
    AF15_COW_TIPPING = "AF15: Cow Tipping"
    AF15_RATS_KILLED_AFTER_QUEST = "AF15: Rats killed after the miniquest"
    RUNESCORE = "RuneScore"
    CLUE_EASY = "Clue Scrolls Easy"
    CLUE_MEDIUM = "Clue Scrolls Medium"
    CLUE_HARD = "Clue Scrolls Hard"
    CLUE_ELITE = "Clue Scrolls Elite"
    CLUE_MASTER = "Clue Scrolls Master"
@unique
class AccountTypes(str, Enum):
    """Hiscore account modes.

    Declaration order is significant: callers iterate
    ``reversed(AccountTypes)`` to try the most restrictive mode first
    (hardcore -> ironman -> normal).
    """
    NORMAL = 'normal'
    IRONMAN = 'ironman'
    HARDCORE = 'hardcore_ironman'
# Rule Set 3
> Python package to predict the activity of CRISPR sgRNA sequences using Rule Set 3
## Install
You can install the latest release of rs3 from PyPI using
`pip install rs3`
For Mac users, you may also need to install the OpenMP library with Homebrew:
`brew install libomp`
or install lightgbm without Openmp
`pip install lightgbm --install-option=--nomp`
See the [LightGBM documentation](https://github.com/microsoft/LightGBM/tree/master/python-package)
for more information
## Documentation
You can see the complete documentation for Rule Set 3 [here](https://gpp-rnd.github.io/rs3/).
## Quick Start
### Sequence based model
To calculate Rule Set 3 (sequence) scores, import the predict_seq function from the seq module.
```
from rs3.seq import predict_seq
```
You can store the 30mer context sequences you want to predict as a list.
```
context_seqs = ['GACGAAAGCGACAACGCGTTCATCCGGGCA', 'AGAAAACACTAGCATCCCCACCCGCGGACT']
```
You can specify the
[Hsu2013](https://www.nature.com/articles/nbt.2647) or
[Chen2013](https://www.sciencedirect.com/science/article/pii/S0092867413015316?via%3Dihub)
as the tracrRNA to score with.
We generally find that any tracrRNA that does not have a T in the fifth position is better predicted with the Chen2013 input.
```
predict_seq(context_seqs, sequence_tracr='Hsu2013')
```
Calculating sequence-based features
100%|██████████| 2/2 [00:00<00:00, 15.04it/s]
array([-0.90030944, 1.11451622])
### Target based model
To get target scores, which use features at the endogenous target site to make predictions,
you must build or load feature matrices for the amino acid sequences, conservation scores, and protein domains.
As an example, we'll calculate target scores for 250 sgRNAs in the GeckoV2 library.
```
import pandas as pd
from rs3.predicttarg import predict_target
from rs3.targetfeat import (add_target_columns,
get_aa_subseq_df,
get_protein_domain_features,
get_conservation_features)
```
```
design_df = pd.read_table('test_data/sgrna-designs.txt')
design_df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Input</th>
<th>Quota</th>
<th>Target Taxon</th>
<th>Target Gene ID</th>
<th>Target Gene Symbol</th>
<th>Target Transcript</th>
<th>Target Reference Coords</th>
<th>Target Alias</th>
<th>CRISPR Mechanism</th>
<th>Target Domain</th>
<th>...</th>
<th>On-Target Rank Weight</th>
<th>Off-Target Rank Weight</th>
<th>Combined Rank</th>
<th>Preselected As</th>
<th>Matching Active Arrayed Oligos</th>
<th>Matching Arrayed Constructs</th>
<th>Pools Containing Matching Construct</th>
<th>Pick Order</th>
<th>Picking Round</th>
<th>Picking Notes</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>PSMB7</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000136930</td>
<td>PSMB7</td>
<td>ENST00000259457.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1.0</td>
<td>1.0</td>
<td>7</td>
<td>GCAGATACAAGAGCAACTGA</td>
<td>NaN</td>
<td>BRDN0004619103</td>
<td>NaN</td>
<td>1</td>
<td>0</td>
<td>Preselected</td>
</tr>
<tr>
<th>1</th>
<td>PSMB7</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000136930</td>
<td>PSMB7</td>
<td>ENST00000259457.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1.0</td>
<td>1.0</td>
<td>48</td>
<td>AAAACTGGCACGACCATCGC</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2</td>
<td>0</td>
<td>Preselected</td>
</tr>
<tr>
<th>2</th>
<td>PRC1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198901</td>
<td>PRC1</td>
<td>ENST00000394249.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1.0</td>
<td>1.0</td>
<td>7</td>
<td>AAAAGATTTGCGCACCCAAG</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>1</td>
<td>0</td>
<td>Preselected</td>
</tr>
<tr>
<th>3</th>
<td>PRC1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198901</td>
<td>PRC1</td>
<td>ENST00000394249.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1.0</td>
<td>1.0</td>
<td>8</td>
<td>CTTTGACCCAGACATAATGG</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2</td>
<td>0</td>
<td>Preselected</td>
</tr>
<tr>
<th>4</th>
<td>TOP1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198900</td>
<td>TOP1</td>
<td>ENST00000361337.3</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1.0</td>
<td>1.0</td>
<td>1</td>
<td>NaN</td>
<td>NaN</td>
<td>BRDN0001486452</td>
<td>NaN</td>
<td>2</td>
<td>1</td>
<td>NaN</td>
</tr>
</tbody>
</table>
<p>5 rows × 60 columns</p>
</div>
Throughout the analysis we will be using a core set of ID columns to merge the feature matrices. These ID columns
should uniquely identify an sgRNA and its target site.
```
id_cols = ['sgRNA Context Sequence', 'Target Cut Length', 'Target Transcript', 'Orientation']
```
#### Amino acid sequence input
To calculate the amino acid sequence matrix, you must first load the complete sequence from ensembl using the
`build_transcript_aa_seq_df`. See the documentation for the `predicttarg` module for an example of how to
use this function.
In this example we will use amino acid sequences that have been precalculated using the `write_transcript_data`
function in the `targetdata` module. Check out the documentation for this module for more information on
how to use this function.
We use pyarrow to read the written transcript data.
The stored transcripts are indexed by their Ensembl ID without the version number identifier.
To get this shortened version of the Ensembl ID use the `add_target_columns` function from the `targetfeat` module.
This function adds the 'Transcript Base' column as well as a column indicating the amino acid index ('AA Index')
of the cut site. The 'AA Index' column will be used for merging with the amino acid translations.
```
design_targ_df = add_target_columns(design_df)
design_targ_df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Input</th>
<th>Quota</th>
<th>Target Taxon</th>
<th>Target Gene ID</th>
<th>Target Gene Symbol</th>
<th>Target Transcript</th>
<th>Target Reference Coords</th>
<th>Target Alias</th>
<th>CRISPR Mechanism</th>
<th>Target Domain</th>
<th>...</th>
<th>Combined Rank</th>
<th>Preselected As</th>
<th>Matching Active Arrayed Oligos</th>
<th>Matching Arrayed Constructs</th>
<th>Pools Containing Matching Construct</th>
<th>Pick Order</th>
<th>Picking Round</th>
<th>Picking Notes</th>
<th>AA Index</th>
<th>Transcript Base</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>PSMB7</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000136930</td>
<td>PSMB7</td>
<td>ENST00000259457.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>7</td>
<td>GCAGATACAAGAGCAACTGA</td>
<td>NaN</td>
<td>BRDN0004619103</td>
<td>NaN</td>
<td>1</td>
<td>0</td>
<td>Preselected</td>
<td>64</td>
<td>ENST00000259457</td>
</tr>
<tr>
<th>1</th>
<td>PSMB7</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000136930</td>
<td>PSMB7</td>
<td>ENST00000259457.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>48</td>
<td>AAAACTGGCACGACCATCGC</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2</td>
<td>0</td>
<td>Preselected</td>
<td>46</td>
<td>ENST00000259457</td>
</tr>
<tr>
<th>2</th>
<td>PRC1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198901</td>
<td>PRC1</td>
<td>ENST00000394249.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>7</td>
<td>AAAAGATTTGCGCACCCAAG</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>1</td>
<td>0</td>
<td>Preselected</td>
<td>106</td>
<td>ENST00000394249</td>
</tr>
<tr>
<th>3</th>
<td>PRC1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198901</td>
<td>PRC1</td>
<td>ENST00000394249.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>8</td>
<td>CTTTGACCCAGACATAATGG</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2</td>
<td>0</td>
<td>Preselected</td>
<td>263</td>
<td>ENST00000394249</td>
</tr>
<tr>
<th>4</th>
<td>TOP1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198900</td>
<td>TOP1</td>
<td>ENST00000361337.3</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1</td>
<td>NaN</td>
<td>NaN</td>
<td>BRDN0001486452</td>
<td>NaN</td>
<td>2</td>
<td>1</td>
<td>NaN</td>
<td>140</td>
<td>ENST00000361337</td>
</tr>
</tbody>
</table>
<p>5 rows × 62 columns</p>
</div>
```
transcript_bases = design_targ_df['Transcript Base'].unique()
transcript_bases[0:5]
```
array(['ENST00000259457', 'ENST00000394249', 'ENST00000361337',
'ENST00000368328', 'ENST00000610426'], dtype=object)
```
aa_seq_df = pd.read_parquet('test_data/target_data/aa_seqs.pq', engine='pyarrow',
filters=[[('Transcript Base', 'in', transcript_bases)]])
aa_seq_df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Target Transcript</th>
<th>Target Total Length</th>
<th>Transcript Base</th>
<th>version</th>
<th>seq</th>
<th>molecule</th>
<th>desc</th>
<th>id</th>
<th>AA len</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>ENST00000259457.8</td>
<td>834</td>
<td>ENST00000259457</td>
<td>3</td>
<td>MAAVSVYAPPVGGFSFDNCRRNAVLEADFAKRGYKLPKVRKTGTTI...</td>
<td>protein</td>
<td>None</td>
<td>ENSP00000259457</td>
<td>277</td>
</tr>
<tr>
<th>1</th>
<td>ENST00000394249.8</td>
<td>1863</td>
<td>ENST00000394249</td>
<td>3</td>
<td>MRRSEVLAEESIVCLQKALNHLREIWELIGIPEDQRLQRTEVVKKH...</td>
<td>protein</td>
<td>None</td>
<td>ENSP00000377793</td>
<td>620</td>
</tr>
<tr>
<th>2</th>
<td>ENST00000361337.3</td>
<td>2298</td>
<td>ENST00000361337</td>
<td>2</td>
<td>MSGDHLHNDSQIEADFRLNDSHKHKDKHKDREHRHKEHKKEKDREK...</td>
<td>protein</td>
<td>None</td>
<td>ENSP00000354522</td>
<td>765</td>
</tr>
<tr>
<th>3</th>
<td>ENST00000368328.5</td>
<td>267</td>
<td>ENST00000368328</td>
<td>4</td>
<td>MALSTIVSQRKQIKRKAPRGFLKRVFKRKKPQLRLEKSGDLLVHLN...</td>
<td>protein</td>
<td>None</td>
<td>ENSP00000357311</td>
<td>88</td>
</tr>
<tr>
<th>4</th>
<td>ENST00000610426.5</td>
<td>783</td>
<td>ENST00000610426</td>
<td>1</td>
<td>MPQNEYIELHRKRYGYRLDYHEKKRKKESREAHERSKKAKKMIGLK...</td>
<td>protein</td>
<td>None</td>
<td>ENSP00000483484</td>
<td>260</td>
</tr>
</tbody>
</table>
</div>
From the complete transcript translations, we extract an amino acid subsequence as input to our model. The subsequence
is centered around the amino acid encoded by the nucleotide preceding the cut site in the direction of transcription.
This is the nucleotide that corresponds to the 'Target Cut Length' in a CRISPick design file.
We take 16 amino acids on either side of the cut site for a total sequence length of 33.
The `get_aa_subseq_df` from the `targetfeat` module will calculate these subsequences
from the complete amino acid sequences.
```
aa_subseq_df = get_aa_subseq_df(sg_designs=design_targ_df, aa_seq_df=aa_seq_df, width=16,
id_cols=id_cols)
aa_subseq_df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Target Transcript</th>
<th>Target Total Length</th>
<th>Transcript Base</th>
<th>version</th>
<th>seq</th>
<th>molecule</th>
<th>desc</th>
<th>id</th>
<th>AA len</th>
<th>Target Cut Length</th>
<th>Orientation</th>
<th>sgRNA Context Sequence</th>
<th>AA Index</th>
<th>extended_seq</th>
<th>AA 0-Indexed</th>
<th>AA 0-Indexed padded</th>
<th>seq_start</th>
<th>seq_end</th>
<th>AA Subsequence</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>ENST00000259457.8</td>
<td>834</td>
<td>ENST00000259457</td>
<td>3</td>
<td>MAAVSVYAPPVGGFSFDNCRRNAVLEADFAKRGYKLPKVRKTGTTI...</td>
<td>protein</td>
<td>None</td>
<td>ENSP00000259457</td>
<td>277</td>
<td>191</td>
<td>sense</td>
<td>TGGAGCAGATACAAGAGCAACTGAAGGGAT</td>
<td>64</td>
<td>-----------------MAAVSVYAPPVGGFSFDNCRRNAVLEADF...</td>
<td>63</td>
<td>80</td>
<td>64</td>
<td>96</td>
<td>GVVYKDGIVLGADTRATEGMVVADKNCSKIHFI</td>
</tr>
<tr>
<th>1</th>
<td>ENST00000259457.8</td>
<td>834</td>
<td>ENST00000259457</td>
<td>3</td>
<td>MAAVSVYAPPVGGFSFDNCRRNAVLEADFAKRGYKLPKVRKTGTTI...</td>
<td>protein</td>
<td>None</td>
<td>ENSP00000259457</td>
<td>277</td>
<td>137</td>
<td>sense</td>
<td>CCGGAAAACTGGCACGACCATCGCTGGGGT</td>
<td>46</td>
<td>-----------------MAAVSVYAPPVGGFSFDNCRRNAVLEADF...</td>
<td>45</td>
<td>62</td>
<td>46</td>
<td>78</td>
<td>AKRGYKLPKVRKTGTTIAGVVYKDGIVLGADTR</td>
</tr>
<tr>
<th>2</th>
<td>ENST00000394249.8</td>
<td>1863</td>
<td>ENST00000394249</td>
<td>3</td>
<td>MRRSEVLAEESIVCLQKALNHLREIWELIGIPEDQRLQRTEVVKKH...</td>
<td>protein</td>
<td>None</td>
<td>ENSP00000377793</td>
<td>620</td>
<td>316</td>
<td>sense</td>
<td>TAGAAAAAGATTTGCGCACCCAAGTGGAAT</td>
<td>106</td>
<td>-----------------MRRSEVLAEESIVCLQKALNHLREIWELI...</td>
<td>105</td>
<td>122</td>
<td>106</td>
<td>138</td>
<td>EEGETTILQLEKDLRTQVELMRKQKKERKQELK</td>
</tr>
<tr>
<th>3</th>
<td>ENST00000394249.8</td>
<td>1863</td>
<td>ENST00000394249</td>
<td>3</td>
<td>MRRSEVLAEESIVCLQKALNHLREIWELIGIPEDQRLQRTEVVKKH...</td>
<td>protein</td>
<td>None</td>
<td>ENSP00000377793</td>
<td>620</td>
<td>787</td>
<td>antisense</td>
<td>TGGCCTTTGACCCAGACATAATGGTGGCCA</td>
<td>263</td>
<td>-----------------MRRSEVLAEESIVCLQKALNHLREIWELI...</td>
<td>262</td>
<td>279</td>
<td>263</td>
<td>295</td>
<td>WDRLQIPEEEREAVATIMSGSKAKVRKALQLEV</td>
</tr>
<tr>
<th>4</th>
<td>ENST00000361337.3</td>
<td>2298</td>
<td>ENST00000361337</td>
<td>2</td>
<td>MSGDHLHNDSQIEADFRLNDSHKHKDKHKDREHRHKEHKKEKDREK...</td>
<td>protein</td>
<td>None</td>
<td>ENSP00000354522</td>
<td>765</td>
<td>420</td>
<td>antisense</td>
<td>AAATACTCACTCATCCTCATCTCGAGGTCT</td>
<td>140</td>
<td>-----------------MSGDHLHNDSQIEADFRLNDSHKHKDKHK...</td>
<td>139</td>
<td>156</td>
<td>140</td>
<td>172</td>
<td>GYFVPPKEDIKPLKRPRDEDDADYKPKKIKTED</td>
</tr>
</tbody>
</table>
</div>
#### Lite Scores
You now have all the information you need to calculate "lite" Target Scores, which are less data intensive than complete
target scores, with the `predict_target` function from the `predicttarg` module.
```
lite_predictions = predict_target(design_df=design_df,
aa_subseq_df=aa_subseq_df)
design_df['Target Score Lite'] = lite_predictions
design_df.head()
```
/opt/anaconda3/envs/rs3/lib/python3.8/site-packages/sklearn/base.py:310: UserWarning: Trying to unpickle estimator SimpleImputer from version 1.0.dev0 when using version 0.24.2. This might lead to breaking code or invalid results. Use at your own risk.
warnings.warn(
/opt/anaconda3/envs/rs3/lib/python3.8/site-packages/sklearn/base.py:310: UserWarning: Trying to unpickle estimator Pipeline from version 1.0.dev0 when using version 0.24.2. This might lead to breaking code or invalid results. Use at your own risk.
warnings.warn(
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Input</th>
<th>Quota</th>
<th>Target Taxon</th>
<th>Target Gene ID</th>
<th>Target Gene Symbol</th>
<th>Target Transcript</th>
<th>Target Reference Coords</th>
<th>Target Alias</th>
<th>CRISPR Mechanism</th>
<th>Target Domain</th>
<th>...</th>
<th>Off-Target Rank Weight</th>
<th>Combined Rank</th>
<th>Preselected As</th>
<th>Matching Active Arrayed Oligos</th>
<th>Matching Arrayed Constructs</th>
<th>Pools Containing Matching Construct</th>
<th>Pick Order</th>
<th>Picking Round</th>
<th>Picking Notes</th>
<th>Target Score Lite</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>PSMB7</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000136930</td>
<td>PSMB7</td>
<td>ENST00000259457.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1.0</td>
<td>7</td>
<td>GCAGATACAAGAGCAACTGA</td>
<td>NaN</td>
<td>BRDN0004619103</td>
<td>NaN</td>
<td>1</td>
<td>0</td>
<td>Preselected</td>
<td>0.012467</td>
</tr>
<tr>
<th>1</th>
<td>PSMB7</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000136930</td>
<td>PSMB7</td>
<td>ENST00000259457.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1.0</td>
<td>48</td>
<td>AAAACTGGCACGACCATCGC</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2</td>
<td>0</td>
<td>Preselected</td>
<td>0.048338</td>
</tr>
<tr>
<th>2</th>
<td>PRC1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198901</td>
<td>PRC1</td>
<td>ENST00000394249.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1.0</td>
<td>7</td>
<td>AAAAGATTTGCGCACCCAAG</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>1</td>
<td>0</td>
<td>Preselected</td>
<td>-0.129234</td>
</tr>
<tr>
<th>3</th>
<td>PRC1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198901</td>
<td>PRC1</td>
<td>ENST00000394249.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1.0</td>
<td>8</td>
<td>CTTTGACCCAGACATAATGG</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2</td>
<td>0</td>
<td>Preselected</td>
<td>0.061647</td>
</tr>
<tr>
<th>4</th>
<td>TOP1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198900</td>
<td>TOP1</td>
<td>ENST00000361337.3</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1.0</td>
<td>1</td>
<td>NaN</td>
<td>NaN</td>
<td>BRDN0001486452</td>
<td>NaN</td>
<td>2</td>
<td>1</td>
<td>NaN</td>
<td>-0.009100</td>
</tr>
</tbody>
</table>
<p>5 rows × 61 columns</p>
</div>
If you would like to calculate full target scores then follow the sections below.
#### Protein domain input
To calculate full target scores you will also need inputs for protein domains and conservation.
The protein domain input should have 16 binary columns for 16 different protein domain sources in addition to the
`id_cols`. The protein domain sources are 'Pfam', 'PANTHER', 'HAMAP', 'SuperFamily', 'TIGRfam', 'ncoils', 'Gene3D',
'Prosite_patterns', 'Seg', 'SignalP', 'TMHMM', 'MobiDBLite', 'PIRSF', 'PRINTS', 'Smart', 'Prosite_profiles'.
These columns should be kept in order when inputting for scoring.
In this example we will load the protein domain information from a parquet file, which was written
using `write_transcript_data` function in the `targetdata` module. You can also query transcript data on the fly,
by using the `build_translation_overlap_df` function. See the documentation for the `predicttarg` module for more
information on how to do this.
```
domain_df = pd.read_parquet('test_data/target_data/protein_domains.pq', engine='pyarrow',
filters=[[('Transcript Base', 'in', transcript_bases)]])
domain_df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>type</th>
<th>cigar_string</th>
<th>id</th>
<th>hit_end</th>
<th>feature_type</th>
<th>description</th>
<th>seq_region_name</th>
<th>end</th>
<th>hit_start</th>
<th>translation_id</th>
<th>interpro</th>
<th>hseqname</th>
<th>Transcript Base</th>
<th>align_type</th>
<th>start</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>Pfam</td>
<td></td>
<td>PF12465</td>
<td>36</td>
<td>protein_feature</td>
<td>Proteasome beta subunit, C-terminal</td>
<td>ENSP00000259457</td>
<td>271</td>
<td>1</td>
<td>976188</td>
<td>IPR024689</td>
<td>PF12465</td>
<td>ENST00000259457</td>
<td>None</td>
<td>235</td>
</tr>
<tr>
<th>1</th>
<td>Pfam</td>
<td></td>
<td>PF00227</td>
<td>190</td>
<td>protein_feature</td>
<td>Proteasome, subunit alpha/beta</td>
<td>ENSP00000259457</td>
<td>221</td>
<td>2</td>
<td>976188</td>
<td>IPR001353</td>
<td>PF00227</td>
<td>ENST00000259457</td>
<td>None</td>
<td>41</td>
</tr>
<tr>
<th>2</th>
<td>PRINTS</td>
<td></td>
<td>PR00141</td>
<td>0</td>
<td>protein_feature</td>
<td>Peptidase T1A, proteasome beta-subunit</td>
<td>ENSP00000259457</td>
<td>66</td>
<td>0</td>
<td>976188</td>
<td>IPR000243</td>
<td>PR00141</td>
<td>ENST00000259457</td>
<td>None</td>
<td>51</td>
</tr>
<tr>
<th>3</th>
<td>PRINTS</td>
<td></td>
<td>PR00141</td>
<td>0</td>
<td>protein_feature</td>
<td>Peptidase T1A, proteasome beta-subunit</td>
<td>ENSP00000259457</td>
<td>182</td>
<td>0</td>
<td>976188</td>
<td>IPR000243</td>
<td>PR00141</td>
<td>ENST00000259457</td>
<td>None</td>
<td>171</td>
</tr>
<tr>
<th>4</th>
<td>PRINTS</td>
<td></td>
<td>PR00141</td>
<td>0</td>
<td>protein_feature</td>
<td>Peptidase T1A, proteasome beta-subunit</td>
<td>ENSP00000259457</td>
<td>193</td>
<td>0</td>
<td>976188</td>
<td>IPR000243</td>
<td>PR00141</td>
<td>ENST00000259457</td>
<td>None</td>
<td>182</td>
</tr>
</tbody>
</table>
</div>
Now to transform the `domain_df` into a wide form for model input, we use the `get_protein_domain_features` function
from the `targetfeat` module.
```
domain_feature_df = get_protein_domain_features(design_targ_df, domain_df, id_cols=id_cols)
domain_feature_df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>sgRNA Context Sequence</th>
<th>Target Cut Length</th>
<th>Target Transcript</th>
<th>Orientation</th>
<th>Pfam</th>
<th>PANTHER</th>
<th>HAMAP</th>
<th>SuperFamily</th>
<th>TIGRfam</th>
<th>ncoils</th>
<th>Gene3D</th>
<th>Prosite_patterns</th>
<th>Seg</th>
<th>SignalP</th>
<th>TMHMM</th>
<th>MobiDBLite</th>
<th>PIRSF</th>
<th>PRINTS</th>
<th>Smart</th>
<th>Prosite_profiles</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>AAAAGAATGATGAAAAGACACCACAGGGAG</td>
<td>244</td>
<td>ENST00000610426.5</td>
<td>sense</td>
<td>1</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>1</th>
<td>AAAAGAGCCATGAATCTAAACATCAGGAAT</td>
<td>640</td>
<td>ENST00000223073.6</td>
<td>sense</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>2</th>
<td>AAAAGCGCCAAATGGCCCGAGAATTGGGAG</td>
<td>709</td>
<td>ENST00000331923.9</td>
<td>sense</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>3</th>
<td>AAACAGAAAAAGTTAAAATCACCAAGGTGT</td>
<td>496</td>
<td>ENST00000283882.4</td>
<td>sense</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<th>4</th>
<td>AAACAGATGGAAGATGCTTACCGGGGGACC</td>
<td>132</td>
<td>ENST00000393047.8</td>
<td>sense</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
</tbody>
</table>
</div>
For input into the `predict_target` function, the `domain_feature_df` should have the `id_cols` as well as
columns for each of the 16 protein domain features.
#### Conservation input
Finally, for the full target model you need to calculate conservation features.
The conservation features represent conservation across evolutionary time at the sgRNA cut site and are quantified
using PhyloP scores. These scores are available for download by the UCSC genome browser
for [hg38](https://hgdownload.soe.ucsc.edu/goldenPath/hg38/database/) (phyloP100way),
and [mm39](https://hgdownload.soe.ucsc.edu/goldenPath/mm39/database/) (phyloP35way).
Within this package we query conservation scores using the UCSC genome browser's
[REST API](http://genome.ucsc.edu/goldenPath/help/api.html).
To get conservation scores, you can use the `build_conservation_df` function from the `targetdata` module.
Here we load conservation scores, which were written to parquet using the `write_conservation_data` function from the
`targetdata` module.
```
conservation_df = pd.read_parquet('test_data/target_data/conservation.pq', engine='pyarrow',
filters=[[('Transcript Base', 'in', transcript_bases)]])
conservation_df.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>exon_id</th>
<th>genomic position</th>
<th>conservation</th>
<th>Transcript Base</th>
<th>target position</th>
<th>chromosome</th>
<th>genome</th>
<th>translation length</th>
<th>Target Transcript</th>
<th>Strand of Target</th>
<th>Target Total Length</th>
<th>ranked_conservation</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>ENSE00001866322</td>
<td>124415425.0</td>
<td>6.46189</td>
<td>ENST00000259457</td>
<td>1</td>
<td>9</td>
<td>hg38</td>
<td>277</td>
<td>ENST00000259457.8</td>
<td>-</td>
<td>834</td>
<td>0.639089</td>
</tr>
<tr>
<th>1</th>
<td>ENSE00001866322</td>
<td>124415424.0</td>
<td>7.48071</td>
<td>ENST00000259457</td>
<td>2</td>
<td>9</td>
<td>hg38</td>
<td>277</td>
<td>ENST00000259457.8</td>
<td>-</td>
<td>834</td>
<td>0.686451</td>
</tr>
<tr>
<th>2</th>
<td>ENSE00001866322</td>
<td>124415423.0</td>
<td>6.36001</td>
<td>ENST00000259457</td>
<td>3</td>
<td>9</td>
<td>hg38</td>
<td>277</td>
<td>ENST00000259457.8</td>
<td>-</td>
<td>834</td>
<td>0.622902</td>
</tr>
<tr>
<th>3</th>
<td>ENSE00001866322</td>
<td>124415422.0</td>
<td>6.36001</td>
<td>ENST00000259457</td>
<td>4</td>
<td>9</td>
<td>hg38</td>
<td>277</td>
<td>ENST00000259457.8</td>
<td>-</td>
<td>834</td>
<td>0.622902</td>
</tr>
<tr>
<th>4</th>
<td>ENSE00001866322</td>
<td>124415421.0</td>
<td>8.09200</td>
<td>ENST00000259457</td>
<td>5</td>
<td>9</td>
<td>hg38</td>
<td>277</td>
<td>ENST00000259457.8</td>
<td>-</td>
<td>834</td>
<td>0.870504</td>
</tr>
</tbody>
</table>
</div>
We normalize conservation scores to a within-gene percent rank, in the 'ranked_conservation' column,
in order to make scores comparable across genes and genomes. Note that a rank of 0 indicates the
least conserved nucleotide and a rank of 1 indicates the most conserved.
To featurize the conservation scores, we average across a window of 4 and 32 nucleotides
centered around the nucleotide preceding the cut site in the direction of transcription.
Note that this nucleotide is the 2nd nucleotide in the window of 4 and the 16th nucleotide in the window of 32.
We use the `get_conservation_features` function from the `targetfeat` module to get these features from the
`conservation_df`.
For the `predict_target` function, we need the `id_cols` and the columns 'cons_4' and 'cons_32' in the
`conservation_feature_df`.
```
conservation_feature_df = get_conservation_features(design_targ_df, conservation_df,
small_width=2, large_width=16,
conservation_column='ranked_conservation',
id_cols=id_cols)
conservation_feature_df
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>sgRNA Context Sequence</th>
<th>Target Cut Length</th>
<th>Target Transcript</th>
<th>Orientation</th>
<th>cons_4</th>
<th>cons_32</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>AAAAGAATGATGAAAAGACACCACAGGGAG</td>
<td>244</td>
<td>ENST00000610426.5</td>
<td>sense</td>
<td>0.218231</td>
<td>0.408844</td>
</tr>
<tr>
<th>1</th>
<td>AAAAGAGCCATGAATCTAAACATCAGGAAT</td>
<td>640</td>
<td>ENST00000223073.6</td>
<td>sense</td>
<td>0.129825</td>
<td>0.278180</td>
</tr>
<tr>
<th>2</th>
<td>AAAAGCGCCAAATGGCCCGAGAATTGGGAG</td>
<td>709</td>
<td>ENST00000331923.9</td>
<td>sense</td>
<td>0.470906</td>
<td>0.532305</td>
</tr>
<tr>
<th>3</th>
<td>AAACAGAAAAAGTTAAAATCACCAAGGTGT</td>
<td>496</td>
<td>ENST00000283882.4</td>
<td>sense</td>
<td>0.580556</td>
<td>0.602708</td>
</tr>
<tr>
<th>4</th>
<td>AAACAGATGGAAGATGCTTACCGGGGGACC</td>
<td>132</td>
<td>ENST00000393047.8</td>
<td>sense</td>
<td>0.283447</td>
<td>0.414293</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>395</th>
<td>TTTGATTGCATTAAGGTTGGACTCTGGATT</td>
<td>246</td>
<td>ENST00000249269.9</td>
<td>sense</td>
<td>0.580612</td>
<td>0.618707</td>
</tr>
<tr>
<th>396</th>
<td>TTTGCCCACAGCTCCAAAGCATCGCGGAGA</td>
<td>130</td>
<td>ENST00000227618.8</td>
<td>sense</td>
<td>0.323770</td>
<td>0.416368</td>
</tr>
<tr>
<th>397</th>
<td>TTTTACAGTGCGATGTATGATGTATGGCTT</td>
<td>119</td>
<td>ENST00000338366.6</td>
<td>sense</td>
<td>0.788000</td>
<td>0.537417</td>
</tr>
<tr>
<th>398</th>
<td>TTTTGGATCTCGTAGTGATTCAAGAGGGAA</td>
<td>233</td>
<td>ENST00000629496.3</td>
<td>sense</td>
<td>0.239630</td>
<td>0.347615</td>
</tr>
<tr>
<th>399</th>
<td>TTTTTGTTACTACAGGTTCGCTGCTGGGAA</td>
<td>201</td>
<td>ENST00000395840.6</td>
<td>sense</td>
<td>0.693767</td>
<td>0.639044</td>
</tr>
</tbody>
</table>
<p>400 rows × 6 columns</p>
</div>
#### Full Target Scores
In order to calculate Target Scores you must input the feature matrices and design_df to the `predict_target`
function from the `predicttarg` module.
```
target_predictions = predict_target(design_df=design_df,
aa_subseq_df=aa_subseq_df,
domain_feature_df=domain_feature_df,
conservation_feature_df=conservation_feature_df,
id_cols=id_cols)
design_df['Target Score'] = target_predictions
design_df.head()
```
/opt/anaconda3/envs/rs3/lib/python3.8/site-packages/sklearn/base.py:310: UserWarning: Trying to unpickle estimator SimpleImputer from version 1.0.dev0 when using version 0.24.2. This might lead to breaking code or invalid results. Use at your own risk.
warnings.warn(
/opt/anaconda3/envs/rs3/lib/python3.8/site-packages/sklearn/base.py:310: UserWarning: Trying to unpickle estimator Pipeline from version 1.0.dev0 when using version 0.24.2. This might lead to breaking code or invalid results. Use at your own risk.
warnings.warn(
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Input</th>
<th>Quota</th>
<th>Target Taxon</th>
<th>Target Gene ID</th>
<th>Target Gene Symbol</th>
<th>Target Transcript</th>
<th>Target Reference Coords</th>
<th>Target Alias</th>
<th>CRISPR Mechanism</th>
<th>Target Domain</th>
<th>...</th>
<th>Combined Rank</th>
<th>Preselected As</th>
<th>Matching Active Arrayed Oligos</th>
<th>Matching Arrayed Constructs</th>
<th>Pools Containing Matching Construct</th>
<th>Pick Order</th>
<th>Picking Round</th>
<th>Picking Notes</th>
<th>Target Score Lite</th>
<th>Target Score</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>PSMB7</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000136930</td>
<td>PSMB7</td>
<td>ENST00000259457.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>7</td>
<td>GCAGATACAAGAGCAACTGA</td>
<td>NaN</td>
<td>BRDN0004619103</td>
<td>NaN</td>
<td>1</td>
<td>0</td>
<td>Preselected</td>
<td>0.012467</td>
<td>0.152037</td>
</tr>
<tr>
<th>1</th>
<td>PSMB7</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000136930</td>
<td>PSMB7</td>
<td>ENST00000259457.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>48</td>
<td>AAAACTGGCACGACCATCGC</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2</td>
<td>0</td>
<td>Preselected</td>
<td>0.048338</td>
<td>0.064880</td>
</tr>
<tr>
<th>2</th>
<td>PRC1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198901</td>
<td>PRC1</td>
<td>ENST00000394249.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>7</td>
<td>AAAAGATTTGCGCACCCAAG</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>1</td>
<td>0</td>
<td>Preselected</td>
<td>-0.129234</td>
<td>-0.063012</td>
</tr>
<tr>
<th>3</th>
<td>PRC1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198901</td>
<td>PRC1</td>
<td>ENST00000394249.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>8</td>
<td>CTTTGACCCAGACATAATGG</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2</td>
<td>0</td>
<td>Preselected</td>
<td>0.061647</td>
<td>-0.126357</td>
</tr>
<tr>
<th>4</th>
<td>TOP1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198900</td>
<td>TOP1</td>
<td>ENST00000361337.3</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1</td>
<td>NaN</td>
<td>NaN</td>
<td>BRDN0001486452</td>
<td>NaN</td>
<td>2</td>
<td>1</td>
<td>NaN</td>
<td>-0.009100</td>
<td>-0.234410</td>
</tr>
</tbody>
</table>
<p>5 rows × 62 columns</p>
</div>
Target Scores can be added directly to the sequence scores for your final Rule Set 3 predictions.
### Predict Function
If you don't want to generate the target matrices themselves, you can use the `predict` function from
the `predict` module.
```
from rs3.predict import predict
import matplotlib.pyplot as plt
import gpplot
import seaborn as sns
```
#### Preloaded data
In this first example with the `predict` function, we calculate predictions for GeckoV2 sgRNAs.
In this example the amino acid sequences, protein domains and conservation scores were prequeried using the
`write_transcript_data` and `write_conservation_data` functions from the targetdata module.
Pre-querying these data can be helpful for large scale design runs.
You can also use the `predict` function without pre-querying and calculate
scores on the fly. You can see an example of this in the next section.
The `predict` function allows for parallel computation
for querying databases (`n_jobs_min`) and featurizing sgRNAs (`n_jobs_max`).
We recommend keeping `n_jobs_min` set to 1 or 2, as the APIs limit the amount of queries per hour.
```
design_df = pd.read_table('test_data/sgrna-designs.txt')
import multiprocessing
max_n_jobs = multiprocessing.cpu_count()
```
```
scored_designs = predict(design_df, tracr=['Hsu2013', 'Chen2013'], target=True,
n_jobs_min=2, n_jobs_max=max_n_jobs,
aa_seq_file='./test_data/target_data/aa_seqs.pq',
domain_file='./test_data/target_data/protein_domains.pq',
conservatin_file='./test_data/target_data/conservation.pq',
lite=False)
scored_designs.head()
```
Calculating sequence-based features
100%|██████████| 400/400 [00:05<00:00, 68.98it/s]
Calculating sequence-based features
100%|██████████| 400/400 [00:01<00:00, 229.85it/s]
/opt/anaconda3/envs/rs3/lib/python3.8/site-packages/sklearn/base.py:310: UserWarning: Trying to unpickle estimator SimpleImputer from version 1.0.dev0 when using version 0.24.2. This might lead to breaking code or invalid results. Use at your own risk.
warnings.warn(
/opt/anaconda3/envs/rs3/lib/python3.8/site-packages/sklearn/base.py:310: UserWarning: Trying to unpickle estimator Pipeline from version 1.0.dev0 when using version 0.24.2. This might lead to breaking code or invalid results. Use at your own risk.
warnings.warn(
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Input</th>
<th>Quota</th>
<th>Target Taxon</th>
<th>Target Gene ID</th>
<th>Target Gene Symbol</th>
<th>Target Transcript</th>
<th>Target Reference Coords</th>
<th>Target Alias</th>
<th>CRISPR Mechanism</th>
<th>Target Domain</th>
<th>...</th>
<th>Picking Round</th>
<th>Picking Notes</th>
<th>RS3 Sequence Score (Hsu2013 tracr)</th>
<th>RS3 Sequence Score (Chen2013 tracr)</th>
<th>AA Index</th>
<th>Transcript Base</th>
<th>Missing conservation information</th>
<th>Target Score</th>
<th>RS3 Sequence (Hsu2013 tracr) + Target Score</th>
<th>RS3 Sequence (Chen2013 tracr) + Target Score</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>PSMB7</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000136930</td>
<td>PSMB7</td>
<td>ENST00000259457.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>0</td>
<td>Preselected</td>
<td>0.787640</td>
<td>0.559345</td>
<td>64</td>
<td>ENST00000259457</td>
<td>False</td>
<td>0.152037</td>
<td>0.939676</td>
<td>0.711381</td>
</tr>
<tr>
<th>1</th>
<td>PSMB7</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000136930</td>
<td>PSMB7</td>
<td>ENST00000259457.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>0</td>
<td>Preselected</td>
<td>-0.294126</td>
<td>-0.181437</td>
<td>46</td>
<td>ENST00000259457</td>
<td>False</td>
<td>0.064880</td>
<td>-0.229246</td>
<td>-0.116557</td>
</tr>
<tr>
<th>2</th>
<td>PRC1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198901</td>
<td>PRC1</td>
<td>ENST00000394249.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>0</td>
<td>Preselected</td>
<td>-0.043418</td>
<td>-0.220434</td>
<td>106</td>
<td>ENST00000394249</td>
<td>False</td>
<td>-0.063012</td>
<td>-0.106429</td>
<td>-0.283446</td>
</tr>
<tr>
<th>3</th>
<td>PRC1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198901</td>
<td>PRC1</td>
<td>ENST00000394249.8</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>0</td>
<td>Preselected</td>
<td>0.759256</td>
<td>0.453469</td>
<td>263</td>
<td>ENST00000394249</td>
<td>False</td>
<td>-0.126357</td>
<td>0.632899</td>
<td>0.327112</td>
</tr>
<tr>
<th>4</th>
<td>TOP1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198900</td>
<td>TOP1</td>
<td>ENST00000361337.3</td>
<td>NaN</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>...</td>
<td>1</td>
<td>NaN</td>
<td>0.424001</td>
<td>-0.197035</td>
<td>140</td>
<td>ENST00000361337</td>
<td>False</td>
<td>-0.234410</td>
<td>0.189591</td>
<td>-0.431445</td>
</tr>
</tbody>
</table>
<p>5 rows × 68 columns</p>
</div>
Here are the details for the keyword arguments of the above function
* `tracr` - tracr to calculate scores for. If a list is supplied instead of a string, scores will be calculated for both tracrs
* `target` - boolean indicating whether to calculate target scores
* `n_jobs_min`, `n_jobs_max` - number of cpus to use for parallel computation
* `aa_seq_file`, `domain_file`, `conservatin_file` - precalculated parquet files. Optional inputs as these features can also be calculated on the fly
* `lite` - boolean indicating whether to calculate lite target scores
By listing both tracrRNAs `tracr=['Hsu2013', 'Chen2013']` and setting `target=True`,
we calculate 5 unique scores: one sequence score for each tracr, the target score,
and the sequence scores plus the target score.
We can compare these predictions against the observed activity from GeckoV2
```
gecko_activity = pd.read_csv('test_data/Aguirre2016_activity.csv')
gecko_activity.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>sgRNA Sequence</th>
<th>sgRNA Context Sequence</th>
<th>Target Gene Symbol</th>
<th>Target Cut %</th>
<th>avg_mean_centered_neg_lfc</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>AAAAAACTTACCCCTTTGAC</td>
<td>AAAAAAAAAACTTACCCCTTTGACTGGCCA</td>
<td>CPSF6</td>
<td>22.2</td>
<td>-1.139819</td>
</tr>
<tr>
<th>1</th>
<td>AAAAACATTATCATTGAGCC</td>
<td>TGGCAAAAACATTATCATTGAGCCTGGATT</td>
<td>SKA3</td>
<td>62.3</td>
<td>-0.793055</td>
</tr>
<tr>
<th>2</th>
<td>AAAAAGAGATTGTCAAATCA</td>
<td>TATGAAAAAGAGATTGTCAAATCAAGGTAG</td>
<td>AQR</td>
<td>3.8</td>
<td>0.946453</td>
</tr>
<tr>
<th>3</th>
<td>AAAAAGCATCTCTAGAAATA</td>
<td>TTCAAAAAAGCATCTCTAGAAATATGGTCC</td>
<td>ZNHIT6</td>
<td>61.7</td>
<td>-0.429590</td>
</tr>
<tr>
<th>4</th>
<td>AAAAAGCGAGATACCCGAAA</td>
<td>AAAAAAAAAGCGAGATACCCGAAAAGGCAG</td>
<td>ABCF1</td>
<td>9.4</td>
<td>0.734196</td>
</tr>
</tbody>
</table>
</div>
```
gecko_activity_scores = (gecko_activity.merge(scored_designs,
how='inner',
on=['sgRNA Sequence', 'sgRNA Context Sequence',
'Target Gene Symbol', 'Target Cut %']))
gecko_activity_scores.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>sgRNA Sequence</th>
<th>sgRNA Context Sequence</th>
<th>Target Gene Symbol</th>
<th>Target Cut %</th>
<th>avg_mean_centered_neg_lfc</th>
<th>Input</th>
<th>Quota</th>
<th>Target Taxon</th>
<th>Target Gene ID</th>
<th>Target Transcript</th>
<th>...</th>
<th>Picking Round</th>
<th>Picking Notes</th>
<th>RS3 Sequence Score (Hsu2013 tracr)</th>
<th>RS3 Sequence Score (Chen2013 tracr)</th>
<th>AA Index</th>
<th>Transcript Base</th>
<th>Missing conservation information</th>
<th>Target Score</th>
<th>RS3 Sequence (Hsu2013 tracr) + Target Score</th>
<th>RS3 Sequence (Chen2013 tracr) + Target Score</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>AAAACTGGCACGACCATCGC</td>
<td>CCGGAAAACTGGCACGACCATCGCTGGGGT</td>
<td>PSMB7</td>
<td>16.4</td>
<td>-1.052943</td>
<td>PSMB7</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000136930</td>
<td>ENST00000259457.8</td>
<td>...</td>
<td>0</td>
<td>Preselected</td>
<td>-0.294126</td>
<td>-0.181437</td>
<td>46</td>
<td>ENST00000259457</td>
<td>False</td>
<td>0.064880</td>
<td>-0.229246</td>
<td>-0.116557</td>
</tr>
<tr>
<th>1</th>
<td>AAAAGATTTGCGCACCCAAG</td>
<td>TAGAAAAAGATTTGCGCACCCAAGTGGAAT</td>
<td>PRC1</td>
<td>17.0</td>
<td>0.028674</td>
<td>PRC1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198901</td>
<td>ENST00000394249.8</td>
<td>...</td>
<td>0</td>
<td>Preselected</td>
<td>-0.043418</td>
<td>-0.220434</td>
<td>106</td>
<td>ENST00000394249</td>
<td>False</td>
<td>-0.063012</td>
<td>-0.106429</td>
<td>-0.283446</td>
</tr>
<tr>
<th>2</th>
<td>AAAAGTCCAAGCATAGCAAC</td>
<td>CGGGAAAAGTCCAAGCATAGCAACAGGTAA</td>
<td>TOP1</td>
<td>6.5</td>
<td>0.195309</td>
<td>TOP1</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000198900</td>
<td>ENST00000361337.3</td>
<td>...</td>
<td>0</td>
<td>Preselected</td>
<td>-0.294127</td>
<td>-0.022951</td>
<td>50</td>
<td>ENST00000361337</td>
<td>False</td>
<td>-0.354708</td>
<td>-0.648835</td>
<td>-0.377659</td>
</tr>
<tr>
<th>3</th>
<td>AAAGAAGCCTCAACTTCGTC</td>
<td>AGCGAAAGAAGCCTCAACTTCGTCTGGAGA</td>
<td>CENPW</td>
<td>37.5</td>
<td>-1.338209</td>
<td>CENPW</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000203760</td>
<td>ENST00000368328.5</td>
<td>...</td>
<td>0</td>
<td>Preselected</td>
<td>-0.667399</td>
<td>-0.308794</td>
<td>34</td>
<td>ENST00000368328</td>
<td>False</td>
<td>0.129285</td>
<td>-0.538114</td>
<td>-0.179509</td>
</tr>
<tr>
<th>4</th>
<td>AAAGTGTGCTTTGTTGGAGA</td>
<td>TACTAAAGTGTGCTTTGTTGGAGATGGCTT</td>
<td>NSA2</td>
<td>60.0</td>
<td>-0.175219</td>
<td>NSA2</td>
<td>2</td>
<td>9606</td>
<td>ENSG00000164346</td>
<td>ENST00000610426.5</td>
<td>...</td>
<td>0</td>
<td>Preselected</td>
<td>-0.402220</td>
<td>-0.622492</td>
<td>157</td>
<td>ENST00000610426</td>
<td>False</td>
<td>-0.113577</td>
<td>-0.515797</td>
<td>-0.736069</td>
</tr>
</tbody>
</table>
<p>5 rows × 69 columns</p>
</div>
Since GeckoV2 was screened with the tracrRNA from Hsu et al. 2013, we'll use these sequence scores as part of our final prediction.
```
plt.subplots(figsize=(4,4))
gpplot.point_densityplot(gecko_activity_scores, y='avg_mean_centered_neg_lfc',
x='RS3 Sequence (Hsu2013 tracr) + Target Score')
gpplot.add_correlation(gecko_activity_scores, y='avg_mean_centered_neg_lfc',
x='RS3 Sequence (Hsu2013 tracr) + Target Score')
sns.despine()
```

#### Predictions on the fly
You can also make predictions without pre-querying the target data. Here
we use example designs for BCL2L1, MCL1 and EEF2.
```
design_df = pd.read_table('test_data/sgrna-designs_BCL2L1_MCL1_EEF2.txt')
```
```
scored_designs = predict(design_df,
tracr=['Hsu2013', 'Chen2013'], target=True,
n_jobs_min=2, n_jobs_max=8,
lite=False)
scored_designs
```
Calculating sequence-based features
100%|██████████| 849/849 [00:06<00:00, 137.86it/s]
Calculating sequence-based features
100%|██████████| 849/849 [00:02<00:00, 321.44it/s]
Getting amino acid sequences
100%|██████████| 1/1 [00:00<00:00, 1.77it/s]
Getting protein domains
100%|██████████| 3/3 [00:00<00:00, 899.29it/s]
Getting conservation
100%|██████████| 3/3 [00:00<00:00, 10.67it/s]
/opt/anaconda3/envs/rs3/lib/python3.8/site-packages/sklearn/base.py:310: UserWarning: Trying to unpickle estimator SimpleImputer from version 1.0.dev0 when using version 0.24.2. This might lead to breaking code or invalid results. Use at your own risk.
warnings.warn(
/opt/anaconda3/envs/rs3/lib/python3.8/site-packages/sklearn/base.py:310: UserWarning: Trying to unpickle estimator Pipeline from version 1.0.dev0 when using version 0.24.2. This might lead to breaking code or invalid results. Use at your own risk.
warnings.warn(
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Input</th>
<th>Quota</th>
<th>Target Taxon</th>
<th>Target Gene ID</th>
<th>Target Gene Symbol</th>
<th>Target Transcript</th>
<th>Target Alias</th>
<th>CRISPR Mechanism</th>
<th>Target Domain</th>
<th>Reference Sequence</th>
<th>...</th>
<th>Picking Round</th>
<th>Picking Notes</th>
<th>RS3 Sequence Score (Hsu2013 tracr)</th>
<th>RS3 Sequence Score (Chen2013 tracr)</th>
<th>AA Index</th>
<th>Transcript Base</th>
<th>Missing conservation information</th>
<th>Target Score</th>
<th>RS3 Sequence (Hsu2013 tracr) + Target Score</th>
<th>RS3 Sequence (Chen2013 tracr) + Target Score</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>EEF2</td>
<td>5</td>
<td>9606</td>
<td>ENSG00000167658</td>
<td>EEF2</td>
<td>ENST00000309311.7</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>NC_000019.10</td>
<td>...</td>
<td>NaN</td>
<td>Outside Target Window: 5-65%</td>
<td>0.907809</td>
<td>0.769956</td>
<td>666</td>
<td>ENST00000309311</td>
<td>False</td>
<td>-0.115549</td>
<td>0.792261</td>
<td>0.654408</td>
</tr>
<tr>
<th>1</th>
<td>EEF2</td>
<td>5</td>
<td>9606</td>
<td>ENSG00000167658</td>
<td>EEF2</td>
<td>ENST00000309311.7</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>NC_000019.10</td>
<td>...</td>
<td>NaN</td>
<td>BsmBI:CGTCTC; Outside Target Window: 5-65%</td>
<td>0.171870</td>
<td>0.040419</td>
<td>581</td>
<td>ENST00000309311</td>
<td>False</td>
<td>-0.017643</td>
<td>0.154226</td>
<td>0.022776</td>
</tr>
<tr>
<th>2</th>
<td>EEF2</td>
<td>5</td>
<td>9606</td>
<td>ENSG00000167658</td>
<td>EEF2</td>
<td>ENST00000309311.7</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>NC_000019.10</td>
<td>...</td>
<td>1.0</td>
<td>NaN</td>
<td>1.393513</td>
<td>0.577732</td>
<td>107</td>
<td>ENST00000309311</td>
<td>False</td>
<td>0.172910</td>
<td>1.566422</td>
<td>0.750642</td>
</tr>
<tr>
<th>3</th>
<td>EEF2</td>
<td>5</td>
<td>9606</td>
<td>ENSG00000167658</td>
<td>EEF2</td>
<td>ENST00000309311.7</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>NC_000019.10</td>
<td>...</td>
<td>1.0</td>
<td>NaN</td>
<td>0.904446</td>
<td>0.008390</td>
<td>406</td>
<td>ENST00000309311</td>
<td>False</td>
<td>0.121034</td>
<td>1.025480</td>
<td>0.129424</td>
</tr>
<tr>
<th>4</th>
<td>EEF2</td>
<td>5</td>
<td>9606</td>
<td>ENSG00000167658</td>
<td>EEF2</td>
<td>ENST00000309311.7</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>NC_000019.10</td>
<td>...</td>
<td>1.0</td>
<td>NaN</td>
<td>0.831087</td>
<td>0.361594</td>
<td>546</td>
<td>ENST00000309311</td>
<td>False</td>
<td>0.036041</td>
<td>0.867128</td>
<td>0.397635</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>844</th>
<td>MCL1</td>
<td>5</td>
<td>9606</td>
<td>ENSG00000143384</td>
<td>MCL1</td>
<td>ENST00000369026.3</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>NC_000001.11</td>
<td>...</td>
<td>NaN</td>
<td>Off-target Match Bin I matches > 3; Spacing Vi...</td>
<td>-0.792918</td>
<td>-0.663881</td>
<td>52</td>
<td>ENST00000369026</td>
<td>False</td>
<td>-0.299583</td>
<td>-1.092501</td>
<td>-0.963464</td>
</tr>
<tr>
<th>845</th>
<td>MCL1</td>
<td>5</td>
<td>9606</td>
<td>ENSG00000143384</td>
<td>MCL1</td>
<td>ENST00000369026.3</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>NC_000001.11</td>
<td>...</td>
<td>NaN</td>
<td>Outside Target Window: 5-65%; poly(T):TTTT</td>
<td>-1.920374</td>
<td>-1.819985</td>
<td>5</td>
<td>ENST00000369026</td>
<td>False</td>
<td>-0.003507</td>
<td>-1.923881</td>
<td>-1.823491</td>
</tr>
<tr>
<th>846</th>
<td>MCL1</td>
<td>5</td>
<td>9606</td>
<td>ENSG00000143384</td>
<td>MCL1</td>
<td>ENST00000369026.3</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>NC_000001.11</td>
<td>...</td>
<td>NaN</td>
<td>Spacing Violation: Too close to earlier pick a...</td>
<td>-1.101303</td>
<td>-1.295640</td>
<td>24</td>
<td>ENST00000369026</td>
<td>False</td>
<td>-0.285485</td>
<td>-1.386788</td>
<td>-1.581125</td>
</tr>
<tr>
<th>847</th>
<td>MCL1</td>
<td>5</td>
<td>9606</td>
<td>ENSG00000143384</td>
<td>MCL1</td>
<td>ENST00000369026.3</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>NC_000001.11</td>
<td>...</td>
<td>NaN</td>
<td>Spacing Violation: Too close to earlier pick a...</td>
<td>-0.617431</td>
<td>-0.621436</td>
<td>30</td>
<td>ENST00000369026</td>
<td>False</td>
<td>-0.312348</td>
<td>-0.929779</td>
<td>-0.933784</td>
</tr>
<tr>
<th>848</th>
<td>MCL1</td>
<td>5</td>
<td>9606</td>
<td>ENSG00000143384</td>
<td>MCL1</td>
<td>ENST00000369026.3</td>
<td>NaN</td>
<td>CRISPRko</td>
<td>CDS</td>
<td>NC_000001.11</td>
<td>...</td>
<td>NaN</td>
<td>On-Target Efficacy Score < 0.2; Spacing Violat...</td>
<td>-0.586811</td>
<td>-0.664130</td>
<td>30</td>
<td>ENST00000369026</td>
<td>False</td>
<td>-0.312348</td>
<td>-0.899159</td>
<td>-0.976478</td>
</tr>
</tbody>
</table>
<p>849 rows × 61 columns</p>
</div>
We see that the predict function is querying the target data in addition
to making predictions.
| /rs3-0.0.15.tar.gz/rs3-0.0.15/README.md | 0.609873 | 0.864768 | README.md | pypi |
import random as re
from math import sqrt
max = 10000000
def gcd(a, b):
    '''Return the Greatest Common Divisor (Highest Common Factor) of two numbers.

    In this program it is used to test whether e and phi_of_n are
    co-prime, which holds exactly when their gcd is 1.
    '''
    if b == 0:
        return a
    return gcd(b, a % b)
def mm_inverse(a, b):
    '''Return the modular multiplicative inverse of a mod b.

    Implemented with the iterative extended Euclidean algorithm; the
    result is valid iff a and b are co-prime.  Its primary purpose here
    is to derive the private exponent d from e and phi.
    '''
    # t_prev/t_curr track the Bezout coefficient of a as the remainders shrink.
    t_prev, t_curr = 0, 1
    r_prev, r_curr = b, a % b
    while r_curr != 0:
        quotient = r_prev // r_curr
        t_prev, t_curr = t_curr, t_prev - quotient * t_curr
        r_prev, r_curr = r_curr, r_prev - quotient * r_curr
    # Normalise the coefficient into the range [0, b).
    return t_prev % b
def isPrime(num):
    '''Return True when num is prime, False otherwise.

    Used to validate the primality of p, q and e.  Tests odd trial
    divisors up to the square root of num.
    '''
    if num < 2:
        return False
    if num == 2 or num == 3:
        return True
    if num % 2 == 0:
        return False
    limit = int(sqrt(num)) + 2
    return all(num % divisor for divisor in range(3, limit, 2))
def random_Prime():
    '''Return a randomly chosen prime, used to pick p and q.

    Draws candidates from [53, max] (large values for greater security)
    until one passes the primality test.
    '''
    candidate = re.randint(53, max)
    while not isPrime(candidate):
        candidate = re.randint(53, max)
    return candidate
def key_Pairs():
    '''Generate p and q, then build the RSA public/private key pairs.

    Returns a tuple ((e, n), (d, n)): the public key followed by the
    private key.
    '''
    p = random_Prime()
    q = random_Prime()
    n = p * q
    # phi(n) is Euler's Totient Function.  Here phi(n) = phi(p) * phi(q)
    # because p and q are co-prime; and since both are prime,
    # phi(p) = p - 1 and phi(q) = q - 1 (no factors besides 1 and themselves).
    phi = (p - 1) * (q - 1)
    # Choose a public exponent with 1 < e < phi(n) that is co-prime to phi.
    e = re.randint(2, phi - 1)
    while gcd(e, phi) != 1:
        e = re.randint(2, phi - 1)
    d = mm_inverse(e, phi)
    return (e, n), (d, n)
def encrypt_data(plain_text, public_key):
    '''Encrypt plain_text into a list of integers, one per character.

    public_key is the (e, n) pair; each character's code point is raised
    to e modulo n via the three-argument pow(), which performs fast
    modular exponentiation.
    '''
    key, n = public_key
    cipher = []
    for char in plain_text:
        cipher.append(pow(ord(char), key, n))
    return cipher
def decrypt_data(ctext, private_key):
    '''Decrypt a list of cipher integers back into the original string.

    On malformed input (e.g. ctext is not iterable) the resulting
    TypeError is printed and None is returned, preserving the original
    best-effort behaviour.
    '''
    try:
        exponent, n = private_key
        return "".join(chr(pow(value, exponent, n)) for value in ctext)
    except TypeError as err:
        print(err)
Library to work with Archer REST and Content APIs
===========================================
My original objective was to create an Office365 mail to Archer Incidents application connector. The script captures the email, checks if there is an incident ID assigned, and adds the email to the comments section (sub form) of the Archer record.
This package supports archer part of the connector, if someone interested I can share the whole thing.
#### Release notes:
> v0.1.9 (13 Dec 2019)
> - Fixed method get_field_content in Record, fixed error when method crashed if the value in values list is None
> - Now, if there are several values in values list. Function will return all values, including parent value if you're using leveled values list.
> - Looks like this ["Parent Value:Value", "Parent Value:Value", "Value"]
> - ATTENTION!!! now function returns LIST of values instead of string value.
>
> v0.1.8 (31 Oct 2019)
> - added delete_record(self, record_id=None) to ArcherInstance and few small fixes
>
> v0.1.4 (05 Feb 2019)
> - added user deactivation method and some user method error handling
>
> v0.1.3 (29 Jan 2019)
> - added archer instance method get_value_id_by_field_name_and_value() to be able to set value in record create/update methods
# Archer REST API
## 0. Installation
```bash
pip install rsa-archer
```
## 1. Creating Archer Instance
Create "api" user in Archer with proper permissions
At first, create Archer Instance object and continue to work with it
```python
import rsa_archer
from rsa_archer.archer_instance import ArcherInstance
archer_instance = ArcherInstance("domain","archer instance name","api username", "password")
# e.g.
archer_instance = ArcherInstance("archer.companyzxc.com","risk_management","api", "secure password")
```
## 2. Working with content records
### 2.1 Selecting application
To start working with content records you need to select Archer application (one application per Archer Instance object), without it it'll not work.
```python
archer_instance.from_application("application name")
# e.g.
archer_instance.from_application("Incidents") #same name as in archer application list
```
### 2.2 Creating new record
**NOTE** - right now, working natively with record fields is limited to text fields; for values lists, attachments and other types of fields you need to operate with Archer internal ids. A good example of this is working with attachments, which can be found below.
Preparing json with field names and their values (text or ids):
```python
record_json = {"field name1": "value1", "field name2": "value2", "values list field name": [id1,id2,id3..] ...}
# e.g.
record_json = {"Incident Summary": "desired text", "Reporter email": "email","Incident Details": "HTML text", "Severity": [34658]}
```
Creating the record and getting its id:
```python
record_id = archer_instance.create_content_record(record_json)
```
### 2.2 Working with existing records
#### 2.2.1 Getting record content
Getting record object by id:
```python
existing_record = archer_instance.get_record(record_id)
```
Getting values of record fields (including ids):
```python
existing_record.get_field_content("field_name")
# it returns, value of the text field
# array of user internal ids for user field
# proper value for values list
# internal ids for other types of fields
# TODO other types of fields
```
#### 2.2.2 Updating existing record
Preparing updater json
```python
updater_json = {"field name1": "value1", "field name2": "value2", ...}
#e.g.
updater_json = {"Incident Summary": "desired text", "Reporter email": "email","Incident Details": "HTML text"}
```
Updating the record values:
```python
archer_instance.update_content_record(updater_json, record_id)
```
#### 2.2.3 Posting attachments to archer instance
Uploading attachment to Archer and getting its id:
```python
attachment_id = archer_instance.post_attachment("file name", fileinbase64_string)
```
Appending attachment ids into an array; you might want to get the existing record's attachment ids first and append the additional attachment id to it, or you will lose the existing ones:
```python
attachment_ids = []
attachment_ids.append(attachment_id)
```
Then associate the ids with the existing record for example:
```python
updater_json = {"Attachments": attachment_ids}
archer_instance.update_content_record(updater_json, record_id)
```
## 3. Working with sub forms in content records
### 3.1 Creating subrecords
Creating sub_record and getting its id:
```python
sub_form_json = {"subform field name1": "value1", "subform field name1": "value1", ...}
sub_record_id = archer_instance.create_sub_record(sub_form_json, "subform field name in target application")
```
Then associate subrecord with content record, in this case existing record:
```python
updater_json = {"subform field name in target application": sub_record_id}
archer_instance.update_content_record(updater_json, record_id)
```
But it will replace the existing subrecords in application, so you should get the existing subrecords first:
```python
current_sub_records_ids = record.get_field_content("subform field name in target application") #get the array of existing attachments ids
if current_sub_records_ids:
    final_sub_records = current_sub_records_ids + [sub_record_id]
else:
    final_sub_records = [sub_record_id]
```
And then update the original application record:
```python
updater_json = {"subform field name in target application": final_sub_records}
archer_instance.update_content_record(updater_json, record_id)
```
### 3.2 Attachments to subrecords
Uploading attachment to Archer and getting its id:
```python
attachment_id = archer_instance.post_attachment("file name", fileinbase64_string)
```
Put attachment ids into array:
```python
attachment_ids = []
attachment_ids.append(attachment_id)
```
Assosiate it with the new sub_record
```python
sub_form_json = {"sub form attachment field name": attachment_ids}
archer_instance.create_sub_record(sub_form_json, "APPLICATION FIELD NAME")
```
## 4. Working with users
### 4.1 Getting user objects:
Getting all user objects:
```python
users = archer_instance.get_users()
```
Getting individual user object:
```python
user = archer_instance.get_user_by_id("user id")
```
Getting users using filters, find full list of filters in Archer REST API documentation:
```python
users = archer_instance.get_users("?$select=Id,DisplayName&$orderby=LastName")
```
Getting active users with no login:
```python
users = archer_instance.get_active_users_with_no_login()
```
### 4.2 Getting users info
Getting user object parameters (added for convenience), all information could be found in user.json:
```python
email = user.get_user_email()
id = user.get_user_id()
display_name = user.get_gisplay_name()
user_name = user.get_username()
last_login = user.get_last_login_date()
```
### 4.3 Working with user object
Assigning user to role:
```python
user.assign_role_to_user("role id")
```
Activating user:
```python
user.activate_user()
```
Deactivating user:
```python
user.deactivate_user()
```
Adding user to group:
```python
archer_instance.get_all_groups() #loads all groups first
user.put_user_to_group("group name")
```
# Archer GRC API (released from 6.4)
To start working in GRC api you need to set an endpoint, it's analog of application we used in REST.
To find the exact name of an endpoint you can use the following method:
```python
archer_instance.find_grc_endpoint_url("application name")
```
With endpoint name you can get content records of the application:
* it'll give you only 1000 records at a time, use skip to get more
* I used this api only to get key field to id mapping, since there is no normal search in REST API
* Method returns array_of_jsons instead of record objects, since these jsons are different from REST jsons and I don't really use them
```python
array_of_jsons = archer_instance.get_grc_endpoint_records("endpoint name", skip=None)
```
I'm building key record field value to record internal id mapping:
* for Incidents application "application key field" was incident #INC-xxx, but key record field stores only integer, for some reason
* so I added prefix, "INC-" in my example to the method
```python
archer_instance.build_unique_value_to_id_mapping("endpoint name", "application key field name", "prefix"=None)
```
So based on key record field value I can get record internal id:
```python
record_id = archer_instance.get_record_id_by_unique_value("key field value")
```
| /rsa_archer-0.1.9.tar.gz/rsa_archer-0.1.9/README.md | 0.453262 | 0.779112 | README.md | pypi |
[](https://opensource.org/licenses/MIT)
What is this for?
=================
If you need to use an [RSA SecurID](//en.wikipedia.org/wiki/RSA_SecurID) software token
to generate [one-time passwords](//en.wikipedia.org/wiki/One-time_password), and
have been given an activation code and a URL like
`https://XXX.com:443/ctkip/services/CtkipService`, this software will allow you to
provision the token and save it in a format that you can use with
[stoken](//github.com/cernekee/stoken).
If you use the RSA's official software (RSA SecurID) to provision the
token, it will obscure the token seed to prevent you from copying it
to another computer:

What is this Fork?
==================

This repo has been forked from [github.com/dlenski/rsa_ct_kip](https://github.com/dlenski/rsa_ct_kip) purely for the purpose of publishing to PyPI.
This fork can be found here: [gitlab.com/southgate/apps/softtoken/rsa_ct_kip](https://gitlab.com/southgate/apps/softtoken/rsa_ct_kip)
Requirements
============
Client needs Python **3.x**, PyCryptoDome, and requests (which will be automatically installed with `pip`). [stoken](//github.com/cernekee/stoken) is needed to save the resulting tokens in a usable format. Running the [fake server](#fake-server) requires Flask as well.
Install with `pip3` to automatically fetch Python dependencies. (Note that on most systems, `pip` invokes the Python 2.x version, while `pip3` invokes the Python 3.x version.)
```
# Install latest version
$ pip3 install rsa_ct_kip
# Install a specific version
$ pip3 install rsa_ct_kip==0.1
```
Provision token using client
============================
Provide the client with the activation URL and activation code
(usually 12 digits), and a file in which to save the token template.
It will communicate with the RSA CT-KIP server and provision a token,
then attempt to call `stoken` to convert the token to XML/.sdtid format:
```
$ rsa_ct_kip https://server.company.com:443/ctkip/services/CtkipService ACTIVATION_CODE /tmp/test.sdtid
Sending ClientHello request to server...
Received ServerHello response with server nonce (R_S = 28198dbe2c18a00335179cc5bb4eff3a) and 1024-bit RSA public key
Generated client nonce (R_C = 12bec1a6f4d09470986b485561c4d2b5)
Sending ServerFinished request to server, with encrypted client nonce...
MAC verified (0f103bc63a8819ffdbee657d042144f6)
Received ServerFinished response with token information:
Service ID: RSA CT-KIP
Key ID: 838999658504
Token ID: 838999658504
Token User:
Expiration date: 2020-01-23T00:00:00+00:00
OTP mode: 8 Decimal, every 60 seconds
Token seed: 30ade1be20b3867d967bd2927c8eb0ca
Saved token in XML/.sdtid format to /tmp/test.sdtid
```
If `stoken` is not installed in your path, or fails to execute
successfully, then a template file will be left behind instead.
Convert the template output to an RSA SecurID token in XML format with
a working copy of `stoken`, as instructed:
```
$ stoken export --random --sdtid --template=template.xml > 838999658504.sdtid
```
Fake server
===========
The server (invoked with `python3 -m rsa_ct_kip.fakeserver`) mimics a "real" RSA CT-KIP server and can
be used for interoperability testing with a "real" RSA SecurID client.
It accepts the requests sent by the client software at two different
paths: `/` for laziness, and `/ctkip/services/CtkipService`
in case any real client hard-codes this path.
It provisions tokens with randomly-generated 12-digit IDs, which it does
not retain. Official RSA SecurID clients for Windows and Android have
been verified to connect to it, and provision tokens from its output.
The server can run either via HTTP or HTTPS. For HTTPS, create a
`server.pem` file in the same directory. It must contain a trusted,
signed certificate in order for the RSA SecurID app to connect to it.
The package also includes `rsaprivkey.pem`, used by the fake server. This is the RSA private key used for token
generation, and shouldn't need to be modified for testing
purposes. (The one included is a 1024-bit key with modulus 65537,
similar to what seem to be used by the "real" RSA CT-KIP server).
Protocol
========
The protocol used here is allegedly documented in the [RFC4758](//tools.ietf.org/html/rfc4758) "draft standard".
There are numerous problems with this protocol:
* The draft RFC is convoluted, overly-complex, and unclear. It's _obviously_ an attempt to describe
the operation of an existing program rather than to design a useful protocol.
* The only "existing implementations" are in RSA's closed-source software. I use scare quotes because
the actual implementations [use algorithms that differ in several ways](//github.com/cernekee/stoken/issues/27#issuecomment-456522178),
making it impossible to interoperate without reverse-engineering.
* The exchange is wrapped in several unnecessary layers of base64+XML+SOAP terribleness… but the official
RSA clients _don't really parse XML_: they just pretend to, and get confused by whitespace differences
and similar trivialities.
* The protocol appears to be trying to solve the following problem,
"How can a client and server agree on a long-term key (the token
secret) in such a way that if the exchange is [MITM](https://en.wikipedia.org/wiki/Man-in-the-middle_attack)ed,
they can't arrive at the same long-term key?" There are numerous things that make this scheme impractical and
confusing and unmaintable, but the icing on the cake is that the official clients don't even give a usefully
distinctive error message when they **are** being MITMed.
Dear RSA: This is one of the most insanely idiotic protocols I have ever seen. At no point in its rambling,
incoherent design is it even close to serving a rational purpose. Everyone involved in reverse
engineering it is now dumber for having studied it. [I award you no points, and may God have mercy on your
souls](https://www.youtube.com/watch?v=LQCU36pkH7c).
Credits
=======
* [@cemeyer](//github.com/cemeyer) for [kicking this off](//github.com/cernekee/stoken/issues/27)
and doing most of the heavy lifting, including figuring out
[all the mistakes](//github.com/cernekee/stoken/issues/27#issuecomment-456522178)
in RSA's atrociously sloppy and misleading [RFC4758](//tools.ietf.org/html/rfc4758), and writing
a [bug-for-bug matching `ct_kip_prf_aes` implementation](//gist.github.com/cemeyer/3293e4fcb3013c4ee2d1b6005e0561bf)
based on it.
* [@rgerganov](//github.com/rgerganov) for
[reverse engineering the official client](//github.com/cernekee/stoken/issues/27#issuecomment-456113939) and
testing.
* [@cernekee](//github.com/cernekee) for writing `stoken` in the first place, and for explaining how to
[convert a raw seed into a token](https://github.com/cernekee/stoken/issues/27#issuecomment-456473711).
* Future time traveler who travels back to 2006, and convinces RSA that they are objectively bad
at designing protocols and should never do it for any reason whatsoever.
TODO
====
* Convert raw seed into usable token _without_ invoking `stoken`?
* Add tests: verify that `client.py` can talk to `fakeserver.py` and negotiate the same `K_TOKEN`.
License
=======
[MIT](LICENSE.txt)
| /rsa_ct_kip-0.6.0.tar.gz/rsa_ct_kip-0.6.0/README.md | 0.461988 | 0.921605 | README.md | pypi |
from Crypto.Cipher import AES
from Crypto.Hash import CMAC
import math
import struct
def cmac(key, msg):
    """Return the AES-CMAC (OMAC1) tag of msg under the given key."""
    mac = CMAC.new(key, ciphermod=AES)
    mac.update(msg)
    return mac.digest()
def ct_kip_prf_aes(key, *msg, dslen=16, pad=None):
    """CT-KIP pseudo-random function built on AES-CMAC.

    Follows RFC 4758 Appendix D.2.2 with the argument-order deviation
    exhibited by RSA's actual software (see inline note).

    Args:
        key: AES key for the underlying CMAC.
        *msg: Message parts; concatenated before processing.
        dslen: Desired output length in bytes (default 16).
        pad: Unused; kept for interface compatibility.

    Returns:
        bytes: Exactly dslen bytes of PRF output.
    """
    assert (dslen // 16) < (2**32)
    data = b''.join(msg)
    n_blocks = math.ceil(dslen / 16)
    tag = b''
    for i in range(n_blocks):
        # Difference between RFC4758 Appendix D.2.2 and
        # actual RSA software behavior:
        # --- F (k, s, i) = OMAC1-AES (k, INT (i) || s)
        # +++ F (k, s, i) = OMAC1-AES (k, s || INT (i))
        tag += cmac(key, data + struct.pack('>I', i + 1))
    # Bug fix: the original computed the length of the final partial
    # block (`reslen`) but never used it, so any dslen that was not a
    # multiple of 16 returned too many bytes. Truncate to dslen.
    return tag[:dslen]
def main():
    """Check ct_kip_prf_aes against a known test vector captured from
    the official RSA client software."""
    from binascii import hexlify, unhexlify, a2b_base64
    from Crypto.PublicKey import RSA
    from Crypto.Util import number

    # Known test vector obtained from actual RSA software:
    # https://github.com/rgerganov/ctkip/blob/master/src/com/xakcop/ctkip/Main.java
    client_nonce = unhexlify(b"846cd036914f3bf536e7354ece07b35a")
    server_nonce = unhexlify(b"79956b2fd8502465ad5c5fe99b9e7786")
    modulus_b64 = b"1np1DIf3HOHAK2ahcRzZCJsqIC1QMEqtsdanKSEn5CGtLCdLv9LbLUYo6cQxKSJtwvigpeDgBAb/UYcUNXy/7dY7rA5WpYlsaA9h5C9qzPMBHxVGSIe5k61uUbAwdFhCMfLh776wR//VZ7cuypo5d3cCbvgHGwqw4ZuECbKvONM="
    public_key = RSA.construct((number.bytes_to_long(a2b_base64(modulus_b64)), 65537))
    expected_mac = unhexlify(b"eca98d8e5bf211fb5167dada9c262296")  # Expected MAC

    # Difference between RFC4758 Section 3.5 and
    # actual RSA software behavior:
    # --- K_TOKEN = CT-KIP-PRF (R_C, "Key generation" || k || R_S, dsLen)
    # +++ K_TOKEN = CT-KIP-PRF (R_C, k || "Key generation" || R_S, dsLen)
    modulus_bytes = number.long_to_bytes(public_key.n)
    token_key = ct_kip_prf_aes(client_nonce, modulus_bytes, b"Key generation", server_nonce)
    print("K_TOKEN, modulus only, no padding, key first:", hexlify(token_key))

    # Difference between RFC4758 Section 3.8.6 and
    # actual RSA software behavior:
    # --- MAC = CT-KIP-PRF (K_AUTH, "MAC 2 computation" || R_C, dsLen)
    # +++ MAC = CT-KIP-PRF (K_AUTH, "MAC 2 Computation" || R_C, dsLen)
    computed_mac = ct_kip_prf_aes(token_key, b"MAC 2 Computation", client_nonce)
    print("MAC(calc)", hexlify(computed_mac), "MAC(exp)", hexlify(expected_mac))
# Run the built-in test-vector check when executed directly as a script.
if __name__ == "__main__":
    main()
import re
from datetime import date, timedelta
from random import choice, randrange
from .constants import (
DATE_OF_BIRTH_FORMAT,
GENDER_FEMALE_MIN,
GENDER_FEMALE_MAX,
GENDER_MALE_MIN,
GENDER_MALE_MAX,
SA_CITIZEN_DIGIT,
PERMANENT_RESIDENT_DIGIT,
RACE_DIGIT,
Gender,
Citizenship,
)
def calculate_checksum_digit(numbers: str) -> int:
    """Calculate the checksum digit for the given number sequence.

    Applies the Luhn algorithm: walking from the rightmost digit, every
    digit at an even (0-based) position is doubled and, when the result
    exceeds 9, reduced by 9. The check digit is derived from the total.

    Args:
        numbers: Number sequence to calculate the checksum for
            (non-digit characters are ignored).

    Returns:
        The checksum digit.
    """
    reversed_digits = [int(c) for c in re.sub(r"\D", "", numbers)][::-1]
    total = 0
    for position, digit in enumerate(reversed_digits):
        if position % 2 == 0:
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    return total * 9 % 10
def generate_date_of_birth(start=date(1920, 1, 1), end=None):
    """Return a random date in a given period.

    Args:
        start: Start date (inclusive).
        end: End date (exclusive); defaults to today.

    Returns:
        Random date in the given range.
    """
    end = end or date.today()
    span_days = (end - start).days
    return start + timedelta(days=randrange(span_days))
def generate_gender_digits(gender: Gender) -> str:
    """Return a random sequence of digits representing the given gender.

    Female: 0000 - 4999
    Male: 5000 - 9999

    Args:
        gender: Gender to generate digits for.

    Returns:
        Four-digit, zero-padded sequence for the given gender.
    """
    if gender == Gender.FEMALE:
        number = randrange(GENDER_FEMALE_MIN, GENDER_FEMALE_MAX + 1)
    else:
        number = randrange(GENDER_MALE_MIN, GENDER_MALE_MAX + 1)
    # Bug fix: the gender sequence occupies four digits of the ID number
    # (the package's parser reads value[6:10]), so it must be padded to
    # four digits; the previous '%03d' format produced invalid 12-digit
    # IDs for any sequence value below 1000.
    return f"{number:04d}"
def make_id_number(
    date_of_birth: date, gender: Gender, citizenship: Citizenship
) -> str:
    """Construct a valid RSA ID number from the given information.

    Layout: YYMMDD date of birth, four gender digits, citizenship digit,
    race digit, and a trailing Luhn checksum digit.

    Args:
        date_of_birth: The date of birth.
        gender: The gender indicator.
        citizenship: The citizenship indicator.

    Returns:
        A new valid RSA ID number.
    """
    if citizenship == Citizenship.SA_CITIZEN:
        citizenship_digit = SA_CITIZEN_DIGIT
    else:
        citizenship_digit = PERMANENT_RESIDENT_DIGIT
    payload = "".join(
        [
            date_of_birth.strftime(DATE_OF_BIRTH_FORMAT),
            generate_gender_digits(gender),
            citizenship_digit,
            RACE_DIGIT,
        ]
    )
    return f"{payload}{calculate_checksum_digit(payload)}"
def generate(
    date_of_birth: date = None,
    gender: Gender = None,
    citizenship: Citizenship = None,
) -> str:
    """Generate a valid RSA ID number.

    Any argument left as None is filled in with a random value.

    Args:
        date_of_birth: The date of birth.
        gender: The gender indicator.
        citizenship: The citizenship indicator.

    Returns:
        A new valid RSA ID number.

    Examples:
        >>> import rsaidnumber
        >>> rsaidnumber.generate()
        8012215312080
    """
    date_of_birth = date_of_birth or generate_date_of_birth()
    gender = gender or choice([Gender.MALE, Gender.FEMALE])
    citizenship = citizenship or choice(
        [Citizenship.SA_CITIZEN, Citizenship.PERMANENT_RESIDENT]
    )
    return make_id_number(
        date_of_birth, gender=gender, citizenship=citizenship
    )
import logging
import re
from datetime import datetime
from .constants import (
DATE_OF_BIRTH_FORMAT,
GENDER_FEMALE_MAX,
GENDER_FEMALE_MIN,
PERMANENT_RESIDENT_DIGIT,
RSA_ID_LENGTH,
SA_CITIZEN_DIGIT,
Citizenship,
Gender,
)
from .random import generate
# Package version string.
__version__ = "0.0.3"
# Names exported by `from rsaidnumber import *`.
__all__ = ["Gender", "Citizenship", "IdNumber", "parse", "generate"]
# Module-level logger; configuration is left to the consuming application.
logger = logging.getLogger(__name__)
class IdNumber:
    """A South African (RSA) identity number.

    The 13-digit layout is YYMMDD SSSS C A Z: date of birth, gender
    sequence, citizenship indicator, race digit and a Luhn checksum
    digit. Parsing happens eagerly in ``__init__``; inspect ``valid``
    and ``error`` afterwards instead of catching exceptions.
    """

    def __init__(self, value: str) -> None:
        # Raw caller-supplied input; may contain whitespace (see clean()).
        self.value = value
        self.error = None
        self.date_of_birth = None
        self.gender = None
        self.citizenship = None
        self.parse()

    def clean(self) -> str:
        """Return the value without any whitespace."""
        return re.sub(r"\s", "", self.value or "")

    def parse(self) -> None:
        """Parse the value and validate against the RSA ID number format."""
        # Reset all derived state so parse() is safe to call repeatedly.
        self.error = None
        self.date_of_birth = None
        self.gender = None
        self.citizenship = None
        value = self.clean()
        if not value:
            self.error = f"'{value}' is not a valid RSA ID number!"
            return
        if not value.isdigit():
            self.error = f"'{value}' contains non-digit characters!"
            return
        if len(value) != RSA_ID_LENGTH:
            self.error = f"'{value}' is not {RSA_ID_LENGTH} digits!"
            return
        # Digits 1-6 encode the date of birth as YYMMDD.
        year = value[0:2]
        month = value[2:4]
        day = value[4:6]
        try:
            self.date_of_birth = datetime.strptime(
                f"{year}{month}{day}", DATE_OF_BIRTH_FORMAT
            )
            # A two-digit year that lands in the future must belong to
            # the previous century (e.g. '99' -> 1999, not 2099).
            if self.date_of_birth > datetime.now():
                correct_year = self.date_of_birth.year - 100
                self.date_of_birth = self.date_of_birth.replace(
                    year=correct_year
                )
        except ValueError:
            self.error = f"'{value}' contains an invalid date of birth!"
            logger.debug(self.error, exc_info=True)
            return
        # Digits 7-10: gender sequence (lower half female, upper male).
        gender = int(value[6:10])
        if gender >= GENDER_FEMALE_MIN and gender <= GENDER_FEMALE_MAX:
            self.gender = Gender.FEMALE
        else:
            self.gender = Gender.MALE
        # Digit 11: citizenship indicator.
        citizenship = value[10]
        if citizenship == SA_CITIZEN_DIGIT:
            self.citizenship = Citizenship.SA_CITIZEN
        elif citizenship == PERMANENT_RESIDENT_DIGIT:
            self.citizenship = Citizenship.PERMANENT_RESIDENT
        else:
            self.error = f"Invalid citizenship indicator: '{citizenship}'!"
            return
        # Luhn validation over all 13 digits: double every second digit
        # from the right; a valid number's total is divisible by 10.
        # NOTE(review): local `sum` shadows the builtin of the same name.
        digits = [int(d) for d in value]
        digits.reverse()
        sum = 0
        for index, digit in enumerate(digits):
            if (index + 1) % 2 == 0:
                digit = digit * 2
                if digit > 9:
                    digit = digit - 9
            sum = sum + digit
        if not sum % 10 == 0:
            self.error = f"'{value}' contains an invalid checksum digit!"
            return

    @property
    def valid(self) -> bool:
        """Return True if there is not error, False otherwise."""
        return not self.error

    def __repr__(self) -> str:
        return self.clean()
def parse(value: str, raise_exc: bool = True) -> IdNumber:
    """Parse `value` and validate against the RSA ID number format.

    Args:
        value: ID number string to parse and validate.
        raise_exc: When True (default), raise on invalid input instead
            of returning the invalid `IdNumber`.

    Returns:
        A new `IdNumber` instance.

    Raises:
        ValueError: If the ID number is invalid and `raise_exc` is True.

    Examples:
        >>> import rsaidnumber
        >>> value = '1234567890123'
        >>> id_number = rsaidnumber.parse(value)
    """
    result = IdNumber(value)
    result.parse()
    if raise_exc and not result.valid:
        raise ValueError(result.error)
    return result
from rcj.utility import rmath
class Key:
    """An RSA key: the modulus (product of two primes) plus an exponent.

    Parameters:
        product (int): The product of two primes (the modulus).
        exponent (int): The exponent of the public or private key.
    """

    def __init__(self, product: int, exponent: int):
        self.product = product
        self.exponent = exponent

    def __str__(self):
        return f"Key Contents:\nProduct = {self.product:d}\nExponent = {self.exponent:d}"
class KeyPair:
    """A matched RSA public/private key pair.

    Parameters:
        public_key (Key): The public key.
        private_key (Key): The private key.
    """

    def __init__(self, public_key: Key, private_key: Key):
        self.public_key = public_key
        self.private_key = private_key

    def __str__(self):
        return f"[PUBLIC KEY]\n{self.public_key}\n\n[PRIVATE KEY]\n{self.private_key}"
class Encryptor:
    """Encrypts integer messages with an RSA public key.

    Parameters:
        public_key (Key): The public key.
    """

    def __init__(self, public_key: Key):
        self.public_key = public_key

    def encrypt(self, message: int) -> int:
        """Encrypt a single integer.

        Parameters:
            message (int): The integer to be encrypted.

        Returns:
            int: The encrypted integer.
        """
        key = self.public_key
        # Three-argument pow() performs fast modular exponentiation.
        return pow(message, key.exponent, key.product)
class Decryptor:
    """Decrypts integer messages with an RSA private key.

    Parameters:
        private_key (Key): The private key.
    """

    def __init__(self, private_key: Key):
        self.private_key = private_key

    def decrypt(self, message: int) -> int:
        """Decrypt a single integer.

        Parameters:
            message (int): The integer to be decrypted.

        Returns:
            int: The decrypted integer.
        """
        key = self.private_key
        # Three-argument pow() performs fast modular exponentiation.
        return pow(message, key.exponent, key.product)
def generate_key_pair() -> KeyPair:
    """Generate an RSA public/private key pair.

    Returns:
        KeyPair: A class that holds the public key and private key.
    """
    # NOTE(review): rmath.generate_prime_candidate and rmath.lcd are
    # opaque helpers from rcj.utility -- presumably "random 1024-bit
    # prime" and "least common multiple"; confirm in rmath.
    p = rmath.generate_prime_candidate(1024)
    q = rmath.generate_prime_candidate(1024)
    modulus = p * q
    lambda_n = rmath.lcd(p - 1, q - 1)
    e = 65537  # conventional public exponent (Fermat prime F4)
    # Private exponent: modular inverse of e modulo lambda(n), taken
    # from the first Bezout coefficient of the extended-gcd helper.
    d = rmath.gcd_linear_combination(e, lambda_n)[0] % lambda_n
    return KeyPair(Key(modulus, e), Key(modulus, d))
from dash.development.base_component import Component, _explicitize_args
class ScheduleCard(Component):
    """A ScheduleCard component.
Keyword arguments:
- id (string | dict; optional):
    The ID used to identify this component in Dash callbacks.
- dailySchedule (boolean; optional)
- displayShiftEndTime (string; optional)
- displayShiftStartTime (string; optional)
- onOffValue (a value equal to: 'On', 'Off'; optional)
- openCloseOnValue (a value equal to: 'Open', 'Close', 'On'; optional)
- scheduleChanged (number; optional)
- scheduleType (string; optional)
- shiftEndTime (string; optional)
- shiftStartTime (string; optional)
- showOpenCloseBtns (boolean; optional)
- showViewModeCard (boolean; optional)
- vacTrnOffValue (a value equal to: 'Off', 'TRN', 'VAC'; optional)"""
    # NOTE(review): this class follows the boilerplate layout emitted by
    # the Dash component generator and is presumably auto-generated from
    # the React source -- changes are normally made upstream, not here.
    @_explicitize_args
    def __init__(self, displayShiftStartTime=Component.UNDEFINED, displayShiftEndTime=Component.UNDEFINED, dailySchedule=Component.UNDEFINED, scheduleChanged=Component.UNDEFINED, id=Component.UNDEFINED, onOffValue=Component.UNDEFINED, openCloseOnValue=Component.UNDEFINED, vacTrnOffValue=Component.UNDEFINED, showOpenCloseBtns=Component.UNDEFINED, showViewModeCard=Component.UNDEFINED, scheduleType=Component.UNDEFINED, shiftStartTime=Component.UNDEFINED, shiftEndTime=Component.UNDEFINED, **kwargs):
        # Props Dash is allowed to serialize for this component.
        self._prop_names = ['id', 'dailySchedule', 'displayShiftEndTime', 'displayShiftStartTime', 'onOffValue', 'openCloseOnValue', 'scheduleChanged', 'scheduleType', 'shiftEndTime', 'shiftStartTime', 'showOpenCloseBtns', 'showViewModeCard', 'vacTrnOffValue']
        # Component type and JS namespace used to resolve the React class.
        self._type = 'ScheduleCard'
        self._namespace = 'rsa_scheduler_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['id', 'dailySchedule', 'displayShiftEndTime', 'displayShiftStartTime', 'onOffValue', 'openCloseOnValue', 'scheduleChanged', 'scheduleType', 'shiftEndTime', 'shiftStartTime', 'showOpenCloseBtns', 'showViewModeCard', 'vacTrnOffValue']
        self.available_wildcard_properties = []
        # Only explicitly-passed kwargs are forwarded to the base class.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # This component declares no required props (empty list below).
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(ScheduleCard, self).__init__(**args)
import numpy as np
import scipy.stats as ss
import scipy.linalg as sl
from scipy.spatial.distance import squareform
import pyrsa
def make_design(n_cond, n_part):
    """
    Makes a simple fMRI design with n_cond conditions, each measured
    n_part times.

    Args:
        n_cond (int): Number of conditions
        n_part (int): Number of partitions

    Returns:
        Tuple (cond_vec, part_vec)
        cond_vec (np.ndarray): n_obs vector with condition
        part_vec (np.ndarray): n_obs vector with partition
    """
    conditions = np.arange(n_cond)
    partitions = np.arange(n_part)
    # Conditions cycle within each partition; partitions are blocked.
    # astype(float) matches the float64 dtype the kron formulation gave.
    cond_vec = np.tile(conditions, n_part).astype(float)
    part_vec = np.repeat(partitions, n_cond).astype(float)
    return (cond_vec, part_vec)
def make_dataset(model, theta, cond_vec, n_channel=30, n_sim=1,
                 signal=1, noise=1, signal_cov_channel=None,
                 noise_cov_channel=None, noise_cov_trial=None,
                 use_exact_signal=False, use_same_signal=False):
    """
    Simulates a fMRI-style data set

    Args:
        model (pyrsa.Model): the model from which to generate data
        theta (numpy.ndarray): vector of parameters (one dimensional)
        cond_vec (numpy.ndarray): RSA-style model:
                                      vector of experimental conditions
                                  Encoding-style:
                                      design matrix (n_obs x n_cond)
        n_channel (int): Number of channels (default = 30)
        n_sim (int): Number of simulation with the same signal
            (default = 1)
        signal (float): Signal variance (multiplied by predicted G)
        signal_cov_channel(numpy.ndarray):
            Covariance matrix of signal across channels
        noise (float):
            Noise variance
        noise_cov_channel(numpy.ndarray):
            Covariance matrix of noise (default = identity)
        noise_cov_trial(numpy.ndarray):
            Covariance matrix of noise across trials (n_obs x n_obs)
        use_exact_signal (bool): Makes the signal so that G is exactly as
                                 specified (default: False)
        use_same_signal (bool): Uses the same signal for all simulation
                                (default: False)

    Returns:
        data (list): List of pyrsa.Dataset with obs_descriptors

    Raises:
        ValueError: if cond_vec is neither 1d nor 2d, or a covariance
            matrix has the wrong shape
    """
    # Get the model prediction and build second moment matrix
    # Note that this step assumes that RDM uses squared Euclidean distances
    RDM = model.predict(theta)
    D = squareform(RDM)
    H = pyrsa.util.matrix.centering(D.shape[0])
    G = -0.5 * (H @ D @ H)
    # Make design matrix
    if cond_vec.ndim == 1:
        Zcond = pyrsa.util.matrix.indicator(cond_vec)
    elif cond_vec.ndim == 2:
        Zcond = cond_vec
    else:
        raise ValueError("cond_vec needs to be either vector or design matrix")
    n_obs, _ = Zcond.shape
    # If signal_cov_channel is given, precalculate the cholesky decomp.
    # NOTE: the shape checks below must use `!=` (value comparison);
    # `is not` compared tuple identity and was always True, so every
    # provided covariance used to raise.
    if signal_cov_channel is None:
        signal_chol_channel = None
    else:
        if signal_cov_channel.shape != (n_channel, n_channel):
            raise ValueError("Signal covariance for channels needs to be \
                             n_channel x n_channel array")
        signal_chol_channel = np.linalg.cholesky(signal_cov_channel)
    # If noise_cov_channel is given, precalculate the cholesky decomp
    if noise_cov_channel is None:
        noise_chol_channel = None
    else:
        if noise_cov_channel.shape != (n_channel, n_channel):
            raise ValueError("noise covariance for channels needs to be \
                             n_channel x n_channel array")
        noise_chol_channel = np.linalg.cholesky(noise_cov_channel)
    # If noise_cov_trial is given, precalculate the cholesky decomp.
    # The trial covariance acts across observations, so it must be
    # n_obs x n_obs (the old check wrongly tested against n_channel).
    if noise_cov_trial is None:
        noise_chol_trial = None
    else:
        if noise_cov_trial.shape != (n_obs, n_obs):
            raise ValueError("noise covariance for trials needs to be \
                             n_obs x n_obs array")
        noise_chol_trial = np.linalg.cholesky(noise_cov_trial)
    # Generate the signal - here same for all simulations
    if use_same_signal:
        true_U = make_signal(G, n_channel, use_exact_signal,
                             signal_chol_channel)
    # Generate noise as a matrix normal, independent across partitions
    # If noise covariance structure is given, it is assumed that it's the same
    # across different partitions
    obs_des = {"cond_vec": cond_vec}
    des = {"signal": signal, "noise": noise,
           "model": model.name, "theta": theta}
    dataset_list = []
    for _ in range(0, n_sim):
        # If necessary - make a new signal
        if not use_same_signal:
            true_U = make_signal(G, n_channel, use_exact_signal,
                                 signal_chol_channel)
        # Make noise with normal distribution
        # - allows later plugin of other dists
        epsilon = np.random.uniform(0, 1, size=(n_obs, n_channel))
        epsilon = ss.norm.ppf(epsilon) * np.sqrt(noise)
        # Now add spatial and temporal covariance structure as required
        if noise_chol_channel is not None:
            epsilon = epsilon @ noise_chol_channel
        if noise_chol_trial is not None:
            epsilon = noise_chol_trial @ epsilon
        # Assemble the data set
        data = Zcond @ true_U * np.sqrt(signal) + epsilon
        dataset = pyrsa.data.Dataset(data,
                                     obs_descriptors=obs_des,
                                     descriptors=des)
        dataset_list.append(dataset)
    return dataset_list
def make_signal(G, n_channel, make_exact=False, chol_channel=None):
    """Draw a random signal matrix with second-moment structure G.

    If the number of channels is smaller than the number of conditions,
    the signal is generated in n_cond dimensions and excess columns are
    dropped afterwards, so the construction never degenerates.

    Args:
        G(np.array): desired second moment matrix (ncond x ncond)
        n_channel (int): Number of channels
        make_exact (bool): enforce exact signal distances
            (default: False)
        chol_channel: Cholesky decomposition of the signal covariance
            matrix (default: None - makes signal i.i.d.)

    Returns:
        np.array (n_cond x n_channel): random signal
    """
    n_cond = G.shape[0]
    # Work in at least n_cond dimensions; remember how many columns to
    # keep at the end (None means: keep everything).
    trim_to = None
    if n_cond > n_channel:
        trim_to = n_channel
        n_channel = n_cond
    # Two-step draw (uniform -> normal quantile) keeps the door open for
    # swapping in other base distributions later.
    draws = np.random.uniform(0, 1, size=(n_cond, n_channel))
    signal_mat = ss.norm.ppf(draws)
    signal_mat = signal_mat - signal_mat.mean(axis=1, keepdims=True)
    if make_exact:
        # Whiten the rows so that signal_mat @ signal_mat.T equals
        # n_channel * identity before imposing G.
        empirical = signal_mat @ signal_mat.transpose()
        L_emp, D_emp, _ = sl.ldl(empirical)
        D_emp[D_emp < 1e-15] = 1e-15  # we need an invertible solution!
        whitener = L_emp @ np.sqrt(D_emp)
        signal_mat = np.linalg.solve(whitener, signal_mat) * np.sqrt(n_channel)
    # Impose spatial covariance matrix across channels
    if chol_channel is not None:
        signal_mat = signal_mat @ chol_channel
    # Impose the desired second moment via an LDL-based square root;
    # plain Cholesky fails on rank-deficient G.
    L_G, D_G, _ = sl.ldl(G)
    D_G[D_G < 1e-15] = 0
    signal_mat = (L_G @ np.sqrt(D_G)) @ signal_mat
    if trim_to:
        signal_mat = signal_mat[:, :trim_to]
    return signal_mat
import numpy as np
import tqdm
from collections.abc import Iterable
from pyrsa.rdm import compare
from pyrsa.inference import bootstrap_sample
from pyrsa.inference import bootstrap_sample_rdm
from pyrsa.inference import bootstrap_sample_pattern
from pyrsa.model import Model
from pyrsa.util.inference_util import input_check_model
from .result import Result
from .crossvalsets import sets_k_fold
from .noise_ceiling import boot_noise_ceiling
from .noise_ceiling import cv_noise_ceiling
def eval_fixed(models, data, theta=None, method='cosine'):
    """Evaluates models on data without bootstrapping or cross-validation.

    Args:
        models(list of pyrsa.model.Model): models to be evaluated
        data(pyrsa.rdm.RDMs): data to evaluate on
        theta(numpy.ndarray): parameter vector for the models
        method(string): comparison method to use

    Returns:
        pyrsa.inference.result.Result: evaluation result object

    Raises:
        ValueError: if models is neither a Model nor an iterable of Models
    """
    evaluations, theta, _ = input_check_model(models, theta, None, 1)
    if isinstance(models, Model):
        prediction = models.predict_rdm(theta=theta)
        evaluations = np.array([[compare(prediction, data, method)[0]]])
    elif isinstance(models, Iterable):
        # One evaluation per model, averaged over data rdms.
        for idx, model in enumerate(models):
            prediction = model.predict_rdm(theta=theta[idx])
            evaluations[idx] = np.mean(compare(prediction, data, method)[0])
        evaluations = evaluations.reshape((1, len(models)))
    else:
        raise ValueError('models should be a pyrsa.model.Model or a list of'
                         + ' such objects')
    noise_ceil = boot_noise_ceiling(
        data, method=method, rdm_descriptor='index')
    return Result(models, evaluations, method=method,
                  cv_method='fixed', noise_ceiling=noise_ceil)
def eval_bootstrap(models, data, theta=None, method='cosine', N=1000,
                   pattern_descriptor='index', rdm_descriptor='index',
                   boot_noise_ceil=True):
    """evaluates models on data

    performs bootstrapping (over rdms and patterns jointly) to get a
    sampling distribution

    Args:
        models(pyrsa.model.Model): models to be evaluated
        data(pyrsa.rdm.RDMs): data to evaluate on
        theta(numpy.ndarray): parameter vector for the models
        method(string): comparison method to use
        N(int): number of samples
        pattern_descriptor(string): descriptor to group patterns for bootstrap
        rdm_descriptor(string): descriptor to group rdms for bootstrap
        boot_noise_ceil(bool): bootstrap the noise ceiling per sample

    Returns:
        pyrsa.inference.result.Result: result object with the N evaluations
    """
    evaluations, theta, fitter = input_check_model(models, theta, None, N)
    noise_min = []
    noise_max = []
    for i in tqdm.trange(N):
        # joint resample over both rdms and patterns
        sample, rdm_idx, pattern_idx = \
            bootstrap_sample(data, rdm_descriptor=rdm_descriptor,
                             pattern_descriptor=pattern_descriptor)
        # skip degenerate samples: fewer than 3 distinct patterns leave
        # too few dissimilarities for a meaningful comparison
        if len(np.unique(pattern_idx)) >= 3:
            if isinstance(models, Model):
                rdm_pred = models.predict_rdm(theta=theta)
                # align the prediction with the sampled pattern subset
                rdm_pred = rdm_pred.subsample_pattern(pattern_descriptor,
                                                      pattern_idx)
                evaluations[i] = np.mean(compare(rdm_pred, sample, method))
            elif isinstance(models, Iterable):
                j = 0
                for mod in models:
                    rdm_pred = mod.predict_rdm(theta=theta[j])
                    rdm_pred = rdm_pred.subsample_pattern(pattern_descriptor,
                                                          pattern_idx)
                    evaluations[i, j] = np.mean(compare(rdm_pred, sample,
                                                        method))
                    j += 1
            if boot_noise_ceil:
                noise_min_sample, noise_max_sample = boot_noise_ceiling(
                    sample, method=method, rdm_descriptor=rdm_descriptor)
                noise_min.append(noise_min_sample)
                noise_max.append(noise_max_sample)
        else:
            # degenerate sample: record NaN for this bootstrap iteration
            if isinstance(models, Model):
                evaluations[i] = np.nan
            elif isinstance(models, Iterable):
                evaluations[i, :] = np.nan
            noise_min.append(np.nan)
            noise_max.append(np.nan)
    if isinstance(models, Model):
        evaluations = evaluations.reshape((N, 1))
    if boot_noise_ceil:
        # per-sample ceilings: shape (2, n_valid_samples)
        noise_ceil = np.array([noise_min, noise_max])
    else:
        # single ceiling computed on the full, unsampled data
        noise_ceil = np.array(boot_noise_ceiling(
            data, method=method, rdm_descriptor=rdm_descriptor))
    result = Result(models, evaluations, method=method,
                    cv_method='bootstrap', noise_ceiling=noise_ceil)
    return result
def eval_bootstrap_pattern(models, data, theta=None, method='cosine', N=1000,
                           pattern_descriptor='index', rdm_descriptor='index',
                           boot_noise_ceil=True):
    """evaluates a models on data

    performs bootstrapping over patterns only to get a sampling distribution

    Args:
        models(pyrsa.model.Model): models to be evaluated
        data(pyrsa.rdm.RDMs): data to evaluate on
        theta(numpy.ndarray): parameter vector for the models
        method(string): comparison method to use
        N(int): number of samples
        pattern_descriptor(string): descriptor to group patterns for bootstrap
        rdm_descriptor(string): descriptor to group patterns for noise
            ceiling calculation
        boot_noise_ceil(bool): bootstrap the noise ceiling per sample

    Returns:
        pyrsa.inference.result.Result: result object with the N evaluations
    """
    evaluations, theta, fitter = input_check_model(models, theta, None, N)
    noise_min = []
    noise_max = []
    for i in tqdm.trange(N):
        # resample patterns only; all rdms are kept
        sample, pattern_idx = \
            bootstrap_sample_pattern(data, pattern_descriptor)
        # skip degenerate samples with fewer than 3 distinct patterns
        if len(np.unique(pattern_idx)) >= 3:
            if isinstance(models, Model):
                rdm_pred = models.predict_rdm(theta=theta)
                # align the prediction with the sampled pattern subset
                rdm_pred = rdm_pred.subsample_pattern(pattern_descriptor,
                                                      pattern_idx)
                evaluations[i] = np.mean(compare(rdm_pred, sample, method))
            elif isinstance(models, Iterable):
                j = 0
                for mod in models:
                    rdm_pred = mod.predict_rdm(theta=theta[j])
                    rdm_pred = rdm_pred.subsample_pattern(pattern_descriptor,
                                                          pattern_idx)
                    evaluations[i, j] = np.mean(compare(rdm_pred, sample,
                                                        method))
                    j += 1
            if boot_noise_ceil:
                noise_min_sample, noise_max_sample = boot_noise_ceiling(
                    sample, method=method, rdm_descriptor=rdm_descriptor)
                noise_min.append(noise_min_sample)
                noise_max.append(noise_max_sample)
        else:
            # degenerate sample: record NaN for this bootstrap iteration
            if isinstance(models, Model):
                evaluations[i] = np.nan
            elif isinstance(models, Iterable):
                evaluations[i, :] = np.nan
            noise_min.append(np.nan)
            noise_max.append(np.nan)
    if isinstance(models, Model):
        evaluations = evaluations.reshape((N, 1))
    if boot_noise_ceil:
        noise_ceil = np.array([noise_min, noise_max])
    else:
        noise_ceil = np.array(boot_noise_ceiling(
            data, method=method, rdm_descriptor=rdm_descriptor))
    result = Result(models, evaluations, method=method,
                    cv_method='bootstrap_pattern', noise_ceiling=noise_ceil)
    return result
def eval_bootstrap_rdm(models, data, theta=None, method='cosine', N=1000,
                       rdm_descriptor='index', boot_noise_ceil=True):
    """evaluates models on data

    performs bootstrapping over rdms only to get a sampling distribution

    Args:
        models(pyrsa.model.Model): models to be evaluated
        data(pyrsa.rdm.RDMs): data to evaluate on
        theta(numpy.ndarray): parameter vector for the models
        method(string): comparison method to use
        N(int): number of samples
        rdm_descriptor(string): rdm_descriptor to group rdms for bootstrap
        boot_noise_ceil(bool): bootstrap the noise ceiling per sample

    Returns:
        pyrsa.inference.result.Result: result object with the N evaluations
    """
    evaluations, theta, _ = input_check_model(models, theta, None, N)
    noise_min = []
    noise_max = []
    for i in tqdm.trange(N):
        # resample rdms only; the pattern set stays fixed, so no
        # degenerate-sample check is needed here
        sample, rdm_idx = bootstrap_sample_rdm(data, rdm_descriptor)
        if isinstance(models, Model):
            rdm_pred = models.predict_rdm(theta=theta)
            evaluations[i] = np.mean(compare(rdm_pred, sample, method))
        elif isinstance(models, Iterable):
            j = 0
            for mod in models:
                rdm_pred = mod.predict_rdm(theta=theta[j])
                evaluations[i, j] = np.mean(compare(rdm_pred, sample,
                                                    method))
                j += 1
        if boot_noise_ceil:
            noise_min_sample, noise_max_sample = boot_noise_ceiling(
                sample, method=method, rdm_descriptor=rdm_descriptor)
            noise_min.append(noise_min_sample)
            noise_max.append(noise_max_sample)
    if isinstance(models, Model):
        evaluations = evaluations.reshape((N, 1))
    if boot_noise_ceil:
        # per-sample ceilings, shape (2, N)
        noise_ceil = np.array([noise_min, noise_max])
    else:
        # single ceiling computed on the full, unsampled data
        noise_ceil = np.array(boot_noise_ceiling(
            data, method=method, rdm_descriptor=rdm_descriptor))
    result = Result(models, evaluations, method=method,
                    cv_method='bootstrap_rdm', noise_ceiling=noise_ceil)
    return result
def crossval(models, rdms, train_set, test_set, ceil_set=None, method='cosine',
             fitter=None, pattern_descriptor='index'):
    """evaluates models on cross-validation sets

    Args:
        models(pyrsa.model.Model): models to be evaluated
        rdms(pyrsa.rdm.RDMs): full dataset
        train_set(list): a list of the training RDMs with 2-tuple entries:
            (RDMs, pattern_idx)
        test_set(list): a list of the test RDMs with 2-tuple entries:
            (RDMs, pattern_idx)
        ceil_set(list): optional list for the noise-ceiling lower bound,
            same structure as test_set
        method(string): comparison method to use
        fitter(function): fitting method for the models
        pattern_descriptor(string): descriptor to group patterns

    Returns:
        pyrsa.inference.result.Result: result object with the evaluations
    """
    assert len(train_set) == len(test_set), \
        'train_set and test_set must have the same length'
    if ceil_set is not None:
        assert len(ceil_set) == len(test_set), \
            'ceil_set and test_set must have the same length'
    evaluations = []
    noise_ceil = []
    for i in range(len(train_set)):
        train = train_set[i]
        test = test_set[i]
        # degenerate folds (no rdms or too few conditions on either side)
        # are recorded as NaN rather than evaluated
        if (train[0].n_rdm == 0 or test[0].n_rdm == 0 or
                train[0].n_cond <= 2 or test[0].n_cond <= 2):
            if isinstance(models, Model):
                evals = np.nan
            elif isinstance(models, Iterable):
                evals = np.empty(len(models)) * np.nan
        else:
            if isinstance(models, Model):
                if fitter is None:
                    fitter = models.default_fitter
                # fit on the training fold, evaluate on the held-out
                # patterns of the test fold
                theta = fitter(models, train[0], method=method,
                               pattern_idx=train[1],
                               pattern_descriptor=pattern_descriptor)
                pred = models.predict_rdm(theta)
                pred = pred.subsample_pattern(by=pattern_descriptor,
                                              value=test[1])
                evals = np.mean(compare(pred, test[0], method))
            elif isinstance(models, Iterable):
                # NOTE(review): this rebinds `fitter` to a per-model list,
                # relying on input_check_model to accept either form on
                # later iterations — confirm that contract
                evals, _, fitter = input_check_model(models, None, fitter)
                for j in range(len(models)):
                    theta = fitter[j](models[j], train[0], method=method,
                                      pattern_idx=train[1],
                                      pattern_descriptor=pattern_descriptor)
                    pred = models[j].predict_rdm(theta)
                    pred = pred.subsample_pattern(by=pattern_descriptor,
                                                  value=test[1])
                    evals[j] = np.mean(compare(pred, test[0], method))
            if ceil_set is None:
                # without an explicit ceiling set, fall back to a
                # leave-one-rdm-out ceiling on the test patterns
                noise_ceil.append(boot_noise_ceiling(
                    rdms.subsample_pattern(by=pattern_descriptor,
                                           value=test[1]),
                    method=method))
        evaluations.append(evals)
    if isinstance(models, Model):
        models = [models]
    evaluations = np.array(evaluations).T  # .T to switch models/set order
    evaluations = evaluations.reshape((1, len(models), len(train_set)))
    if ceil_set is not None:
        noise_ceil = cv_noise_ceiling(rdms, ceil_set, test_set, method=method,
                                      pattern_descriptor=pattern_descriptor)
    else:
        noise_ceil = np.array(noise_ceil).T
    result = Result(models, evaluations, method=method,
                    cv_method='crossvalidation', noise_ceiling=noise_ceil)
    return result
def bootstrap_crossval(models, data, method='cosine', fitter=None,
                       k_pattern=5, k_rdm=5, N=1000,
                       pattern_descriptor='index', rdm_descriptor='index',
                       random=True):
    """evaluates models by k-fold crossvalidation within a bootstrap

    If a k is set to 1 no crossvalidation is performed over the
    corresponding dimension.

    Args:
        models(pyrsa.model.Model): models to be evaluated
        data(pyrsa.rdm.RDMs): RDM data to use
        method(string): comparison method to use
        fitter(function): fitting method for models
        k_pattern(int): #folds over patterns
        k_rdm(int): #folds over rdms
        N(int): number of bootstrap samples (default: 1000)
        pattern_descriptor(string): descriptor to group patterns
        rdm_descriptor(string): descriptor to group rdms
        random(bool): randomize group assignments (default: True)

    Returns:
        pyrsa.inference.result.Result: result object; evaluations have
            shape (N, n_models, k_pattern * k_rdm)
    """
    if isinstance(models, Model):
        evaluations = np.zeros((N, 1, k_pattern * k_rdm))
    elif isinstance(models, Iterable):
        evaluations = np.zeros((N, len(models), k_pattern * k_rdm))
    noise_ceil = np.zeros((2, N))
    for i_sample in tqdm.trange(N):
        # joint bootstrap over rdms and patterns; the folds below are
        # formed WITHIN each bootstrap sample
        sample, rdm_idx, pattern_idx = bootstrap_sample(
            data,
            rdm_descriptor=rdm_descriptor,
            pattern_descriptor=pattern_descriptor)
        # require enough distinct groups for the requested folds
        if len(np.unique(rdm_idx)) >= k_rdm \
           and len(np.unique(pattern_idx)) >= 3 * k_pattern:
            train_set, test_set, ceil_set = sets_k_fold(
                sample,
                pattern_descriptor=pattern_descriptor,
                rdm_descriptor=rdm_descriptor,
                k_pattern=k_pattern, k_rdm=k_rdm, random=random)
            # remap fold pattern indices back onto the (possibly repeated)
            # bootstrap sample indices
            for idx in range(len(test_set)):
                test_set[idx][1] = _concat_sampling(pattern_idx,
                                                    test_set[idx][1])
                train_set[idx][1] = _concat_sampling(pattern_idx,
                                                     train_set[idx][1])
            cv_result = crossval(
                models, sample,
                train_set, test_set,
                method=method, fitter=fitter,
                pattern_descriptor=pattern_descriptor)
            if isinstance(models, Model):
                evaluations[i_sample, 0, :] = cv_result.evaluations[0, 0]
            elif isinstance(models, Iterable):
                evaluations[i_sample, :, :] = cv_result.evaluations[0]
            # average the fold-wise ceilings into one pair per sample
            noise_ceil[:, i_sample] = np.mean(cv_result.noise_ceiling, axis=-1)
        else:  # sample does not allow desired crossvalidation
            if isinstance(models, Model):
                evaluations[i_sample, 0, :] = np.nan
            elif isinstance(models, Iterable):
                evaluations[i_sample, :, :] = np.nan
            noise_ceil[:, i_sample] = np.nan
    result = Result(models, evaluations, method=method,
                    cv_method='bootstrap_crossval', noise_ceiling=noise_ceil)
    return result
def _concat_sampling(sample1, sample2):
""" computes an index vector for the sequential sampling with sample1
and sample2
"""
sample_out = [[i_samp1 for i_samp1 in sample1 if i_samp1 == i_samp2]
for i_samp2 in sample2]
return sum(sample_out, []) | /rsa3-3.0.0.post20201106-py3-none-any.whl/pyrsa/inference/evaluate.py | 0.75392 | 0.384825 | evaluate.py | pypi |
import warnings

import numpy as np

from pyrsa.util.rdm_utils import add_pattern_index
def sets_leave_one_out_pattern(rdms, pattern_descriptor):
    """ generates training and test set combinations by leaving one level
    of pattern_descriptor out as a test set.
    This is only sensible if pattern_descriptor already defines larger groups!

    the ceil_train_set contains the rdms for the test-patterns from the
    training-rdms. This is required for computing the noise-ceiling

    Args:
        rdms(pyrsa.rdm.RDMs): rdms to use
        pattern_descriptor(String): descriptor to select groups

    Returns:
        train_set(list): list of tuples (rdms, pattern_idx)
        test_set(list): list of tuples (rdms, pattern_idx)
        ceil_set(list): list of tuples (rdms, pattern_idx)
    """
    pattern_descriptor, pattern_select = \
        add_pattern_index(rdms, pattern_descriptor)
    train_set = []
    test_set = []
    ceil_set = []
    for i_pattern in pattern_select:
        # train on all patterns except the held-out one
        pattern_idx_train = np.setdiff1d(pattern_select, i_pattern)
        rdms_train = rdms.subset_pattern(pattern_descriptor,
                                         pattern_idx_train)
        pattern_idx_test = [i_pattern]
        rdms_test = rdms.subset_pattern(pattern_descriptor,
                                        pattern_idx_test)
        # NOTE(review): rdms_ceil is subset from the full `rdms` with the
        # same pattern index as rdms_test, so ceil_set duplicates test_set
        # here; the docstring implies it should come from the training
        # rdms instead — confirm intent.
        rdms_ceil = rdms.subset_pattern(pattern_descriptor,
                                        pattern_idx_test)
        train_set.append((rdms_train, pattern_idx_train))
        test_set.append((rdms_test, pattern_idx_test))
        ceil_set.append((rdms_ceil, pattern_idx_test))
    return train_set, test_set, ceil_set
def sets_leave_one_out_rdm(rdms, rdm_descriptor='index'):
    """ generates training and test set combinations by leaving one level
    of rdm_descriptor out as a test set.

    Args:
        rdms(pyrsa.rdm.RDMs): rdms to use
        rdm_descriptor(String): descriptor to select groups

    Returns:
        train_set(list): list of tuples (rdms, pattern_idx)
        test_set(list): list of tuples (rdms, pattern_idx)
        ceil_set(list): list of tuples (rdms, pattern_idx)
    """
    rdm_select = rdms.rdm_descriptors[rdm_descriptor]
    rdm_select = np.unique(rdm_select)
    if len(rdm_select) > 1:
        train_set = []
        test_set = []
        for i_pattern in rdm_select:
            rdm_idx_train = np.setdiff1d(rdm_select, i_pattern)
            rdms_train = rdms.subset(rdm_descriptor,
                                     rdm_idx_train)
            rdm_idx_test = [i_pattern]
            rdms_test = rdms.subset(rdm_descriptor,
                                    rdm_idx_test)
            # all patterns are kept; only the rdm dimension is split
            train_set.append((rdms_train, np.arange(rdms.n_cond)))
            test_set.append((rdms_test, np.arange(rdms.n_cond)))
        ceil_set = train_set
    else:
        # `Warning(...)` only instantiated an exception object and
        # discarded it; warnings.warn actually emits the warning
        warnings.warn('leave one out called with only one group')
        train_set = [(rdms, np.arange(rdms.n_cond))]
        test_set = [(rdms, np.arange(rdms.n_cond))]
        ceil_set = [(rdms, np.arange(rdms.n_cond))]
    return train_set, test_set, ceil_set
def sets_k_fold(rdms, k_rdm=5, k_pattern=5, random=True,
                pattern_descriptor='index', rdm_descriptor='index'):
    """ generates training and test set combinations by splitting into k
    similar sized groups. This version splits both over rdms and over patterns
    resulting in k_rdm * k_pattern (training, test) pairs.

    If a k is set to 1 the corresponding dimension is not crossvalidated.

    Args:
        rdms(pyrsa.rdm.RDMs): rdms to use
        pattern_descriptor(String): descriptor to select pattern groups
        rdm_descriptor(String): descriptor to select rdm groups
        k_rdm(int): number of rdm groups
        k_pattern(int): number of pattern groups
        random(bool): whether the assignment shall be randomized

    Returns:
        train_set(list): list of tuples (rdms, pattern_idx)
        test_set(list): list of tuples (rdms, pattern_idx)
        ceil_set(list): list of tuples (rdms, pattern_idx)
    """
    rdm_select = rdms.rdm_descriptors[rdm_descriptor]
    rdm_select = np.unique(rdm_select)
    assert k_rdm <= len(rdm_select), \
        'Can make at most as many groups as rdms'
    if random:
        np.random.shuffle(rdm_select)
    group_size_rdm = np.floor(len(rdm_select) / k_rdm)
    additional_rdms = len(rdm_select) % k_rdm
    train_set = []
    test_set = []
    ceil_set = []
    for i_group in range(k_rdm):
        test_idx = np.arange(i_group * group_size_rdm,
                             (i_group + 1) * group_size_rdm)
        if i_group < additional_rdms:
            # leftover rdms are addressed from the end via negative indices
            test_idx = np.concatenate((test_idx, [-(i_group+1)]))
        if k_rdm <= 1:
            train_idx = test_idx
        else:
            # normalize negative indices before the set difference:
            # -1 and len-1 address the same rdm, but setdiff1d compares
            # raw values, so without the modulo the tail rdms ended up
            # in both the training and the test set
            train_idx = np.setdiff1d(np.arange(len(rdm_select)),
                                     test_idx % len(rdm_select))
        rdm_idx_test = [rdm_select[int(idx)] for idx in test_idx]
        rdm_idx_train = [rdm_select[int(idx)] for idx in train_idx]
        rdms_test = rdms.subsample(rdm_descriptor,
                                   rdm_idx_test)
        rdms_train = rdms.subsample(rdm_descriptor,
                                    rdm_idx_train)
        train_new, test_new, _ = sets_k_fold_pattern(
            rdms_train, k=k_pattern,
            pattern_descriptor=pattern_descriptor, random=random)
        # copy the inner [rdms, pattern_idx] pairs before test_new is
        # modified below: a shallow list.copy() shared the inner lists,
        # which made the ceiling set silently receive the test rdms
        # instead of the training rdms
        ceil_new = [[entry[0], entry[1]] for entry in test_new]
        for i_pattern in range(k_pattern):
            # replace the training-rdm subsets by the test-rdm subsets
            # for the same held-out patterns
            test_new[i_pattern][0] = rdms_test.subsample_pattern(
                by=pattern_descriptor,
                value=test_new[i_pattern][1])
        train_set += train_new
        test_set += test_new
        ceil_set += ceil_new
    return train_set, test_set, ceil_set
def sets_k_fold_rdm(rdms, k_rdm=5, random=True, rdm_descriptor='index'):
    """ generates training and test set combinations by splitting the rdms
    into k similar sized groups (patterns are not split).

    Args:
        rdms(pyrsa.rdm.RDMs): rdms to use
        rdm_descriptor(String): descriptor to select rdm groups
        k_rdm(int): number of rdm groups
        random(bool): whether the assignment shall be randomized

    Returns:
        train_set(list): list of tuples (rdms, pattern_idx)
        test_set(list): list of tuples (rdms, pattern_idx)
        ceil_set(list): list of tuples (rdms, pattern_idx)
    """
    rdm_select = rdms.rdm_descriptors[rdm_descriptor]
    rdm_select = np.unique(rdm_select)
    assert k_rdm <= len(rdm_select), \
        'Can make at most as many groups as rdms'
    if random:
        np.random.shuffle(rdm_select)
    group_size_rdm = np.floor(len(rdm_select) / k_rdm)
    additional_rdms = len(rdm_select) % k_rdm
    train_set = []
    test_set = []
    for i_group in range(k_rdm):
        test_idx = np.arange(i_group * group_size_rdm,
                             (i_group + 1) * group_size_rdm)
        if i_group < additional_rdms:
            # leftover rdms are addressed from the end via negative indices
            test_idx = np.concatenate((test_idx, [-(i_group+1)]))
        # normalize negative indices before the set difference: without
        # the modulo, -1 and len-1 were treated as different values and
        # the tail rdms leaked into both the training and the test set
        train_idx = np.setdiff1d(np.arange(len(rdm_select)),
                                 test_idx % len(rdm_select))
        rdm_idx_test = [rdm_select[int(idx)] for idx in test_idx]
        rdm_idx_train = [rdm_select[int(idx)] for idx in train_idx]
        rdms_test = rdms.subsample(rdm_descriptor,
                                   rdm_idx_test)
        rdms_train = rdms.subsample(rdm_descriptor,
                                    rdm_idx_train)
        # all patterns are used on both sides
        train_set.append([rdms_train, np.arange(rdms_train.n_cond)])
        test_set.append([rdms_test, np.arange(rdms_test.n_cond)])
    ceil_set = train_set
    return train_set, test_set, ceil_set
def sets_k_fold_pattern(rdms, pattern_descriptor='index', k=5, random=False):
    """ generates training and test set combinations by splitting into k
    similar sized groups. This version splits in the given order or
    randomizes the order. For k=1 training and test_set are whole dataset,
    i.e. no crossvalidation is performed.

    For only crossvalidating over patterns there is no independent training
    set for calculating a noise ceiling for the patterns.
    To express this we set ceil_set to None, which makes the crossvalidation
    function calculate a leave one rdm out noise ceiling for the right
    patterns instead.

    Args:
        rdms(pyrsa.rdm.RDMs): rdms to use
        pattern_descriptor(String): descriptor to select groups
        k(int): number of groups
        random(bool): whether the assignment shall be randomized

    Returns:
        train_set(list): list of tuples (rdms, pattern_idx)
        test_set(list): list of tuples (rdms, pattern_idx)
        ceil_set = None
    """
    pattern_descriptor, pattern_select = \
        add_pattern_index(rdms, pattern_descriptor)
    assert k <= len(pattern_select), \
        'Can make at most as many groups as conditions'
    if random:
        np.random.shuffle(pattern_select)
    group_size = np.floor(len(pattern_select) / k)
    additional_patterns = len(pattern_select) % k
    train_set = []
    test_set = []
    for i_group in range(k):
        test_idx = np.arange(i_group * group_size,
                             (i_group + 1) * group_size)
        if i_group < additional_patterns:
            # leftover patterns are addressed from the end via negative
            # indices
            test_idx = np.concatenate((test_idx, [-(i_group+1)]))
        if k <= 1:
            train_idx = test_idx
        else:
            # normalize negative indices before the set difference:
            # without the modulo, -1 and len-1 were treated as different
            # values and the tail patterns leaked into both sets
            train_idx = np.setdiff1d(np.arange(len(pattern_select)),
                                     test_idx % len(pattern_select))
        pattern_idx_test = [pattern_select[int(idx)] for idx in test_idx]
        pattern_idx_train = [pattern_select[int(idx)] for idx in train_idx]
        rdms_test = rdms.subset_pattern(pattern_descriptor,
                                        pattern_idx_test)
        rdms_train = rdms.subset_pattern(pattern_descriptor,
                                         pattern_idx_train)
        test_set.append([rdms_test, pattern_idx_test])
        train_set.append([rdms_train, pattern_idx_train])
    ceil_set = None
    return train_set, test_set, ceil_set
def sets_of_k_rdm(rdms, rdm_descriptor='index', k=5, random=False):
    """ generates training and test set combinations by splitting into
    groups of k rdms. This version splits in the given order or
    randomizes the order. If the number of rdms is not divisible by k
    rdms are added to the first groups such that those have k+1 rdms

    Args:
        rdms(pyrsa.rdm.RDMs): rdms to use
        rdm_descriptor(String): descriptor to select rdm groups
        k(int): group size
        random(bool): whether the assignment shall be randomized

    Returns:
        train_set(list): list of tuples (rdms, pattern_idx)
        test_set(list): list of tuples (rdms, pattern_idx)
        ceil_set(list): list of tuples (rdms, pattern_idx)
    """
    rdm_select = rdms.rdm_descriptors[rdm_descriptor]
    rdm_select = np.unique(rdm_select)
    assert k <= len(rdm_select) / 2, \
        'to form groups we can use at most half the patterns per group'
    n_groups = int(len(rdm_select) / k)
    # sets_k_fold_rdm takes the fold count as `k_rdm`; passing `k=` used
    # to raise TypeError (unexpected keyword argument)
    return sets_k_fold_rdm(rdms, rdm_descriptor=rdm_descriptor,
                           k_rdm=n_groups, random=random)
def sets_of_k_pattern(rdms, pattern_descriptor=None, k=5, random=False):
    """Split the patterns into groups of k for cross-validation.

    Splits in the given order or a randomized one. If the number of
    patterns is not divisible by k, patterns are added to the first
    groups such that those have k+1 patterns.

    Args:
        rdms(pyrsa.rdm.RDMs): rdms to use
        pattern_descriptor(String): descriptor to select groups
        k(int): group size
        random(bool): whether the assignment shall be randomized

    Returns:
        train_set(list): list of tuples (rdms, pattern_idx)
        test_set(list): list of tuples (rdms, pattern_idx)
    """
    pattern_descriptor, pattern_select = \
        add_pattern_index(rdms, pattern_descriptor)
    n_patterns = len(pattern_select)
    assert k <= n_patterns / 2, \
        'to form groups we can use at most half the patterns per group'
    # groups of size k correspond to n_patterns // k folds
    n_groups = int(n_patterns / k)
    return sets_k_fold_pattern(rdms, pattern_descriptor=pattern_descriptor,
                               k=n_groups, random=random)
import numpy as np
from pyrsa.util.inference_util import pool_rdm
from pyrsa.rdm import compare
from .crossvalsets import sets_leave_one_out_rdm
def cv_noise_ceiling(rdms, ceil_set, test_set, method='cosine',
                     pattern_descriptor='index'):
    """ calculates the noise ceiling for crossvalidation.
    The upper bound is calculated by pooling all rdms for the appropriate
    patterns in the testsets.
    the lower bound is calculated by using only the appropriate rdms
    from ceil_set for training.

    Args:
        rdms(pyrsa.rdm.RDMs): complete data
        ceil_set(list): a list of the training RDMs with 2-tuple entries:
            (RDMs, pattern_idx)
        test_set(list): a list of the test RDMs with 2-tuple entries:
            (RDMs, pattern_idx)
        method(string): comparison method to use
        pattern_descriptor(string): descriptor to group patterns

    Returns:
        list: lower nc-bound, upper nc-bound
    """
    assert len(ceil_set) == len(test_set), \
        'train_set and test_set must have the same length'
    # The upper-bound prediction pools ALL rdms and does not depend on
    # the fold, so compute it once outside the loop (same structure as
    # boot_noise_ceiling).
    pred_pooled = pool_rdm(rdms, method=method)
    noise_min = []
    noise_max = []
    for train, test in zip(ceil_set, test_set):
        # lower bound: pool only the fold's training rdms
        pred_train = pool_rdm(train[0], method=method)
        pred_train = pred_train.subsample_pattern(by=pattern_descriptor,
                                                  value=test[1])
        # upper bound: the pooled full data, restricted to test patterns
        pred_test = pred_pooled.subsample_pattern(by=pattern_descriptor,
                                                  value=test[1])
        noise_min.append(np.mean(compare(pred_train, test[0], method)))
        noise_max.append(np.mean(compare(pred_test, test[0], method)))
    noise_min = np.mean(np.array(noise_min))
    noise_max = np.mean(np.array(noise_max))
    return noise_min, noise_max
def boot_noise_ceiling(rdms, method='cosine', rdm_descriptor='index'):
    """Noise ceiling via leave-one-out (lower) and full-set pooling (upper).

    Args:
        rdms(pyrsa.rdm.RDMs): data to calculate noise ceiling
        method(string): comparison method to use
        rdm_descriptor(string): descriptor to group rdms

    Returns:
        list: [lower nc-bound, upper nc-bound]
    """
    _, test_set, ceil_set = sets_leave_one_out_rdm(rdms, rdm_descriptor)
    # upper bound uses the pooled full data against each held-out rdm
    pred_pooled = pool_rdm(rdms, method=method)
    lower = []
    upper = []
    for train, test in zip(ceil_set, test_set):
        # lower bound pools only the remaining (training) rdms
        pred_train = pool_rdm(train[0], method=method)
        lower.append(np.mean(compare(pred_train, test[0], method)))
        upper.append(np.mean(compare(pred_pooled, test[0], method)))
    return np.mean(np.array(lower)), np.mean(np.array(upper))
import numpy as np
from pyrsa.util.rdm_utils import add_pattern_index
def bootstrap_sample(rdms, rdm_descriptor='index', pattern_descriptor='index'):
    """Draws a bootstrap_sample from the data.

    This function generates a bootstrap sample of RDMs resampled over
    measurements and patterns. By default every pattern and RDM sample is
    treated independently. If desired descriptor names can be passed in
    descriptors and in pattern_descriptors to group rdms instead.

    Args:
        rdms(pyrsa.rdm.rdms.RDMs): Data to be used
        rdm_descriptors(String):
            descriptor to group the samples by. For each unique value of
            the descriptor each sample will either contain all RDMs with
            this value or none
        pattern_descriptors(string):
            descriptor to group the patterns by. Each group of patterns will
            be in or out of the sample as a whole

    Returns:
        pyrsa.rdm.rdms.RDMs: rdms
            subsampled dataset with equal number of groups in both patterns
            and measurements of the rdms
        numpy.ndarray: rdm_idx
            sampled rdm indices
        numpy.ndarray: pattern_idx
            sampled pattern descriptor indices
    """
    rdm_select = np.unique(rdms.rdm_descriptors[rdm_descriptor])
    pattern_descriptor, pattern_select = \
        add_pattern_index(rdms, pattern_descriptor)
    # np.random.randint excludes the high bound, so the high argument
    # must be len(...) — the previous len(...) - 1 could never draw the
    # last group, biasing the bootstrap
    rdm_idx = np.random.randint(0, len(rdm_select),
                                size=len(rdm_select))
    rdm_idx = rdm_select[rdm_idx]
    rdms = rdms.subsample(rdm_descriptor, rdm_idx)
    pattern_idx = np.random.randint(0, len(pattern_select),
                                    size=len(pattern_select))
    pattern_idx = pattern_select[pattern_idx]
    rdms = rdms.subsample_pattern(pattern_descriptor,
                                  pattern_idx)
    return rdms, rdm_idx, pattern_idx
def bootstrap_sample_rdm(rdms, rdm_descriptor='index'):
    """Draws a bootstrap_sample from the data.

    This function generates a bootstrap sample of RDMs resampled over
    measurements. By default every RDM sample is treated independently.
    If desired a descriptor name can be passed in rdm_descriptor to group
    rdms.

    Args:
        rdms(pyrsa.rdm.rdms.RDMs): Data to be used
        rdm_descriptor(String):
            descriptor to group the samples by. For each unique value of
            the descriptor each sample will either contain all RDMs with
            this value or none

    Returns:
        pyrsa.rdm.rdms.RDMs: rdms
            subsampled dataset with equal number of groups of rdms
        numpy.ndarray: rdm_idx
            sampled rdm descriptor values

    """
    rdm_select = np.unique(rdms.rdm_descriptors[rdm_descriptor])
    # np.random.randint's upper bound is exclusive, so it must be
    # len(rdm_select) (not len(rdm_select) - 1): the old bound silently
    # excluded the last group from ever being drawn
    rdm_sample = np.random.randint(0, len(rdm_select),
                                   size=len(rdm_select))
    rdm_idx = rdm_select[rdm_sample]
    rdms = rdms.subsample(rdm_descriptor, rdm_idx)
    return rdms, rdm_idx
def bootstrap_sample_pattern(rdms, pattern_descriptor='index'):
    """Draws a bootstrap_sample from the data.

    This function generates a bootstrap sample of RDMs resampled over
    patterns. By default every pattern is treated independently. If desired
    a descriptor name can be passed in pattern_descriptor to group patterns.

    Args:
        rdms(pyrsa.rdm.rdms.RDMs): Data to be used
        pattern_descriptor(string):
            descriptor to group the patterns by. Each group of patterns will
            be in or out of the sample as a whole

    Returns:
        pyrsa.rdm.rdms.RDMs: rdms
            subsampled dataset with equal number of pattern groups
        numpy.ndarray: pattern_idx
            sampled pattern descriptor values for subsampling other rdms

    """
    pattern_descriptor, pattern_select = \
        add_pattern_index(rdms, pattern_descriptor)
    # np.random.randint's upper bound is exclusive, so it must be
    # len(pattern_select) (not len(pattern_select) - 1) for the last
    # pattern group to be eligible
    pattern_idx = np.random.randint(0, len(pattern_select),
                                    size=len(pattern_select))
    pattern_idx = pattern_select[pattern_idx]
    rdms = rdms.subsample_pattern(pattern_descriptor,
                                  pattern_idx)
    return rdms, pattern_idx
import numpy as np
import pyrsa.model
from pyrsa.util.file_io import write_dict_hdf5
from pyrsa.util.file_io import write_dict_pkl
from pyrsa.util.file_io import read_dict_hdf5
from pyrsa.util.file_io import read_dict_pkl
class Result:
    """ Result class storing results for a set of models with the models,
    the results matrix and the noise ceiling

    Args:
        models(list of pyrsa.model.Model):
            the evaluated models
        evaluations(numpy.ndarray):
            evaluations of the models over bootstrap/crossvalidation
            format: bootstrap_samples x models x crossval & others
            such that np.mean(evaluations[i,j]) is a valid evaluation for the
            jth model on the ith bootstrap-sample
        method(String):
            the evaluation method
        cv_method(String):
            crossvalidation specification
        noise_ceiling(numpy.ndarray):
            noise ceiling such that np.mean(noise_ceiling[0]) is the lower
            bound and np.mean(noise_ceiling[1]) is the higher one.

    Attributes:
        as inputs
    """
    def __init__(self, models, evaluations, method, cv_method, noise_ceiling):
        # allow passing a single model instead of a list of models
        if isinstance(models, pyrsa.model.Model):
            models = [models]
        # fixed message: the two fragments previously concatenated to
        # "doesnot" (missing space)
        assert len(models) == evaluations.shape[1], \
            'evaluations shape does not match number of models'
        self.models = models
        self.n_model = len(models)
        self.evaluations = np.array(evaluations)
        self.method = method
        self.cv_method = cv_method
        self.noise_ceiling = np.array(noise_ceiling)

    def save(self, filename, file_type='hdf5'):
        """ saves the results into a file.

        Args:
            filename(String): path to the file
                [or opened file]
            file_type(String): Type of file to create:
                hdf5: hdf5 file
                pkl: pickle file

        Raises:
            ValueError: if file_type is not one of the supported types

        """
        result_dict = self.to_dict()
        if file_type == 'hdf5':
            write_dict_hdf5(filename, result_dict)
        elif file_type == 'pkl':
            write_dict_pkl(filename, result_dict)
        else:
            # previously an unknown file_type silently wrote nothing;
            # fail loudly instead, consistent with load_results
            raise ValueError('filetype not understood')

    def to_dict(self):
        """ Converts the RDMs object into a dict, which can be used for saving

        Returns:
            results_dict(dict): A dictionary with all the information needed
                to regenerate the object

        """
        result_dict = {}
        result_dict['evaluations'] = self.evaluations
        result_dict['noise_ceiling'] = self.noise_ceiling
        result_dict['method'] = self.method
        result_dict['cv_method'] = self.cv_method
        # models are stored under one key per model ('model_0', ...) so the
        # dict can be written to hdf5, which cannot store object lists
        result_dict['models'] = {}
        for i_model, model in enumerate(self.models):
            result_dict['models']['model_%d' % i_model] = model.to_dict()
        return result_dict
def load_results(filename, file_type=None):
    """ loads a Result object from disc

    Args:
        filename(String): path to the filelocation

    """
    # infer the file type from the extension when it was not given
    if file_type is None and isinstance(filename, str):
        if filename.endswith('.pkl'):
            file_type = 'pkl'
        elif filename.endswith('.h5') or filename.endswith('hdf5'):
            file_type = 'hdf5'
    if file_type == 'pkl':
        data_dict = read_dict_pkl(filename)
    elif file_type == 'hdf5':
        data_dict = read_dict_hdf5(filename)
    else:
        raise ValueError('filetype not understood')
    return result_from_dict(data_dict)
def result_from_dict(result_dict):
    """ recreate Results object from dictionary

    Args:
        result_dict(dict): dictionary to regenerate

    Returns:
        result(Result): the recreated object

    """
    # models are stored under sequential keys 'model_0', 'model_1', ...
    models = [
        pyrsa.model.model_from_dict(result_dict['models']['model_%d' % i])
        for i in range(len(result_dict['models']))]
    return Result(models,
                  result_dict['evaluations'],
                  result_dict['method'],
                  result_dict['cv_method'],
                  result_dict['noise_ceiling'])
from collections.abc import Iterable
import numpy as np
from pyrsa.rdm.rdms import RDMs
from pyrsa.rdm.rdms import concat
from pyrsa.data import average_dataset_by
from pyrsa.util.matrix import pairwise_contrast_sparse
def calc_rdm(dataset, method='euclidean', descriptor=None, noise=None,
             cv_descriptor=None, prior_lambda=1, prior_weight=0.1):
    """
    calculates an RDM from an input dataset

    Args:
        dataset (pyrsa.data.dataset.DatasetBase):
            The dataset the RDM is computed from
        method (String):
            a description of the dissimilarity measure (e.g. 'Euclidean')
        descriptor (String):
            obs_descriptor used to define the rows/columns of the RDM
        noise (numpy.ndarray):
            dataset.n_channel x dataset.n_channel
            precision matrix used to calculate the RDM
            used only for Mahalanobis and Crossnobis estimators
            defaults to an identity matrix, i.e. euclidean distance
        cv_descriptor (String):
            obs_descriptor which determines the cross-validation folds
            (used by the 'crossnobis' and 'poisson_cv' estimators)
        prior_lambda (float): prior mean used by the poisson estimators
        prior_weight (float): prior weight used by the poisson estimators

    Returns:
        pyrsa.rdm.rdms.RDMs: RDMs object with the one RDM

    Raises:
        NotImplementedError: if an unknown method name is passed

    """
    if isinstance(dataset, Iterable):
        # a collection of datasets: compute one RDM each and stack them
        rdms = []
        for i_dat in range(len(dataset)):
            if noise is None:
                rdms.append(calc_rdm(dataset[i_dat], method=method,
                                     descriptor=descriptor))
            elif isinstance(noise, np.ndarray) and noise.ndim == 2:
                # one shared precision matrix for all datasets
                rdms.append(calc_rdm(dataset[i_dat], method=method,
                                     descriptor=descriptor,
                                     noise=noise))
            elif isinstance(noise, Iterable):
                # one precision matrix per dataset
                rdms.append(calc_rdm(dataset[i_dat], method=method,
                                     descriptor=descriptor,
                                     noise=noise[i_dat]))
            # NOTE(review): a noise argument of any other type is silently
            # skipped here, leaving that dataset out of the result
        rdm = concat(rdms)
    else:
        if method == 'euclidean':
            rdm = calc_rdm_euclid(dataset, descriptor)
        elif method == 'correlation':
            rdm = calc_rdm_correlation(dataset, descriptor)
        elif method == 'mahalanobis':
            rdm = calc_rdm_mahalanobis(dataset, descriptor, noise)
        elif method == 'crossnobis':
            rdm = calc_rdm_crossnobis(dataset, descriptor, noise,
                                      cv_descriptor)
        elif method == 'poisson':
            rdm = calc_rdm_poisson(dataset, descriptor,
                                   prior_lambda=prior_lambda,
                                   prior_weight=prior_weight)
        elif method == 'poisson_cv':
            rdm = calc_rdm_poisson_cv(dataset, descriptor,
                                      cv_descriptor=cv_descriptor,
                                      prior_lambda=prior_lambda,
                                      prior_weight=prior_weight)
        else:
            # raise an instance with a helpful message instead of the bare
            # class (the old code was `raise(NotImplementedError)`)
            raise NotImplementedError(
                'calc_rdm: unknown dissimilarity method: %s' % method)
    return rdm
def calc_rdm_movie(dataset, method='euclidean', descriptor=None, noise=None,
                   cv_descriptor=None, prior_lambda=1, prior_weight=0.1,
                   time_descriptor='time', bins=None):
    """
    calculates an RDM movie from an input TemporalDataset

    Args:
        dataset (pyrsa.data.dataset.TemporalDataset):
            The dataset the RDM is computed from
        method (String):
            a description of the dissimilarity measure (e.g. 'Euclidean')
        descriptor (String):
            obs_descriptor used to define the rows/columns of the RDM
        noise (numpy.ndarray):
            dataset.n_channel x dataset.n_channel
            precision matrix used to calculate the RDM
            used only for Mahalanobis and Crossnobis estimators
            defaults to an identity matrix, i.e. euclidean distance
        time_descriptor (String): descriptor key that points to the time
            dimension in dataset.time_descriptors. Defaults to 'time'.
        bins (array-like): list of bins, with bins[i] containing the vector
            of time-points for the i-th bin. Defaults to no binning.

    Returns:
        pyrsa.rdm.rdms.RDMs: RDMs object with RDM movie

    """
    if isinstance(dataset, Iterable):
        # collection of datasets: one movie per dataset, stacked
        collected = []
        for i_dat, _ in enumerate(dataset):
            if noise is None:
                collected.append(calc_rdm_movie(
                    dataset[i_dat], method=method, descriptor=descriptor))
            elif isinstance(noise, np.ndarray) and noise.ndim == 2:
                collected.append(calc_rdm_movie(
                    dataset[i_dat], method=method, descriptor=descriptor,
                    noise=noise))
            elif isinstance(noise, Iterable):
                collected.append(calc_rdm_movie(
                    dataset[i_dat], method=method, descriptor=descriptor,
                    noise=noise[i_dat]))
        rdm = concat(collected)
    else:
        # optionally bin time first, then split into one dataset per
        # time-point / time-bin
        if bins is None:
            splited_data = dataset.split_time(time_descriptor)
            time = dataset.time_descriptors[time_descriptor]
        else:
            binned_data = dataset.bin_time(time_descriptor, bins)
            splited_data = binned_data.split_time(time_descriptor)
            time = binned_data.time_descriptors[time_descriptor]
        # one static RDM per time-point, computed with the requested method
        per_time = [
            calc_rdm(chunk.convert_to_dataset(time_descriptor),
                     method=method, descriptor=descriptor, noise=noise,
                     cv_descriptor=cv_descriptor,
                     prior_lambda=prior_lambda, prior_weight=prior_weight)
            for chunk in splited_data]
        rdm = concat(per_time)
        rdm.rdm_descriptors[time_descriptor] = time
    return rdm
def calc_rdm_euclid(dataset, descriptor=None):
    """
    calculates an RDM from an input dataset using euclidean distance

    If multiple instances of the same condition are found in the dataset
    they are averaged.

    Args:
        dataset (pyrsa.data.DatasetBase):
            The dataset the RDM is computed from
        descriptor (String):
            obs_descriptor used to define the rows/columns of the RDM
            defaults to one row/column per row in the dataset

    Returns:
        pyrsa.rdm.rdms.RDMs: RDMs object with the one RDM

    """
    meas, conds, descriptor = _parse_input(dataset, descriptor)
    pair_diffs = _calc_pairwise_differences(meas)
    # squared euclidean distance, normalized by the number of channels
    dists = np.einsum('ij,ij->i', pair_diffs, pair_diffs) / meas.shape[1]
    rdm_obj = RDMs(dissimilarities=np.array([dists]),
                   dissimilarity_measure='euclidean',
                   descriptors=dataset.descriptors)
    rdm_obj.pattern_descriptors[descriptor] = conds
    return rdm_obj
def calc_rdm_correlation(dataset, descriptor=None):
    """
    calculates an RDM from an input dataset using correlation distance

    If multiple instances of the same condition are found in the dataset
    they are averaged.

    Args:
        dataset (pyrsa.data.DatasetBase):
            The dataset the RDM is computed from
        descriptor (String):
            obs_descriptor used to define the rows/columns of the RDM
            defaults to one row/column per row in the dataset

    Returns:
        pyrsa.rdm.rdms.RDMs: RDMs object with the one RDM

    """
    meas, conds, descriptor = _parse_input(dataset, descriptor)
    # center each pattern and scale it to unit norm, so the inner product
    # of two patterns equals their Pearson correlation
    centered = meas - meas.mean(axis=1, keepdims=True)
    norms = np.sqrt(np.einsum('ij,ij->i', centered, centered))
    centered = centered / norms[:, None]
    dists = 1 - np.einsum('ik,jk', centered, centered)
    rdm_obj = RDMs(dissimilarities=np.array([dists]),
                   dissimilarity_measure='correlation',
                   descriptors=dataset.descriptors)
    rdm_obj.pattern_descriptors[descriptor] = conds
    return rdm_obj
def calc_rdm_mahalanobis(dataset, descriptor=None, noise=None):
    """
    calculates an RDM from an input dataset using mahalanobis distance

    If multiple instances of the same condition are found in the dataset
    they are averaged.

    Args:
        dataset (pyrsa.data.dataset.DatasetBase):
            The dataset the RDM is computed from
        descriptor (String):
            obs_descriptor used to define the rows/columns of the RDM
            defaults to one row/column per row in the dataset
        noise (numpy.ndarray):
            dataset.n_channel x dataset.n_channel
            precision matrix used to calculate the RDM
            default: identity matrix, i.e. euclidean distance

    Returns:
        pyrsa.rdm.rdms.RDMs: RDMs object with the one RDM

    """
    if noise is None:
        # identity precision reduces to the squared euclidean distance
        return calc_rdm_euclid(dataset, descriptor)
    meas, conds, descriptor = _parse_input(dataset, descriptor)
    noise = _check_noise(noise, dataset.n_channel)
    # distance for a pair is diff @ precision @ diff:
    # build all difference vectors, weight them by the precision, then
    # take the row-wise inner product
    pair_diffs = _calc_pairwise_differences(meas)
    weighted = (noise @ pair_diffs.T).T
    dists = np.einsum('ij,ij->i', pair_diffs, weighted) / meas.shape[1]
    rdm_obj = RDMs(dissimilarities=np.array([dists]),
                   dissimilarity_measure='Mahalanobis',
                   descriptors=dataset.descriptors)
    rdm_obj.pattern_descriptors[descriptor] = conds
    rdm_obj.descriptors['noise'] = noise
    return rdm_obj
def calc_rdm_crossnobis(dataset, descriptor, noise=None,
                        cv_descriptor=None):
    """
    calculates an RDM from an input dataset using Cross-nobis distance
    This performs leave one out crossvalidation over the cv_descriptor.

    As the minimum input provide a dataset and a descriptor-name to
    define the rows & columns of the RDM.
    You may pass a noise precision. If you don't an identity is assumed.
    Also a cv_descriptor can be passed to define the crossvalidation folds.
    It is recommended to do this, to assure correct calculations. If you do
    not, this function infers a split in order of the dataset, which is
    guaranteed to fail if there are any unbalances.

    This function also accepts a list of noise precision matricies.
    It is then assumed that this is the precision of the mean from
    the corresponding crossvalidation fold, i.e. if multiple measurements
    enter a fold, please compute the resulting noise precision in advance!

    To assert equal ordering in the folds the dataset is initially sorted
    according to the descriptor used to define the patterns.

    Args:
        dataset (pyrsa.data.dataset.DatasetBase):
            The dataset the RDM is computed from
        descriptor (String):
            obs_descriptor used to define the rows/columns of the RDM
            defaults to one row/column per row in the dataset
        noise (numpy.ndarray):
            dataset.n_channel x dataset.n_channel
            precision matrix used to calculate the RDM
            default: identity matrix, i.e. euclidean distance
        cv_descriptor (String):
            obs_descriptor which determines the cross-validation folds

    Returns:
        pyrsa.rdm.rdms.RDMs: RDMs object with the one RDM

    """
    noise = _check_noise(noise, dataset.n_channel)
    if descriptor is None:
        raise ValueError('descriptor must be a string! Crossvalidation' +
                         'requires multiple measurements to be grouped')
    if cv_descriptor is None:
        # no folds given: infer one fold per repetition, in dataset order
        cv_desc = _gen_default_cv_descriptor(dataset, descriptor)
        dataset.obs_descriptors['cv_desc'] = cv_desc
        cv_descriptor = 'cv_desc'
    # sort so every fold presents the conditions in the same order
    dataset.sort_by(descriptor)
    cv_folds = np.unique(np.array(dataset.obs_descriptors[cv_descriptor]))
    # NOTE(review): `weights` is filled below but never used afterwards —
    # presumably intended for a weighted average across folds; confirm
    weights = []
    rdms = []
    if noise is None or (isinstance(noise, np.ndarray) and noise.ndim == 2):
        # single (or no) precision matrix: leave-one-fold-out loop,
        # train/test means are crossed for each condition pair
        for i_fold in range(len(cv_folds)):
            fold = cv_folds[i_fold]
            data_test = dataset.subset_obs(cv_descriptor, fold)
            data_train = dataset.subset_obs(cv_descriptor,
                                            np.setdiff1d(cv_folds, fold))
            measurements_train, _, _ = \
                average_dataset_by(data_train, descriptor)
            measurements_test, _, _ = \
                average_dataset_by(data_test, descriptor)
            n_cond = measurements_train.shape[0]
            rdm = np.empty(int(n_cond * (n_cond-1) / 2))
            k = 0
            for i_cond in range(n_cond - 1):
                for j_cond in range(i_cond + 1, n_cond):
                    diff_train = measurements_train[i_cond] \
                        - measurements_train[j_cond]
                    diff_test = measurements_test[i_cond] \
                        - measurements_test[j_cond]
                    # NOTE(review): unlike _calc_rdm_crossnobis_single,
                    # this branch does not divide by n_channel — confirm
                    # whether the asymmetry is intended
                    if noise is None:
                        rdm[k] = np.sum(diff_train * diff_test)
                    else:
                        rdm[k] = np.sum(diff_train
                                        * np.matmul(noise, diff_test))
                    k += 1
            rdms.append(rdm)
            weights.append(data_test.n_obs)
    else:  # a list of noises was provided
        # per-fold precisions: average condition means and variances per
        # fold, then combine each fold pair with the pooled precision
        measurements = []
        variances = []
        for i_fold in range(len(cv_folds)):
            data = dataset.subset_obs(cv_descriptor, cv_folds[i_fold])
            measurements.append(average_dataset_by(data, descriptor)[0])
            variances.append(np.linalg.inv(noise[i_fold]))
        for i_fold in range(len(cv_folds)):
            for j_fold in range(i_fold + 1, len(cv_folds)):
                if i_fold != j_fold:
                    rdm = _calc_rdm_crossnobis_single(
                        measurements[i_fold], measurements[j_fold],
                        np.linalg.inv(variances[i_fold]
                                      + variances[j_fold]))
                    rdms.append(rdm)
    rdms = np.array(rdms)
    # NOTE(review): this sums across folds rather than averaging —
    # verify against the intended crossnobis definition
    rdm = np.einsum('ij->j', rdms)
    rdm = RDMs(dissimilarities=np.array([rdm]),
               dissimilarity_measure='crossnobis',
               descriptors=dataset.descriptors)
    _, desc, _ = average_dataset_by(dataset, descriptor)
    rdm.pattern_descriptors[descriptor] = desc
    rdm.descriptors['noise'] = noise
    rdm.descriptors['cv_descriptor'] = cv_descriptor
    return rdm
def calc_rdm_poisson(dataset, descriptor=None, prior_lambda=1,
                     prior_weight=0.1):
    """
    calculates an RDM from an input dataset using the symmetrized
    KL-divergence assuming a poisson distribution.

    If multiple instances of the same condition are found in the dataset
    they are averaged.

    Args:
        dataset (pyrsa.data.DatasetBase):
            The dataset the RDM is computed from
        descriptor (String):
            obs_descriptor used to define the rows/columns of the RDM
            defaults to one row/column per row in the dataset

    Returns:
        pyrsa.rdm.rdms.RDMs: RDMs object with the one RDM

    """
    meas, conds, descriptor = _parse_input(dataset, descriptor)
    # regularize the rates with the prior and express them in prior units
    regularized = (meas + prior_lambda * prior_weight) \
        / (prior_lambda * prior_weight)
    # symmetrized KL for poisson rates: (a - b) * (log a - log b)
    rate_diffs = _calc_pairwise_differences(regularized)
    log_diffs = _calc_pairwise_differences(np.log(regularized))
    dists = np.einsum('ij,ij->i', rate_diffs, log_diffs) \
        / regularized.shape[1]
    rdm_obj = RDMs(dissimilarities=np.array([dists]),
                   dissimilarity_measure='poisson',
                   descriptors=dataset.descriptors)
    rdm_obj.pattern_descriptors[descriptor] = conds
    return rdm_obj
def calc_rdm_poisson_cv(dataset, descriptor=None, prior_lambda=1,
                        prior_weight=0.1, cv_descriptor=None):
    """
    calculates an RDM from an input dataset using the crossvalidated
    symmetrized KL-divergence assuming a poisson distribution

    To assert equal ordering in the folds the dataset is initially sorted
    according to the descriptor used to define the patterns.

    Args:
        dataset (pyrsa.data.DatasetBase):
            The dataset the RDM is computed from
        descriptor (String):
            obs_descriptor used to define the rows/columns of the RDM
            defaults to one row/column per row in the dataset
        cv_descriptor (str): The descriptor that indicates the folds
            to use for crossvalidation

    Returns:
        pyrsa.rdm.rdms.RDMs: RDMs object with the one RDM

    """
    if descriptor is None:
        raise ValueError('descriptor must be a string! Crossvalidation' +
                         'requires multiple measurements to be grouped')
    if cv_descriptor is None:
        # no folds given: infer one fold per repetition, in dataset order
        cv_desc = _gen_default_cv_descriptor(dataset, descriptor)
        dataset.obs_descriptors['cv_desc'] = cv_desc
        cv_descriptor = 'cv_desc'
    # sort so every fold presents the conditions in the same order
    dataset.sort_by(descriptor)
    cv_folds = np.unique(np.array(dataset.obs_descriptors[cv_descriptor]))
    # NOTE(review): `rdm` is reassigned on every iteration, so only the
    # final fold's result survives the loop — presumably the folds were
    # meant to be accumulated/averaged; confirm against the reference
    # implementation
    for i_fold in range(len(cv_folds)):
        fold = cv_folds[i_fold]
        data_test = dataset.subset_obs(cv_descriptor, fold)
        data_train = dataset.subset_obs(cv_descriptor,
                                        np.setdiff1d(cv_folds, fold))
        measurements_train, _, _ = average_dataset_by(data_train, descriptor)
        measurements_test, _, _ = average_dataset_by(data_test, descriptor)
        # regularize rates with the prior (same transform as calc_rdm_poisson)
        measurements_train = (measurements_train
                              + prior_lambda * prior_weight) \
            / (prior_lambda * prior_weight)
        measurements_test = (measurements_test
                             + prior_lambda * prior_weight) \
            / (prior_lambda * prior_weight)
        # cross the rate differences (train) with log-rate differences (test)
        diff = _calc_pairwise_differences(measurements_train)
        diff_log = _calc_pairwise_differences(np.log(measurements_test))
        rdm = np.einsum('ij,ij->i', diff, diff_log) \
            / measurements_train.shape[1]
    rdm = RDMs(dissimilarities=np.array([rdm]),
               dissimilarity_measure='poisson_cv',
               descriptors=dataset.descriptors)
    _, desc, _ = average_dataset_by(dataset, descriptor)
    rdm.pattern_descriptors[descriptor] = desc
    return rdm
def _calc_rdm_crossnobis_single_sparse(measurements1, measurements2, noise):
    """ crossnobis distances between one pair of measurement partitions,
    built with a sparse pairwise-contrast matrix.

    Each row of `c_matrix @ measurements` is the difference between one
    pair of condition patterns; the distance is the precision-weighted
    inner product of the two partitions' difference vectors, normalized by
    the number of channels.
    """
    c_matrix = pairwise_contrast_sparse(np.arange(measurements1.shape[0]))
    diff_1 = c_matrix @ measurements1
    diff_2 = c_matrix @ measurements2
    diff_2 = noise @ diff_2.transpose()
    rdm = np.einsum('kj,jk->k', diff_1, diff_2) / measurements1.shape[1]
    return rdm
def _calc_rdm_crossnobis_single(measurements1, measurements2, noise):
    """ crossnobis distances between one pair of measurement partitions:
    precision-weighted inner product of the two partitions' pairwise
    difference vectors, normalized by the number of channels.
    """
    delta_a = _calc_pairwise_differences(measurements1)
    delta_b = noise @ _calc_pairwise_differences(measurements2).T
    return np.einsum('kj,jk->k', delta_a, delta_b) / measurements1.shape[1]
def _gen_default_cv_descriptor(dataset, descriptor):
    """ generates a default cv_descriptor for crossnobis

    This assumes that the first occurence of each descriptor value forms
    the first group, the second occurence forms the second group, etc.
    """
    desc = dataset.obs_descriptors[descriptor]
    values, counts = np.unique(desc, return_counts=True)
    # a balanced design is required: every pattern must repeat equally often
    assert np.all(counts == counts[0]), (
        'cv_descriptor generation failed:\n'
        + 'different number of observations per pattern')
    cv_descriptor = np.zeros_like(desc)
    for value in values:
        # number the repetitions of each pattern 0, 1, ..., n_repeats - 1
        cv_descriptor[desc == value] = np.arange(counts[0])
    return cv_descriptor
def _calc_pairwise_differences(measurements):
    """ computes the difference between all pairs of rows of `measurements`

    Args:
        measurements (numpy.ndarray): n_cond x n_channel matrix

    Returns:
        numpy.ndarray: (n_cond * (n_cond - 1) / 2) x n_channel matrix of
            row differences, ordered (0,1), (0,2), ..., (1,2), ... like the
            upper triangle of a pairwise matrix

    """
    # vectorized replacement for the previous O(n^2) python loop;
    # triu_indices enumerates pairs in the same row-major order
    rows, cols = np.triu_indices(measurements.shape[0], k=1)
    # cast to float to match the old implementation, which filled a
    # float buffer created by np.zeros
    return np.asarray(measurements[rows] - measurements[cols], dtype=float)
def _parse_input(dataset, descriptor):
    """ resolves the measurements, condition labels and descriptor name
    used to build an RDM from a dataset.

    Without a descriptor each dataset row becomes its own condition,
    labelled by its row index under the name 'pattern'; otherwise rows
    sharing a descriptor value are averaged.
    """
    if descriptor is None:
        measurements = dataset.measurements
        return measurements, np.arange(measurements.shape[0]), 'pattern'
    measurements, desc, _ = average_dataset_by(dataset, descriptor)
    return measurements, desc, descriptor
def _check_noise(noise, n_channel):
    """
    checks that a noise pattern is a matrix with correct dimension
    n_channel x n_channel

    Args:
        noise: noise input to be checked
            (None, a 2d array, a dict of noises, or a sequence of noises)

    Returns:
        noise(np.ndarray): n_channel x n_channel noise precision matrix

    Raises:
        ValueError: if noise is of an unsupported type

    """
    if noise is None:
        pass
    elif isinstance(noise, np.ndarray) and noise.ndim == 2:
        assert np.all(noise.shape == (n_channel, n_channel))
    elif isinstance(noise, dict):
        # the dict case must be tested before Iterable: dicts are Iterable,
        # so the previous ordering made this branch unreachable and sent
        # dicts into the integer-indexing branch below (KeyError)
        for key in noise.keys():
            noise[key] = _check_noise(noise[key], n_channel)
    elif isinstance(noise, Iterable):
        # check every element of a sequence of noises in place
        for i in range(len(noise)):
            noise[i] = _check_noise(noise[i], n_channel)
    else:
        raise ValueError('noise(s) must have shape n_channel x n_channel')
    return noise
import numpy as np
from scipy.stats import rankdata
from pyrsa.util.rdm_utils import batch_to_vectors
from pyrsa.util.rdm_utils import batch_to_matrices
from pyrsa.util.descriptor_utils import format_descriptor
from pyrsa.util.descriptor_utils import bool_index
from pyrsa.util.descriptor_utils import subset_descriptor
from pyrsa.util.descriptor_utils import check_descriptor_length_error
from pyrsa.util.descriptor_utils import append_descriptor
from pyrsa.util.data_utils import extract_dict
from collections.abc import Iterable
from pyrsa.util.file_io import write_dict_hdf5
from pyrsa.util.file_io import write_dict_pkl
from pyrsa.util.file_io import read_dict_hdf5
from pyrsa.util.file_io import read_dict_pkl
class RDMs:
    """ RDMs class

    Args:
        dissimilarities (numpy.ndarray):
            either a 2d np-array (n_rdm x vectorform of dissimilarities)
            or a 3d np-array (n_rdm x n_cond x n_cond)
        dissimilarity_measure (String):
            a description of the dissimilarity measure (e.g. 'Euclidean')
        descriptors (dict):
            descriptors with 1 value per RDMs object
        rdm_descriptors (dict):
            descriptors with 1 value per RDM
        pattern_descriptors (dict):
            descriptors with 1 value per RDM column

    Attributes:
        n_rdm(int): number of rdms
        n_cond(int): number of patterns
    """
    def __init__(self, dissimilarities,
                 dissimilarity_measure=None,
                 descriptors=None,
                 rdm_descriptors=None,
                 pattern_descriptors=None):
        # dissimilarities are stored internally in vector form;
        # batch_to_vectors accepts both vector and matrix input
        self.dissimilarities, self.n_rdm, self.n_cond = \
            batch_to_vectors(dissimilarities)
        if descriptors is None:
            self.descriptors = {}
        else:
            self.descriptors = descriptors
        if rdm_descriptors is None:
            self.rdm_descriptors = {}
        else:
            # every rdm_descriptor must provide one entry per RDM
            check_descriptor_length_error(rdm_descriptors,
                                          'rdm_descriptors',
                                          self.n_rdm)
            self.rdm_descriptors = rdm_descriptors
        if pattern_descriptors is None:
            self.pattern_descriptors = {}
        else:
            # every pattern_descriptor must provide one entry per pattern
            check_descriptor_length_error(pattern_descriptors,
                                          'pattern_descriptors',
                                          self.n_cond)
            self.pattern_descriptors = pattern_descriptors
        # an 'index' descriptor is always available as the default handle
        # for subset/subsample selection
        if 'index' not in self.pattern_descriptors.keys():
            self.pattern_descriptors['index'] = np.arange(self.n_cond)
        if 'index' not in self.rdm_descriptors.keys():
            self.rdm_descriptors['index'] = np.arange(self.n_rdm)
        self.dissimilarity_measure = dissimilarity_measure
    def __repr__(self):
        """
        defines string which is printed for the object
        """
        return (f'pyrsa.rdm.{self.__class__.__name__}(\n'
                f'dissimilarity_measure = \n{self.dissimilarity_measure}\n'
                f'dissimilarities = \n{self.dissimilarities}\n'
                f'descriptors = \n{self.descriptors}\n'
                f'rdm_descriptors = \n{self.rdm_descriptors}\n'
                f'pattern_descriptors = \n{self.pattern_descriptors}\n'
                )
    def __str__(self):
        """
        defines the output of print
        """
        string_desc = format_descriptor(self.descriptors)
        rdm_desc = format_descriptor(self.rdm_descriptors)
        pattern_desc = format_descriptor(self.pattern_descriptors)
        # only the first RDM is shown, in matrix form
        diss = self.get_matrices()[0]
        return (f'pyrsa.rdm.{self.__class__.__name__}\n'
                f'{self.n_rdm} RDM(s) over {self.n_cond} conditions\n\n'
                f'dissimilarity_measure = \n{self.dissimilarity_measure}\n\n'
                f'dissimilarities[0] = \n{diss}\n\n'
                f'descriptors: \n{string_desc}\n'
                f'rdm_descriptors: \n{rdm_desc}\n'
                f'pattern_descriptors: \n{pattern_desc}\n'
                )
    def __getitem__(self, idx):
        """
        allows indexing with []
        and iterating over RDMs with `for rdm in rdms:`
        """
        idx = np.array(idx)
        # reshape keeps a 2d (n_selected x n_pairs) layout even for a
        # single index
        dissimilarities = self.dissimilarities[idx].reshape(
            -1, self.dissimilarities.shape[1])
        rdm_descriptors = subset_descriptor(self.rdm_descriptors, idx)
        # NOTE(review): descriptors and pattern_descriptors are shared (not
        # copied) with the new object — mutating them affects both
        rdms = RDMs(dissimilarities,
                    dissimilarity_measure=self.dissimilarity_measure,
                    descriptors=self.descriptors,
                    rdm_descriptors=rdm_descriptors,
                    pattern_descriptors=self.pattern_descriptors)
        return rdms
    def __len__(self) -> int:
        """
        The number of RDMs in this stack.
        Together with __getitem__, allows `reversed(rdms)`.
        """
        return self.n_rdm
    def get_vectors(self):
        """ Returns RDMs as np.ndarray with each RDM as a vector

        Returns:
            numpy.ndarray: RDMs as a matrix with one row per RDM

        """
        return self.dissimilarities
    def get_matrices(self):
        """ Returns RDMs as np.ndarray with each RDM as a matrix

        Returns:
            numpy.ndarray: RDMs as a 3-Tensor with one matrix per RDM

        """
        matrices, _, _ = batch_to_matrices(self.dissimilarities)
        return matrices
    def subset_pattern(self, by, value):
        """ Returns a smaller RDMs with patterns with certain descriptor values

        Args:
            by(String): the descriptor by which the subset selection
                is made from pattern_descriptors
            value: the value by which the subset selection is made
                from pattern_descriptors

        Returns:
            RDMs object, with fewer patterns

        """
        if by is None:
            by = 'index'
        selection = bool_index(self.pattern_descriptors[by], value)
        # select the same patterns on both axes of the square matrices
        dissimilarities = self.get_matrices()[:, selection][:, :, selection]
        descriptors = self.descriptors
        pattern_descriptors = extract_dict(
            self.pattern_descriptors, selection)
        rdm_descriptors = self.rdm_descriptors
        dissimilarity_measure = self.dissimilarity_measure
        rdms = RDMs(dissimilarities=dissimilarities,
                    descriptors=descriptors,
                    rdm_descriptors=rdm_descriptors,
                    pattern_descriptors=pattern_descriptors,
                    dissimilarity_measure=dissimilarity_measure)
        return rdms
    def subsample_pattern(self, by, value):
        """ Returns a subsampled RDMs with repetitions if values are repeated

        This function now generates Nans where the off-diagonal 0s would
        appear. These values are trivial to predict for models and thus
        need to be marked and excluded from the evaluation.

        Args:
            by(String): the descriptor by which the subset selection
                is made from descriptors
            value: the value by which the subset selection is made
                from descriptors

        Returns:
            RDMs object, with subsampled patterns

        """
        if by is None:
            by = 'index'
        if (
                type(value) is list or
                type(value) is tuple or
                type(value) is np.ndarray):
            # gather matching pattern positions per requested value, so
            # repeated values yield repeated patterns
            desc = self.pattern_descriptors[by]
            selection = [np.asarray(desc == i).nonzero()[0]
                         for i in value]
            selection = np.concatenate(selection)
        else:
            # NOTE(review): this matches against rdm_descriptors although a
            # pattern descriptor is being selected — presumably this should
            # be self.pattern_descriptors[by]; confirm against callers
            selection = np.where(self.rdm_descriptors[by] == value)
            selection = np.sort(selection)
        dissimilarities = self.get_matrices()
        # mark the diagonal as NaN so that repeated patterns do not produce
        # trivially-predictable off-diagonal zeros after subsampling
        for i_rdm in range(self.n_rdm):
            np.fill_diagonal(dissimilarities[i_rdm], np.nan)
        selection = np.sort(selection)
        dissimilarities = dissimilarities[:, selection][:, :, selection]
        descriptors = self.descriptors
        pattern_descriptors = extract_dict(
            self.pattern_descriptors, selection)
        rdm_descriptors = self.rdm_descriptors
        dissimilarity_measure = self.dissimilarity_measure
        rdms = RDMs(dissimilarities=dissimilarities,
                    descriptors=descriptors,
                    rdm_descriptors=rdm_descriptors,
                    pattern_descriptors=pattern_descriptors,
                    dissimilarity_measure=dissimilarity_measure)
        return rdms
    def subset(self, by, value):
        """ Returns a set of fewer RDMs matching descriptor values

        Args:
            by(String): the descriptor by which the subset selection
                is made from descriptors
            value: the value by which the subset selection is made
                from descriptors

        Returns:
            RDMs object, with fewer RDMs

        """
        if by is None:
            by = 'index'
        selection = bool_index(self.rdm_descriptors[by], value)
        dissimilarities = self.dissimilarities[selection, :]
        descriptors = self.descriptors
        pattern_descriptors = self.pattern_descriptors
        rdm_descriptors = extract_dict(self.rdm_descriptors, selection)
        dissimilarity_measure = self.dissimilarity_measure
        rdms = RDMs(dissimilarities=dissimilarities,
                    descriptors=descriptors,
                    rdm_descriptors=rdm_descriptors,
                    pattern_descriptors=pattern_descriptors,
                    dissimilarity_measure=dissimilarity_measure)
        return rdms
    def subsample(self, by, value):
        """ Returns a subsampled RDMs with repetitions if values are repeated

        Args:
            by(String): the descriptor by which the subset selection
                is made from descriptors
            value: the value by which the subset selection is made
                from descriptors

        Returns:
            RDMs object, with subsampled RDMs

        """
        if by is None:
            by = 'index'
        if (
                type(value) is list or
                type(value) is tuple or
                type(value) is np.ndarray):
            # gather matching RDM positions per requested value, so
            # repeated values yield repeated RDMs
            selection = [np.asarray(self.rdm_descriptors[by] == i).nonzero()[0]
                         for i in value]
            selection = np.concatenate(selection)
        else:
            # NOTE(review): np.where returns a tuple of arrays here, which
            # numpy fancy-indexing below tolerates for a single axis
            selection = np.where(self.rdm_descriptors[by] == value)
        dissimilarities = self.dissimilarities[selection, :]
        descriptors = self.descriptors
        pattern_descriptors = self.pattern_descriptors
        rdm_descriptors = extract_dict(self.rdm_descriptors, selection)
        dissimilarity_measure = self.dissimilarity_measure
        rdms = RDMs(dissimilarities=dissimilarities,
                    descriptors=descriptors,
                    rdm_descriptors=rdm_descriptors,
                    pattern_descriptors=pattern_descriptors,
                    dissimilarity_measure=dissimilarity_measure)
        return rdms
    def append(self, rdm):
        """ appends an rdm to the object
        The rdm should have the same shape and type as this object.
        Its pattern_descriptor and descriptor are ignored

        Args:
            rdm(pyrsa.rdm.RDMs): the rdm to append

        Returns:

        """
        assert isinstance(rdm, RDMs), 'appended rdm should be an RDMs'
        assert rdm.n_cond == self.n_cond, 'appended rdm had wrong shape'
        assert rdm.dissimilarity_measure == self.dissimilarity_measure, \
            'appended rdm had wrong dissimilarity measure'
        # stack the dissimilarity vectors and extend the per-RDM descriptors
        self.dissimilarities = np.concatenate((
            self.dissimilarities, rdm.dissimilarities), axis=0)
        self.rdm_descriptors = append_descriptor(self.rdm_descriptors,
                                                 rdm.rdm_descriptors)
        self.n_rdm = self.n_rdm + rdm.n_rdm
    def save(self, filename, file_type='hdf5'):
        """ saves the RDMs object into a file

        Args:
            filename(String): path to file to save to
                [or opened file]
            file_type(String): Type of file to create:
                hdf5: hdf5 file
                pkl: pickle file

        """
        rdm_dict = self.to_dict()
        # NOTE(review): an unrecognized file_type silently writes nothing;
        # consider raising ValueError like load_rdm does
        if file_type == 'hdf5':
            write_dict_hdf5(filename, rdm_dict)
        elif file_type == 'pkl':
            write_dict_pkl(filename, rdm_dict)
    def to_dict(self):
        """ converts the object into a dictionary, which can be saved to disk

        Returns:
            rdm_dict(dict): dictionary containing all information required to
                recreate the RDMs object

        """
        rdm_dict = {}
        rdm_dict['dissimilarities'] = self.dissimilarities
        rdm_dict['descriptors'] = self.descriptors
        rdm_dict['rdm_descriptors'] = self.rdm_descriptors
        rdm_dict['pattern_descriptors'] = self.pattern_descriptors
        rdm_dict['dissimilarity_measure'] = self.dissimilarity_measure
        return rdm_dict
    def reorder(self, new_order):
        """Reorder the patterns according to the index in new_order

        Args:
            new_order (numpy.ndarray): new order of patterns,
                vector of length equal to the number of patterns
        """
        matrices = self.get_matrices()
        # permute rows and columns of every RDM matrix simultaneously
        matrices = matrices[(slice(None),) + np.ix_(new_order, new_order)]
        self.dissimilarities = batch_to_vectors(matrices)[0]
        # keep the pattern descriptors aligned with the new order
        for dname, descriptors in self.pattern_descriptors.items():
            self.pattern_descriptors[dname] = descriptors[new_order]
    def sort_by(self, **kwargs):
        """Reorder the patterns by sorting a descriptor

        Pass keyword arguments that correspond to descriptors,
        with value 'alpha'.

        Example:
            Sorts the condition descriptor alphabetically:

            `rdms.sort(condition='alpha')`

        Raises:
            ValueError: Raised if the method chosen is not implemented
        """
        # 'alpha' is currently the only supported sorting method
        for dname, method in kwargs.items():
            if method == 'alpha':
                descriptor = self.pattern_descriptors[dname]
                self.reorder(np.argsort(descriptor))
            else:
                raise ValueError(f'Unknown sorting method: {method}')
def rdms_from_dict(rdm_dict):
    """Create an RDMs object from a dictionary.

    Inverse of ``RDMs.to_dict``.

    Args:
        rdm_dict(dict): dictionary with information

    Returns:
        rdms(RDMs): the regenerated RDMs object
    """
    return RDMs(
        dissimilarities=rdm_dict['dissimilarities'],
        descriptors=rdm_dict['descriptors'],
        rdm_descriptors=rdm_dict['rdm_descriptors'],
        pattern_descriptors=rdm_dict['pattern_descriptors'],
        dissimilarity_measure=rdm_dict['dissimilarity_measure'])
def load_rdm(filename, file_type=None):
    """Load an RDMs object from disk.

    Args:
        filename(String): path to file to load
        file_type(String): 'hdf5' or 'pkl'; inferred from the filename
            extension when None

    Raises:
        ValueError: if the file type cannot be determined
    """
    if file_type is None and isinstance(filename, str):
        # infer the format from the filename extension
        if filename.endswith('.pkl'):
            file_type = 'pkl'
        elif filename.endswith('.h5') or filename.endswith('hdf5'):
            file_type = 'hdf5'
    if file_type == 'hdf5':
        return rdms_from_dict(read_dict_hdf5(filename))
    if file_type == 'pkl':
        return rdms_from_dict(read_dict_pkl(filename))
    raise ValueError('filetype not understood')
def rank_transform(rdms, method='average'):
    """Apply a rank transform and generate a new RDMs object.

    Assigns a rank to each dissimilarity estimate in the RDM, handling
    rank ties with the chosen method, and stores the ranks as the new
    dissimilarity estimates. All non-diagonal entries of the RDM will then
    range from 1 to (n_dim**2 - n_dim) / 2 for an n_dim x n_dim RDM.

    Args:
        rdms(RDMs): RDMs object
        method(String): how ranks are assigned to equal values;
            one of 'average', 'min', 'max', 'dense', 'ordinal'

    Returns:
        rdms_new(RDMs): RDMs object with rank transformed dissimilarities
    """
    vectors = rdms.get_vectors()
    ranked = np.array(
        [rankdata(vectors[idx], method=method) for idx in range(rdms.n_rdm)])
    return RDMs(ranked,
                dissimilarity_measure=rdms.dissimilarity_measure,
                descriptors=rdms.descriptors,
                rdm_descriptors=rdms.rdm_descriptors,
                pattern_descriptors=rdms.pattern_descriptors)
def concat(rdms):
    """Concatenate RDMs objects.

    Requires that the rdms have the same shape. The descriptors and
    pattern descriptors are taken from the first rdms object; for
    rdm_descriptors concatenation is tried. The rdm index is
    reinitialized.

    The first object is deep-copied before appending, so the caller's
    input objects are left unmodified (previously ``rdms[0]`` was
    mutated in place).

    Args:
        rdms(list of pyrsa.rdm.RDMs): RDMs objects to be concatenated

    Returns:
        pyrsa.rdm.RDMs: concatenated rdms object
    """
    from copy import deepcopy
    assert isinstance(rdms[0], RDMs), \
        'rdms should be a list of RDMs objects'
    rdm = deepcopy(rdms[0])
    for rdm_new in rdms[1:]:
        rdm.append(rdm_new)
    return rdm
def permute_rdms(rdms, p=None):
    """Permute rows, columns and corresponding pattern descriptors
    of RDM matrices according to a permutation vector.

    Args:
        rdms(pyrsa.rdm.RDMs): RDMs object to permute
        p (numpy.ndarray):
            permutation vector (values must be unique integers
            from 0 to n_cond of RDM matrix).
            If p = None, a random permutation vector is created.

    Returns:
        rdm_p(pyrsa.rdm.RDMs): the rdm object with a permuted matrix
            and pattern descriptors
    """
    if p is None:
        p = np.random.permutation(rdms.n_cond)
        print('No permutation vector specified,'
              + ' performing random permutation.')
    # accept any integer dtype: comparing p.dtype == 'int' is platform
    # dependent (int32 vs int64) and wrongly rejected valid vectors
    assert np.issubdtype(p.dtype, np.integer), \
        "permutation vector must have integer entries."
    assert min(p) == 0 and max(p) == rdms.n_cond-1, \
        "permutation vector must have entries ranging from 0 to n_cond"
    assert len(np.unique(p)) == rdms.n_cond, \
        "permutation vector must only have unique integer entries"
    rdm_mats = rdms.get_matrices()
    descriptors = rdms.descriptors.copy()
    rdm_descriptors = rdms.rdm_descriptors.copy()
    pattern_descriptors = rdms.pattern_descriptors.copy()
    # store the inverse permutation so the operation can be undone later
    p_inv = np.arange(len(p))[np.argsort(p)]
    descriptors.update({'p_inv': p_inv})
    # permute both pattern axes of every RDM matrix
    rdm_mats = rdm_mats[:, p, :]
    rdm_mats = rdm_mats[:, :, p]
    # NOTE(review): assumes a 'stim' pattern descriptor exists — other
    # pattern descriptors are not permuted; verify against callers
    stims = np.array(pattern_descriptors['stim'])
    pattern_descriptors.update({'stim': list(stims[p].astype(np.str_))})
    rdms_p = RDMs(
        dissimilarities=rdm_mats,
        descriptors=descriptors,
        rdm_descriptors=rdm_descriptors,
        pattern_descriptors=pattern_descriptors)
    return rdms_p
def inverse_permute_rdms(rdms):
    """Reverse the effect of a previous permute_rdms() call.

    Uses the inverse permutation stored under the 'p_inv' descriptor.
    """
    return permute_rdms(rdms, p=rdms.descriptors['p_inv'])
def get_categorical_rdm(category_vector, category_name='category'):
    """Generate an RDM object containing a categorical RDM, i.e. RDM = 0
    if the category is the same and 1 if they are different.

    Args:
        category_vector(iterable): a category index per condition
        category_name(String): name for the descriptor in the object,
            defaults to 'category'

    Returns:
        pyrsa.rdm.RDMs: constructed RDM
    """
    n = len(category_vector)
    rdm_list = []
    for i_cat in range(n):
        for j_cat in range(i_cat + 1, n):
            if isinstance(category_vector[i_cat], Iterable):
                # multi-feature categories differ if any feature differs
                comparisons = [np.array(category_vector[i_cat][idx])
                               != np.array(category_vector[j_cat][idx])
                               for idx in range(len(category_vector[i_cat]))]
                rdm_list.append(np.any(comparisons))
            else:
                rdm_list.append(
                    category_vector[i_cat] != category_vector[j_cat])
    # np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement
    rdm = RDMs(np.array(rdm_list, dtype=float),
               pattern_descriptors={category_name: np.array(category_vector)})
    return rdm
import numpy as np
import scipy.stats
from scipy.stats._stats import _kendall_dis
from pyrsa.util.matrix import pairwise_contrast_sparse
from pyrsa.util.rdm_utils import _get_n_from_reduced_vectors
from pyrsa.util.matrix import row_col_indicator_g
def compare(rdm1, rdm2, method='cosine', sigma_k=None):
    """Calculate the similarity between two RDMs objects using a chosen method.

    Args:
        rdm1 (pyrsa.rdm.RDMs):
            first set of RDMs
        rdm2 (pyrsa.rdm.RDMs):
            second set of RDMs
        method (string):
            which method to use, options are:
            'cosine' = cosine similarity
            'spearman' = spearman rank correlation
            'corr' = pearson correlation
            'kendall' = kendall-tau b
            'tau-a' = kendall-tau a
            'rho-a' = spearman correlation without tie correction
            'corr_cov' = pearson correlation after whitening
            'cosine_cov' = unbiased distance correlation
            which is equivalent to the cosine distance after whitening
        sigma_k (numpy.ndarray):
            covariance matrix of the pattern estimates.
            Used only for corr_cov and cosine_cov.

    Returns:
        numpy.ndarray: dist:
            pairwise similarities between the RDMs from the RDMs objects

    Raises:
        ValueError: for an unknown comparison method
    """
    if method == 'cosine':
        return compare_cosine(rdm1, rdm2)
    if method == 'spearman':
        return compare_spearman(rdm1, rdm2)
    if method == 'corr':
        return compare_correlation(rdm1, rdm2)
    if method in ('kendall', 'tau-b'):
        return compare_kendall_tau(rdm1, rdm2)
    if method == 'tau-a':
        return compare_kendall_tau_a(rdm1, rdm2)
    if method == 'rho-a':
        return compare_rho_a(rdm1, rdm2)
    if method == 'corr_cov':
        return compare_correlation_cov_weighted(rdm1, rdm2, sigma_k=sigma_k)
    if method == 'cosine_cov':
        return compare_cosine_cov_weighted(rdm1, rdm2, sigma_k=sigma_k)
    raise ValueError('Unknown RDM comparison method requested!')
def compare_cosine(rdm1, rdm2):
    """Calculate the cosine similarities between two RDMs objects.

    Args:
        rdm1 (pyrsa.rdm.RDMs): first set of RDMs
        rdm2 (pyrsa.rdm.RDMs): second set of RDMs

    Returns:
        numpy.ndarray: dist: cosine similarity between the two RDMs
    """
    return _cosine(*_parse_input_rdms(rdm1, rdm2))
def compare_correlation(rdm1, rdm2):
    """Calculate the pearson correlations between two RDMs objects.

    Args:
        rdm1 (pyrsa.rdm.RDMs): first set of RDMs
        rdm2 (pyrsa.rdm.RDMs): second set of RDMs

    Returns:
        numpy.ndarray: dist: correlations between the two RDMs
    """
    v1, v2 = _parse_input_rdms(rdm1, rdm2)
    # pearson correlation == cosine similarity of the centered vectors
    v1 = v1 - np.mean(v1, 1, keepdims=True)
    v2 = v2 - np.mean(v2, 1, keepdims=True)
    return _cosine(v1, v2)
def compare_cosine_cov_weighted(rdm1, rdm2, sigma_k=None):
    """Calculate the whitened cosine similarities between two RDMs objects.

    Args:
        rdm1 (pyrsa.rdm.RDMs): first set of RDMs
        rdm2 (pyrsa.rdm.RDMs): second set of RDMs
        sigma_k (numpy.ndarray): optional covariance of pattern estimates

    Returns:
        numpy.ndarray: dist: cosine similarities between the two RDMs
    """
    v1, v2 = _parse_input_rdms(rdm1, rdm2)
    return _cosine_cov_weighted(v1, v2, sigma_k)
def compare_correlation_cov_weighted(rdm1, rdm2, sigma_k=None):
    """Calculate the correlations between two RDMs objects after whitening
    with the covariance of the entries.

    Args:
        rdm1 (pyrsa.rdm.RDMs): first set of RDMs
        rdm2 (pyrsa.rdm.RDMs): second set of RDMs
        sigma_k (numpy.ndarray): optional covariance of pattern estimates

    Returns:
        numpy.ndarray: dist: correlations between the two RDMs
    """
    v1, v2 = _parse_input_rdms(rdm1, rdm2)
    # center first, then take the whitened cosine similarity
    v1 = v1 - np.mean(v1, 1, keepdims=True)
    v2 = v2 - np.mean(v2, 1, keepdims=True)
    return _cosine_cov_weighted(v1, v2, sigma_k)
def compare_spearman(rdm1, rdm2):
    """Calculate the spearman rank correlations between two RDMs objects.

    Args:
        rdm1 (pyrsa.rdm.RDMs): first set of RDMs
        rdm2 (pyrsa.rdm.RDMs): second set of RDMs

    Returns:
        numpy.ndarray: dist: rank correlations between the two RDMs
    """
    v1, v2 = _parse_input_rdms(rdm1, rdm2)
    # spearman == pearson correlation of the ranks
    ranks1 = np.apply_along_axis(scipy.stats.rankdata, 1, v1)
    ranks2 = np.apply_along_axis(scipy.stats.rankdata, 1, v2)
    ranks1 = ranks1 - np.mean(ranks1, 1, keepdims=True)
    ranks2 = ranks2 - np.mean(ranks2, 1, keepdims=True)
    return _cosine(ranks1, ranks2)
def compare_rho_a(rdm1, rdm2):
    """Calculate the spearman rank correlations between two RDMs objects
    without tie correction.

    Args:
        rdm1 (pyrsa.rdm.RDMs): first set of RDMs
        rdm2 (pyrsa.rdm.RDMs): second set of RDMs

    Returns:
        numpy.ndarray: dist: rank correlations between the two RDMs
    """
    v1, v2 = _parse_input_rdms(rdm1, rdm2)
    ranks1 = np.apply_along_axis(scipy.stats.rankdata, 1, v1)
    ranks2 = np.apply_along_axis(scipy.stats.rankdata, 1, v2)
    ranks1 = ranks1 - np.mean(ranks1, 1, keepdims=True)
    ranks2 = ranks2 - np.mean(ranks2, 1, keepdims=True)
    n = ranks1.shape[1]
    # 12 / (n^3 - n) is the normalization for untied ranks
    return np.einsum('ij,kj->ik', ranks1, ranks2) / (n ** 3 - n) * 12
def compare_kendall_tau(rdm1, rdm2):
    """Calculate the Kendall-tau b between two RDMs objects.

    Kendall-tau b is the variant which corrects for ties; the
    computation is delegated to scipy.

    Args:
        rdm1 (pyrsa.rdm.RDMs): first set of RDMs
        rdm2 (pyrsa.rdm.RDMs): second set of RDMs

    Returns:
        numpy.ndarray: dist: kendall-tau correlation between the two RDMs
    """
    v1, v2 = _parse_input_rdms(rdm1, rdm2)
    return _all_combinations(v1, v2, _kendall_tau)
def compare_kendall_tau_a(rdm1, rdm2):
    """Calculate the Kendall-tau a based distance between two RDMs objects.

    Adequate when some models predict ties.

    Args:
        rdm1 (pyrsa.rdm.RDMs): first set of RDMs
        rdm2 (pyrsa.rdm.RDMs): second set of RDMs

    Returns:
        numpy.ndarray: dist: kendall-tau a between the two RDMs
    """
    v1, v2 = _parse_input_rdms(rdm1, rdm2)
    return _all_combinations(v1, v2, _tau_a)
def _all_combinations(vectors1, vectors2, func):
"""runs a function func on all combinations of v1 in vectors1
and v2 in vectors2 and puts the results into an array
Args:
vectors1 (numpy.ndarray):
first set of values
vectors1 (numpy.ndarray):
second set of values
func (function):
function to be applied, should take two input vectors
and return one scalar
Returns:
numpy.ndarray: value: function result over all pairs
"""
value = np.empty((len(vectors1), len(vectors2)))
k1 = 0
for v1 in vectors1:
k2 = 0
for v2 in vectors2:
value[k1, k2] = func(v1, v2)
k2 += 1
k1 += 1
return value
def _cosine_cov_weighted_slow(vector1, vector2, sigma_k=None):
    """Compute the cosine similarities between two sets of vectors
    after whitening by their covariance.

    Args:
        vector1 (numpy.ndarray): first vectors (2D)
        vector2 (numpy.ndarray): second vectors (2D)
        sigma_k (Matrix): optional, covariance between pattern estimates

    Returns:
        cos (float): cosine of the angle between vectors
    """
    # explicit import: the module-level ``import scipy.stats`` does not
    # guarantee that the scipy.sparse.linalg submodule is loaded
    import scipy.sparse.linalg
    n_cond = _get_n_from_reduced_vectors(vector1)
    v = _get_v(n_cond, sigma_k)
    # compute V^-1 vector1/2 for all vectors by solving V x = vector1/2
    # with conjugate gradients (V is sparse and positive definite)
    vector1_m = np.array([scipy.sparse.linalg.cg(v, vector1[i], atol=0)[0]
                          for i in range(vector1.shape[0])])
    vector2_m = np.array([scipy.sparse.linalg.cg(v, vector2[i], atol=0)[0]
                          for i in range(vector2.shape[0])])
    # inner products v1^T (V^-1 v2) for all combinations
    cos = np.einsum('ij,kj->ik', vector1, vector2_m)
    # normalize by sqrt(v1^T V^-1 v1) and sqrt(v2^T V^-1 v2)
    cos /= np.sqrt(np.einsum('ij,ij->i', vector1,
                             vector1_m)).reshape((-1, 1))
    cos /= np.sqrt(np.einsum('ij,ij->i', vector2,
                             vector2_m)).reshape((1, -1))
    return cos
def _cosine_cov_weighted(vector1, vector2, sigma_k=None):
    """Compute the cosine angles between two sets of vectors
    weighted by the covariance.

    If no covariance is given this is computed using the linear CKA,
    which is equivalent in this case and faster to compute.
    Otherwise reverts to _cosine_cov_weighted_slow.

    Args:
        vector1 (numpy.ndarray): first vectors (2D)
        vector2 (numpy.ndarray): second vectors (2D)
        sigma_k (Matrix): optional, covariance between pattern estimates

    Returns:
        cos (float): cosine angle between vectors
    """
    if sigma_k is not None:
        return _cosine_cov_weighted_slow(vector1, vector2, sigma_k=sigma_k)
    # extended version of the RDM vectors in whitened space
    weighted1 = _cov_weighting(vector1)
    weighted2 = _cov_weighting(vector2)
    # inner products v1^T V^-1 v2 for all combinations
    cos = np.einsum('ij,kj->ik', weighted1, weighted2)
    # normalize by sqrt(v1^T V^-1 v1) and sqrt(v2^T V^-1 v2)
    cos /= np.sqrt(np.einsum('ij,ij->i', weighted1,
                             weighted1)).reshape((-1, 1))
    cos /= np.sqrt(np.einsum('ij,ij->i', weighted2,
                             weighted2)).reshape((1, -1))
    return cos
def _cov_weighting(vector):
    """Transform an array of RDM vectors into a representation
    in which the elements are isotropic.

    This is a stretched-out second moment matrix, with the diagonal
    elements appended. To account for the fact that the off-diagonal
    elements are only there once, they are multiplied by sqrt(2).

    Args:
        vector (numpy.ndarray): RDM vectors (2D) N x n_dist

    Returns:
        vector_w: weighted vectors (N x (n_dist + n_cond))
    """
    n_rdm, n_dist = vector.shape
    n_cond = _get_n_from_reduced_vectors(vector)
    # embed as (negative half) second-moment entries, diagonal appended
    weighted = -0.5 * np.hstack((vector, np.zeros((n_rdm, n_cond))))
    row_ind, col_ind = row_col_indicator_g(n_cond)
    ind_sum = row_ind + col_ind
    col_row_means = weighted @ ind_sum / n_cond
    # overall mean (off-diagonal entries counted twice in the full matrix)
    grand_mean = np.sum(weighted * 2, axis=1) / (n_cond * n_cond)
    grand_mean = grand_mean.reshape(-1, 1)
    # double centering: subtract row/column means, add back the grand mean
    weighted = weighted - col_row_means @ ind_sum.T + grand_mean
    # weight the off-diagonal terms double
    weighted[:, :n_dist] = weighted[:, :n_dist] * np.sqrt(2)
    return weighted
def _cosine(vector1, vector2):
"""computes the cosine angles between two sets of vectors
Args:
vector1 (numpy.ndarray):
first vectors (2D)
vector1 (numpy.ndarray):
second vectors (2D)
Returns:
cos (float):
cosine angle between vectors
"""
# compute all inner products
cos = np.einsum('ij,kj->ik', vector1, vector2)
# divide by sqrt of the inner products with themselves
cos /= np.sqrt(np.einsum('ij,ij->i', vector1, vector1)).reshape((-1, 1))
cos /= np.sqrt(np.einsum('ij,ij->i', vector2, vector2)).reshape((1, -1))
return cos
def _kendall_tau(vector1, vector2):
"""computes the kendall-tau between two vectors
Args:
vector1 (numpy.ndarray):
first vector
vector1 (numpy.ndarray):
second vector
Returns:
tau (float):
kendall-tau
"""
tau = scipy.stats.kendalltau(vector1, vector2).correlation
return tau
def _tau_a(vector1, vector2):
    """computes kendall-tau a between two vectors
    based on modifying scipy.stats.kendalltau

    Unlike tau-b, tau-a does not correct the denominator for ties:
    the concordant-minus-discordant count is divided by the total
    number of pairs n*(n-1)/2.

    Args:
        vector1 (numpy.ndarray):
            first vector
        vector2 (numpy.ndarray):
            second vector

    Returns:
        tau (float):
            kendall-tau a
    """
    size = vector1.size
    # rank both vectors; each is sorted by the other so that
    # _kendall_dis receives the canonical ordering it expects
    vector1, vector2 = _sort_and_rank(vector1, vector2)
    vector2, vector1 = _sort_and_rank(vector2, vector1)
    # scipy's C helper counting discordant pairs
    dis = _kendall_dis(vector1, vector2)  # discordant pairs
    # boundaries between runs of jointly-tied (x, y) pairs
    obs = np.r_[True, (vector1[1:] != vector1[:-1]) |
                (vector2[1:] != vector2[:-1]), True]
    cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
    ntie = (cnt * (cnt - 1) // 2).sum()  # joint ties
    xtie, x0, x1 = _count_rank_tie(vector1)  # ties in x, stats
    ytie, y0, y1 = _count_rank_tie(vector2)  # ties in y, stats
    tot = (size * (size - 1)) // 2
    # Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
    # = con + dis + xtie + ytie - ntie
    con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
    tau = con_minus_dis / tot
    # Limit range to fix computational errors
    tau = min(1., max(-1., tau))
    return tau
def _sort_and_rank(vector1, vector2):
"""does the sort and rank step of the _tau calculation"""
perm = np.argsort(vector2, kind='mergesort')
vector1 = vector1[perm]
vector2 = vector2[perm]
vector2 = np.r_[True, vector2[1:] != vector2[:-1]].cumsum(dtype=np.intp)
return vector1, vector2
def _count_rank_tie(ranks):
""" counts tied ranks for kendall-tau calculation"""
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
def _get_v(n_cond, sigma_k):
    """Get the rdm covariance from sigma_k.

    Args:
        n_cond (int): number of conditions
        sigma_k: optional covariance between pattern estimates;
            identity assumed when None

    Returns:
        scipy.sparse matrix (CSC): covariance V of the RDM entries
    """
    # explicit import: the module-level ``import scipy.stats`` does not
    # guarantee that the scipy.sparse submodule is loaded
    import scipy.sparse
    # calculate Xi
    c_mat = pairwise_contrast_sparse(np.arange(n_cond))
    if sigma_k is None:
        xi = c_mat @ c_mat.transpose()
    else:
        sigma_k = scipy.sparse.csr_matrix(sigma_k)
        xi = c_mat @ sigma_k @ c_mat.transpose()
    # calculate V as the elementwise square of Xi
    v = xi.multiply(xi).tocsc()
    return v
def _parse_input_rdms(rdm1, rdm2):
"""Gets the vector representation of input RDMs, raises an error if
the two RDMs objects have different dimensions
Args:
rdm1 (pyrsa.rdm.RDMs):
first set of RDMs
rdm2 (pyrsa.rdm.RDMs):
second set of RDMs
"""
if not isinstance(rdm1, np.ndarray):
vector1 = rdm1.get_vectors()
else:
if len(rdm1.shape) == 1:
vector1 = rdm1.reshape(1, -1)
else:
vector1 = rdm1
if not isinstance(rdm2, np.ndarray):
vector2 = rdm2.get_vectors()
else:
if len(rdm2.shape) == 1:
vector2 = rdm2.reshape(1, -1)
else:
vector2 = rdm2
if not vector1.shape[1] == vector2.shape[1]:
raise ValueError('rdm1 and rdm2 must be RDMs of equal shape')
vector1_no_nan = vector1[~np.isnan(vector1)].reshape(vector1.shape[0], -1)
vector2_no_nan = vector2[~np.isnan(vector2)].reshape(vector2.shape[0], -1)
if not vector1_no_nan.shape[1] == vector2_no_nan.shape[1]:
raise ValueError('rdm1 and rdm2 have different nan positions')
return vector1_no_nan, vector2_no_nan | /rsa3-3.0.0.post20201106-py3-none-any.whl/pyrsa/rdm/compare.py | 0.890726 | 0.522263 | compare.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.