body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
4bba74c5f0db8fcc0236971884742ce47aa5f3e5034c836cb480075d36be9ff7
def action_message(self, obj): "\n Returns the action message.\n Note: this handles deletions which don't return a change message.\n " change_message = obj.get_change_message() if (not change_message): change_message = '{}.'.format(obj.get_action_flag_display()) return change_message
Returns the action message. Note: this handles deletions which don't return a change message.
radical_translations/utils/admin.py
action_message
kingsdigitallab/radical_translations
3
python
def action_message(self, obj): "\n Returns the action message.\n Note: this handles deletions which don't return a change message.\n " change_message = obj.get_change_message() if (not change_message): change_message = '{}.'.format(obj.get_action_flag_display()) return change_message
def action_message(self, obj): "\n Returns the action message.\n Note: this handles deletions which don't return a change message.\n " change_message = obj.get_change_message() if (not change_message): change_message = '{}.'.format(obj.get_action_flag_display()) return change_message<|docstring|>Returns the action message. Note: this handles deletions which don't return a change message.<|endoftext|>
6d823bfe2d39c5c377cf3b216641db3e942b453cb4be0df4710e2397d85fd7b6
def has_add_permission(self, request): 'Log entries cannot be added manually.' return False
Log entries cannot be added manually.
radical_translations/utils/admin.py
has_add_permission
kingsdigitallab/radical_translations
3
python
def has_add_permission(self, request): return False
def has_add_permission(self, request): return False<|docstring|>Log entries cannot be added manually.<|endoftext|>
d93d09b812078b24fc880fef59c105f3ea9a7f570d40a912a20d2662a468aa3b
def has_change_permission(self, request, obj=None): 'Log entries cannot be changed.' return False
Log entries cannot be changed.
radical_translations/utils/admin.py
has_change_permission
kingsdigitallab/radical_translations
3
python
def has_change_permission(self, request, obj=None): return False
def has_change_permission(self, request, obj=None): return False<|docstring|>Log entries cannot be changed.<|endoftext|>
79e9caaf1850b422ebfdae8733bd01f96f690d3d259f839db48c22d75502c522
def has_delete_permission(self, request, obj=None): 'Log entries can only be deleted when the setting is enabled.' return False
Log entries can only be deleted when the setting is enabled.
radical_translations/utils/admin.py
has_delete_permission
kingsdigitallab/radical_translations
3
python
def has_delete_permission(self, request, obj=None): return False
def has_delete_permission(self, request, obj=None): return False<|docstring|>Log entries can only be deleted when the setting is enabled.<|endoftext|>
57f25a2cec484a80272e9731a8ba2ea260c73b2f445a30e4e6425a3f3b9612af
def __init__(self, source: Union[(str, JavaObject)]): '\n Constructor of Source.\n\n :param source: The java Source object.\n ' super(Source, self).__init__(source)
Constructor of Source. :param source: The java Source object.
flink-python/pyflink/datastream/connectors/base.py
__init__
quxiucheng/flink
0
python
def __init__(self, source: Union[(str, JavaObject)]): '\n Constructor of Source.\n\n :param source: The java Source object.\n ' super(Source, self).__init__(source)
def __init__(self, source: Union[(str, JavaObject)]): '\n Constructor of Source.\n\n :param source: The java Source object.\n ' super(Source, self).__init__(source)<|docstring|>Constructor of Source. :param source: The java Source object.<|endoftext|>
8905a559e758dc878b7565a46028cc46fa47e699ee9ce107f421d6144e1d6011
def __init__(self, sink: Union[(str, JavaObject)]): '\n Constructor of Sink.\n\n :param sink: The java Sink object.\n ' super(Sink, self).__init__(sink)
Constructor of Sink. :param sink: The java Sink object.
flink-python/pyflink/datastream/connectors/base.py
__init__
quxiucheng/flink
0
python
def __init__(self, sink: Union[(str, JavaObject)]): '\n Constructor of Sink.\n\n :param sink: The java Sink object.\n ' super(Sink, self).__init__(sink)
def __init__(self, sink: Union[(str, JavaObject)]): '\n Constructor of Sink.\n\n :param sink: The java Sink object.\n ' super(Sink, self).__init__(sink)<|docstring|>Constructor of Sink. :param sink: The java Sink object.<|endoftext|>
24af882b50176121d945cb7a4f4815cba90fe81c5a1ceddc481f7821ee60b66d
def __init__(self, _net_dim=256, _state_dim=8, _action_dim=2, _learning_rate=0.0001, _if_per_or_gae=False, _env_num=1, _gpu_id=0): "initialize\n replace by different DRL algorithms\n explict call self.init() for multiprocessing.\n `net_dim` the dimension of networks (the width of neural networks)\n `state_dim` the dimension of state (the number of state vector)\n `action_dim` the dimension of action (the number of discrete action)\n `learning_rate` learning rate of optimizer\n `if_per_or_gae` PER (off-policy) or GAE (on-policy) for sparse reward\n `env_num` the env number of VectorEnv. env_num == 1 means don't use VectorEnv\n `gpu_id` the gpu_id of the training device. Use CPU when cuda is not available.\n " self.states = None self.device = None self.traj_list = None self.action_dim = None self.if_off_policy = True self.env_num = 1 self.explore_rate = 1.0 self.explore_noise = 0.1 self.clip_grad_norm = 4.0 'attribute' self.explore_env = None self.get_obj_critic = None self.criterion = torch.nn.SmoothL1Loss() self.cri = self.cri_target = self.if_use_cri_target = self.cri_optim = self.ClassCri = None self.act = self.act_target = self.if_use_act_target = self.act_optim = self.ClassAct = None
initialize replace by different DRL algorithms explict call self.init() for multiprocessing. `net_dim` the dimension of networks (the width of neural networks) `state_dim` the dimension of state (the number of state vector) `action_dim` the dimension of action (the number of discrete action) `learning_rate` learning rate of optimizer `if_per_or_gae` PER (off-policy) or GAE (on-policy) for sparse reward `env_num` the env number of VectorEnv. env_num == 1 means don't use VectorEnv `gpu_id` the gpu_id of the training device. Use CPU when cuda is not available.
elegantrl/agent.py
__init__
Yonv1943/ElegantRL
236
python
def __init__(self, _net_dim=256, _state_dim=8, _action_dim=2, _learning_rate=0.0001, _if_per_or_gae=False, _env_num=1, _gpu_id=0): "initialize\n replace by different DRL algorithms\n explict call self.init() for multiprocessing.\n `net_dim` the dimension of networks (the width of neural networks)\n `state_dim` the dimension of state (the number of state vector)\n `action_dim` the dimension of action (the number of discrete action)\n `learning_rate` learning rate of optimizer\n `if_per_or_gae` PER (off-policy) or GAE (on-policy) for sparse reward\n `env_num` the env number of VectorEnv. env_num == 1 means don't use VectorEnv\n `gpu_id` the gpu_id of the training device. Use CPU when cuda is not available.\n " self.states = None self.device = None self.traj_list = None self.action_dim = None self.if_off_policy = True self.env_num = 1 self.explore_rate = 1.0 self.explore_noise = 0.1 self.clip_grad_norm = 4.0 'attribute' self.explore_env = None self.get_obj_critic = None self.criterion = torch.nn.SmoothL1Loss() self.cri = self.cri_target = self.if_use_cri_target = self.cri_optim = self.ClassCri = None self.act = self.act_target = self.if_use_act_target = self.act_optim = self.ClassAct = None
def __init__(self, _net_dim=256, _state_dim=8, _action_dim=2, _learning_rate=0.0001, _if_per_or_gae=False, _env_num=1, _gpu_id=0): "initialize\n replace by different DRL algorithms\n explict call self.init() for multiprocessing.\n `net_dim` the dimension of networks (the width of neural networks)\n `state_dim` the dimension of state (the number of state vector)\n `action_dim` the dimension of action (the number of discrete action)\n `learning_rate` learning rate of optimizer\n `if_per_or_gae` PER (off-policy) or GAE (on-policy) for sparse reward\n `env_num` the env number of VectorEnv. env_num == 1 means don't use VectorEnv\n `gpu_id` the gpu_id of the training device. Use CPU when cuda is not available.\n " self.states = None self.device = None self.traj_list = None self.action_dim = None self.if_off_policy = True self.env_num = 1 self.explore_rate = 1.0 self.explore_noise = 0.1 self.clip_grad_norm = 4.0 'attribute' self.explore_env = None self.get_obj_critic = None self.criterion = torch.nn.SmoothL1Loss() self.cri = self.cri_target = self.if_use_cri_target = self.cri_optim = self.ClassCri = None self.act = self.act_target = self.if_use_act_target = self.act_optim = self.ClassAct = None<|docstring|>initialize replace by different DRL algorithms explict call self.init() for multiprocessing. `net_dim` the dimension of networks (the width of neural networks) `state_dim` the dimension of state (the number of state vector) `action_dim` the dimension of action (the number of discrete action) `learning_rate` learning rate of optimizer `if_per_or_gae` PER (off-policy) or GAE (on-policy) for sparse reward `env_num` the env number of VectorEnv. env_num == 1 means don't use VectorEnv `gpu_id` the gpu_id of the training device. Use CPU when cuda is not available.<|endoftext|>
673f05e5f0acf779678dd530ca4657b0b8c346ba2bda577ec013b29c859a1276
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): "initialize the self.object in `__init__()`\n replace by different DRL algorithms\n explict call self.init() for multiprocessing.\n `net_dim` the dimension of networks (the width of neural networks)\n `state_dim` the dimension of state (the number of state vector)\n `action_dim` the dimension of action (the number of discrete action)\n `learning_rate` learning rate of optimizer\n `if_per_or_gae` PER (off-policy) or GAE (on-policy) for sparse reward\n `env_num` the env number of VectorEnv. env_num == 1 means don't use VectorEnv\n `gpu_id` the gpu_id of the training device. Use CPU when cuda is not available.\n " self.action_dim = action_dim self.traj_list = [list() for _ in range(env_num)] self.device = torch.device((f'cuda:{gpu_id}' if (torch.cuda.is_available() and (gpu_id >= 0)) else 'cpu')) self.cri = self.ClassCri(int((net_dim * 1.25)), state_dim, action_dim).to(self.device) self.act = (self.ClassAct(net_dim, state_dim, action_dim).to(self.device) if self.ClassAct else self.cri) self.cri_target = (deepcopy(self.cri) if self.if_use_cri_target else self.cri) self.act_target = (deepcopy(self.act) if self.if_use_act_target else self.act) self.cri_optim = torch.optim.Adam(self.cri.parameters(), learning_rate) self.act_optim = (torch.optim.Adam(self.act.parameters(), learning_rate) if self.ClassAct else self.cri) del self.ClassCri, self.ClassAct if (env_num == 1): self.explore_env = self.explore_one_env else: self.explore_env = self.explore_vec_env
initialize the self.object in `__init__()` replace by different DRL algorithms explict call self.init() for multiprocessing. `net_dim` the dimension of networks (the width of neural networks) `state_dim` the dimension of state (the number of state vector) `action_dim` the dimension of action (the number of discrete action) `learning_rate` learning rate of optimizer `if_per_or_gae` PER (off-policy) or GAE (on-policy) for sparse reward `env_num` the env number of VectorEnv. env_num == 1 means don't use VectorEnv `gpu_id` the gpu_id of the training device. Use CPU when cuda is not available.
elegantrl/agent.py
init
Yonv1943/ElegantRL
236
python
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): "initialize the self.object in `__init__()`\n replace by different DRL algorithms\n explict call self.init() for multiprocessing.\n `net_dim` the dimension of networks (the width of neural networks)\n `state_dim` the dimension of state (the number of state vector)\n `action_dim` the dimension of action (the number of discrete action)\n `learning_rate` learning rate of optimizer\n `if_per_or_gae` PER (off-policy) or GAE (on-policy) for sparse reward\n `env_num` the env number of VectorEnv. env_num == 1 means don't use VectorEnv\n `gpu_id` the gpu_id of the training device. Use CPU when cuda is not available.\n " self.action_dim = action_dim self.traj_list = [list() for _ in range(env_num)] self.device = torch.device((f'cuda:{gpu_id}' if (torch.cuda.is_available() and (gpu_id >= 0)) else 'cpu')) self.cri = self.ClassCri(int((net_dim * 1.25)), state_dim, action_dim).to(self.device) self.act = (self.ClassAct(net_dim, state_dim, action_dim).to(self.device) if self.ClassAct else self.cri) self.cri_target = (deepcopy(self.cri) if self.if_use_cri_target else self.cri) self.act_target = (deepcopy(self.act) if self.if_use_act_target else self.act) self.cri_optim = torch.optim.Adam(self.cri.parameters(), learning_rate) self.act_optim = (torch.optim.Adam(self.act.parameters(), learning_rate) if self.ClassAct else self.cri) del self.ClassCri, self.ClassAct if (env_num == 1): self.explore_env = self.explore_one_env else: self.explore_env = self.explore_vec_env
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): "initialize the self.object in `__init__()`\n replace by different DRL algorithms\n explict call self.init() for multiprocessing.\n `net_dim` the dimension of networks (the width of neural networks)\n `state_dim` the dimension of state (the number of state vector)\n `action_dim` the dimension of action (the number of discrete action)\n `learning_rate` learning rate of optimizer\n `if_per_or_gae` PER (off-policy) or GAE (on-policy) for sparse reward\n `env_num` the env number of VectorEnv. env_num == 1 means don't use VectorEnv\n `gpu_id` the gpu_id of the training device. Use CPU when cuda is not available.\n " self.action_dim = action_dim self.traj_list = [list() for _ in range(env_num)] self.device = torch.device((f'cuda:{gpu_id}' if (torch.cuda.is_available() and (gpu_id >= 0)) else 'cpu')) self.cri = self.ClassCri(int((net_dim * 1.25)), state_dim, action_dim).to(self.device) self.act = (self.ClassAct(net_dim, state_dim, action_dim).to(self.device) if self.ClassAct else self.cri) self.cri_target = (deepcopy(self.cri) if self.if_use_cri_target else self.cri) self.act_target = (deepcopy(self.act) if self.if_use_act_target else self.act) self.cri_optim = torch.optim.Adam(self.cri.parameters(), learning_rate) self.act_optim = (torch.optim.Adam(self.act.parameters(), learning_rate) if self.ClassAct else self.cri) del self.ClassCri, self.ClassAct if (env_num == 1): self.explore_env = self.explore_one_env else: self.explore_env = self.explore_vec_env<|docstring|>initialize the self.object in `__init__()` replace by different DRL algorithms explict call self.init() for multiprocessing. 
`net_dim` the dimension of networks (the width of neural networks) `state_dim` the dimension of state (the number of state vector) `action_dim` the dimension of action (the number of discrete action) `learning_rate` learning rate of optimizer `if_per_or_gae` PER (off-policy) or GAE (on-policy) for sparse reward `env_num` the env number of VectorEnv. env_num == 1 means don't use VectorEnv `gpu_id` the gpu_id of the training device. Use CPU when cuda is not available.<|endoftext|>
e6b9a002021362b3724c2ff7dac2769664aae61bad39c7f3900a842c14cc5e68
def select_actions(self, state: torch.Tensor) -> torch.Tensor: 'Select continuous actions for exploration\n `tensor states` states.shape==(batch_size, state_dim, )\n return `tensor actions` actions.shape==(batch_size, action_dim, ), -1 < action < +1\n ' action = self.act(state.to(self.device)) if (rd.rand() < self.explore_rate): action = (action + (torch.randn_like(action) * self.explore_noise)).clamp((- 1), 1) return action.detach().cpu()
Select continuous actions for exploration `tensor states` states.shape==(batch_size, state_dim, ) return `tensor actions` actions.shape==(batch_size, action_dim, ), -1 < action < +1
elegantrl/agent.py
select_actions
Yonv1943/ElegantRL
236
python
def select_actions(self, state: torch.Tensor) -> torch.Tensor: 'Select continuous actions for exploration\n `tensor states` states.shape==(batch_size, state_dim, )\n return `tensor actions` actions.shape==(batch_size, action_dim, ), -1 < action < +1\n ' action = self.act(state.to(self.device)) if (rd.rand() < self.explore_rate): action = (action + (torch.randn_like(action) * self.explore_noise)).clamp((- 1), 1) return action.detach().cpu()
def select_actions(self, state: torch.Tensor) -> torch.Tensor: 'Select continuous actions for exploration\n `tensor states` states.shape==(batch_size, state_dim, )\n return `tensor actions` actions.shape==(batch_size, action_dim, ), -1 < action < +1\n ' action = self.act(state.to(self.device)) if (rd.rand() < self.explore_rate): action = (action + (torch.randn_like(action) * self.explore_noise)).clamp((- 1), 1) return action.detach().cpu()<|docstring|>Select continuous actions for exploration `tensor states` states.shape==(batch_size, state_dim, ) return `tensor actions` actions.shape==(batch_size, action_dim, ), -1 < action < +1<|endoftext|>
53cc8063a4343b01a87b8b728555730778dbc05f919d8c28207312a89ad2608b
def explore_one_env(self, env, target_step, reward_scale, gamma): 'actor explores in one env, then returns the traj (env transition)\n `object env` RL training environment. env.reset() env.step()\n `int target_step` explored target_step number of step in env\n return `[traj, ...]` for off-policy ReplayBuffer, `traj = [(state, other), ...]`\n ' state = self.states[0] traj = list() for _ in range(target_step): ten_state = torch.as_tensor(state, dtype=torch.float32) ten_action = self.select_actions(ten_state.unsqueeze(0))[0] action = ten_action.numpy() (next_s, reward, done, _) = env.step(action) ten_other = torch.empty((2 + self.action_dim)) ten_other[0] = reward ten_other[1] = done ten_other[2:] = ten_action traj.append((ten_state, ten_other)) state = (env.reset() if done else next_s) self.states[0] = state traj_state = torch.stack([item[0] for item in traj]) traj_other = torch.stack([item[1] for item in traj]) traj_list = [(traj_state, traj_other)] return self.convert_trajectory(traj_list, reward_scale, gamma)
actor explores in one env, then returns the traj (env transition) `object env` RL training environment. env.reset() env.step() `int target_step` explored target_step number of step in env return `[traj, ...]` for off-policy ReplayBuffer, `traj = [(state, other), ...]`
elegantrl/agent.py
explore_one_env
Yonv1943/ElegantRL
236
python
def explore_one_env(self, env, target_step, reward_scale, gamma): 'actor explores in one env, then returns the traj (env transition)\n `object env` RL training environment. env.reset() env.step()\n `int target_step` explored target_step number of step in env\n return `[traj, ...]` for off-policy ReplayBuffer, `traj = [(state, other), ...]`\n ' state = self.states[0] traj = list() for _ in range(target_step): ten_state = torch.as_tensor(state, dtype=torch.float32) ten_action = self.select_actions(ten_state.unsqueeze(0))[0] action = ten_action.numpy() (next_s, reward, done, _) = env.step(action) ten_other = torch.empty((2 + self.action_dim)) ten_other[0] = reward ten_other[1] = done ten_other[2:] = ten_action traj.append((ten_state, ten_other)) state = (env.reset() if done else next_s) self.states[0] = state traj_state = torch.stack([item[0] for item in traj]) traj_other = torch.stack([item[1] for item in traj]) traj_list = [(traj_state, traj_other)] return self.convert_trajectory(traj_list, reward_scale, gamma)
def explore_one_env(self, env, target_step, reward_scale, gamma): 'actor explores in one env, then returns the traj (env transition)\n `object env` RL training environment. env.reset() env.step()\n `int target_step` explored target_step number of step in env\n return `[traj, ...]` for off-policy ReplayBuffer, `traj = [(state, other), ...]`\n ' state = self.states[0] traj = list() for _ in range(target_step): ten_state = torch.as_tensor(state, dtype=torch.float32) ten_action = self.select_actions(ten_state.unsqueeze(0))[0] action = ten_action.numpy() (next_s, reward, done, _) = env.step(action) ten_other = torch.empty((2 + self.action_dim)) ten_other[0] = reward ten_other[1] = done ten_other[2:] = ten_action traj.append((ten_state, ten_other)) state = (env.reset() if done else next_s) self.states[0] = state traj_state = torch.stack([item[0] for item in traj]) traj_other = torch.stack([item[1] for item in traj]) traj_list = [(traj_state, traj_other)] return self.convert_trajectory(traj_list, reward_scale, gamma)<|docstring|>actor explores in one env, then returns the traj (env transition) `object env` RL training environment. env.reset() env.step() `int target_step` explored target_step number of step in env return `[traj, ...]` for off-policy ReplayBuffer, `traj = [(state, other), ...]`<|endoftext|>
037ad6d4cc06006dae10db68023b528767c3f1ec481389f73ab96d247c758295
def explore_vec_env(self, env, target_step, reward_scale, gamma): 'actor explores in VectorEnv, then returns the trajectory (env transition)\n `object env` RL training environment. env.reset() env.step()\n `int target_step` explored target_step number of step in env\n return `[traj, ...]` for off-policy ReplayBuffer, `traj = [(state, other), ...]`\n ' ten_states = self.states traj = list() for _ in range(target_step): ten_actions = self.select_actions(ten_states) (ten_next_states, ten_rewards, ten_dones) = env.step(ten_actions) ten_others = torch.cat((ten_rewards.unsqueeze(0), ten_dones.unsqueeze(0), ten_actions)) traj.append((ten_states, ten_others)) ten_states = ten_next_states self.states = ten_states traj_state = torch.stack([item[0] for item in traj]) traj_other = torch.stack([item[1] for item in traj]) traj_list = [(traj_state[(:, env_i, :)], traj_other[(:, env_i, :)]) for env_i in range(len(self.states))] return self.convert_trajectory(traj_list, reward_scale, gamma)
actor explores in VectorEnv, then returns the trajectory (env transition) `object env` RL training environment. env.reset() env.step() `int target_step` explored target_step number of step in env return `[traj, ...]` for off-policy ReplayBuffer, `traj = [(state, other), ...]`
elegantrl/agent.py
explore_vec_env
Yonv1943/ElegantRL
236
python
def explore_vec_env(self, env, target_step, reward_scale, gamma): 'actor explores in VectorEnv, then returns the trajectory (env transition)\n `object env` RL training environment. env.reset() env.step()\n `int target_step` explored target_step number of step in env\n return `[traj, ...]` for off-policy ReplayBuffer, `traj = [(state, other), ...]`\n ' ten_states = self.states traj = list() for _ in range(target_step): ten_actions = self.select_actions(ten_states) (ten_next_states, ten_rewards, ten_dones) = env.step(ten_actions) ten_others = torch.cat((ten_rewards.unsqueeze(0), ten_dones.unsqueeze(0), ten_actions)) traj.append((ten_states, ten_others)) ten_states = ten_next_states self.states = ten_states traj_state = torch.stack([item[0] for item in traj]) traj_other = torch.stack([item[1] for item in traj]) traj_list = [(traj_state[(:, env_i, :)], traj_other[(:, env_i, :)]) for env_i in range(len(self.states))] return self.convert_trajectory(traj_list, reward_scale, gamma)
def explore_vec_env(self, env, target_step, reward_scale, gamma): 'actor explores in VectorEnv, then returns the trajectory (env transition)\n `object env` RL training environment. env.reset() env.step()\n `int target_step` explored target_step number of step in env\n return `[traj, ...]` for off-policy ReplayBuffer, `traj = [(state, other), ...]`\n ' ten_states = self.states traj = list() for _ in range(target_step): ten_actions = self.select_actions(ten_states) (ten_next_states, ten_rewards, ten_dones) = env.step(ten_actions) ten_others = torch.cat((ten_rewards.unsqueeze(0), ten_dones.unsqueeze(0), ten_actions)) traj.append((ten_states, ten_others)) ten_states = ten_next_states self.states = ten_states traj_state = torch.stack([item[0] for item in traj]) traj_other = torch.stack([item[1] for item in traj]) traj_list = [(traj_state[(:, env_i, :)], traj_other[(:, env_i, :)]) for env_i in range(len(self.states))] return self.convert_trajectory(traj_list, reward_scale, gamma)<|docstring|>actor explores in VectorEnv, then returns the trajectory (env transition) `object env` RL training environment. env.reset() env.step() `int target_step` explored target_step number of step in env return `[traj, ...]` for off-policy ReplayBuffer, `traj = [(state, other), ...]`<|endoftext|>
f36c8d45df128a3424eee42935a2b680305976181397f3c97845bcefb601bf0a
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple: 'update the neural network by sampling batch data from ReplayBuffer\n replace by different DRL algorithms.\n return the objective value as training information to help fine-tuning\n `buffer` Experience replay buffer.\n `int batch_size` sample batch_size of data for Stochastic Gradient Descent\n `float repeat_times` the times of sample batch = int(target_step * repeat_times) in off-policy\n `float soft_update_tau` target_net = target_net * (1-tau) + current_net * tau\n `return tuple` training logging. tuple = (float, float, ...)\n '
update the neural network by sampling batch data from ReplayBuffer replace by different DRL algorithms. return the objective value as training information to help fine-tuning `buffer` Experience replay buffer. `int batch_size` sample batch_size of data for Stochastic Gradient Descent `float repeat_times` the times of sample batch = int(target_step * repeat_times) in off-policy `float soft_update_tau` target_net = target_net * (1-tau) + current_net * tau `return tuple` training logging. tuple = (float, float, ...)
elegantrl/agent.py
update_net
Yonv1943/ElegantRL
236
python
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple: 'update the neural network by sampling batch data from ReplayBuffer\n replace by different DRL algorithms.\n return the objective value as training information to help fine-tuning\n `buffer` Experience replay buffer.\n `int batch_size` sample batch_size of data for Stochastic Gradient Descent\n `float repeat_times` the times of sample batch = int(target_step * repeat_times) in off-policy\n `float soft_update_tau` target_net = target_net * (1-tau) + current_net * tau\n `return tuple` training logging. tuple = (float, float, ...)\n '
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple: 'update the neural network by sampling batch data from ReplayBuffer\n replace by different DRL algorithms.\n return the objective value as training information to help fine-tuning\n `buffer` Experience replay buffer.\n `int batch_size` sample batch_size of data for Stochastic Gradient Descent\n `float repeat_times` the times of sample batch = int(target_step * repeat_times) in off-policy\n `float soft_update_tau` target_net = target_net * (1-tau) + current_net * tau\n `return tuple` training logging. tuple = (float, float, ...)\n '<|docstring|>update the neural network by sampling batch data from ReplayBuffer replace by different DRL algorithms. return the objective value as training information to help fine-tuning `buffer` Experience replay buffer. `int batch_size` sample batch_size of data for Stochastic Gradient Descent `float repeat_times` the times of sample batch = int(target_step * repeat_times) in off-policy `float soft_update_tau` target_net = target_net * (1-tau) + current_net * tau `return tuple` training logging. tuple = (float, float, ...)<|endoftext|>
5bfd6803f227100accc76a11ac76039eec08410d5f038c390b7fc6245058f7f2
@staticmethod def soft_update(target_net, current_net, tau): 'soft update a target network via current network\n `nn.Module target_net` target network update via a current network, it is more stable\n `nn.Module current_net` current network update via an optimizer\n ' for (tar, cur) in zip(target_net.parameters(), current_net.parameters()): tar.data.copy_(((cur.data * tau) + (tar.data * (1.0 - tau))))
soft update a target network via current network `nn.Module target_net` target network update via a current network, it is more stable `nn.Module current_net` current network update via an optimizer
elegantrl/agent.py
soft_update
Yonv1943/ElegantRL
236
python
@staticmethod def soft_update(target_net, current_net, tau): 'soft update a target network via current network\n `nn.Module target_net` target network update via a current network, it is more stable\n `nn.Module current_net` current network update via an optimizer\n ' for (tar, cur) in zip(target_net.parameters(), current_net.parameters()): tar.data.copy_(((cur.data * tau) + (tar.data * (1.0 - tau))))
@staticmethod def soft_update(target_net, current_net, tau): 'soft update a target network via current network\n `nn.Module target_net` target network update via a current network, it is more stable\n `nn.Module current_net` current network update via an optimizer\n ' for (tar, cur) in zip(target_net.parameters(), current_net.parameters()): tar.data.copy_(((cur.data * tau) + (tar.data * (1.0 - tau))))<|docstring|>soft update a target network via current network `nn.Module target_net` target network update via a current network, it is more stable `nn.Module current_net` current network update via an optimizer<|endoftext|>
d0c7e6b9df71673414397d80e3f11e55b036f23c093e06b39c5d7d3fb3e6cef9
def save_or_load_agent(self, cwd, if_save): 'save or load the training files for agent from disk.\n `str cwd` current working directory, where to save training files.\n `bool if_save` True: save files. False: load files.\n ' def load_torch_file(model_or_optim, _path): state_dict = torch.load(_path, map_location=(lambda storage, loc: storage)) model_or_optim.load_state_dict(state_dict) name_obj_list = [('actor', self.act), ('act_target', self.act_target), ('act_optim', self.act_optim), ('critic', self.cri), ('cri_target', self.cri_target), ('cri_optim', self.cri_optim)] name_obj_list = [(name, obj) for (name, obj) in name_obj_list if (obj is not None)] if if_save: for (name, obj) in name_obj_list: save_path = f'{cwd}/{name}.pth' torch.save(obj.state_dict(), save_path) else: for (name, obj) in name_obj_list: save_path = f'{cwd}/{name}.pth' (load_torch_file(obj, save_path) if os.path.isfile(save_path) else None)
save or load the training files for agent from disk. `str cwd` current working directory, where to save training files. `bool if_save` True: save files. False: load files.
elegantrl/agent.py
save_or_load_agent
Yonv1943/ElegantRL
236
python
def save_or_load_agent(self, cwd, if_save): 'save or load the training files for agent from disk.\n `str cwd` current working directory, where to save training files.\n `bool if_save` True: save files. False: load files.\n ' def load_torch_file(model_or_optim, _path): state_dict = torch.load(_path, map_location=(lambda storage, loc: storage)) model_or_optim.load_state_dict(state_dict) name_obj_list = [('actor', self.act), ('act_target', self.act_target), ('act_optim', self.act_optim), ('critic', self.cri), ('cri_target', self.cri_target), ('cri_optim', self.cri_optim)] name_obj_list = [(name, obj) for (name, obj) in name_obj_list if (obj is not None)] if if_save: for (name, obj) in name_obj_list: save_path = f'{cwd}/{name}.pth' torch.save(obj.state_dict(), save_path) else: for (name, obj) in name_obj_list: save_path = f'{cwd}/{name}.pth' (load_torch_file(obj, save_path) if os.path.isfile(save_path) else None)
def save_or_load_agent(self, cwd, if_save): 'save or load the training files for agent from disk.\n `str cwd` current working directory, where to save training files.\n `bool if_save` True: save files. False: load files.\n ' def load_torch_file(model_or_optim, _path): state_dict = torch.load(_path, map_location=(lambda storage, loc: storage)) model_or_optim.load_state_dict(state_dict) name_obj_list = [('actor', self.act), ('act_target', self.act_target), ('act_optim', self.act_optim), ('critic', self.cri), ('cri_target', self.cri_target), ('cri_optim', self.cri_optim)] name_obj_list = [(name, obj) for (name, obj) in name_obj_list if (obj is not None)] if if_save: for (name, obj) in name_obj_list: save_path = f'{cwd}/{name}.pth' torch.save(obj.state_dict(), save_path) else: for (name, obj) in name_obj_list: save_path = f'{cwd}/{name}.pth' (load_torch_file(obj, save_path) if os.path.isfile(save_path) else None)<|docstring|>save or load the training files for agent from disk. `str cwd` current working directory, where to save training files. `bool if_save` True: save files. False: load files.<|endoftext|>
d8adf54b6e325f4069c8d04a50aa8d4d6a2c83f5d4899ce0a4d9b3c32778769d
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0):
    """Explicitly build networks and optimizers after construction.

    Called instead of doing the work in ``__init__`` so that worker
    processes can create their torch objects inside their own process.

    :param if_per_or_gae: ``True`` enables Prioritized Experience Replay,
        which needs per-sample losses to refresh sampling priorities.
    """
    # pick the critic class before the base class instantiates it
    self.ClassCri = QNetDuel if self.if_use_dueling else QNet
    AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id)

    reduction = 'none' if if_per_or_gae else 'mean'  # PER keeps per-sample errors
    self.criterion = torch.nn.SmoothL1Loss(reduction=reduction)
    self.get_obj_critic = self.get_obj_critic_per if if_per_or_gae else self.get_obj_critic_raw
Explicit call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.
elegantrl/agent.py
init
Yonv1943/ElegantRL
236
python
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n \n ' self.ClassCri = (QNetDuel if self.if_use_dueling else QNet) AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) if if_per_or_gae: self.criterion = torch.nn.SmoothL1Loss(reduction='none') self.get_obj_critic = self.get_obj_critic_per else: self.criterion = torch.nn.SmoothL1Loss(reduction='mean') self.get_obj_critic = self.get_obj_critic_raw
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n \n ' self.ClassCri = (QNetDuel if self.if_use_dueling else QNet) AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) if if_per_or_gae: self.criterion = torch.nn.SmoothL1Loss(reduction='none') self.get_obj_critic = self.get_obj_critic_per else: self.criterion = torch.nn.SmoothL1Loss(reduction='mean') self.get_obj_critic = self.get_obj_critic_raw<|docstring|>Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.<|endoftext|>
a980e888db735d0c9acc3ed8cdcf69974c24895ac7bf9a0d0f02499724c7be82
def select_actions(self, states) -> torch.Tensor:
    """Select discrete actions for a batch of states with epsilon-greedy exploration.

    :param states: tensor of shape (batch_size, state_dim).
    :return: CPU LongTensor of shape (batch_size,) holding action indices
        in ``[0, action_dim)``.
    """
    if rd.rand() < self.explore_rate:  # epsilon-greedy: uniform random exploration
        # fix: the original referenced the undefined name ``state`` (NameError)
        # and passed a bare int to ``size=``; torch.randint requires a tuple.
        a_int = torch.randint(self.action_dim, size=(states.shape[0],))
    else:
        q_values = self.act(states.to(self.device))
        a_int = q_values.argmax(dim=1)
    return a_int.detach().cpu()
Select discrete actions given an array of states.

.. note::
    Using ϵ-greedy to uniformly random select actions for randomness.

:param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).
:return: an array of discrete action indices in a shape (batch_size, ), each in range(0, action_dim).
elegantrl/agent.py
select_actions
Yonv1943/ElegantRL
236
python
def select_actions(self, states) -> np.ndarray: '\n Select discrete actions given an array of states.\n \n .. note::\n Using ϵ-greedy to uniformly random select actions for randomness.\n \n :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).\n :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).\n ' if (rd.rand() < self.explore_rate): a_int = torch.randint(self.action_dim, size=state.shape[0]) else: action = self.act(state.to(self.device)) a_int = action.argmax(dim=1) return a_int.detach().cpu()
def select_actions(self, states) -> np.ndarray: '\n Select discrete actions given an array of states.\n \n .. note::\n Using ϵ-greedy to uniformly random select actions for randomness.\n \n :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).\n :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).\n ' if (rd.rand() < self.explore_rate): a_int = torch.randint(self.action_dim, size=state.shape[0]) else: action = self.act(state.to(self.device)) a_int = action.argmax(dim=1) return a_int.detach().cpu()<|docstring|>Select discrete actions given an array of states. .. note:: Using ϵ-greedy to uniformly random select actions for randomness. :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ). :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).<|endoftext|>
80f936043cda6b61f347c535473d64d613784e01565075bb706b2139709ec52f
def explore_one_env(self, env, target_step) -> list:
    """Collect a trajectory by interacting with a single environment instance.

    :param env: gym-style environment exposing ``reset()`` and ``step()``.
    :param target_step: number of environment steps to collect.
    :return: the trajectory list produced by ``self.convert_trajectory``.
    """
    traj = []
    state = self.states[0]
    for _ in range(target_step):
        ten_state = torch.as_tensor(state, dtype=torch.float32)
        ten_action = self.select_actions(ten_state.unsqueeze(0))[0]
        next_s, reward, done, _ = env.step(ten_action.numpy())

        ten_other = torch.empty(2 + 1)  # (reward, done, action)
        ten_other[0] = reward
        ten_other[1] = done
        ten_other[2] = ten_action
        traj.append((ten_state, ten_other))

        state = env.reset() if done else next_s
    self.states[0] = state

    traj_state = torch.stack([item[0] for item in traj])
    traj_other = torch.stack([item[1] for item in traj])
    traj_list = [(traj_state, traj_other)]
    # fix: ``reward_scale`` and ``gamma`` were free (undefined) names here and
    # raised NameError at runtime; use the agent's own hyper-parameters.
    # NOTE(review): assumes AgentBase stores ``reward_scale``/``gamma`` -- confirm.
    return self.convert_trajectory(traj_list, self.reward_scale, self.gamma)
Collect trajectories through the actor-environment interaction for a **single** environment instance. :param env[object]: the DRL environment instance. :param target_step[int]: the total step for the interaction. :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].
elegantrl/agent.py
explore_one_env
Yonv1943/ElegantRL
236
python
def explore_one_env(self, env, target_step) -> list: '\n Collect trajectories through the actor-environment interaction for a **single** environment instance.\n \n :param env[object]: the DRL environment instance.\n :param target_step[int]: the total step for the interaction.\n :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].\n ' traj = list() state = self.states[0] for _ in range(target_step): ten_state = torch.as_tensor(state, dtype=torch.float32) ten_action = self.select_actions(ten_state.unsqueeze(0))[0] action = ten_action.numpy() (next_s, reward, done, _) = env.step(action) ten_other = torch.empty((2 + 1)) ten_other[0] = reward ten_other[1] = done ten_other[2] = ten_action traj.append((ten_state, ten_other)) state = (env.reset() if done else next_s) self.states[0] = state traj_state = torch.stack([item[0] for item in traj]) traj_other = torch.stack([item[1] for item in traj]) traj_list = [(traj_state, traj_other)] return self.convert_trajectory(traj_list, reward_scale, gamma)
def explore_one_env(self, env, target_step) -> list: '\n Collect trajectories through the actor-environment interaction for a **single** environment instance.\n \n :param env[object]: the DRL environment instance.\n :param target_step[int]: the total step for the interaction.\n :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].\n ' traj = list() state = self.states[0] for _ in range(target_step): ten_state = torch.as_tensor(state, dtype=torch.float32) ten_action = self.select_actions(ten_state.unsqueeze(0))[0] action = ten_action.numpy() (next_s, reward, done, _) = env.step(action) ten_other = torch.empty((2 + 1)) ten_other[0] = reward ten_other[1] = done ten_other[2] = ten_action traj.append((ten_state, ten_other)) state = (env.reset() if done else next_s) self.states[0] = state traj_state = torch.stack([item[0] for item in traj]) traj_other = torch.stack([item[1] for item in traj]) traj_list = [(traj_state, traj_other)] return self.convert_trajectory(traj_list, reward_scale, gamma)<|docstring|>Collect trajectories through the actor-environment interaction for a **single** environment instance. :param env[object]: the DRL environment instance. :param target_step[int]: the total step for the interaction. :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].<|endoftext|>
170df484034259f430985946f5f6343f79ee59fa500e95b717e698c3b760cadb
def explore_vec_env(self, env, target_step) -> list:
    """Collect trajectories from a vectorized environment instance.

    :param env: vectorized environment whose ``step()`` returns batched tensors.
    :param target_step: number of environment steps to collect per sub-env.
    :return: the trajectory list produced by ``self.convert_trajectory``.
    """
    ten_states = self.states
    traj = []
    for _ in range(target_step):
        ten_actions = self.select_actions(ten_states)
        ten_next_states, ten_rewards, ten_dones = env.step(ten_actions)

        ten_others = torch.cat((ten_rewards.unsqueeze(0), ten_dones.unsqueeze(0), ten_actions.unsqueeze(0)))
        traj.append((ten_states, ten_others))
        ten_states = ten_next_states
    self.states = ten_states

    traj_state = torch.stack([item[0] for item in traj])
    traj_other = torch.stack([item[1] for item in traj])
    # fix: the slices were written as literal tuples ``[(:, env_i, :)]``, which is a
    # syntax error; use plain slice indexing over the environment axis.
    traj_list = [(traj_state[:, env_i, :], traj_other[:, env_i, :])
                 for env_i in range(len(self.states))]
    # fix: ``reward_scale``/``gamma`` were undefined free names (NameError);
    # use the agent's own hyper-parameters.
    # NOTE(review): assumes AgentBase stores ``reward_scale``/``gamma`` -- confirm.
    return self.convert_trajectory(traj_list, self.reward_scale, self.gamma)
Collect trajectories through the actor-environment interaction for a **vectorized** environment instance. :param env[object]: the DRL environment instance. :param target_step[int]: the total step for the interaction. :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].
elegantrl/agent.py
explore_vec_env
Yonv1943/ElegantRL
236
python
def explore_vec_env(self, env, target_step) -> list: '\n Collect trajectories through the actor-environment interaction for a **vectorized** environment instance.\n \n :param env[object]: the DRL environment instance.\n :param target_step[int]: the total step for the interaction.\n :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].\n ' ten_states = self.states traj = list() for _ in range(target_step): ten_actions = self.select_actions(ten_states) (ten_next_states, ten_rewards, ten_dones) = env.step(ten_actions) ten_others = torch.cat((ten_rewards.unsqueeze(0), ten_dones.unsqueeze(0), ten_actions.unsqueeze(0))) traj.append((ten_states, ten_others)) ten_states = ten_next_states self.states = ten_states traj_state = torch.stack([item[0] for item in traj]) traj_other = torch.stack([item[1] for item in traj]) traj_list = [(traj_state[(:, env_i, :)], traj_other[(:, env_i, :)]) for env_i in range(len(self.states))] return self.convert_trajectory(traj_list, reward_scale, gamma)
def explore_vec_env(self, env, target_step) -> list: '\n Collect trajectories through the actor-environment interaction for a **vectorized** environment instance.\n \n :param env[object]: the DRL environment instance.\n :param target_step[int]: the total step for the interaction.\n :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].\n ' ten_states = self.states traj = list() for _ in range(target_step): ten_actions = self.select_actions(ten_states) (ten_next_states, ten_rewards, ten_dones) = env.step(ten_actions) ten_others = torch.cat((ten_rewards.unsqueeze(0), ten_dones.unsqueeze(0), ten_actions.unsqueeze(0))) traj.append((ten_states, ten_others)) ten_states = ten_next_states self.states = ten_states traj_state = torch.stack([item[0] for item in traj]) traj_other = torch.stack([item[1] for item in traj]) traj_list = [(traj_state[(:, env_i, :)], traj_other[(:, env_i, :)]) for env_i in range(len(self.states))] return self.convert_trajectory(traj_list, reward_scale, gamma)<|docstring|>Collect trajectories through the actor-environment interaction for a **vectorized** environment instance. :param env[object]: the DRL environment instance. :param target_step[int]: the total step for the interaction. :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].<|endoftext|>
5da4d5efb5eee6940032733407140875f17c3c75548b9874793159d2989188c2
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple:
    """Update the Q-network by sampling mini-batches from the replay buffer.

    :param buffer: ReplayBuffer instance holding collected transitions.
    :param batch_size: mini-batch size for each SGD step.
    :param repeat_times: average re-use count of each stored transition.
    :param soft_update_tau: Polyak-averaging coefficient for the target net.
    :return: (critic objective, mean predicted Q value) of the last step.
    """
    buffer.update_now_len()
    obj_critic = q_value = None
    num_updates = int(buffer.now_len / batch_size * repeat_times)
    for _ in range(num_updates):
        obj_critic, q_value = self.get_obj_critic(buffer, batch_size)
        self.optim_update(self.cri_optim, obj_critic, self.cri.parameters())
        self.soft_update(self.cri_target, self.cri, soft_update_tau)
    return obj_critic.item(), q_value.mean().item()
Update the neural networks by sampling batch data from ``ReplayBuffer``. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :param repeat_times[float]: the re-using times of each trajectory. :param soft_update_tau[float]: the soft update parameter. :return: a tuple of the log information.
elegantrl/agent.py
update_net
Yonv1943/ElegantRL
236
python
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple: '\n Update the neural networks by sampling batch data from ``ReplayBuffer``.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :param repeat_times[float]: the re-using times of each trajectory.\n :param soft_update_tau[float]: the soft update parameter.\n :return: a tuple of the log information.\n ' buffer.update_now_len() obj_critic = q_value = None for _ in range(int(((buffer.now_len / batch_size) * repeat_times))): (obj_critic, q_value) = self.get_obj_critic(buffer, batch_size) self.optim_update(self.cri_optim, obj_critic, self.cri.parameters()) self.soft_update(self.cri_target, self.cri, soft_update_tau) return (obj_critic.item(), q_value.mean().item())
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple: '\n Update the neural networks by sampling batch data from ``ReplayBuffer``.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :param repeat_times[float]: the re-using times of each trajectory.\n :param soft_update_tau[float]: the soft update parameter.\n :return: a tuple of the log information.\n ' buffer.update_now_len() obj_critic = q_value = None for _ in range(int(((buffer.now_len / batch_size) * repeat_times))): (obj_critic, q_value) = self.get_obj_critic(buffer, batch_size) self.optim_update(self.cri_optim, obj_critic, self.cri.parameters()) self.soft_update(self.cri_target, self.cri, soft_update_tau) return (obj_critic.item(), q_value.mean().item())<|docstring|>Update the neural networks by sampling batch data from ``ReplayBuffer``. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :param repeat_times[float]: the re-using times of each trajectory. :param soft_update_tau[float]: the soft update parameter. :return: a tuple of the log information.<|endoftext|>
65c7f228e224e9fbaa669b9c7d066fb9fb04cc4bd163401e2c0fbf06bdeaab69
def get_obj_critic_raw(self, buffer, batch_size):
    """Compute the Q-learning critic loss on a uniformly sampled mini-batch.

    :param buffer: ReplayBuffer to sample transitions from.
    :param batch_size: number of transitions per mini-batch.
    :return: (critic loss, predicted Q values of the taken actions).
    """
    with torch.no_grad():
        reward, mask, action, state, next_s = buffer.sample_batch(batch_size)
        # Bellman target: r + mask * max_a' Q_target(s', a'); ``mask`` folds gamma and done
        max_next_q = self.cri_target(next_s).max(dim=1, keepdim=True)[0]
        q_label = reward + mask * max_next_q
    q_value = self.cri(state).gather(1, action.long())
    return self.criterion(q_value, q_label), q_value
Calculate the loss of the network and predict Q values with **uniform sampling**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.
elegantrl/agent.py
get_obj_critic_raw
Yonv1943/ElegantRL
236
python
def get_obj_critic_raw(self, buffer, batch_size): '\n Calculate the loss of the network and predict Q values with **uniform sampling**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s) = buffer.sample_batch(batch_size) next_q = self.cri_target(next_s).max(dim=1, keepdim=True)[0] q_label = (reward + (mask * next_q)) q_value = self.cri(state).gather(1, action.long()) obj_critic = self.criterion(q_value, q_label) return (obj_critic, q_value)
def get_obj_critic_raw(self, buffer, batch_size): '\n Calculate the loss of the network and predict Q values with **uniform sampling**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s) = buffer.sample_batch(batch_size) next_q = self.cri_target(next_s).max(dim=1, keepdim=True)[0] q_label = (reward + (mask * next_q)) q_value = self.cri(state).gather(1, action.long()) obj_critic = self.criterion(q_value, q_label) return (obj_critic, q_value)<|docstring|>Calculate the loss of the network and predict Q values with **uniform sampling**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.<|endoftext|>
cb37364e7eae150cff7c762caf10e5547f601db7077f61cc55ab5d9377c3bd68
def get_obj_critic_per(self, buffer, batch_size):
    """Critic loss with Prioritized Experience Replay importance weights.

    :param buffer: ReplayBuffer supporting weighted sampling and priority updates.
    :param batch_size: number of transitions per mini-batch.
    :return: (weighted critic loss, predicted Q values of the taken actions).
    """
    with torch.no_grad():
        reward, mask, action, state, next_s, is_weights = buffer.sample_batch(batch_size)
        q_label = reward + mask * self.cri_target(next_s).max(dim=1, keepdim=True)[0]
    q_value = self.cri(state).gather(1, action.long())
    td_error = self.criterion(q_value, q_label)  # per-sample (reduction='none')
    obj_critic = (td_error * is_weights).mean()
    buffer.td_error_update(td_error.detach())  # refresh the sampling priorities
    return obj_critic, q_value
Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.
elegantrl/agent.py
get_obj_critic_per
Yonv1943/ElegantRL
236
python
def get_obj_critic_per(self, buffer, batch_size): '\n Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s, is_weights) = buffer.sample_batch(batch_size) next_q = self.cri_target(next_s).max(dim=1, keepdim=True)[0] q_label = (reward + (mask * next_q)) q_value = self.cri(state).gather(1, action.long()) td_error = self.criterion(q_value, q_label) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return (obj_critic, q_value)
def get_obj_critic_per(self, buffer, batch_size): '\n Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s, is_weights) = buffer.sample_batch(batch_size) next_q = self.cri_target(next_s).max(dim=1, keepdim=True)[0] q_label = (reward + (mask * next_q)) q_value = self.cri(state).gather(1, action.long()) td_error = self.criterion(q_value, q_label) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return (obj_critic, q_value)<|docstring|>Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.<|endoftext|>
fd71c524d8fba8b90f8e77c24e9a95c55ae9a2d1aedd596219a51facb25e7694
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0):
    """Explicitly build the twin-Q networks and optimizers after construction.

    Called instead of doing the work in ``__init__`` so that worker
    processes can create their torch objects inside their own process.

    :param if_per_or_gae: ``True`` enables Prioritized Experience Replay,
        which needs per-sample losses to refresh sampling priorities.
    """
    # pick the twin-critic class before the parent builds it
    self.ClassCri = QNetTwinDuel if self.if_use_dueling else QNetTwin
    AgentDQN.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id)

    reduction = 'none' if if_per_or_gae else 'mean'  # PER keeps per-sample errors
    self.criterion = torch.nn.SmoothL1Loss(reduction=reduction)
    self.get_obj_critic = self.get_obj_critic_per if if_per_or_gae else self.get_obj_critic_raw
Explicit call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.
elegantrl/agent.py
init
Yonv1943/ElegantRL
236
python
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n \n ' self.ClassCri = (QNetTwinDuel if self.if_use_dueling else QNetTwin) AgentDQN.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) if if_per_or_gae: self.criterion = torch.nn.SmoothL1Loss(reduction='none') self.get_obj_critic = self.get_obj_critic_per else: self.criterion = torch.nn.SmoothL1Loss(reduction='mean') self.get_obj_critic = self.get_obj_critic_raw
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n \n ' self.ClassCri = (QNetTwinDuel if self.if_use_dueling else QNetTwin) AgentDQN.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) if if_per_or_gae: self.criterion = torch.nn.SmoothL1Loss(reduction='none') self.get_obj_critic = self.get_obj_critic_per else: self.criterion = torch.nn.SmoothL1Loss(reduction='mean') self.get_obj_critic = self.get_obj_critic_raw<|docstring|>Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.<|endoftext|>
05aa86f6cf22ea3db83dab7cc3b54390d81b5eeaf7bffb9d1d5137bed252df56
def select_actions(self, state: torch.Tensor) -> torch.Tensor:
    """Select discrete actions, sampling from the softmax of Q values to explore.

    :param state: tensor of shape (batch_size, state_dim).
    :return: CPU tensor of action indices, shape (batch_size,).
    """
    q_values = self.act(state.to(self.device))
    if rd.rand() < self.explore_rate:
        # explore: sample each action with probability proportional to softmax(Q)
        probabilities = self.soft_max(q_values)
        a_int = torch.multinomial(probabilities, num_samples=1, replacement=True)[:, 0]
    else:
        a_int = q_values.argmax(dim=1)
    return a_int.detach().cpu()
Select discrete actions given an array of states.

.. note::
    Using softmax to random select actions with proportional probabilities for randomness.

:param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).
:return: an array of discrete action indices in a shape (batch_size, ), each in range(0, action_dim).
elegantrl/agent.py
select_actions
Yonv1943/ElegantRL
236
python
def select_actions(self, state: torch.Tensor) -> torch.Tensor: '\n Select discrete actions given an array of states.\n \n .. note::\n Using softmax to random select actions with proportional probabilities for randomness.\n \n :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).\n :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).\n ' action = self.act(state.to(self.device)) if (rd.rand() < self.explore_rate): a_prob = self.soft_max(action) a_int = torch.multinomial(a_prob, num_samples=1, replacement=True)[(:, 0)] else: a_int = action.argmax(dim=1) return a_int.detach().cpu()
def select_actions(self, state: torch.Tensor) -> torch.Tensor: '\n Select discrete actions given an array of states.\n \n .. note::\n Using softmax to random select actions with proportional probabilities for randomness.\n \n :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).\n :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).\n ' action = self.act(state.to(self.device)) if (rd.rand() < self.explore_rate): a_prob = self.soft_max(action) a_int = torch.multinomial(a_prob, num_samples=1, replacement=True)[(:, 0)] else: a_int = action.argmax(dim=1) return a_int.detach().cpu()<|docstring|>Select discrete actions given an array of states. .. note:: Using softmax to random select actions with proportional probabilities for randomness. :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ). :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).<|endoftext|>
9657c38c8038b8969fbb03b665866f0420531daa7aabedd14dbbee2ffc1068c6
def get_obj_critic_raw(self, buffer, batch_size) -> (torch.Tensor, torch.Tensor):
    """Twin-critic (Double-DQN style) loss on a uniformly sampled mini-batch.

    :param buffer: ReplayBuffer to sample transitions from.
    :param batch_size: number of transitions per mini-batch.
    :return: (summed twin critic loss, Q values of the first head).
    """
    with torch.no_grad():
        reward, mask, action, state, next_s = buffer.sample_batch(batch_size)
        # clipped double-Q target: elementwise min of both target heads, then greedy max
        next_q = torch.min(*self.cri_target.get_q1_q2(next_s)).max(dim=1, keepdim=True)[0]
        q_label = reward + mask * next_q
    # NOTE(review): the twin heads are read through ``self.act`` here; in these DQN
    # agents the actor typically aliases the critic -- confirm against the constructor.
    q1, q2 = (qs.gather(1, action.long()) for qs in self.act.get_q1_q2(state))
    obj_critic = self.criterion(q1, q_label) + self.criterion(q2, q_label)
    return obj_critic, q1
Calculate the loss of the network and predict Q values with **uniform sampling**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.
elegantrl/agent.py
get_obj_critic_raw
Yonv1943/ElegantRL
236
python
def get_obj_critic_raw(self, buffer, batch_size) -> (torch.Tensor, torch.Tensor): '\n Calculate the loss of the network and predict Q values with **uniform sampling**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s) = buffer.sample_batch(batch_size) next_q = torch.min(*self.cri_target.get_q1_q2(next_s)).max(dim=1, keepdim=True)[0] q_label = (reward + (mask * next_q)) (q1, q2) = [qs.gather(1, action.long()) for qs in self.act.get_q1_q2(state)] obj_critic = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) return (obj_critic, q1)
def get_obj_critic_raw(self, buffer, batch_size) -> (torch.Tensor, torch.Tensor): '\n Calculate the loss of the network and predict Q values with **uniform sampling**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s) = buffer.sample_batch(batch_size) next_q = torch.min(*self.cri_target.get_q1_q2(next_s)).max(dim=1, keepdim=True)[0] q_label = (reward + (mask * next_q)) (q1, q2) = [qs.gather(1, action.long()) for qs in self.act.get_q1_q2(state)] obj_critic = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) return (obj_critic, q1)<|docstring|>Calculate the loss of the network and predict Q values with **uniform sampling**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.<|endoftext|>
9d54226cdd31998470a568a874f9ef1d444005a1fef25c7cbeaf485d5102acbc
def get_obj_critic_per(self, buffer, batch_size): '\n Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s, is_weights) = buffer.sample_batch(batch_size) next_q = torch.min(*self.cri_target.get_q1_q2(next_s)).max(dim=1, keepdim=True)[0] q_label = (reward + (mask * next_q)) (q1, q2) = [qs.gather(1, action.long()) for qs in self.act.get_q1_q2(state)] td_error = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return (obj_critic, q1)
Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.
elegantrl/agent.py
get_obj_critic_per
Yonv1943/ElegantRL
236
python
def get_obj_critic_per(self, buffer, batch_size): '\n Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s, is_weights) = buffer.sample_batch(batch_size) next_q = torch.min(*self.cri_target.get_q1_q2(next_s)).max(dim=1, keepdim=True)[0] q_label = (reward + (mask * next_q)) (q1, q2) = [qs.gather(1, action.long()) for qs in self.act.get_q1_q2(state)] td_error = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return (obj_critic, q1)
def get_obj_critic_per(self, buffer, batch_size): '\n Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s, is_weights) = buffer.sample_batch(batch_size) next_q = torch.min(*self.cri_target.get_q1_q2(next_s)).max(dim=1, keepdim=True)[0] q_label = (reward + (mask * next_q)) (q1, q2) = [qs.gather(1, action.long()) for qs in self.act.get_q1_q2(state)] td_error = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return (obj_critic, q1)<|docstring|>Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.<|endoftext|>
a0157b5c569cb4377fd0f3bbc4f2d0c039aa5d3846c9a29f5f396f77d64ed18a
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing. \n ' AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) self.ou_noise = OrnsteinUhlenbeckNoise(size=action_dim, sigma=self.explore_noise) if if_per_or_gae: self.criterion = torch.nn.SmoothL1Loss(reduction=('none' if if_per_or_gae else 'mean')) self.get_obj_critic = self.get_obj_critic_per else: self.criterion = torch.nn.SmoothL1Loss(reduction=('none' if if_per_or_gae else 'mean')) self.get_obj_critic = self.get_obj_critic_raw
Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.
elegantrl/agent.py
init
Yonv1943/ElegantRL
236
python
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n \n ' AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) self.ou_noise = OrnsteinUhlenbeckNoise(size=action_dim, sigma=self.explore_noise) if if_per_or_gae: self.criterion = torch.nn.SmoothL1Loss(reduction=('none' if if_per_or_gae else 'mean')) self.get_obj_critic = self.get_obj_critic_per else: self.criterion = torch.nn.SmoothL1Loss(reduction=('none' if if_per_or_gae else 'mean')) self.get_obj_critic = self.get_obj_critic_raw
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n \n ' AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) self.ou_noise = OrnsteinUhlenbeckNoise(size=action_dim, sigma=self.explore_noise) if if_per_or_gae: self.criterion = torch.nn.SmoothL1Loss(reduction=('none' if if_per_or_gae else 'mean')) self.get_obj_critic = self.get_obj_critic_per else: self.criterion = torch.nn.SmoothL1Loss(reduction=('none' if if_per_or_gae else 'mean')) self.get_obj_critic = self.get_obj_critic_raw<|docstring|>Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.<|endoftext|>
bb499647ba19e90f0ce2ed07a7f342e9005e8c61e2e720417d80a278f238aef6
def select_actions(self, state: torch.Tensor) -> torch.Tensor: '\n Select actions given an array of states.\n \n .. note::\n Using ϵ-greedy with Ornstein–Uhlenbeck noise to add noise to actions for randomness.\n \n :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).\n :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).\n ' action = self.act(state.to(self.device)) if (rd.rand() < self.explore_rate): ou_noise = torch.as_tensor(self.ou_noise(), dtype=torch.float32, device=self.device).unsqueeze(0) action = (action + ou_noise).clamp((- 1), 1) return action.detach().cpu()
Select actions given an array of states. .. note:: Using ϵ-greedy with Ornstein–Uhlenbeck noise to add noise to actions for randomness. :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ). :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).
elegantrl/agent.py
select_actions
Yonv1943/ElegantRL
236
python
def select_actions(self, state: torch.Tensor) -> torch.Tensor: '\n Select actions given an array of states.\n \n .. note::\n Using ϵ-greedy with Ornstein–Uhlenbeck noise to add noise to actions for randomness.\n \n :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).\n :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).\n ' action = self.act(state.to(self.device)) if (rd.rand() < self.explore_rate): ou_noise = torch.as_tensor(self.ou_noise(), dtype=torch.float32, device=self.device).unsqueeze(0) action = (action + ou_noise).clamp((- 1), 1) return action.detach().cpu()
def select_actions(self, state: torch.Tensor) -> torch.Tensor: '\n Select actions given an array of states.\n \n .. note::\n Using ϵ-greedy with Ornstein–Uhlenbeck noise to add noise to actions for randomness.\n \n :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).\n :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).\n ' action = self.act(state.to(self.device)) if (rd.rand() < self.explore_rate): ou_noise = torch.as_tensor(self.ou_noise(), dtype=torch.float32, device=self.device).unsqueeze(0) action = (action + ou_noise).clamp((- 1), 1) return action.detach().cpu()<|docstring|>Select actions given an array of states. .. note:: Using ϵ-greedy with Ornstein–Uhlenbeck noise to add noise to actions for randomness. :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ). :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).<|endoftext|>
c47fd2599c3622c40ea62914bb150df098aca05b96fa934e7dffb4383c2a78f0
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple: '\n Update the neural networks by sampling batch data from ``ReplayBuffer``.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :param repeat_times[float]: the re-using times of each trajectory.\n :param soft_update_tau[float]: the soft update parameter.\n :return: a tuple of the log information.\n ' buffer.update_now_len() obj_critic = None obj_actor = None for _ in range(int(((buffer.now_len / batch_size) * repeat_times))): (obj_critic, state) = self.get_obj_critic(buffer, batch_size) self.optim_update(self.cri_optim, obj_critic, self.cri.parameters()) self.soft_update(self.cri_target, self.cri, soft_update_tau) action_pg = self.act(state) obj_actor = (- self.cri(state, action_pg).mean()) self.optim_update(self.act_optim, obj_actor, self.act.parameters()) self.soft_update(self.act_target, self.act, soft_update_tau) return (obj_critic.item(), obj_actor.item())
Update the neural networks by sampling batch data from ``ReplayBuffer``. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :param repeat_times[float]: the re-using times of each trajectory. :param soft_update_tau[float]: the soft update parameter. :return: a tuple of the log information.
elegantrl/agent.py
update_net
Yonv1943/ElegantRL
236
python
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple: '\n Update the neural networks by sampling batch data from ``ReplayBuffer``.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :param repeat_times[float]: the re-using times of each trajectory.\n :param soft_update_tau[float]: the soft update parameter.\n :return: a tuple of the log information.\n ' buffer.update_now_len() obj_critic = None obj_actor = None for _ in range(int(((buffer.now_len / batch_size) * repeat_times))): (obj_critic, state) = self.get_obj_critic(buffer, batch_size) self.optim_update(self.cri_optim, obj_critic, self.cri.parameters()) self.soft_update(self.cri_target, self.cri, soft_update_tau) action_pg = self.act(state) obj_actor = (- self.cri(state, action_pg).mean()) self.optim_update(self.act_optim, obj_actor, self.act.parameters()) self.soft_update(self.act_target, self.act, soft_update_tau) return (obj_critic.item(), obj_actor.item())
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple: '\n Update the neural networks by sampling batch data from ``ReplayBuffer``.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :param repeat_times[float]: the re-using times of each trajectory.\n :param soft_update_tau[float]: the soft update parameter.\n :return: a tuple of the log information.\n ' buffer.update_now_len() obj_critic = None obj_actor = None for _ in range(int(((buffer.now_len / batch_size) * repeat_times))): (obj_critic, state) = self.get_obj_critic(buffer, batch_size) self.optim_update(self.cri_optim, obj_critic, self.cri.parameters()) self.soft_update(self.cri_target, self.cri, soft_update_tau) action_pg = self.act(state) obj_actor = (- self.cri(state, action_pg).mean()) self.optim_update(self.act_optim, obj_actor, self.act.parameters()) self.soft_update(self.act_target, self.act, soft_update_tau) return (obj_critic.item(), obj_actor.item())<|docstring|>Update the neural networks by sampling batch data from ``ReplayBuffer``. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :param repeat_times[float]: the re-using times of each trajectory. :param soft_update_tau[float]: the soft update parameter. :return: a tuple of the log information.<|endoftext|>
86374033ab03916b2a31c07dbe3326b5e77c7755701c9ad960486a2723ffeb27
def get_obj_critic_raw(self, buffer, batch_size): '\n Calculate the loss of networks with **uniform sampling**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s) = buffer.sample_batch(batch_size) next_q = self.cri_target(next_s, self.act_target(next_s)) q_label = (reward + (mask * next_q)) q_value = self.cri(state, action) obj_critic = self.criterion(q_value, q_label) return (obj_critic, state)
Calculate the loss of networks with **uniform sampling**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.
elegantrl/agent.py
get_obj_critic_raw
Yonv1943/ElegantRL
236
python
def get_obj_critic_raw(self, buffer, batch_size): '\n Calculate the loss of networks with **uniform sampling**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s) = buffer.sample_batch(batch_size) next_q = self.cri_target(next_s, self.act_target(next_s)) q_label = (reward + (mask * next_q)) q_value = self.cri(state, action) obj_critic = self.criterion(q_value, q_label) return (obj_critic, state)
def get_obj_critic_raw(self, buffer, batch_size): '\n Calculate the loss of networks with **uniform sampling**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s) = buffer.sample_batch(batch_size) next_q = self.cri_target(next_s, self.act_target(next_s)) q_label = (reward + (mask * next_q)) q_value = self.cri(state, action) obj_critic = self.criterion(q_value, q_label) return (obj_critic, state)<|docstring|>Calculate the loss of networks with **uniform sampling**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.<|endoftext|>
fef172c1a0929565f397cefe885ab2277dd0c6f7344de090c26bd8f9ff1f8da3
def get_obj_critic_per(self, buffer, batch_size): '\n Calculate the loss of the network with **Prioritized Experience Replay (PER)**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s, is_weights) = buffer.sample_batch(batch_size) next_q = self.cri_target(next_s, self.act_target(next_s)) q_label = (reward + (mask * next_q)) q_value = self.cri(state, action) td_error = self.criterion(q_value, q_label) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return (obj_critic, state)
Calculate the loss of the network with **Prioritized Experience Replay (PER)**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.
elegantrl/agent.py
get_obj_critic_per
Yonv1943/ElegantRL
236
python
def get_obj_critic_per(self, buffer, batch_size): '\n Calculate the loss of the network with **Prioritized Experience Replay (PER)**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s, is_weights) = buffer.sample_batch(batch_size) next_q = self.cri_target(next_s, self.act_target(next_s)) q_label = (reward + (mask * next_q)) q_value = self.cri(state, action) td_error = self.criterion(q_value, q_label) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return (obj_critic, state)
def get_obj_critic_per(self, buffer, batch_size): '\n Calculate the loss of the network with **Prioritized Experience Replay (PER)**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s, is_weights) = buffer.sample_batch(batch_size) next_q = self.cri_target(next_s, self.act_target(next_s)) q_label = (reward + (mask * next_q)) q_value = self.cri(state, action) td_error = self.criterion(q_value, q_label) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return (obj_critic, state)<|docstring|>Calculate the loss of the network with **Prioritized Experience Replay (PER)**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.<|endoftext|>
f90d5bbd810fe07d037756f95dab7c09050d180e799c8ab1733cf68a25ce8bd4
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing. \n ' AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) if if_per_or_gae: self.criterion = torch.nn.SmoothL1Loss(reduction='none') self.get_obj_critic = self.get_obj_critic_per else: self.criterion = torch.nn.SmoothL1Loss(reduction='mean') self.get_obj_critic = self.get_obj_critic_raw
Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.
elegantrl/agent.py
init
Yonv1943/ElegantRL
236
python
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n \n ' AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) if if_per_or_gae: self.criterion = torch.nn.SmoothL1Loss(reduction='none') self.get_obj_critic = self.get_obj_critic_per else: self.criterion = torch.nn.SmoothL1Loss(reduction='mean') self.get_obj_critic = self.get_obj_critic_raw
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n \n ' AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) if if_per_or_gae: self.criterion = torch.nn.SmoothL1Loss(reduction='none') self.get_obj_critic = self.get_obj_critic_per else: self.criterion = torch.nn.SmoothL1Loss(reduction='mean') self.get_obj_critic = self.get_obj_critic_raw<|docstring|>Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.<|endoftext|>
a92f5d56c981d760ecf34b41cd2bf4911af316d748717cd5f4d5d6aa2c6ffb0f
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple: '\n Update the neural networks by sampling batch data from ``ReplayBuffer``.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :param repeat_times[float]: the re-using times of each trajectory.\n :param soft_update_tau[float]: the soft update parameter.\n :return: a tuple of the log information.\n ' buffer.update_now_len() obj_critic = None obj_actor = None for update_c in range(int(((buffer.now_len / batch_size) * repeat_times))): (obj_critic, state) = self.get_obj_critic(buffer, batch_size) self.optim_update(self.cri_optim, obj_critic, self.cri.parameters()) if ((update_c % self.update_freq) == 0): action_pg = self.act(state) obj_actor = (- self.cri_target(state, action_pg).mean()) self.optim_update(self.act_optim, obj_actor, self.act.parameters()) self.soft_update(self.cri_target, self.cri, soft_update_tau) self.soft_update(self.act_target, self.act, soft_update_tau) return ((obj_critic.item() / 2), obj_actor.item())
Update the neural networks by sampling batch data from ``ReplayBuffer``. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :param repeat_times[float]: the re-using times of each trajectory. :param soft_update_tau[float]: the soft update parameter. :return: a tuple of the log information.
elegantrl/agent.py
update_net
Yonv1943/ElegantRL
236
python
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple: '\n Update the neural networks by sampling batch data from ``ReplayBuffer``.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :param repeat_times[float]: the re-using times of each trajectory.\n :param soft_update_tau[float]: the soft update parameter.\n :return: a tuple of the log information.\n ' buffer.update_now_len() obj_critic = None obj_actor = None for update_c in range(int(((buffer.now_len / batch_size) * repeat_times))): (obj_critic, state) = self.get_obj_critic(buffer, batch_size) self.optim_update(self.cri_optim, obj_critic, self.cri.parameters()) if ((update_c % self.update_freq) == 0): action_pg = self.act(state) obj_actor = (- self.cri_target(state, action_pg).mean()) self.optim_update(self.act_optim, obj_actor, self.act.parameters()) self.soft_update(self.cri_target, self.cri, soft_update_tau) self.soft_update(self.act_target, self.act, soft_update_tau) return ((obj_critic.item() / 2), obj_actor.item())
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau) -> tuple: '\n Update the neural networks by sampling batch data from ``ReplayBuffer``.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :param repeat_times[float]: the re-using times of each trajectory.\n :param soft_update_tau[float]: the soft update parameter.\n :return: a tuple of the log information.\n ' buffer.update_now_len() obj_critic = None obj_actor = None for update_c in range(int(((buffer.now_len / batch_size) * repeat_times))): (obj_critic, state) = self.get_obj_critic(buffer, batch_size) self.optim_update(self.cri_optim, obj_critic, self.cri.parameters()) if ((update_c % self.update_freq) == 0): action_pg = self.act(state) obj_actor = (- self.cri_target(state, action_pg).mean()) self.optim_update(self.act_optim, obj_actor, self.act.parameters()) self.soft_update(self.cri_target, self.cri, soft_update_tau) self.soft_update(self.act_target, self.act, soft_update_tau) return ((obj_critic.item() / 2), obj_actor.item())<|docstring|>Update the neural networks by sampling batch data from ``ReplayBuffer``. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :param repeat_times[float]: the re-using times of each trajectory. :param soft_update_tau[float]: the soft update parameter. :return: a tuple of the log information.<|endoftext|>
a8956cf4f73b535f8e2c562c77ee93f3b07af3a3c5fbc9e96effa3dc677459cb
def get_obj_critic_raw(self, buffer, batch_size): '\n Calculate the loss of networks with **uniform sampling**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s) = buffer.sample_batch(batch_size) next_a = self.act_target.get_action(next_s, self.policy_noise) next_q = torch.min(*self.cri_target.get_q1_q2(next_s, next_a)) q_label = (reward + (mask * next_q)) (q1, q2) = self.cri.get_q1_q2(state, action) obj_critic = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) return (obj_critic, state)
Calculate the loss of networks with **uniform sampling**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.
elegantrl/agent.py
get_obj_critic_raw
Yonv1943/ElegantRL
236
python
def get_obj_critic_raw(self, buffer, batch_size): '\n Calculate the loss of networks with **uniform sampling**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s) = buffer.sample_batch(batch_size) next_a = self.act_target.get_action(next_s, self.policy_noise) next_q = torch.min(*self.cri_target.get_q1_q2(next_s, next_a)) q_label = (reward + (mask * next_q)) (q1, q2) = self.cri.get_q1_q2(state, action) obj_critic = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) return (obj_critic, state)
def get_obj_critic_raw(self, buffer, batch_size): '\n Calculate the loss of networks with **uniform sampling**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s) = buffer.sample_batch(batch_size) next_a = self.act_target.get_action(next_s, self.policy_noise) next_q = torch.min(*self.cri_target.get_q1_q2(next_s, next_a)) q_label = (reward + (mask * next_q)) (q1, q2) = self.cri.get_q1_q2(state, action) obj_critic = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) return (obj_critic, state)<|docstring|>Calculate the loss of networks with **uniform sampling**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.<|endoftext|>
dbd08729ebc5b709f2e40f29b02bea95fb4d67e893f82532e26ea5032b201a95
def get_obj_critic_per(self, buffer, batch_size): '\n Calculate the loss of the network with **Prioritized Experience Replay (PER)**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s, is_weights) = buffer.sample_batch(batch_size) next_a = self.act_target.get_action(next_s, self.policy_noise) next_q = torch.min(*self.cri_target.get_q1_q2(next_s, next_a)) q_label = (reward + (mask * next_q)) (q1, q2) = self.cri.get_q1_q2(state, action) td_error = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return (obj_critic, state)
Calculate the loss of the network with **Prioritized Experience Replay (PER)**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.
elegantrl/agent.py
get_obj_critic_per
Yonv1943/ElegantRL
236
python
def get_obj_critic_per(self, buffer, batch_size): '\n Calculate the loss of the network with **Prioritized Experience Replay (PER)**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s, is_weights) = buffer.sample_batch(batch_size) next_a = self.act_target.get_action(next_s, self.policy_noise) next_q = torch.min(*self.cri_target.get_q1_q2(next_s, next_a)) q_label = (reward + (mask * next_q)) (q1, q2) = self.cri.get_q1_q2(state, action) td_error = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return (obj_critic, state)
def get_obj_critic_per(self, buffer, batch_size): '\n Calculate the loss of the network with **Prioritized Experience Replay (PER)**.\n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :return: the loss of the network and Q values.\n ' with torch.no_grad(): (reward, mask, action, state, next_s, is_weights) = buffer.sample_batch(batch_size) next_a = self.act_target.get_action(next_s, self.policy_noise) next_q = torch.min(*self.cri_target.get_q1_q2(next_s, next_a)) q_label = (reward + (mask * next_q)) (q1, q2) = self.cri.get_q1_q2(state, action) td_error = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) obj_critic = (td_error * is_weights).mean() buffer.td_error_update(td_error.detach()) return (obj_critic, state)<|docstring|>Calculate the loss of the network with **Prioritized Experience Replay (PER)**. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :return: the loss of the network and Q values.<|endoftext|>
4bb55d780d66bf93465d107b6f9dc068520bd6d95bd346e52fa59872e8abeb62
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing. \n ' AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) self.traj_list = [list() for _ in range(env_num)] self.env_num = env_num if if_per_or_gae: self.get_reward_sum = self.get_reward_sum_gae else: self.get_reward_sum = self.get_reward_sum_raw if (env_num == 1): self.explore_env = self.explore_one_env else: self.explore_env = self.explore_vec_env
Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.
elegantrl/agent.py
init
Yonv1943/ElegantRL
236
python
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n \n ' AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) self.traj_list = [list() for _ in range(env_num)] self.env_num = env_num if if_per_or_gae: self.get_reward_sum = self.get_reward_sum_gae else: self.get_reward_sum = self.get_reward_sum_raw if (env_num == 1): self.explore_env = self.explore_one_env else: self.explore_env = self.explore_vec_env
def init(self, net_dim=256, state_dim=8, action_dim=2, learning_rate=0.0001, if_per_or_gae=False, env_num=1, gpu_id=0): '\n \n ' AgentBase.init(self, net_dim, state_dim, action_dim, learning_rate, if_per_or_gae, env_num, gpu_id) self.traj_list = [list() for _ in range(env_num)] self.env_num = env_num if if_per_or_gae: self.get_reward_sum = self.get_reward_sum_gae else: self.get_reward_sum = self.get_reward_sum_raw if (env_num == 1): self.explore_env = self.explore_one_env else: self.explore_env = self.explore_vec_env<|docstring|>Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.<|endoftext|>
585b237cae8d4da0bf69e9f7d0213f5f081d26ecb0cb781aff8dffbf402fc260
def select_actions(self, state: torch.Tensor) -> tuple: '\n Select actions given an array of states (continuous action space).\n \n :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).\n :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).\n ' state = state.to(self.device) (action, noise) = self.act.get_action(state) return (action.detach().cpu(), noise.detach().cpu())
Select actions given an array of states (continuous action space). :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ). :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).
elegantrl/agent.py
select_actions
Yonv1943/ElegantRL
236
python
def select_actions(self, state: torch.Tensor) -> tuple: '\n Select actions given an array of states (continuous action space).\n \n :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).\n :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).\n ' state = state.to(self.device) (action, noise) = self.act.get_action(state) return (action.detach().cpu(), noise.detach().cpu())
def select_actions(self, state: torch.Tensor) -> tuple: '\n Select actions given an array of states (continuous action space).\n \n :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ).\n :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).\n ' state = state.to(self.device) (action, noise) = self.act.get_action(state) return (action.detach().cpu(), noise.detach().cpu())<|docstring|>Select actions given an array of states (continuous action space). :param states[np.ndarray]: an array of states in a shape (batch_size, state_dim, ). :return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).<|endoftext|>
d0c020e72714afcfee5ccf277ae2620d140fd7ee9a3bf2d4a466620d8e885631
def explore_one_env(self, env, target_step, reward_scale, gamma): '\n Collect trajectories through the actor-environment interaction for a **single** environment instance.\n \n :param env[object]: the DRL environment instance.\n :param target_step[int]: the total step for the interaction.\n \n :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].\n ' state = self.states[0] last_done = 0 traj = list() for step_i in range(target_step): ten_states = torch.as_tensor(state, dtype=torch.float32).unsqueeze(0) (ten_actions, ten_noises) = self.select_actions(ten_states) action = ten_actions[0].numpy() (next_s, reward, done, _) = env.step(np.tanh(action)) traj.append((ten_states, reward, done, ten_actions, ten_noises)) if done: state = env.reset() last_done = step_i else: state = next_s self.states[0] = state traj_list = self.splice_trajectory([traj], [last_done]) return self.convert_trajectory(traj_list, reward_scale, gamma)
Collect trajectories through the actor-environment interaction for a **single** environment instance. :param env[object]: the DRL environment instance. :param target_step[int]: the total step for the interaction. :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].
elegantrl/agent.py
explore_one_env
Yonv1943/ElegantRL
236
python
def explore_one_env(self, env, target_step, reward_scale, gamma): '\n Collect trajectories through the actor-environment interaction for a **single** environment instance.\n \n :param env[object]: the DRL environment instance.\n :param target_step[int]: the total step for the interaction.\n \n :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].\n ' state = self.states[0] last_done = 0 traj = list() for step_i in range(target_step): ten_states = torch.as_tensor(state, dtype=torch.float32).unsqueeze(0) (ten_actions, ten_noises) = self.select_actions(ten_states) action = ten_actions[0].numpy() (next_s, reward, done, _) = env.step(np.tanh(action)) traj.append((ten_states, reward, done, ten_actions, ten_noises)) if done: state = env.reset() last_done = step_i else: state = next_s self.states[0] = state traj_list = self.splice_trajectory([traj], [last_done]) return self.convert_trajectory(traj_list, reward_scale, gamma)
def explore_one_env(self, env, target_step, reward_scale, gamma): '\n Collect trajectories through the actor-environment interaction for a **single** environment instance.\n \n :param env[object]: the DRL environment instance.\n :param target_step[int]: the total step for the interaction.\n \n :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].\n ' state = self.states[0] last_done = 0 traj = list() for step_i in range(target_step): ten_states = torch.as_tensor(state, dtype=torch.float32).unsqueeze(0) (ten_actions, ten_noises) = self.select_actions(ten_states) action = ten_actions[0].numpy() (next_s, reward, done, _) = env.step(np.tanh(action)) traj.append((ten_states, reward, done, ten_actions, ten_noises)) if done: state = env.reset() last_done = step_i else: state = next_s self.states[0] = state traj_list = self.splice_trajectory([traj], [last_done]) return self.convert_trajectory(traj_list, reward_scale, gamma)<|docstring|>Collect trajectories through the actor-environment interaction for a **single** environment instance. :param env[object]: the DRL environment instance. :param target_step[int]: the total step for the interaction. :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].<|endoftext|>
42fc0d2e7caaff3490f84d34635d5fff2f0411acddd28999a7b247a4aeb02840
def explore_vec_env(self, env, target_step, reward_scale, gamma): '\n Collect trajectories through the actor-environment interaction for a **vectorized** environment instance.\n \n :param env[object]: the DRL environment instance.\n :param target_step[int]: the total step for the interaction.\n :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].\n ' ten_states = self.states env_num = len(self.traj_list) traj_list = [list() for _ in range(env_num)] last_done_list = [0 for _ in range(env_num)] for step_i in range(target_step): (ten_actions, ten_noises) = self.select_actions(ten_states) (tem_next_states, ten_rewards, ten_dones) = env.step(ten_actions.tanh()) for env_i in range(env_num): traj_list[env_i].append((ten_states[env_i], ten_rewards[env_i], ten_dones[env_i], ten_actions[env_i], ten_noises[env_i])) if ten_dones[env_i]: last_done_list[env_i] = step_i ten_states = tem_next_states self.states = ten_states traj_list = self.splice_trajectory(traj_list, last_done_list) return self.convert_trajectory(traj_list, reward_scale, gamma)
Collect trajectories through the actor-environment interaction for a **vectorized** environment instance. :param env[object]: the DRL environment instance. :param target_step[int]: the total step for the interaction. :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].
elegantrl/agent.py
explore_vec_env
Yonv1943/ElegantRL
236
python
def explore_vec_env(self, env, target_step, reward_scale, gamma): '\n Collect trajectories through the actor-environment interaction for a **vectorized** environment instance.\n \n :param env[object]: the DRL environment instance.\n :param target_step[int]: the total step for the interaction.\n :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].\n ' ten_states = self.states env_num = len(self.traj_list) traj_list = [list() for _ in range(env_num)] last_done_list = [0 for _ in range(env_num)] for step_i in range(target_step): (ten_actions, ten_noises) = self.select_actions(ten_states) (tem_next_states, ten_rewards, ten_dones) = env.step(ten_actions.tanh()) for env_i in range(env_num): traj_list[env_i].append((ten_states[env_i], ten_rewards[env_i], ten_dones[env_i], ten_actions[env_i], ten_noises[env_i])) if ten_dones[env_i]: last_done_list[env_i] = step_i ten_states = tem_next_states self.states = ten_states traj_list = self.splice_trajectory(traj_list, last_done_list) return self.convert_trajectory(traj_list, reward_scale, gamma)
def explore_vec_env(self, env, target_step, reward_scale, gamma): '\n Collect trajectories through the actor-environment interaction for a **vectorized** environment instance.\n \n :param env[object]: the DRL environment instance.\n :param target_step[int]: the total step for the interaction.\n :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].\n ' ten_states = self.states env_num = len(self.traj_list) traj_list = [list() for _ in range(env_num)] last_done_list = [0 for _ in range(env_num)] for step_i in range(target_step): (ten_actions, ten_noises) = self.select_actions(ten_states) (tem_next_states, ten_rewards, ten_dones) = env.step(ten_actions.tanh()) for env_i in range(env_num): traj_list[env_i].append((ten_states[env_i], ten_rewards[env_i], ten_dones[env_i], ten_actions[env_i], ten_noises[env_i])) if ten_dones[env_i]: last_done_list[env_i] = step_i ten_states = tem_next_states self.states = ten_states traj_list = self.splice_trajectory(traj_list, last_done_list) return self.convert_trajectory(traj_list, reward_scale, gamma)<|docstring|>Collect trajectories through the actor-environment interaction for a **vectorized** environment instance. :param env[object]: the DRL environment instance. :param target_step[int]: the total step for the interaction. :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].<|endoftext|>
e17e5613ca63e2548515614ec6f24cdc75d53bc7586f7224e2867ecb8c701a7e
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau): '\n Update the neural networks by sampling batch data from ``ReplayBuffer``.\n \n \n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :param repeat_times[float]: the re-using times of each trajectory.\n :param soft_update_tau[float]: the soft update parameter.\n :return: a tuple of the log information.\n ' with torch.no_grad(): buf_len = buffer[0].shape[0] (buf_state, buf_reward, buf_mask, buf_action, buf_noise) = [ten.to(self.device) for ten in buffer] 'get buf_r_sum, buf_logprob' bs = (2 ** 10) buf_value = [self.cri_target(buf_state[i:(i + bs)]) for i in range(0, buf_len, bs)] buf_value = torch.cat(buf_value, dim=0) buf_logprob = self.act.get_old_logprob(buf_action, buf_noise) (buf_r_sum, buf_adv_v) = self.get_reward_sum(buf_len, buf_reward, buf_mask, buf_value) buf_adv_v = ((buf_adv_v - buf_adv_v.mean()) * (self.lambda_a_value / (buf_adv_v.std() + 1e-05))) del buf_noise, buffer[:] obj_critic = None obj_actor = None update_times = int(((buf_len / batch_size) * repeat_times)) for update_i in range(1, (update_times + 1)): indices = torch.randint(buf_len, size=(batch_size,), requires_grad=False, device=self.device) state = buf_state[indices] r_sum = buf_r_sum[indices] adv_v = buf_adv_v[indices] action = buf_action[indices] logprob = buf_logprob[indices] 'PPO: Surrogate objective of Trust Region' (new_logprob, obj_entropy) = self.act.get_logprob_entropy(state, action) ratio = (new_logprob - logprob.detach()).exp() surrogate1 = (adv_v * ratio) surrogate2 = (adv_v * ratio.clamp((1 - self.ratio_clip), (1 + self.ratio_clip))) obj_surrogate = (- torch.min(surrogate1, surrogate2).mean()) obj_actor = (obj_surrogate + (obj_entropy * self.lambda_entropy)) self.optim_update(self.act_optim, obj_actor, self.act.parameters()) value = self.cri(state).squeeze(1) obj_critic = 
(self.criterion(value, r_sum) / (r_sum.std() + 1e-06)) self.optim_update(self.cri_optim, obj_critic, self.cri.parameters()) (self.soft_update(self.cri_target, self.cri, soft_update_tau) if (self.cri_target is not self.cri) else None) a_std_log = getattr(self.act, 'a_std_log', torch.zeros(1)).mean() return (obj_critic.item(), obj_actor.item(), a_std_log.item())
Update the neural networks by sampling batch data from ``ReplayBuffer``. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :param repeat_times[float]: the re-using times of each trajectory. :param soft_update_tau[float]: the soft update parameter. :return: a tuple of the log information.
elegantrl/agent.py
update_net
Yonv1943/ElegantRL
236
python
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau): '\n Update the neural networks by sampling batch data from ``ReplayBuffer``.\n \n \n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :param repeat_times[float]: the re-using times of each trajectory.\n :param soft_update_tau[float]: the soft update parameter.\n :return: a tuple of the log information.\n ' with torch.no_grad(): buf_len = buffer[0].shape[0] (buf_state, buf_reward, buf_mask, buf_action, buf_noise) = [ten.to(self.device) for ten in buffer] 'get buf_r_sum, buf_logprob' bs = (2 ** 10) buf_value = [self.cri_target(buf_state[i:(i + bs)]) for i in range(0, buf_len, bs)] buf_value = torch.cat(buf_value, dim=0) buf_logprob = self.act.get_old_logprob(buf_action, buf_noise) (buf_r_sum, buf_adv_v) = self.get_reward_sum(buf_len, buf_reward, buf_mask, buf_value) buf_adv_v = ((buf_adv_v - buf_adv_v.mean()) * (self.lambda_a_value / (buf_adv_v.std() + 1e-05))) del buf_noise, buffer[:] obj_critic = None obj_actor = None update_times = int(((buf_len / batch_size) * repeat_times)) for update_i in range(1, (update_times + 1)): indices = torch.randint(buf_len, size=(batch_size,), requires_grad=False, device=self.device) state = buf_state[indices] r_sum = buf_r_sum[indices] adv_v = buf_adv_v[indices] action = buf_action[indices] logprob = buf_logprob[indices] 'PPO: Surrogate objective of Trust Region' (new_logprob, obj_entropy) = self.act.get_logprob_entropy(state, action) ratio = (new_logprob - logprob.detach()).exp() surrogate1 = (adv_v * ratio) surrogate2 = (adv_v * ratio.clamp((1 - self.ratio_clip), (1 + self.ratio_clip))) obj_surrogate = (- torch.min(surrogate1, surrogate2).mean()) obj_actor = (obj_surrogate + (obj_entropy * self.lambda_entropy)) self.optim_update(self.act_optim, obj_actor, self.act.parameters()) value = self.cri(state).squeeze(1) obj_critic = 
(self.criterion(value, r_sum) / (r_sum.std() + 1e-06)) self.optim_update(self.cri_optim, obj_critic, self.cri.parameters()) (self.soft_update(self.cri_target, self.cri, soft_update_tau) if (self.cri_target is not self.cri) else None) a_std_log = getattr(self.act, 'a_std_log', torch.zeros(1)).mean() return (obj_critic.item(), obj_actor.item(), a_std_log.item())
def update_net(self, buffer, batch_size, repeat_times, soft_update_tau): '\n Update the neural networks by sampling batch data from ``ReplayBuffer``.\n \n \n \n :param buffer[object]: the ReplayBuffer instance that stores the trajectories.\n :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD).\n :param repeat_times[float]: the re-using times of each trajectory.\n :param soft_update_tau[float]: the soft update parameter.\n :return: a tuple of the log information.\n ' with torch.no_grad(): buf_len = buffer[0].shape[0] (buf_state, buf_reward, buf_mask, buf_action, buf_noise) = [ten.to(self.device) for ten in buffer] 'get buf_r_sum, buf_logprob' bs = (2 ** 10) buf_value = [self.cri_target(buf_state[i:(i + bs)]) for i in range(0, buf_len, bs)] buf_value = torch.cat(buf_value, dim=0) buf_logprob = self.act.get_old_logprob(buf_action, buf_noise) (buf_r_sum, buf_adv_v) = self.get_reward_sum(buf_len, buf_reward, buf_mask, buf_value) buf_adv_v = ((buf_adv_v - buf_adv_v.mean()) * (self.lambda_a_value / (buf_adv_v.std() + 1e-05))) del buf_noise, buffer[:] obj_critic = None obj_actor = None update_times = int(((buf_len / batch_size) * repeat_times)) for update_i in range(1, (update_times + 1)): indices = torch.randint(buf_len, size=(batch_size,), requires_grad=False, device=self.device) state = buf_state[indices] r_sum = buf_r_sum[indices] adv_v = buf_adv_v[indices] action = buf_action[indices] logprob = buf_logprob[indices] 'PPO: Surrogate objective of Trust Region' (new_logprob, obj_entropy) = self.act.get_logprob_entropy(state, action) ratio = (new_logprob - logprob.detach()).exp() surrogate1 = (adv_v * ratio) surrogate2 = (adv_v * ratio.clamp((1 - self.ratio_clip), (1 + self.ratio_clip))) obj_surrogate = (- torch.min(surrogate1, surrogate2).mean()) obj_actor = (obj_surrogate + (obj_entropy * self.lambda_entropy)) self.optim_update(self.act_optim, obj_actor, self.act.parameters()) value = self.cri(state).squeeze(1) obj_critic = 
(self.criterion(value, r_sum) / (r_sum.std() + 1e-06)) self.optim_update(self.cri_optim, obj_critic, self.cri.parameters()) (self.soft_update(self.cri_target, self.cri, soft_update_tau) if (self.cri_target is not self.cri) else None) a_std_log = getattr(self.act, 'a_std_log', torch.zeros(1)).mean() return (obj_critic.item(), obj_actor.item(), a_std_log.item())<|docstring|>Update the neural networks by sampling batch data from ``ReplayBuffer``. :param buffer[object]: the ReplayBuffer instance that stores the trajectories. :param batch_size[int]: the size of batch data for Stochastic Gradient Descent (SGD). :param repeat_times[float]: the re-using times of each trajectory. :param soft_update_tau[float]: the soft update parameter. :return: a tuple of the log information.<|endoftext|>
5ce7981917a9819507a513159dd929b10c89104b0b82dc9c8d40760dfae825d9
def __init__(self, size, theta=0.15, sigma=0.3, ou_noise=0.0, dt=0.01): "The noise of Ornstein-Uhlenbeck Process\n Source: https://github.com/slowbull/DDPG/blob/master/src/explorationnoise.py\n It makes Zero-mean Gaussian Noise more stable.\n It helps agent explore better in a inertial system.\n Don't abuse OU Process. OU process has too much hyper-parameters and over fine-tuning make no sense.\n :int size: the size of noise, noise.shape==(-1, action_dim)\n :float theta: related to the not independent of OU-noise\n :float sigma: related to action noise std\n :float ou_noise: initialize OU-noise\n :float dt: derivative\n " self.theta = theta self.sigma = sigma self.ou_noise = ou_noise self.dt = dt self.size = size
The noise of Ornstein-Uhlenbeck Process Source: https://github.com/slowbull/DDPG/blob/master/src/explorationnoise.py It makes Zero-mean Gaussian Noise more stable. It helps agent explore better in a inertial system. Don't abuse OU Process. OU process has too much hyper-parameters and over fine-tuning make no sense. :int size: the size of noise, noise.shape==(-1, action_dim) :float theta: related to the not independent of OU-noise :float sigma: related to action noise std :float ou_noise: initialize OU-noise :float dt: derivative
elegantrl/agent.py
__init__
Yonv1943/ElegantRL
236
python
def __init__(self, size, theta=0.15, sigma=0.3, ou_noise=0.0, dt=0.01): "The noise of Ornstein-Uhlenbeck Process\n Source: https://github.com/slowbull/DDPG/blob/master/src/explorationnoise.py\n It makes Zero-mean Gaussian Noise more stable.\n It helps agent explore better in a inertial system.\n Don't abuse OU Process. OU process has too much hyper-parameters and over fine-tuning make no sense.\n :int size: the size of noise, noise.shape==(-1, action_dim)\n :float theta: related to the not independent of OU-noise\n :float sigma: related to action noise std\n :float ou_noise: initialize OU-noise\n :float dt: derivative\n " self.theta = theta self.sigma = sigma self.ou_noise = ou_noise self.dt = dt self.size = size
def __init__(self, size, theta=0.15, sigma=0.3, ou_noise=0.0, dt=0.01): "The noise of Ornstein-Uhlenbeck Process\n Source: https://github.com/slowbull/DDPG/blob/master/src/explorationnoise.py\n It makes Zero-mean Gaussian Noise more stable.\n It helps agent explore better in a inertial system.\n Don't abuse OU Process. OU process has too much hyper-parameters and over fine-tuning make no sense.\n :int size: the size of noise, noise.shape==(-1, action_dim)\n :float theta: related to the not independent of OU-noise\n :float sigma: related to action noise std\n :float ou_noise: initialize OU-noise\n :float dt: derivative\n " self.theta = theta self.sigma = sigma self.ou_noise = ou_noise self.dt = dt self.size = size<|docstring|>The noise of Ornstein-Uhlenbeck Process Source: https://github.com/slowbull/DDPG/blob/master/src/explorationnoise.py It makes Zero-mean Gaussian Noise more stable. It helps agent explore better in a inertial system. Don't abuse OU Process. OU process has too much hyper-parameters and over fine-tuning make no sense. :int size: the size of noise, noise.shape==(-1, action_dim) :float theta: related to the not independent of OU-noise :float sigma: related to action noise std :float ou_noise: initialize OU-noise :float dt: derivative<|endoftext|>
e99c2ababa87247155b6240675b83fa817b67dae3d661f2e91e7b9b2db692815
def __call__(self) -> float: 'output a OU-noise\n :return array ou_noise: a noise generated by Ornstein-Uhlenbeck Process\n ' noise = ((self.sigma * np.sqrt(self.dt)) * rd.normal(size=self.size)) self.ou_noise -= (((self.theta * self.ou_noise) * self.dt) + noise) return self.ou_noise
output a OU-noise :return array ou_noise: a noise generated by Ornstein-Uhlenbeck Process
elegantrl/agent.py
__call__
Yonv1943/ElegantRL
236
python
def __call__(self) -> float: 'output a OU-noise\n :return array ou_noise: a noise generated by Ornstein-Uhlenbeck Process\n ' noise = ((self.sigma * np.sqrt(self.dt)) * rd.normal(size=self.size)) self.ou_noise -= (((self.theta * self.ou_noise) * self.dt) + noise) return self.ou_noise
def __call__(self) -> float: 'output a OU-noise\n :return array ou_noise: a noise generated by Ornstein-Uhlenbeck Process\n ' noise = ((self.sigma * np.sqrt(self.dt)) * rd.normal(size=self.size)) self.ou_noise -= (((self.theta * self.ou_noise) * self.dt) + noise) return self.ou_noise<|docstring|>output a OU-noise :return array ou_noise: a noise generated by Ornstein-Uhlenbeck Process<|endoftext|>
0cc0fa5f57da773e444501d049a6434fdf569b8e939598272398243c22ea3ef4
@classmethod def from_locus_read(cls, locus_read): "\n Given a single LocusRead object, return either an AlleleRead or None\n\n Parameters\n ----------\n locus_read : LocusRead\n Read which overlaps a variant locus but doesn't necessarily contain the\n alternate nucleotides\n " sequence = locus_read.sequence read_name = locus_read.name reference_base0_start_inclusive = locus_read.reference_base0_start_inclusive reference_base0_end_exclusive = locus_read.reference_base0_end_exclusive read_base0_start_inclusive = locus_read.read_base0_start_inclusive read_base0_end_exclusive = locus_read.read_base0_end_exclusive if ((read_base0_start_inclusive is None) or (read_base0_end_exclusive is None)): logger.debug("Skipping read '%s' because required bases in reference interval %s:%s aren't mapped", read_name, reference_base0_start_inclusive, reference_base0_end_exclusive) return None reference_positions = locus_read.reference_positions n_ref_bases = (reference_base0_end_exclusive - reference_base0_start_inclusive) insertion = (n_ref_bases == 0) if insertion: for read_index in range(read_base0_start_inclusive, read_base0_end_exclusive): if (reference_positions[read_index] is not None): logger.debug("Skipping read '%s', inserted nucleotides shouldn't map to reference", read_name) return None nucleotides_at_variant_locus = convert_from_bytes_if_necessary(sequence[read_base0_start_inclusive:read_base0_end_exclusive]) if ('N' in nucleotides_at_variant_locus): logger.debug("Skipping read '%s', found N nucleotides at variant locus", read_name) prefix = convert_from_bytes_if_necessary(sequence[:read_base0_start_inclusive]) suffix = convert_from_bytes_if_necessary(sequence[read_base0_end_exclusive:]) (prefix, suffix) = trim_N_nucleotides(prefix, suffix) return AlleleRead(prefix, nucleotides_at_variant_locus, suffix, name=read_name)
Given a single LocusRead object, return either an AlleleRead or None Parameters ---------- locus_read : LocusRead Read which overlaps a variant locus but doesn't necessarily contain the alternate nucleotides
isovar/allele_read.py
from_locus_read
openvax/isovar
12
python
@classmethod def from_locus_read(cls, locus_read): "\n Given a single LocusRead object, return either an AlleleRead or None\n\n Parameters\n ----------\n locus_read : LocusRead\n Read which overlaps a variant locus but doesn't necessarily contain the\n alternate nucleotides\n " sequence = locus_read.sequence read_name = locus_read.name reference_base0_start_inclusive = locus_read.reference_base0_start_inclusive reference_base0_end_exclusive = locus_read.reference_base0_end_exclusive read_base0_start_inclusive = locus_read.read_base0_start_inclusive read_base0_end_exclusive = locus_read.read_base0_end_exclusive if ((read_base0_start_inclusive is None) or (read_base0_end_exclusive is None)): logger.debug("Skipping read '%s' because required bases in reference interval %s:%s aren't mapped", read_name, reference_base0_start_inclusive, reference_base0_end_exclusive) return None reference_positions = locus_read.reference_positions n_ref_bases = (reference_base0_end_exclusive - reference_base0_start_inclusive) insertion = (n_ref_bases == 0) if insertion: for read_index in range(read_base0_start_inclusive, read_base0_end_exclusive): if (reference_positions[read_index] is not None): logger.debug("Skipping read '%s', inserted nucleotides shouldn't map to reference", read_name) return None nucleotides_at_variant_locus = convert_from_bytes_if_necessary(sequence[read_base0_start_inclusive:read_base0_end_exclusive]) if ('N' in nucleotides_at_variant_locus): logger.debug("Skipping read '%s', found N nucleotides at variant locus", read_name) prefix = convert_from_bytes_if_necessary(sequence[:read_base0_start_inclusive]) suffix = convert_from_bytes_if_necessary(sequence[read_base0_end_exclusive:]) (prefix, suffix) = trim_N_nucleotides(prefix, suffix) return AlleleRead(prefix, nucleotides_at_variant_locus, suffix, name=read_name)
@classmethod def from_locus_read(cls, locus_read): "\n Given a single LocusRead object, return either an AlleleRead or None\n\n Parameters\n ----------\n locus_read : LocusRead\n Read which overlaps a variant locus but doesn't necessarily contain the\n alternate nucleotides\n " sequence = locus_read.sequence read_name = locus_read.name reference_base0_start_inclusive = locus_read.reference_base0_start_inclusive reference_base0_end_exclusive = locus_read.reference_base0_end_exclusive read_base0_start_inclusive = locus_read.read_base0_start_inclusive read_base0_end_exclusive = locus_read.read_base0_end_exclusive if ((read_base0_start_inclusive is None) or (read_base0_end_exclusive is None)): logger.debug("Skipping read '%s' because required bases in reference interval %s:%s aren't mapped", read_name, reference_base0_start_inclusive, reference_base0_end_exclusive) return None reference_positions = locus_read.reference_positions n_ref_bases = (reference_base0_end_exclusive - reference_base0_start_inclusive) insertion = (n_ref_bases == 0) if insertion: for read_index in range(read_base0_start_inclusive, read_base0_end_exclusive): if (reference_positions[read_index] is not None): logger.debug("Skipping read '%s', inserted nucleotides shouldn't map to reference", read_name) return None nucleotides_at_variant_locus = convert_from_bytes_if_necessary(sequence[read_base0_start_inclusive:read_base0_end_exclusive]) if ('N' in nucleotides_at_variant_locus): logger.debug("Skipping read '%s', found N nucleotides at variant locus", read_name) prefix = convert_from_bytes_if_necessary(sequence[:read_base0_start_inclusive]) suffix = convert_from_bytes_if_necessary(sequence[read_base0_end_exclusive:]) (prefix, suffix) = trim_N_nucleotides(prefix, suffix) return AlleleRead(prefix, nucleotides_at_variant_locus, suffix, name=read_name)<|docstring|>Given a single LocusRead object, return either an AlleleRead or None Parameters ---------- locus_read : LocusRead Read which overlaps a variant 
locus but doesn't necessarily contain the alternate nucleotides<|endoftext|>
298f9fcb40d4106ff863cfc6453984608da1ed2c175f6e4b182e982eb0ab2f89
def query(self, s=None, i=None): '\n This is a function-level docblock\n \n Parameters:\n - s\n - i\n ' pass
This is a function-level docblock Parameters: - s - i
thrift/compiler/test/fixtures/includes/gen-py/service/MyService.py
query
Skory/fbthrift
2
python
def query(self, s=None, i=None): '\n This is a function-level docblock\n \n Parameters:\n - s\n - i\n ' pass
def query(self, s=None, i=None): '\n This is a function-level docblock\n \n Parameters:\n - s\n - i\n ' pass<|docstring|>This is a function-level docblock Parameters: - s - i<|endoftext|>
8f11830d53feeb541b4a061a27d49e9ae833f607ddfb7d9d4561b109909430b4
def has_arg_docs(self, s=None, i=None): '\n Parameters:\n - s\n - i: arg doc\n ' pass
Parameters: - s - i: arg doc
thrift/compiler/test/fixtures/includes/gen-py/service/MyService.py
has_arg_docs
Skory/fbthrift
2
python
def has_arg_docs(self, s=None, i=None): '\n Parameters:\n - s\n - i: arg doc\n ' pass
def has_arg_docs(self, s=None, i=None): '\n Parameters:\n - s\n - i: arg doc\n ' pass<|docstring|>Parameters: - s - i: arg doc<|endoftext|>
eaaa3b238a0f93758e935d90a0be01c681855e1c94b01e4a47c8fafb46120c00
def query(self, handler_ctx, s=None, i=None): '\n This is a function-level docblock\n \n Parameters:\n - s\n - i\n ' pass
This is a function-level docblock Parameters: - s - i
thrift/compiler/test/fixtures/includes/gen-py/service/MyService.py
query
Skory/fbthrift
2
python
def query(self, handler_ctx, s=None, i=None): '\n This is a function-level docblock\n \n Parameters:\n - s\n - i\n ' pass
def query(self, handler_ctx, s=None, i=None): '\n This is a function-level docblock\n \n Parameters:\n - s\n - i\n ' pass<|docstring|>This is a function-level docblock Parameters: - s - i<|endoftext|>
29db048636e320fa7dc277bae9275d65ae4c39312430b81cb862b90a8ba35cd3
def has_arg_docs(self, handler_ctx, s=None, i=None): '\n Parameters:\n - s\n - i: arg doc\n ' pass
Parameters: - s - i: arg doc
thrift/compiler/test/fixtures/includes/gen-py/service/MyService.py
has_arg_docs
Skory/fbthrift
2
python
def has_arg_docs(self, handler_ctx, s=None, i=None): '\n Parameters:\n - s\n - i: arg doc\n ' pass
def has_arg_docs(self, handler_ctx, s=None, i=None): '\n Parameters:\n - s\n - i: arg doc\n ' pass<|docstring|>Parameters: - s - i: arg doc<|endoftext|>
7177da26addc0320d2592e9ac3722ee3ea1b9f4319bcce400cb9d62fd4974670
def query(self, s=None, i=None): '\n This is a function-level docblock\n \n Parameters:\n - s\n - i\n ' self.send_query(s, i) self.recv_query()
This is a function-level docblock Parameters: - s - i
thrift/compiler/test/fixtures/includes/gen-py/service/MyService.py
query
Skory/fbthrift
2
python
def query(self, s=None, i=None): '\n This is a function-level docblock\n \n Parameters:\n - s\n - i\n ' self.send_query(s, i) self.recv_query()
def query(self, s=None, i=None): '\n This is a function-level docblock\n \n Parameters:\n - s\n - i\n ' self.send_query(s, i) self.recv_query()<|docstring|>This is a function-level docblock Parameters: - s - i<|endoftext|>
c5c3e56200aae5a9aaa0101fc489d0312d5c28eca4cd0522d98cc2c72499eaf4
def has_arg_docs(self, s=None, i=None): '\n Parameters:\n - s\n - i: arg doc\n ' self.send_has_arg_docs(s, i) self.recv_has_arg_docs()
Parameters: - s - i: arg doc
thrift/compiler/test/fixtures/includes/gen-py/service/MyService.py
has_arg_docs
Skory/fbthrift
2
python
def has_arg_docs(self, s=None, i=None): '\n Parameters:\n - s\n - i: arg doc\n ' self.send_has_arg_docs(s, i) self.recv_has_arg_docs()
def has_arg_docs(self, s=None, i=None): '\n Parameters:\n - s\n - i: arg doc\n ' self.send_has_arg_docs(s, i) self.recv_has_arg_docs()<|docstring|>Parameters: - s - i: arg doc<|endoftext|>
52508cbe71c61c60c540697671dccec9c7045aaa65e80c4add26fed44de2fe4b
def get_tree_size(start_path): '\n return size (in bytes) of filesystem tree\n ' if (not os.path.exists(start_path)): raise ValueError(('Incorrect path: %s' % start_path)) total_size = 0 for (dirpath, dirnames, filenames) in os.walk(start_path): for f in filenames: fp = os.path.join(dirpath, f) total_size += os.path.getsize(fp) return total_size
return size (in bytes) of filesystem tree
digits/utils/filesystem.py
get_tree_size
InnovArul/DIGITS
2
python
def get_tree_size(start_path): '\n \n ' if (not os.path.exists(start_path)): raise ValueError(('Incorrect path: %s' % start_path)) total_size = 0 for (dirpath, dirnames, filenames) in os.walk(start_path): for f in filenames: fp = os.path.join(dirpath, f) total_size += os.path.getsize(fp) return total_size
def get_tree_size(start_path): '\n \n ' if (not os.path.exists(start_path)): raise ValueError(('Incorrect path: %s' % start_path)) total_size = 0 for (dirpath, dirnames, filenames) in os.walk(start_path): for f in filenames: fp = os.path.join(dirpath, f) total_size += os.path.getsize(fp) return total_size<|docstring|>return size (in bytes) of filesystem tree<|endoftext|>
a813ff306b9538a7cc7d3ab516166dabfc2c61ca7b59b1ed2f36e8b27ba5b8c4
def tail(file, n=40): '\n Returns last n lines of text file (or all lines if the file has fewer lines)\n\n Arguments:\n file -- full path of that file, calling side must ensure its existence\n n -- the number of tailing lines to return\n ' if (platform.system() in ['Linux', 'Darwin']): import subprocess output = subprocess.check_output(['tail', '-n{}'.format(n), file]) else: from collections import deque tailing_lines = deque() with open(file) as f: for line in f: tailing_lines.append(line) if (len(tailing_lines) > n): tailing_lines.popleft() output = ''.join(tailing_lines) return output
Returns last n lines of text file (or all lines if the file has fewer lines) Arguments: file -- full path of that file, calling side must ensure its existence n -- the number of tailing lines to return
digits/utils/filesystem.py
tail
InnovArul/DIGITS
2
python
def tail(file, n=40): '\n Returns last n lines of text file (or all lines if the file has fewer lines)\n\n Arguments:\n file -- full path of that file, calling side must ensure its existence\n n -- the number of tailing lines to return\n ' if (platform.system() in ['Linux', 'Darwin']): import subprocess output = subprocess.check_output(['tail', '-n{}'.format(n), file]) else: from collections import deque tailing_lines = deque() with open(file) as f: for line in f: tailing_lines.append(line) if (len(tailing_lines) > n): tailing_lines.popleft() output = .join(tailing_lines) return output
def tail(file, n=40): '\n Returns last n lines of text file (or all lines if the file has fewer lines)\n\n Arguments:\n file -- full path of that file, calling side must ensure its existence\n n -- the number of tailing lines to return\n ' if (platform.system() in ['Linux', 'Darwin']): import subprocess output = subprocess.check_output(['tail', '-n{}'.format(n), file]) else: from collections import deque tailing_lines = deque() with open(file) as f: for line in f: tailing_lines.append(line) if (len(tailing_lines) > n): tailing_lines.popleft() output = .join(tailing_lines) return output<|docstring|>Returns last n lines of text file (or all lines if the file has fewer lines) Arguments: file -- full path of that file, calling side must ensure its existence n -- the number of tailing lines to return<|endoftext|>
a2028df1438fcffaf1f8a77678431d150642812c5c4ead2d6809fbf9696a4d6d
def getid(obj): "\n Abstracts the common pattern of allowing both an object or an object's ID\n as a parameter when dealing with relationships.\n " try: return obj.id except AttributeError: return obj
Abstracts the common pattern of allowing both an object or an object's ID as a parameter when dealing with relationships.
python/venv/lib/python2.7/site-packages/cinderclient/base.py
getid
sjsucohort6/simple-openstack-paas
0
python
def getid(obj): "\n Abstracts the common pattern of allowing both an object or an object's ID\n as a parameter when dealing with relationships.\n " try: return obj.id except AttributeError: return obj
def getid(obj): "\n Abstracts the common pattern of allowing both an object or an object's ID\n as a parameter when dealing with relationships.\n " try: return obj.id except AttributeError: return obj<|docstring|>Abstracts the common pattern of allowing both an object or an object's ID as a parameter when dealing with relationships.<|endoftext|>
2e86ea596cb44a981011c9f485c926febc94bb6b4b2ce8d43e4f71dc817d4d44
@contextlib.contextmanager def completion_cache(self, cache_type, obj_class, mode): '\n The completion cache store items that can be used for bash\n autocompletion, like UUIDs or human-friendly IDs.\n\n A resource listing will clear and repopulate the cache.\n\n A resource create will append to the cache.\n\n Delete is not handled because listings are assumed to be performed\n often enough to keep the cache reasonably up-to-date.\n ' base_dir = utils.env('CINDERCLIENT_UUID_CACHE_DIR', default='~/.cinderclient') username = utils.env('OS_USERNAME', 'CINDER_USERNAME') url = utils.env('OS_URL', 'CINDER_URL') uniqifier = hashlib.md5((username.encode('utf-8') + url.encode('utf-8'))).hexdigest() cache_dir = os.path.expanduser(os.path.join(base_dir, uniqifier)) try: os.makedirs(cache_dir, 493) except OSError: pass resource = obj_class.__name__.lower() filename = ('%s-%s-cache' % (resource, cache_type.replace('_', '-'))) path = os.path.join(cache_dir, filename) cache_attr = ('_%s_cache' % cache_type) try: setattr(self, cache_attr, open(path, mode)) except IOError: pass try: (yield) finally: cache = getattr(self, cache_attr, None) if cache: cache.close() delattr(self, cache_attr)
The completion cache store items that can be used for bash autocompletion, like UUIDs or human-friendly IDs. A resource listing will clear and repopulate the cache. A resource create will append to the cache. Delete is not handled because listings are assumed to be performed often enough to keep the cache reasonably up-to-date.
python/venv/lib/python2.7/site-packages/cinderclient/base.py
completion_cache
sjsucohort6/simple-openstack-paas
0
python
@contextlib.contextmanager def completion_cache(self, cache_type, obj_class, mode): '\n The completion cache store items that can be used for bash\n autocompletion, like UUIDs or human-friendly IDs.\n\n A resource listing will clear and repopulate the cache.\n\n A resource create will append to the cache.\n\n Delete is not handled because listings are assumed to be performed\n often enough to keep the cache reasonably up-to-date.\n ' base_dir = utils.env('CINDERCLIENT_UUID_CACHE_DIR', default='~/.cinderclient') username = utils.env('OS_USERNAME', 'CINDER_USERNAME') url = utils.env('OS_URL', 'CINDER_URL') uniqifier = hashlib.md5((username.encode('utf-8') + url.encode('utf-8'))).hexdigest() cache_dir = os.path.expanduser(os.path.join(base_dir, uniqifier)) try: os.makedirs(cache_dir, 493) except OSError: pass resource = obj_class.__name__.lower() filename = ('%s-%s-cache' % (resource, cache_type.replace('_', '-'))) path = os.path.join(cache_dir, filename) cache_attr = ('_%s_cache' % cache_type) try: setattr(self, cache_attr, open(path, mode)) except IOError: pass try: (yield) finally: cache = getattr(self, cache_attr, None) if cache: cache.close() delattr(self, cache_attr)
@contextlib.contextmanager def completion_cache(self, cache_type, obj_class, mode): '\n The completion cache store items that can be used for bash\n autocompletion, like UUIDs or human-friendly IDs.\n\n A resource listing will clear and repopulate the cache.\n\n A resource create will append to the cache.\n\n Delete is not handled because listings are assumed to be performed\n often enough to keep the cache reasonably up-to-date.\n ' base_dir = utils.env('CINDERCLIENT_UUID_CACHE_DIR', default='~/.cinderclient') username = utils.env('OS_USERNAME', 'CINDER_USERNAME') url = utils.env('OS_URL', 'CINDER_URL') uniqifier = hashlib.md5((username.encode('utf-8') + url.encode('utf-8'))).hexdigest() cache_dir = os.path.expanduser(os.path.join(base_dir, uniqifier)) try: os.makedirs(cache_dir, 493) except OSError: pass resource = obj_class.__name__.lower() filename = ('%s-%s-cache' % (resource, cache_type.replace('_', '-'))) path = os.path.join(cache_dir, filename) cache_attr = ('_%s_cache' % cache_type) try: setattr(self, cache_attr, open(path, mode)) except IOError: pass try: (yield) finally: cache = getattr(self, cache_attr, None) if cache: cache.close() delattr(self, cache_attr)<|docstring|>The completion cache store items that can be used for bash autocompletion, like UUIDs or human-friendly IDs. A resource listing will clear and repopulate the cache. A resource create will append to the cache. Delete is not handled because listings are assumed to be performed often enough to keep the cache reasonably up-to-date.<|endoftext|>
356cf82966414e2209d7ff33dbc43dfb37e0923173eb2cd1be87fcee511baf7b
def find(self, **kwargs): "\n Find a single item with attributes matching ``**kwargs``.\n\n This isn't very efficient for search options which require the\n Python side filtering(e.g. 'human_id')\n " matches = self.findall(**kwargs) num_matches = len(matches) if (num_matches == 0): msg = ('No %s matching %s.' % (self.resource_class.__name__, kwargs)) raise exceptions.NotFound(404, msg) elif (num_matches > 1): raise exceptions.NoUniqueMatch else: return matches[0]
Find a single item with attributes matching ``**kwargs``. This isn't very efficient for search options which require the Python side filtering(e.g. 'human_id')
python/venv/lib/python2.7/site-packages/cinderclient/base.py
find
sjsucohort6/simple-openstack-paas
0
python
def find(self, **kwargs): "\n Find a single item with attributes matching ``**kwargs``.\n\n This isn't very efficient for search options which require the\n Python side filtering(e.g. 'human_id')\n " matches = self.findall(**kwargs) num_matches = len(matches) if (num_matches == 0): msg = ('No %s matching %s.' % (self.resource_class.__name__, kwargs)) raise exceptions.NotFound(404, msg) elif (num_matches > 1): raise exceptions.NoUniqueMatch else: return matches[0]
def find(self, **kwargs): "\n Find a single item with attributes matching ``**kwargs``.\n\n This isn't very efficient for search options which require the\n Python side filtering(e.g. 'human_id')\n " matches = self.findall(**kwargs) num_matches = len(matches) if (num_matches == 0): msg = ('No %s matching %s.' % (self.resource_class.__name__, kwargs)) raise exceptions.NotFound(404, msg) elif (num_matches > 1): raise exceptions.NoUniqueMatch else: return matches[0]<|docstring|>Find a single item with attributes matching ``**kwargs``. This isn't very efficient for search options which require the Python side filtering(e.g. 'human_id')<|endoftext|>
6cfeebe1a3f1c2bf2dbf2acdf9ffc9d37f00b4c446a8cc3577c0268f5d08e3fb
def findall(self, **kwargs): "\n Find all items with attributes matching ``**kwargs``.\n\n This isn't very efficient for search options which require the\n Python side filtering(e.g. 'human_id')\n " search_opts = {'all_tenants': 1} if ('name' in kwargs): search_opts['name'] = kwargs['name'] elif ('display_name' in kwargs): search_opts['display_name'] = kwargs['display_name'] found = [] searches = kwargs.items() for obj in self.list(search_opts=search_opts): try: if all(((getattr(obj, attr) == value) for (attr, value) in searches)): found.append(obj) except AttributeError: continue return found
Find all items with attributes matching ``**kwargs``. This isn't very efficient for search options which require the Python side filtering(e.g. 'human_id')
python/venv/lib/python2.7/site-packages/cinderclient/base.py
findall
sjsucohort6/simple-openstack-paas
0
python
def findall(self, **kwargs): "\n Find all items with attributes matching ``**kwargs``.\n\n This isn't very efficient for search options which require the\n Python side filtering(e.g. 'human_id')\n " search_opts = {'all_tenants': 1} if ('name' in kwargs): search_opts['name'] = kwargs['name'] elif ('display_name' in kwargs): search_opts['display_name'] = kwargs['display_name'] found = [] searches = kwargs.items() for obj in self.list(search_opts=search_opts): try: if all(((getattr(obj, attr) == value) for (attr, value) in searches)): found.append(obj) except AttributeError: continue return found
def findall(self, **kwargs): "\n Find all items with attributes matching ``**kwargs``.\n\n This isn't very efficient for search options which require the\n Python side filtering(e.g. 'human_id')\n " search_opts = {'all_tenants': 1} if ('name' in kwargs): search_opts['name'] = kwargs['name'] elif ('display_name' in kwargs): search_opts['display_name'] = kwargs['display_name'] found = [] searches = kwargs.items() for obj in self.list(search_opts=search_opts): try: if all(((getattr(obj, attr) == value) for (attr, value) in searches)): found.append(obj) except AttributeError: continue return found<|docstring|>Find all items with attributes matching ``**kwargs``. This isn't very efficient for search options which require the Python side filtering(e.g. 'human_id')<|endoftext|>
74c0409f8898582b34852937e683cdbc581de8c33228ad2c7dc8faae484ca3fc
def get_auth_token(http_requests, url='https://danielvaughan.eu.auth0.com/oauth/token', client_id='Zdsog4nDAnhQ99yiKwMQWAPc2qUDlR99', client_secret='EXAMPLE_KEY', audience='http://localhost:8080', grant_type='client_credentials'): 'Request and get the access token for a trusted client from Auth0.\n\n .. note::\n\n We have hard-coded some test credentials here temporarily, which do not give any special\n permissions in the ingest service.\n\n Args:\n http_requests (HttpRequests): the HttpRequests object to use\n url (str): the url to the Auth0 domain oauth endpoint.\n client_id (str): the value of the Client ID field of the Non Interactive Client of Auth0.\n client_secret (str): the value of the Client Secret field of the Non Interactive Client of Auth0.\n audience (str): the value of the Identifier field of the Auth0 Management API.\n grant_type (str): type of OAuth 2.0 flow you want to run. e.g. client_credentials\n\n Returns:\n auth_token (dict): A dict containing the JWT (JSON Web Token) and its expiry (24h by default),\n the scopes granted, and the token type.\n\n Raises:\n requests.HTTPError: for 4xx errors or 5xx errors beyond timeout\n ' url = url headers = {'content-type': 'application/json'} payload = {'client_id': client_id, 'client_secret': client_secret, 'audience': audience, 'grant_type': grant_type} response = http_requests.post(url=url, headers=headers, json=payload) response.raise_for_status() auth_token = response.json() return auth_token
Request and get the access token for a trusted client from Auth0. .. note:: We have hard-coded some test credentials here temporarily, which do not give any special permissions in the ingest service. Args: http_requests (HttpRequests): the HttpRequests object to use url (str): the url to the Auth0 domain oauth endpoint. client_id (str): the value of the Client ID field of the Non Interactive Client of Auth0. client_secret (str): the value of the Client Secret field of the Non Interactive Client of Auth0. audience (str): the value of the Identifier field of the Auth0 Management API. grant_type (str): type of OAuth 2.0 flow you want to run. e.g. client_credentials Returns: auth_token (dict): A dict containing the JWT (JSON Web Token) and its expiry (24h by default), the scopes granted, and the token type. Raises: requests.HTTPError: for 4xx errors or 5xx errors beyond timeout
pipeline_tools/shared/dcp_utils.py
get_auth_token
HumanCellAtlas/pipeline-tools
5
python
def get_auth_token(http_requests, url='https://danielvaughan.eu.auth0.com/oauth/token', client_id='Zdsog4nDAnhQ99yiKwMQWAPc2qUDlR99', client_secret='EXAMPLE_KEY', audience='http://localhost:8080', grant_type='client_credentials'): 'Request and get the access token for a trusted client from Auth0.\n\n .. note::\n\n We have hard-coded some test credentials here temporarily, which do not give any special\n permissions in the ingest service.\n\n Args:\n http_requests (HttpRequests): the HttpRequests object to use\n url (str): the url to the Auth0 domain oauth endpoint.\n client_id (str): the value of the Client ID field of the Non Interactive Client of Auth0.\n client_secret (str): the value of the Client Secret field of the Non Interactive Client of Auth0.\n audience (str): the value of the Identifier field of the Auth0 Management API.\n grant_type (str): type of OAuth 2.0 flow you want to run. e.g. client_credentials\n\n Returns:\n auth_token (dict): A dict containing the JWT (JSON Web Token) and its expiry (24h by default),\n the scopes granted, and the token type.\n\n Raises:\n requests.HTTPError: for 4xx errors or 5xx errors beyond timeout\n ' url = url headers = {'content-type': 'application/json'} payload = {'client_id': client_id, 'client_secret': client_secret, 'audience': audience, 'grant_type': grant_type} response = http_requests.post(url=url, headers=headers, json=payload) response.raise_for_status() auth_token = response.json() return auth_token
def get_auth_token(http_requests, url='https://danielvaughan.eu.auth0.com/oauth/token', client_id='Zdsog4nDAnhQ99yiKwMQWAPc2qUDlR99', client_secret='EXAMPLE_KEY', audience='http://localhost:8080', grant_type='client_credentials'): 'Request and get the access token for a trusted client from Auth0.\n\n .. note::\n\n We have hard-coded some test credentials here temporarily, which do not give any special\n permissions in the ingest service.\n\n Args:\n http_requests (HttpRequests): the HttpRequests object to use\n url (str): the url to the Auth0 domain oauth endpoint.\n client_id (str): the value of the Client ID field of the Non Interactive Client of Auth0.\n client_secret (str): the value of the Client Secret field of the Non Interactive Client of Auth0.\n audience (str): the value of the Identifier field of the Auth0 Management API.\n grant_type (str): type of OAuth 2.0 flow you want to run. e.g. client_credentials\n\n Returns:\n auth_token (dict): A dict containing the JWT (JSON Web Token) and its expiry (24h by default),\n the scopes granted, and the token type.\n\n Raises:\n requests.HTTPError: for 4xx errors or 5xx errors beyond timeout\n ' url = url headers = {'content-type': 'application/json'} payload = {'client_id': client_id, 'client_secret': client_secret, 'audience': audience, 'grant_type': grant_type} response = http_requests.post(url=url, headers=headers, json=payload) response.raise_for_status() auth_token = response.json() return auth_token<|docstring|>Request and get the access token for a trusted client from Auth0. .. note:: We have hard-coded some test credentials here temporarily, which do not give any special permissions in the ingest service. Args: http_requests (HttpRequests): the HttpRequests object to use url (str): the url to the Auth0 domain oauth endpoint. client_id (str): the value of the Client ID field of the Non Interactive Client of Auth0. 
client_secret (str): the value of the Client Secret field of the Non Interactive Client of Auth0. audience (str): the value of the Identifier field of the Auth0 Management API. grant_type (str): type of OAuth 2.0 flow you want to run. e.g. client_credentials Returns: auth_token (dict): A dict containing the JWT (JSON Web Token) and its expiry (24h by default), the scopes granted, and the token type. Raises: requests.HTTPError: for 4xx errors or 5xx errors beyond timeout<|endoftext|>
f3319499bbba6b27585bd8cb9d2a7849a201976f7c330a4ae67241e2c132b3a9
def make_auth_header(auth_token): 'Make the authorization headers to communicate with endpoints which implement Auth0 authentication API.\n\n Args:\n auth_token (dict): a dict obtained from the Auth0 domain oauth endpoint, containing the signed JWT\n (JSON Web Token), its expiry, the scopes granted, and the token type.\n\n Returns:\n headers (dict): A dict representing the headers with necessary token information to talk to Auth0 authentication\n required endpoints.\n ' token_type = auth_token['token_type'] access_token = auth_token['access_token'] headers = {'Content-type': 'application/json', 'Authorization': '{token_type} {access_token}'.format(token_type=token_type, access_token=access_token)} return headers
Make the authorization headers to communicate with endpoints which implement Auth0 authentication API. Args: auth_token (dict): a dict obtained from the Auth0 domain oauth endpoint, containing the signed JWT (JSON Web Token), its expiry, the scopes granted, and the token type. Returns: headers (dict): A dict representing the headers with necessary token information to talk to Auth0 authentication required endpoints.
pipeline_tools/shared/dcp_utils.py
make_auth_header
HumanCellAtlas/pipeline-tools
5
python
def make_auth_header(auth_token): 'Make the authorization headers to communicate with endpoints which implement Auth0 authentication API.\n\n Args:\n auth_token (dict): a dict obtained from the Auth0 domain oauth endpoint, containing the signed JWT\n (JSON Web Token), its expiry, the scopes granted, and the token type.\n\n Returns:\n headers (dict): A dict representing the headers with necessary token information to talk to Auth0 authentication\n required endpoints.\n ' token_type = auth_token['token_type'] access_token = auth_token['access_token'] headers = {'Content-type': 'application/json', 'Authorization': '{token_type} {access_token}'.format(token_type=token_type, access_token=access_token)} return headers
def make_auth_header(auth_token): 'Make the authorization headers to communicate with endpoints which implement Auth0 authentication API.\n\n Args:\n auth_token (dict): a dict obtained from the Auth0 domain oauth endpoint, containing the signed JWT\n (JSON Web Token), its expiry, the scopes granted, and the token type.\n\n Returns:\n headers (dict): A dict representing the headers with necessary token information to talk to Auth0 authentication\n required endpoints.\n ' token_type = auth_token['token_type'] access_token = auth_token['access_token'] headers = {'Content-type': 'application/json', 'Authorization': '{token_type} {access_token}'.format(token_type=token_type, access_token=access_token)} return headers<|docstring|>Make the authorization headers to communicate with endpoints which implement Auth0 authentication API. Args: auth_token (dict): a dict obtained from the Auth0 domain oauth endpoint, containing the signed JWT (JSON Web Token), its expiry, the scopes granted, and the token type. Returns: headers (dict): A dict representing the headers with necessary token information to talk to Auth0 authentication required endpoints.<|endoftext|>
957540d5b39a2d564817d3c0800453267146640c21feaf1973aed21b4414dd58
def test_inside_tetrahedra(self): 'Tests the inside tetrahedra algorithm.' (v1x, v1y, v1z) = (0, 0, 0) (v2x, v2y, v2z) = (1, 0, 0) (v3x, v3y, v3z) = (0, 1, 0) (v4x, v4y, v4z) = (0, 0, 1) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v4x, v4y, v4z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, (- 0.5), (- 0.5), (- 0.5))) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, (- 0.01), 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, (- 0.01), 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, (- 0.01))) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001)) 
self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333, 0.335))
Tests the inside tetrahedra algorithm.
raysect/core/math/cython/tests/test_tetrahedra.py
test_inside_tetrahedra
raysect/source
71
python
def test_inside_tetrahedra(self): (v1x, v1y, v1z) = (0, 0, 0) (v2x, v2y, v2z) = (1, 0, 0) (v3x, v3y, v3z) = (0, 1, 0) (v4x, v4y, v4z) = (0, 0, 1) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v4x, v4y, v4z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, (- 0.5), (- 0.5), (- 0.5))) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, (- 0.01), 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, (- 0.01), 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, (- 0.01))) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, 
v3y, v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333, 0.335))
def test_inside_tetrahedra(self): (v1x, v1y, v1z) = (0, 0, 0) (v2x, v2y, v2z) = (1, 0, 0) (v3x, v3y, v3z) = (0, 1, 0) (v4x, v4y, v4z) = (0, 0, 1) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v1x, v1y, v1z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v2x, v2y, v2z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v3x, v3y, v3z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, v4x, v4y, v4z)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0, 0.5, 0.5)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0)) self.assertTrue(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, 0.25)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, (- 0.5), (- 0.5), (- 0.5))) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, (- 0.01), 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, (- 0.01), 0.5, 0.5)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.25, 0.25, (- 0.01))) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.5, 0.5, 0.0001)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, 
v3y, v3z, v4x, v4y, v4z, 1.0, 1.0, 1.0)) self.assertFalse(inside_tetrahedra(v1x, v1y, v1z, v2x, v2y, v2z, v3x, v3y, v3z, v4x, v4y, v4z, 0.3333, 0.3333, 0.335))<|docstring|>Tests the inside tetrahedra algorithm.<|endoftext|>
06828bd146c38669c7f7af0fa983edd04912bf5560f3cf4b8ddcb5b1039a17c0
def draw_single_series(df, use_std=False, row_labels=[], col_values_to_include=dict(), rows_to_group_by=[], x_range_row=0, y_val_rows=[]): '\n\n :return:\n ' plt_util.plot_from_multiple_experiment_variables(df, row_indices_to_filter=rows_to_group_by, series_indices_to_add=y_val_rows, row_labels=row_labels, x_range_row=x_range_row, col_values_to_include=col_values_to_include, show_plot=False, use_std=use_std)
:return:
paper_results/figure_1/make_figure1_plot.py
draw_single_series
h3shiri/Liquid-Simulations
0
python
def draw_single_series(df, use_std=False, row_labels=[], col_values_to_include=dict(), rows_to_group_by=[], x_range_row=0, y_val_rows=[]): '\n\n \n ' plt_util.plot_from_multiple_experiment_variables(df, row_indices_to_filter=rows_to_group_by, series_indices_to_add=y_val_rows, row_labels=row_labels, x_range_row=x_range_row, col_values_to_include=col_values_to_include, show_plot=False, use_std=use_std)
def draw_single_series(df, use_std=False, row_labels=[], col_values_to_include=dict(), rows_to_group_by=[], x_range_row=0, y_val_rows=[]): '\n\n \n ' plt_util.plot_from_multiple_experiment_variables(df, row_indices_to_filter=rows_to_group_by, series_indices_to_add=y_val_rows, row_labels=row_labels, x_range_row=x_range_row, col_values_to_include=col_values_to_include, show_plot=False, use_std=use_std)<|docstring|>:return:<|endoftext|>
dd15ca52c69c4f6ac84924b6076bf387e931f1dcc1542419773c686c2983bed0
def add_plot_from_path(ax, path, annealing_in_data=True): '\n Draw (1,1) positioned plot -- the top left one.\n :param annealing_in_data: True iff the data at the given path is an experiment that was run using annealing.\n :return:\n ' plt.sca(ax) df = plt_util.load_results_dataframe(path) draw_single_series(df, row_labels=['direct'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[2]) draw_single_series(df, row_labels=['liquid better'], col_values_to_include={1: ['random_better']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[4]) draw_single_series(df, row_labels=['liquid best'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[4]) if annealing_in_data: draw_single_series(df, row_labels=['sortition'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[8]) draw_single_series(df, row_labels=['annealing'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[6]) else: draw_single_series(df, row_labels=['sortition'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[6])
Draw (1,1) positioned plot -- the top left one. :param annealing_in_data: True iff the data at the given path is an experiment that was run using annealing. :return:
paper_results/figure_1/make_figure1_plot.py
add_plot_from_path
h3shiri/Liquid-Simulations
0
python
def add_plot_from_path(ax, path, annealing_in_data=True): '\n Draw (1,1) positioned plot -- the top left one.\n :param annealing_in_data: True iff the data at the given path is an experiment that was run using annealing.\n :return:\n ' plt.sca(ax) df = plt_util.load_results_dataframe(path) draw_single_series(df, row_labels=['direct'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[2]) draw_single_series(df, row_labels=['liquid better'], col_values_to_include={1: ['random_better']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[4]) draw_single_series(df, row_labels=['liquid best'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[4]) if annealing_in_data: draw_single_series(df, row_labels=['sortition'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[8]) draw_single_series(df, row_labels=['annealing'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[6]) else: draw_single_series(df, row_labels=['sortition'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[6])
def add_plot_from_path(ax, path, annealing_in_data=True): '\n Draw (1,1) positioned plot -- the top left one.\n :param annealing_in_data: True iff the data at the given path is an experiment that was run using annealing.\n :return:\n ' plt.sca(ax) df = plt_util.load_results_dataframe(path) draw_single_series(df, row_labels=['direct'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[2]) draw_single_series(df, row_labels=['liquid better'], col_values_to_include={1: ['random_better']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[4]) draw_single_series(df, row_labels=['liquid best'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[4]) if annealing_in_data: draw_single_series(df, row_labels=['sortition'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[8]) draw_single_series(df, row_labels=['annealing'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[6]) else: draw_single_series(df, row_labels=['sortition'], col_values_to_include={1: ['max']}, rows_to_group_by=[1], x_range_row=0, y_val_rows=[6])<|docstring|>Draw (1,1) positioned plot -- the top left one. :param annealing_in_data: True iff the data at the given path is an experiment that was run using annealing. :return:<|endoftext|>
2acfca91aeb5b4bfbf06081d5e38dad1b4afecaa5c501d147a52869eb7693a18
def poll(action): 'poll for a result in a loop' count = 0 interval = 1 while True: count += 1 complete = action() if complete: break time.sleep(interval) if ((count % 3) == 0): interval += 1
poll for a result in a loop
cumulusci/utils/waiting.py
poll
adamlincoln/CumulusCI
163
python
def poll(action): count = 0 interval = 1 while True: count += 1 complete = action() if complete: break time.sleep(interval) if ((count % 3) == 0): interval += 1
def poll(action): count = 0 interval = 1 while True: count += 1 complete = action() if complete: break time.sleep(interval) if ((count % 3) == 0): interval += 1<|docstring|>poll for a result in a loop<|endoftext|>
842a6d7b1989a95fca9878d27e2c49b2a8c1f06fc9bdf67f28a831ea104604bd
def _get_base_rolequeryset(self): 'Get base rolequerysets used by subclasses.\n\n Get :class:`~devilry.apps.core.models.AssignmentGroup`s and prefetch related\n :class:`~devilry.apps.core.models.Examiner`s and :class:`~devilry.apps.core.models.Candidate`s.\n\n Returns:\n QuerySet: A queryset of :class:`~devilry.apps.core.models.AssignmentGroup`s.\n\n ' return AssignmentGroup.objects.annotate_with_is_waiting_for_feedback_count().annotate_with_is_waiting_for_deliveries_count().annotate_with_is_corrected_count().select_related('parentnode__parentnode__parentnode').prefetch_related(models.Prefetch('candidates', queryset=self._get_candidatequeryset())).prefetch_related(models.Prefetch('examiners', queryset=self._get_examinerqueryset())).prefetch_related(models.Prefetch('cached_data', queryset=self._get_assignment_group_cacheddata_queryset()))
Get base rolequerysets used by subclasses. Get :class:`~devilry.apps.core.models.AssignmentGroup`s and prefetch related :class:`~devilry.apps.core.models.Examiner`s and :class:`~devilry.apps.core.models.Candidate`s. Returns: QuerySet: A queryset of :class:`~devilry.apps.core.models.AssignmentGroup`s.
devilry/devilry_group/cradmin_instances/crinstance_base.py
_get_base_rolequeryset
devilry/devilry-django
29
python
def _get_base_rolequeryset(self): 'Get base rolequerysets used by subclasses.\n\n Get :class:`~devilry.apps.core.models.AssignmentGroup`s and prefetch related\n :class:`~devilry.apps.core.models.Examiner`s and :class:`~devilry.apps.core.models.Candidate`s.\n\n Returns:\n QuerySet: A queryset of :class:`~devilry.apps.core.models.AssignmentGroup`s.\n\n ' return AssignmentGroup.objects.annotate_with_is_waiting_for_feedback_count().annotate_with_is_waiting_for_deliveries_count().annotate_with_is_corrected_count().select_related('parentnode__parentnode__parentnode').prefetch_related(models.Prefetch('candidates', queryset=self._get_candidatequeryset())).prefetch_related(models.Prefetch('examiners', queryset=self._get_examinerqueryset())).prefetch_related(models.Prefetch('cached_data', queryset=self._get_assignment_group_cacheddata_queryset()))
def _get_base_rolequeryset(self): 'Get base rolequerysets used by subclasses.\n\n Get :class:`~devilry.apps.core.models.AssignmentGroup`s and prefetch related\n :class:`~devilry.apps.core.models.Examiner`s and :class:`~devilry.apps.core.models.Candidate`s.\n\n Returns:\n QuerySet: A queryset of :class:`~devilry.apps.core.models.AssignmentGroup`s.\n\n ' return AssignmentGroup.objects.annotate_with_is_waiting_for_feedback_count().annotate_with_is_waiting_for_deliveries_count().annotate_with_is_corrected_count().select_related('parentnode__parentnode__parentnode').prefetch_related(models.Prefetch('candidates', queryset=self._get_candidatequeryset())).prefetch_related(models.Prefetch('examiners', queryset=self._get_examinerqueryset())).prefetch_related(models.Prefetch('cached_data', queryset=self._get_assignment_group_cacheddata_queryset()))<|docstring|>Get base rolequerysets used by subclasses. Get :class:`~devilry.apps.core.models.AssignmentGroup`s and prefetch related :class:`~devilry.apps.core.models.Examiner`s and :class:`~devilry.apps.core.models.Candidate`s. Returns: QuerySet: A queryset of :class:`~devilry.apps.core.models.AssignmentGroup`s.<|endoftext|>
2b1ea2c44f0a269f11a100712b6bd1b8b9bf68c58ddd2051b4e87a0b2091fa3f
def _get_candidatequeryset(self): 'Get candidates.\n\n Returns:\n QuerySet: A queryset of :class:`~devilry.apps.core.models.Candidate`s.\n ' return Candidate.objects.select_related('relatedstudent').order_by(Lower(Concat('relatedstudent__user__fullname', 'relatedstudent__user__shortname', output_field=models.CharField())))
Get candidates. Returns: QuerySet: A queryset of :class:`~devilry.apps.core.models.Candidate`s.
devilry/devilry_group/cradmin_instances/crinstance_base.py
_get_candidatequeryset
devilry/devilry-django
29
python
def _get_candidatequeryset(self): 'Get candidates.\n\n Returns:\n QuerySet: A queryset of :class:`~devilry.apps.core.models.Candidate`s.\n ' return Candidate.objects.select_related('relatedstudent').order_by(Lower(Concat('relatedstudent__user__fullname', 'relatedstudent__user__shortname', output_field=models.CharField())))
def _get_candidatequeryset(self): 'Get candidates.\n\n Returns:\n QuerySet: A queryset of :class:`~devilry.apps.core.models.Candidate`s.\n ' return Candidate.objects.select_related('relatedstudent').order_by(Lower(Concat('relatedstudent__user__fullname', 'relatedstudent__user__shortname', output_field=models.CharField())))<|docstring|>Get candidates. Returns: QuerySet: A queryset of :class:`~devilry.apps.core.models.Candidate`s.<|endoftext|>
e9ae7d1f31fe3f274694722996f40e21c768384181af04bdadb0a2ba0aa650dc
def _get_examinerqueryset(self): 'Get examiners.\n\n Returns:\n QuerySet: A queryset of :class:`~devilry.apps.core.models.Examiner`s.\n ' return Examiner.objects.select_related('relatedexaminer').order_by(Lower(Concat('relatedexaminer__user__fullname', 'relatedexaminer__user__shortname', output_field=models.CharField())))
Get examiners. Returns: QuerySet: A queryset of :class:`~devilry.apps.core.models.Examiner`s.
devilry/devilry_group/cradmin_instances/crinstance_base.py
_get_examinerqueryset
devilry/devilry-django
29
python
def _get_examinerqueryset(self): 'Get examiners.\n\n Returns:\n QuerySet: A queryset of :class:`~devilry.apps.core.models.Examiner`s.\n ' return Examiner.objects.select_related('relatedexaminer').order_by(Lower(Concat('relatedexaminer__user__fullname', 'relatedexaminer__user__shortname', output_field=models.CharField())))
def _get_examinerqueryset(self): 'Get examiners.\n\n Returns:\n QuerySet: A queryset of :class:`~devilry.apps.core.models.Examiner`s.\n ' return Examiner.objects.select_related('relatedexaminer').order_by(Lower(Concat('relatedexaminer__user__fullname', 'relatedexaminer__user__shortname', output_field=models.CharField())))<|docstring|>Get examiners. Returns: QuerySet: A queryset of :class:`~devilry.apps.core.models.Examiner`s.<|endoftext|>
967dc113d55b21a4ee8162256b6541679e71f0ee34a5dcc783831def2021314b
def get_titletext_for_role(self, role): 'String representation for the role.\n\n Args:\n role: An :obj:`~devilry.apps.core.models.AssignmentGroup`\n instance of the roleclass for the crinstance.\n\n Returns:\n str: Formatted string reprensentation of the crinstance role.\n ' return '{} - {}'.format(role.period, role.assignment.short_name)
String representation for the role. Args: role: An :obj:`~devilry.apps.core.models.AssignmentGroup` instance of the roleclass for the crinstance. Returns: str: Formatted string reprensentation of the crinstance role.
devilry/devilry_group/cradmin_instances/crinstance_base.py
get_titletext_for_role
devilry/devilry-django
29
python
def get_titletext_for_role(self, role): 'String representation for the role.\n\n Args:\n role: An :obj:`~devilry.apps.core.models.AssignmentGroup`\n instance of the roleclass for the crinstance.\n\n Returns:\n str: Formatted string reprensentation of the crinstance role.\n ' return '{} - {}'.format(role.period, role.assignment.short_name)
def get_titletext_for_role(self, role): 'String representation for the role.\n\n Args:\n role: An :obj:`~devilry.apps.core.models.AssignmentGroup`\n instance of the roleclass for the crinstance.\n\n Returns:\n str: Formatted string reprensentation of the crinstance role.\n ' return '{} - {}'.format(role.period, role.assignment.short_name)<|docstring|>String representation for the role. Args: role: An :obj:`~devilry.apps.core.models.AssignmentGroup` instance of the roleclass for the crinstance. Returns: str: Formatted string reprensentation of the crinstance role.<|endoftext|>
fc2f2e9716ea19a1c519f14bdb8ee00a7c49217c433c9146f137a50610f208d8
def add_footer(file_name, sync_header): "\n 添加header/footer到文件结尾,方便replace_hex/extract_hex查找特定字段\n :param file_name: 文件名地址,以十六进制表示,如 b'eb906767'\n :param sync_header: 标识符,以十六进制表示,如 b'6767'\n :return: 返回内容末尾添加标识符的文件\n " footer = binascii.unhexlify(sync_header) with open(file_name, 'a+b') as f: f.write(footer)
添加header/footer到文件结尾,方便replace_hex/extract_hex查找特定字段 :param file_name: 文件名地址,以十六进制表示,如 b'eb906767' :param sync_header: 标识符,以十六进制表示,如 b'6767' :return: 返回内容末尾添加标识符的文件
utils/repair_hex.py
add_footer
xujinzh/timestamp
0
python
def add_footer(file_name, sync_header): "\n 添加header/footer到文件结尾,方便replace_hex/extract_hex查找特定字段\n :param file_name: 文件名地址,以十六进制表示,如 b'eb906767'\n :param sync_header: 标识符,以十六进制表示,如 b'6767'\n :return: 返回内容末尾添加标识符的文件\n " footer = binascii.unhexlify(sync_header) with open(file_name, 'a+b') as f: f.write(footer)
def add_footer(file_name, sync_header): "\n 添加header/footer到文件结尾,方便replace_hex/extract_hex查找特定字段\n :param file_name: 文件名地址,以十六进制表示,如 b'eb906767'\n :param sync_header: 标识符,以十六进制表示,如 b'6767'\n :return: 返回内容末尾添加标识符的文件\n " footer = binascii.unhexlify(sync_header) with open(file_name, 'a+b') as f: f.write(footer)<|docstring|>添加header/footer到文件结尾,方便replace_hex/extract_hex查找特定字段 :param file_name: 文件名地址,以十六进制表示,如 b'eb906767' :param sync_header: 标识符,以十六进制表示,如 b'6767' :return: 返回内容末尾添加标识符的文件<|endoftext|>
08569888de082651c55814a8b333497416d2f7a372e3df37e35fd0886915160e
def delete_footer(file_name, sync_header): "\n 删除最后添加的footer,恢复文件内容\n :param file_name: 文件名地址,以十六进制表示,如 b'eb906767'\n :param sync_header: 标识符,以十六进制表示,如 b'6767'\n :return: 返回删除掉标识符的文件\n " for i in range(len(binascii.unhexlify(sync_header))): with open(file_name, 'r+b') as f: f.seek((- 1), os.SEEK_END) f.truncate()
删除最后添加的footer,恢复文件内容 :param file_name: 文件名地址,以十六进制表示,如 b'eb906767' :param sync_header: 标识符,以十六进制表示,如 b'6767' :return: 返回删除掉标识符的文件
utils/repair_hex.py
delete_footer
xujinzh/timestamp
0
python
def delete_footer(file_name, sync_header): "\n 删除最后添加的footer,恢复文件内容\n :param file_name: 文件名地址,以十六进制表示,如 b'eb906767'\n :param sync_header: 标识符,以十六进制表示,如 b'6767'\n :return: 返回删除掉标识符的文件\n " for i in range(len(binascii.unhexlify(sync_header))): with open(file_name, 'r+b') as f: f.seek((- 1), os.SEEK_END) f.truncate()
def delete_footer(file_name, sync_header): "\n 删除最后添加的footer,恢复文件内容\n :param file_name: 文件名地址,以十六进制表示,如 b'eb906767'\n :param sync_header: 标识符,以十六进制表示,如 b'6767'\n :return: 返回删除掉标识符的文件\n " for i in range(len(binascii.unhexlify(sync_header))): with open(file_name, 'r+b') as f: f.seek((- 1), os.SEEK_END) f.truncate()<|docstring|>删除最后添加的footer,恢复文件内容 :param file_name: 文件名地址,以十六进制表示,如 b'eb906767' :param sync_header: 标识符,以十六进制表示,如 b'6767' :return: 返回删除掉标识符的文件<|endoftext|>
26aef7f00966f4d8305d20a9d24999df1c5bf21731f9280c9f64e2460c36ecdc
async def test_no_fans(hass, aioclient_mock): 'Test that no fan entities are created.' (await setup_deconz_integration(hass, aioclient_mock)) assert (len(hass.states.async_all()) == 0)
Test that no fan entities are created.
tests/components/deconz/test_fan.py
test_no_fans
TonyApuzzo/home-assistant
4
python
async def test_no_fans(hass, aioclient_mock): (await setup_deconz_integration(hass, aioclient_mock)) assert (len(hass.states.async_all()) == 0)
async def test_no_fans(hass, aioclient_mock): (await setup_deconz_integration(hass, aioclient_mock)) assert (len(hass.states.async_all()) == 0)<|docstring|>Test that no fan entities are created.<|endoftext|>
98b1e160ee4f68273bb94886880f6e2e90b5ea990e9d8198be5b52440bb1a35a
async def test_fans(hass, aioclient_mock): 'Test that all supported fan entities are created.' data = deepcopy(DECONZ_WEB_REQUEST) data['lights'] = deepcopy(FANS) config_entry = (await setup_deconz_integration(hass, aioclient_mock, get_state_response=data)) gateway = get_gateway_from_config_entry(hass, config_entry) assert (len(hass.states.async_all()) == 2) assert hass.states.get('fan.ceiling_fan') assert (hass.states.get('fan.ceiling_fan').state == STATE_ON) assert (hass.states.get('fan.ceiling_fan').attributes['speed'] == SPEED_HIGH) state_changed_event = {'t': 'event', 'e': 'changed', 'r': 'lights', 'id': '1', 'state': {'speed': 0}} gateway.api.event_handler(state_changed_event) (await hass.async_block_till_done()) assert (hass.states.get('fan.ceiling_fan').state == STATE_OFF) assert (hass.states.get('fan.ceiling_fan').attributes['speed'] == SPEED_OFF) mock_deconz_put_request(aioclient_mock, config_entry.data, '/lights/1/state') (await hass.services.async_call(FAN_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: 'fan.ceiling_fan'}, blocking=True)) assert (aioclient_mock.mock_calls[1][2] == {'speed': 4}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: 'fan.ceiling_fan'}, blocking=True)) assert (aioclient_mock.mock_calls[2][2] == {'speed': 0}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: SPEED_LOW}, blocking=True)) assert (aioclient_mock.mock_calls[3][2] == {'speed': 1}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: SPEED_MEDIUM}, blocking=True)) assert (aioclient_mock.mock_calls[4][2] == {'speed': 2}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: SPEED_HIGH}, blocking=True)) assert (aioclient_mock.mock_calls[5][2] == {'speed': 4}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: 
SPEED_OFF}, blocking=True)) assert (aioclient_mock.mock_calls[6][2] == {'speed': 0}) with pytest.raises(ValueError): (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: 'bad value'}, blocking=True)) state_changed_event = {'t': 'event', 'e': 'changed', 'r': 'lights', 'id': '1', 'state': {'speed': 3}} gateway.api.event_handler(state_changed_event) (await hass.async_block_till_done()) assert (hass.states.get('fan.ceiling_fan').state == STATE_ON) assert (hass.states.get('fan.ceiling_fan').attributes['speed'] == SPEED_MEDIUM) (await hass.config_entries.async_unload(config_entry.entry_id)) states = hass.states.async_all() assert (len(hass.states.async_all()) == 2) for state in states: assert (state.state == STATE_UNAVAILABLE) (await hass.config_entries.async_remove(config_entry.entry_id)) (await hass.async_block_till_done()) assert (len(hass.states.async_all()) == 0)
Test that all supported fan entities are created.
tests/components/deconz/test_fan.py
test_fans
TonyApuzzo/home-assistant
4
python
async def test_fans(hass, aioclient_mock): data = deepcopy(DECONZ_WEB_REQUEST) data['lights'] = deepcopy(FANS) config_entry = (await setup_deconz_integration(hass, aioclient_mock, get_state_response=data)) gateway = get_gateway_from_config_entry(hass, config_entry) assert (len(hass.states.async_all()) == 2) assert hass.states.get('fan.ceiling_fan') assert (hass.states.get('fan.ceiling_fan').state == STATE_ON) assert (hass.states.get('fan.ceiling_fan').attributes['speed'] == SPEED_HIGH) state_changed_event = {'t': 'event', 'e': 'changed', 'r': 'lights', 'id': '1', 'state': {'speed': 0}} gateway.api.event_handler(state_changed_event) (await hass.async_block_till_done()) assert (hass.states.get('fan.ceiling_fan').state == STATE_OFF) assert (hass.states.get('fan.ceiling_fan').attributes['speed'] == SPEED_OFF) mock_deconz_put_request(aioclient_mock, config_entry.data, '/lights/1/state') (await hass.services.async_call(FAN_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: 'fan.ceiling_fan'}, blocking=True)) assert (aioclient_mock.mock_calls[1][2] == {'speed': 4}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: 'fan.ceiling_fan'}, blocking=True)) assert (aioclient_mock.mock_calls[2][2] == {'speed': 0}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: SPEED_LOW}, blocking=True)) assert (aioclient_mock.mock_calls[3][2] == {'speed': 1}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: SPEED_MEDIUM}, blocking=True)) assert (aioclient_mock.mock_calls[4][2] == {'speed': 2}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: SPEED_HIGH}, blocking=True)) assert (aioclient_mock.mock_calls[5][2] == {'speed': 4}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: SPEED_OFF}, blocking=True)) assert 
(aioclient_mock.mock_calls[6][2] == {'speed': 0}) with pytest.raises(ValueError): (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: 'bad value'}, blocking=True)) state_changed_event = {'t': 'event', 'e': 'changed', 'r': 'lights', 'id': '1', 'state': {'speed': 3}} gateway.api.event_handler(state_changed_event) (await hass.async_block_till_done()) assert (hass.states.get('fan.ceiling_fan').state == STATE_ON) assert (hass.states.get('fan.ceiling_fan').attributes['speed'] == SPEED_MEDIUM) (await hass.config_entries.async_unload(config_entry.entry_id)) states = hass.states.async_all() assert (len(hass.states.async_all()) == 2) for state in states: assert (state.state == STATE_UNAVAILABLE) (await hass.config_entries.async_remove(config_entry.entry_id)) (await hass.async_block_till_done()) assert (len(hass.states.async_all()) == 0)
async def test_fans(hass, aioclient_mock): data = deepcopy(DECONZ_WEB_REQUEST) data['lights'] = deepcopy(FANS) config_entry = (await setup_deconz_integration(hass, aioclient_mock, get_state_response=data)) gateway = get_gateway_from_config_entry(hass, config_entry) assert (len(hass.states.async_all()) == 2) assert hass.states.get('fan.ceiling_fan') assert (hass.states.get('fan.ceiling_fan').state == STATE_ON) assert (hass.states.get('fan.ceiling_fan').attributes['speed'] == SPEED_HIGH) state_changed_event = {'t': 'event', 'e': 'changed', 'r': 'lights', 'id': '1', 'state': {'speed': 0}} gateway.api.event_handler(state_changed_event) (await hass.async_block_till_done()) assert (hass.states.get('fan.ceiling_fan').state == STATE_OFF) assert (hass.states.get('fan.ceiling_fan').attributes['speed'] == SPEED_OFF) mock_deconz_put_request(aioclient_mock, config_entry.data, '/lights/1/state') (await hass.services.async_call(FAN_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: 'fan.ceiling_fan'}, blocking=True)) assert (aioclient_mock.mock_calls[1][2] == {'speed': 4}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: 'fan.ceiling_fan'}, blocking=True)) assert (aioclient_mock.mock_calls[2][2] == {'speed': 0}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: SPEED_LOW}, blocking=True)) assert (aioclient_mock.mock_calls[3][2] == {'speed': 1}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: SPEED_MEDIUM}, blocking=True)) assert (aioclient_mock.mock_calls[4][2] == {'speed': 2}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: SPEED_HIGH}, blocking=True)) assert (aioclient_mock.mock_calls[5][2] == {'speed': 4}) (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: SPEED_OFF}, blocking=True)) assert 
(aioclient_mock.mock_calls[6][2] == {'speed': 0}) with pytest.raises(ValueError): (await hass.services.async_call(FAN_DOMAIN, SERVICE_SET_SPEED, {ATTR_ENTITY_ID: 'fan.ceiling_fan', ATTR_SPEED: 'bad value'}, blocking=True)) state_changed_event = {'t': 'event', 'e': 'changed', 'r': 'lights', 'id': '1', 'state': {'speed': 3}} gateway.api.event_handler(state_changed_event) (await hass.async_block_till_done()) assert (hass.states.get('fan.ceiling_fan').state == STATE_ON) assert (hass.states.get('fan.ceiling_fan').attributes['speed'] == SPEED_MEDIUM) (await hass.config_entries.async_unload(config_entry.entry_id)) states = hass.states.async_all() assert (len(hass.states.async_all()) == 2) for state in states: assert (state.state == STATE_UNAVAILABLE) (await hass.config_entries.async_remove(config_entry.entry_id)) (await hass.async_block_till_done()) assert (len(hass.states.async_all()) == 0)<|docstring|>Test that all supported fan entities are created.<|endoftext|>
13c48dc09e56aae30a760c5e313af203ae38fd7dcec5925343412ddf6b7e5292
def dynamic_builtin(f, *args, **kwargs): 'Converts a builtin function call inline.' if ((not tf_inspect.isbuiltin(f)) and (f not in (range,))): return f(*args, **kwargs) if (f is len): return dynamic_len(*args, **kwargs) if (six.PY2 and (f is xrange)): return dynamic_range(*args, **kwargs) if (f is range): return dynamic_range(*args, **kwargs) raise NotImplementedError(('The "%s" builtin is not yet supported.' % f.__name__))
Converts a builtin function call inline.
tensorflow/contrib/autograph/utils/builtins.py
dynamic_builtin
dharaneesh92/Tensor-flow
3
python
def dynamic_builtin(f, *args, **kwargs): if ((not tf_inspect.isbuiltin(f)) and (f not in (range,))): return f(*args, **kwargs) if (f is len): return dynamic_len(*args, **kwargs) if (six.PY2 and (f is xrange)): return dynamic_range(*args, **kwargs) if (f is range): return dynamic_range(*args, **kwargs) raise NotImplementedError(('The "%s" builtin is not yet supported.' % f.__name__))
def dynamic_builtin(f, *args, **kwargs): if ((not tf_inspect.isbuiltin(f)) and (f not in (range,))): return f(*args, **kwargs) if (f is len): return dynamic_len(*args, **kwargs) if (six.PY2 and (f is xrange)): return dynamic_range(*args, **kwargs) if (f is range): return dynamic_range(*args, **kwargs) raise NotImplementedError(('The "%s" builtin is not yet supported.' % f.__name__))<|docstring|>Converts a builtin function call inline.<|endoftext|>
ae4b1ef89cb4fcc974ceaa50379b8ff8833d856b33ac5e4f9c79f154d1c4befe
def dynamic_len(list_or_tensor): 'Implementation of len using dynamic dispatch.' if tensor_util.is_tensor(list_or_tensor): shape = list_or_tensor.shape if (not shape): raise ValueError(('len requires non-zero rank for tensor "%s"' % list_or_tensor)) return array_ops.shape(list_or_tensor)[0] return len(list_or_tensor)
Implementation of len using dynamic dispatch.
tensorflow/contrib/autograph/utils/builtins.py
dynamic_len
dharaneesh92/Tensor-flow
3
python
def dynamic_len(list_or_tensor): if tensor_util.is_tensor(list_or_tensor): shape = list_or_tensor.shape if (not shape): raise ValueError(('len requires non-zero rank for tensor "%s"' % list_or_tensor)) return array_ops.shape(list_or_tensor)[0] return len(list_or_tensor)
def dynamic_len(list_or_tensor): if tensor_util.is_tensor(list_or_tensor): shape = list_or_tensor.shape if (not shape): raise ValueError(('len requires non-zero rank for tensor "%s"' % list_or_tensor)) return array_ops.shape(list_or_tensor)[0] return len(list_or_tensor)<|docstring|>Implementation of len using dynamic dispatch.<|endoftext|>
0805fa117022d2b61bcb3cb596e05fbeacc71e7176903184c1de6f9c2d3e3503
def dynamic_range(start_or_stop, stop=None, step=None): 'Implementation of range using dynamic dispatch.' if type_check.is_tensor(start_or_stop, stop, step): if (step is not None): return math_ops.range(start_or_stop, stop, step) if (stop is not None): return math_ops.range(start_or_stop, stop) return math_ops.range(start_or_stop) if (step is not None): return range(start_or_stop, stop, step) elif (stop is not None): return range(start_or_stop, stop) return range(start_or_stop)
Implementation of range using dynamic dispatch.
tensorflow/contrib/autograph/utils/builtins.py
dynamic_range
dharaneesh92/Tensor-flow
3
python
def dynamic_range(start_or_stop, stop=None, step=None): if type_check.is_tensor(start_or_stop, stop, step): if (step is not None): return math_ops.range(start_or_stop, stop, step) if (stop is not None): return math_ops.range(start_or_stop, stop) return math_ops.range(start_or_stop) if (step is not None): return range(start_or_stop, stop, step) elif (stop is not None): return range(start_or_stop, stop) return range(start_or_stop)
def dynamic_range(start_or_stop, stop=None, step=None): if type_check.is_tensor(start_or_stop, stop, step): if (step is not None): return math_ops.range(start_or_stop, stop, step) if (stop is not None): return math_ops.range(start_or_stop, stop) return math_ops.range(start_or_stop) if (step is not None): return range(start_or_stop, stop, step) elif (stop is not None): return range(start_or_stop, stop) return range(start_or_stop)<|docstring|>Implementation of range using dynamic dispatch.<|endoftext|>
21d7d4d9b78f49c2fb4e34526be84671e74ae1ce9fec5d7bfc5e0e00e81f1489
def dynamic_print(*values): 'Implementartion of print using dynamic dispatch.\n\n The function attempts to use tf.Print if all the values are compatible.\n Otherwise, it will fall back to py_func.\n\n Args:\n *values: values to print\n Returns:\n A dummy value indicating the print completed. If tf.\n ' if all(map(is_tf_print_compatible, values)): return logging_ops.Print(1, values) def flushed_print(*vals): print(*vals) sys.stdout.flush() return py_func.wrap_py_func(flushed_print, None, values, use_dummy_return=True)
Implementartion of print using dynamic dispatch. The function attempts to use tf.Print if all the values are compatible. Otherwise, it will fall back to py_func. Args: *values: values to print Returns: A dummy value indicating the print completed. If tf.
tensorflow/contrib/autograph/utils/builtins.py
dynamic_print
dharaneesh92/Tensor-flow
3
python
def dynamic_print(*values): 'Implementartion of print using dynamic dispatch.\n\n The function attempts to use tf.Print if all the values are compatible.\n Otherwise, it will fall back to py_func.\n\n Args:\n *values: values to print\n Returns:\n A dummy value indicating the print completed. If tf.\n ' if all(map(is_tf_print_compatible, values)): return logging_ops.Print(1, values) def flushed_print(*vals): print(*vals) sys.stdout.flush() return py_func.wrap_py_func(flushed_print, None, values, use_dummy_return=True)
def dynamic_print(*values): 'Implementartion of print using dynamic dispatch.\n\n The function attempts to use tf.Print if all the values are compatible.\n Otherwise, it will fall back to py_func.\n\n Args:\n *values: values to print\n Returns:\n A dummy value indicating the print completed. If tf.\n ' if all(map(is_tf_print_compatible, values)): return logging_ops.Print(1, values) def flushed_print(*vals): print(*vals) sys.stdout.flush() return py_func.wrap_py_func(flushed_print, None, values, use_dummy_return=True)<|docstring|>Implementartion of print using dynamic dispatch. The function attempts to use tf.Print if all the values are compatible. Otherwise, it will fall back to py_func. Args: *values: values to print Returns: A dummy value indicating the print completed. If tf.<|endoftext|>
e69407a206ac79e754f303ef5a58db6e26b2e0f9a71d1dd0bb321a7ff41e646f
def dynamic_dataset(iterated): 'Implementartion of smart tf.data.Dataset epoch wrapping.\n\n The function checks if the input is a tf.data.Dataset and if so then wraps it\n so that for each element it returns it also returns the current epoch the\n dataset iteration is in, for two epochs. If the input is not a\n tf.data.Dataset then it just returns the input.\n\n Args:\n iterated: The iterable or tf.data.Dataset that is being iterated over.\n Returns:\n Either just the untouched input, or in the case of input being a\n tf.data.Dataset then it returns a wrapped tf.data.Dataset where for each\n element it returns it also returns the current epoch the dataset iteration\n is in.\n ' if (not isinstance(iterated, dataset_ops.Dataset)): return iterated def epoch_dataset_number_helper(i): return dataset_ops.Dataset.zip((dataset_ops.Dataset.from_tensors(i).repeat(), iterated)) epoch_numbers = dataset_ops.Dataset.range(2) return epoch_numbers.flat_map(epoch_dataset_number_helper)
Implementartion of smart tf.data.Dataset epoch wrapping. The function checks if the input is a tf.data.Dataset and if so then wraps it so that for each element it returns it also returns the current epoch the dataset iteration is in, for two epochs. If the input is not a tf.data.Dataset then it just returns the input. Args: iterated: The iterable or tf.data.Dataset that is being iterated over. Returns: Either just the untouched input, or in the case of input being a tf.data.Dataset then it returns a wrapped tf.data.Dataset where for each element it returns it also returns the current epoch the dataset iteration is in.
tensorflow/contrib/autograph/utils/builtins.py
dynamic_dataset
dharaneesh92/Tensor-flow
3
python
def dynamic_dataset(iterated): 'Implementartion of smart tf.data.Dataset epoch wrapping.\n\n The function checks if the input is a tf.data.Dataset and if so then wraps it\n so that for each element it returns it also returns the current epoch the\n dataset iteration is in, for two epochs. If the input is not a\n tf.data.Dataset then it just returns the input.\n\n Args:\n iterated: The iterable or tf.data.Dataset that is being iterated over.\n Returns:\n Either just the untouched input, or in the case of input being a\n tf.data.Dataset then it returns a wrapped tf.data.Dataset where for each\n element it returns it also returns the current epoch the dataset iteration\n is in.\n ' if (not isinstance(iterated, dataset_ops.Dataset)): return iterated def epoch_dataset_number_helper(i): return dataset_ops.Dataset.zip((dataset_ops.Dataset.from_tensors(i).repeat(), iterated)) epoch_numbers = dataset_ops.Dataset.range(2) return epoch_numbers.flat_map(epoch_dataset_number_helper)
def dynamic_dataset(iterated): 'Implementartion of smart tf.data.Dataset epoch wrapping.\n\n The function checks if the input is a tf.data.Dataset and if so then wraps it\n so that for each element it returns it also returns the current epoch the\n dataset iteration is in, for two epochs. If the input is not a\n tf.data.Dataset then it just returns the input.\n\n Args:\n iterated: The iterable or tf.data.Dataset that is being iterated over.\n Returns:\n Either just the untouched input, or in the case of input being a\n tf.data.Dataset then it returns a wrapped tf.data.Dataset where for each\n element it returns it also returns the current epoch the dataset iteration\n is in.\n ' if (not isinstance(iterated, dataset_ops.Dataset)): return iterated def epoch_dataset_number_helper(i): return dataset_ops.Dataset.zip((dataset_ops.Dataset.from_tensors(i).repeat(), iterated)) epoch_numbers = dataset_ops.Dataset.range(2) return epoch_numbers.flat_map(epoch_dataset_number_helper)<|docstring|>Implementartion of smart tf.data.Dataset epoch wrapping. The function checks if the input is a tf.data.Dataset and if so then wraps it so that for each element it returns it also returns the current epoch the dataset iteration is in, for two epochs. If the input is not a tf.data.Dataset then it just returns the input. Args: iterated: The iterable or tf.data.Dataset that is being iterated over. Returns: Either just the untouched input, or in the case of input being a tf.data.Dataset then it returns a wrapped tf.data.Dataset where for each element it returns it also returns the current epoch the dataset iteration is in.<|endoftext|>
fe28fb7d9f6ad52b1ac9fdc1ebf5ca30a0047afae9e66b1fbdc4ace35a92663d
def dynamic_for_cond(iteration, iterated): 'Implementartion of smart while-loop condition using dynamic dispatch.\n\n The function checks if it is iterating over a tf.data.Dataset or not, and in\n the case it is not then it simply returns if we are still in range of the\n iterated and the next element. If it is iterating over a dataset then it only\n iterates for a single epoch.\n\n Args:\n iteration: The current iteration of the loop.\n iterated: The iterable or tf.data.Dataset that is being iterated over.\n Returns:\n A tuple of a bool that indicates whether the loop should continue, and the\n next element in iterated.\n ' if isinstance(iterated, dataset_ops.Dataset): (curr_epoch, next_elem) = iterated.make_one_shot_iterator().get_next() return (math_ops.less(curr_epoch, 1), next_elem) elif tensor_util.is_tensor(iterated): if (iterated.shape.ndims > 1): elem_shape = array_ops.shape(iterated)[1:] else: elem_shape = () if ((iterated.shape.ndims == 0) or (iterated.shape[0] == 0)): return (False, array_ops.zeros(elem_shape, iterated.dtype)) return control_flow_ops.cond(math_ops.less(iteration, dynamic_len(iterated)), (lambda : (True, iterated[iteration])), (lambda : (False, array_ops.zeros(elem_shape, iterated.dtype)))) elif hasattr(iterated, '__len__'): if (iteration < len(iterated)): return (True, iterated[iteration]) return (False, None) else: raise NotImplementedError('Python iterators not yet supported.')
Implementartion of smart while-loop condition using dynamic dispatch. The function checks if it is iterating over a tf.data.Dataset or not, and in the case it is not then it simply returns if we are still in range of the iterated and the next element. If it is iterating over a dataset then it only iterates for a single epoch. Args: iteration: The current iteration of the loop. iterated: The iterable or tf.data.Dataset that is being iterated over. Returns: A tuple of a bool that indicates whether the loop should continue, and the next element in iterated.
tensorflow/contrib/autograph/utils/builtins.py
dynamic_for_cond
dharaneesh92/Tensor-flow
3
python
def dynamic_for_cond(iteration, iterated): 'Implementartion of smart while-loop condition using dynamic dispatch.\n\n The function checks if it is iterating over a tf.data.Dataset or not, and in\n the case it is not then it simply returns if we are still in range of the\n iterated and the next element. If it is iterating over a dataset then it only\n iterates for a single epoch.\n\n Args:\n iteration: The current iteration of the loop.\n iterated: The iterable or tf.data.Dataset that is being iterated over.\n Returns:\n A tuple of a bool that indicates whether the loop should continue, and the\n next element in iterated.\n ' if isinstance(iterated, dataset_ops.Dataset): (curr_epoch, next_elem) = iterated.make_one_shot_iterator().get_next() return (math_ops.less(curr_epoch, 1), next_elem) elif tensor_util.is_tensor(iterated): if (iterated.shape.ndims > 1): elem_shape = array_ops.shape(iterated)[1:] else: elem_shape = () if ((iterated.shape.ndims == 0) or (iterated.shape[0] == 0)): return (False, array_ops.zeros(elem_shape, iterated.dtype)) return control_flow_ops.cond(math_ops.less(iteration, dynamic_len(iterated)), (lambda : (True, iterated[iteration])), (lambda : (False, array_ops.zeros(elem_shape, iterated.dtype)))) elif hasattr(iterated, '__len__'): if (iteration < len(iterated)): return (True, iterated[iteration]) return (False, None) else: raise NotImplementedError('Python iterators not yet supported.')
def dynamic_for_cond(iteration, iterated): 'Implementartion of smart while-loop condition using dynamic dispatch.\n\n The function checks if it is iterating over a tf.data.Dataset or not, and in\n the case it is not then it simply returns if we are still in range of the\n iterated and the next element. If it is iterating over a dataset then it only\n iterates for a single epoch.\n\n Args:\n iteration: The current iteration of the loop.\n iterated: The iterable or tf.data.Dataset that is being iterated over.\n Returns:\n A tuple of a bool that indicates whether the loop should continue, and the\n next element in iterated.\n ' if isinstance(iterated, dataset_ops.Dataset): (curr_epoch, next_elem) = iterated.make_one_shot_iterator().get_next() return (math_ops.less(curr_epoch, 1), next_elem) elif tensor_util.is_tensor(iterated): if (iterated.shape.ndims > 1): elem_shape = array_ops.shape(iterated)[1:] else: elem_shape = () if ((iterated.shape.ndims == 0) or (iterated.shape[0] == 0)): return (False, array_ops.zeros(elem_shape, iterated.dtype)) return control_flow_ops.cond(math_ops.less(iteration, dynamic_len(iterated)), (lambda : (True, iterated[iteration])), (lambda : (False, array_ops.zeros(elem_shape, iterated.dtype)))) elif hasattr(iterated, '__len__'): if (iteration < len(iterated)): return (True, iterated[iteration]) return (False, None) else: raise NotImplementedError('Python iterators not yet supported.')<|docstring|>Implementartion of smart while-loop condition using dynamic dispatch. The function checks if it is iterating over a tf.data.Dataset or not, and in the case it is not then it simply returns if we are still in range of the iterated and the next element. If it is iterating over a dataset then it only iterates for a single epoch. Args: iteration: The current iteration of the loop. iterated: The iterable or tf.data.Dataset that is being iterated over. 
Returns: A tuple of a bool that indicates whether the loop should continue, and the next element in iterated.<|endoftext|>
43cd2f3fe0bd912f9c07c657b7722b5e972222104e0fceaf045b13645a982e71
def _start_thread(offset, parse): '\n 启动线程\n\n :param offset: 端口偏移值\n ' dht = DHTServer(SERVER_HOST, (SERVER_PORT + offset), offset) threads = [Thread(target=dht.send_find_node_forever), Thread(target=dht.receive_response_forever), Thread(target=dht.bs_timer)] if parse: threads.append(Thread(target=magnet2torrent)) for t in threads: t.start() for t in threads: t.join()
启动线程 :param offset: 端口偏移值
magnet_dht/run.py
_start_thread
show0925/magnet-dht
0
python
def _start_thread(offset, parse): '\n 启动线程\n\n :param offset: 端口偏移值\n ' dht = DHTServer(SERVER_HOST, (SERVER_PORT + offset), offset) threads = [Thread(target=dht.send_find_node_forever), Thread(target=dht.receive_response_forever), Thread(target=dht.bs_timer)] if parse: threads.append(Thread(target=magnet2torrent)) for t in threads: t.start() for t in threads: t.join()
def _start_thread(offset, parse): '\n 启动线程\n\n :param offset: 端口偏移值\n ' dht = DHTServer(SERVER_HOST, (SERVER_PORT + offset), offset) threads = [Thread(target=dht.send_find_node_forever), Thread(target=dht.receive_response_forever), Thread(target=dht.bs_timer)] if parse: threads.append(Thread(target=magnet2torrent)) for t in threads: t.start() for t in threads: t.join()<|docstring|>启动线程 :param offset: 端口偏移值<|endoftext|>
c72b3339b326bda90a3961e073b15666ca998ca5da47e749d0e4b2528640bebb
def start_server(parse=False): '\n 多线程启动服务\n ' processes = [] for i in range(MAX_PROCESSES): processes.append(Process(target=_start_thread, args=(i, parse))) for p in processes: p.start() for p in processes: p.join()
多线程启动服务
magnet_dht/run.py
start_server
show0925/magnet-dht
0
python
def start_server(parse=False): '\n \n ' processes = [] for i in range(MAX_PROCESSES): processes.append(Process(target=_start_thread, args=(i, parse))) for p in processes: p.start() for p in processes: p.join()
def start_server(parse=False): '\n \n ' processes = [] for i in range(MAX_PROCESSES): processes.append(Process(target=_start_thread, args=(i, parse))) for p in processes: p.start() for p in processes: p.join()<|docstring|>多线程启动服务<|endoftext|>
07249c89a7a91d3a6bab39425b3b249fca93009c4ff24a7d078223757c2d13fa
def __init__(self, default: Any, *args: Any, **kwargs: Any): 'Init Default Dict.' super().__init__(*args, **kwargs) self.default_factory: Callable[(..., Any)] = (default if callable(default) else (lambda : default))
Init Default Dict.
h1st_contrib/utils/default_dict.py
__init__
h1st-ai/h1st-contrib
1
python
def __init__(self, default: Any, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) self.default_factory: Callable[(..., Any)] = (default if callable(default) else (lambda : default))
def __init__(self, default: Any, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) self.default_factory: Callable[(..., Any)] = (default if callable(default) else (lambda : default))<|docstring|>Init Default Dict.<|endoftext|>
678159ff4f17b805ca901e9b172d3a6ccd04351a169e8d2c8b8cc0fbfd6c22e6
def __getitem__(self, item: str, /) -> Any: 'Get item.' return (super().__getitem__(item) if (item in self) else self.default_factory())
Get item.
h1st_contrib/utils/default_dict.py
__getitem__
h1st-ai/h1st-contrib
1
python
def __getitem__(self, item: str, /) -> Any: return (super().__getitem__(item) if (item in self) else self.default_factory())
def __getitem__(self, item: str, /) -> Any: return (super().__getitem__(item) if (item in self) else self.default_factory())<|docstring|>Get item.<|endoftext|>
c117f665be3ca3e29780f2ad17cb958d8ac9e0ef47499467e6bbef5deb47e8ff
@property def default(self) -> Any: 'Get default value.' return self.default_factory()
Get default value.
h1st_contrib/utils/default_dict.py
default
h1st-ai/h1st-contrib
1
python
@property def default(self) -> Any: return self.default_factory()
@property def default(self) -> Any: return self.default_factory()<|docstring|>Get default value.<|endoftext|>
fe514b889d092e5711110a52d1017692403b18e81081b4b92a279eee4571b329
@default.setter def default(self, default: Any, /): 'Set default value.' if callable(default): self.default_factory: Callable[(..., Any)] = default elif (default != self.default_factory()): self.default_factory: Callable[(..., Any)] = (lambda : default)
Set default value.
h1st_contrib/utils/default_dict.py
default
h1st-ai/h1st-contrib
1
python
@default.setter def default(self, default: Any, /): if callable(default): self.default_factory: Callable[(..., Any)] = default elif (default != self.default_factory()): self.default_factory: Callable[(..., Any)] = (lambda : default)
@default.setter def default(self, default: Any, /): if callable(default): self.default_factory: Callable[(..., Any)] = default elif (default != self.default_factory()): self.default_factory: Callable[(..., Any)] = (lambda : default)<|docstring|>Set default value.<|endoftext|>
c8400d157c0af04950784e798c302dec610e9184a022e6637f3d45cf67c47e01
def _accuracy(output, target, topk=(1,)): '\n Computes the top-k accuracy specified values of k (copied from PyTorch source code)\n ' with torch.no_grad(): maxk = max(topk) (_, pred) = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, (- 1)).expand_as(pred)) res = [] for k in topk: res.append(correct[:k].reshape((- 1)).float().sum(0, keepdim=True)) return res
Computes the top-k accuracy specified values of k (copied from PyTorch source code)
evaluate.py
_accuracy
mil-ad/prospr
4
python
def _accuracy(output, target, topk=(1,)): '\n \n ' with torch.no_grad(): maxk = max(topk) (_, pred) = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, (- 1)).expand_as(pred)) res = [] for k in topk: res.append(correct[:k].reshape((- 1)).float().sum(0, keepdim=True)) return res
def _accuracy(output, target, topk=(1,)): '\n \n ' with torch.no_grad(): maxk = max(topk) (_, pred) = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, (- 1)).expand_as(pred)) res = [] for k in topk: res.append(correct[:k].reshape((- 1)).float().sum(0, keepdim=True)) return res<|docstring|>Computes the top-k accuracy specified values of k (copied from PyTorch source code)<|endoftext|>
9796f1d7e98f8c69611d2202a2d84a14977cf57276a9cba8114334a9d0d83bed
def createtb_tasks(self): '\n 暂时没有用到,后续为定时任务使用\n :return:\n ' return self.runsql('Create Table tasks(user TEXT , taskkey TEXT, cron TEXT );')
暂时没有用到,后续为定时任务使用 :return:
utils/dbclass.py
createtb_tasks
mawentao119/uniRobot
1
python
def createtb_tasks(self): '\n 暂时没有用到,后续为定时任务使用\n :return:\n ' return self.runsql('Create Table tasks(user TEXT , taskkey TEXT, cron TEXT );')
def createtb_tasks(self): '\n 暂时没有用到,后续为定时任务使用\n :return:\n ' return self.runsql('Create Table tasks(user TEXT , taskkey TEXT, cron TEXT );')<|docstring|>暂时没有用到,后续为定时任务使用 :return:<|endoftext|>
6b1c2efbcb59a6a8934d696c3b67d594c884da3d3ecbc3d6946a54d5ab22337a
def createtb_testcase(self): '\n 保存测试用例\n :return:\n ' return self.runsql("create table testcase(\n info_key TEXT,\n info_name TEXT,\n info_casecontent TEXT DEFAULT '',\n info_doc TEXT DEFAULT '',\n info_tags TEXT DEFAULT '',\n ontime TIMESTAMP DEFAULT (datetime('now', 'localtime')),\n run_status TEXT DEFAULT 'unknown',\n run_user TEXT,\n run_elapsedtime INTEGER DEFAULT 0,\n run_starttime TEXT,\n run_endtime TEXT,\n rcd_handtime TIMESTAMP DEFAULT 0,\n rcd_runtimes INTEGER DEFAULT 0,\n rcd_successtimes INTEGER DEFAULT 0,\n rcd_failtimes INTEGER DEFAULT 0,\n rcd_runusers TEXT,\n primary key (info_key,info_name) \n ); ")
保存测试用例 :return:
utils/dbclass.py
createtb_testcase
mawentao119/uniRobot
1
python
def createtb_testcase(self): '\n 保存测试用例\n :return:\n ' return self.runsql("create table testcase(\n info_key TEXT,\n info_name TEXT,\n info_casecontent TEXT DEFAULT ,\n info_doc TEXT DEFAULT ,\n info_tags TEXT DEFAULT ,\n ontime TIMESTAMP DEFAULT (datetime('now', 'localtime')),\n run_status TEXT DEFAULT 'unknown',\n run_user TEXT,\n run_elapsedtime INTEGER DEFAULT 0,\n run_starttime TEXT,\n run_endtime TEXT,\n rcd_handtime TIMESTAMP DEFAULT 0,\n rcd_runtimes INTEGER DEFAULT 0,\n rcd_successtimes INTEGER DEFAULT 0,\n rcd_failtimes INTEGER DEFAULT 0,\n rcd_runusers TEXT,\n primary key (info_key,info_name) \n ); ")
def createtb_testcase(self): '\n 保存测试用例\n :return:\n ' return self.runsql("create table testcase(\n info_key TEXT,\n info_name TEXT,\n info_casecontent TEXT DEFAULT ,\n info_doc TEXT DEFAULT ,\n info_tags TEXT DEFAULT ,\n ontime TIMESTAMP DEFAULT (datetime('now', 'localtime')),\n run_status TEXT DEFAULT 'unknown',\n run_user TEXT,\n run_elapsedtime INTEGER DEFAULT 0,\n run_starttime TEXT,\n run_endtime TEXT,\n rcd_handtime TIMESTAMP DEFAULT 0,\n rcd_runtimes INTEGER DEFAULT 0,\n rcd_successtimes INTEGER DEFAULT 0,\n rcd_failtimes INTEGER DEFAULT 0,\n rcd_runusers TEXT,\n primary key (info_key,info_name) \n ); ")<|docstring|>保存测试用例 :return:<|endoftext|>
f3d010e1f95ad48deb2d7a4d066d4fb071bc81e385bb922a6ca58ee65303cd2c
def createtb_caserecord(self): '\n 用于用例的历史结果比对\n :return:\n ' return self.runsql("create table caserecord(\n info_key TEXT,\n info_name TEXT,\n info_testproject TEXT DEFAULT '',\n info_projectversion TEXT DEFAULT '',\n ontime TIMESTAMP DEFAULT (datetime('now', 'localtime')),\n run_status TEXT DEFAULT 'unknown',\n run_elapsedtime INTEGER DEFAULT 0,\n run_user TEXT,\n primary key (info_key,info_name,ontime) \n ); ")
用于用例的历史结果比对 :return:
utils/dbclass.py
createtb_caserecord
mawentao119/uniRobot
1
python
def createtb_caserecord(self): '\n 用于用例的历史结果比对\n :return:\n ' return self.runsql("create table caserecord(\n info_key TEXT,\n info_name TEXT,\n info_testproject TEXT DEFAULT ,\n info_projectversion TEXT DEFAULT ,\n ontime TIMESTAMP DEFAULT (datetime('now', 'localtime')),\n run_status TEXT DEFAULT 'unknown',\n run_elapsedtime INTEGER DEFAULT 0,\n run_user TEXT,\n primary key (info_key,info_name,ontime) \n ); ")
def createtb_caserecord(self): '\n 用于用例的历史结果比对\n :return:\n ' return self.runsql("create table caserecord(\n info_key TEXT,\n info_name TEXT,\n info_testproject TEXT DEFAULT ,\n info_projectversion TEXT DEFAULT ,\n ontime TIMESTAMP DEFAULT (datetime('now', 'localtime')),\n run_status TEXT DEFAULT 'unknown',\n run_elapsedtime INTEGER DEFAULT 0,\n run_user TEXT,\n primary key (info_key,info_name,ontime) \n ); ")<|docstring|>用于用例的历史结果比对 :return:<|endoftext|>
42f6502f6d80dbf122d572fa77802d675eef528cf306537128453acd6ebd56b5
def createtb_loginfo(self): '\n 保存所有执行日志,用于统计报表和审计\n :return:\n ' self.runsql(" CREATE TABLE loginfo(\n logtime TIMESTAMP DEFAULT (datetime('now', 'localtime')),\n user TEXT DEFAULT '',\n target TEXT DEFAULT '',\n action TEXT DEFAULT '',\n key TEXT DEFAULT '',\n result TEXT DEFAULT ''\n );")
保存所有执行日志,用于统计报表和审计 :return:
utils/dbclass.py
createtb_loginfo
mawentao119/uniRobot
1
python
def createtb_loginfo(self): '\n 保存所有执行日志,用于统计报表和审计\n :return:\n ' self.runsql(" CREATE TABLE loginfo(\n logtime TIMESTAMP DEFAULT (datetime('now', 'localtime')),\n user TEXT DEFAULT ,\n target TEXT DEFAULT ,\n action TEXT DEFAULT ,\n key TEXT DEFAULT ,\n result TEXT DEFAULT \n );")
def createtb_loginfo(self): '\n 保存所有执行日志,用于统计报表和审计\n :return:\n ' self.runsql(" CREATE TABLE loginfo(\n logtime TIMESTAMP DEFAULT (datetime('now', 'localtime')),\n user TEXT DEFAULT ,\n target TEXT DEFAULT ,\n action TEXT DEFAULT ,\n key TEXT DEFAULT ,\n result TEXT DEFAULT \n );")<|docstring|>保存所有执行日志,用于统计报表和审计 :return:<|endoftext|>
9531f87478d1a93a9dde9c9e7389c87c56bd65cfe858e5b8e1e4496bd6825013
def export_package(filename: str, format: str='YAML'): "Write package specification to the given file.\n\n Parameters\n ----------\n filename: string\n Name of the output file\n format: string, optional\n One of 'YAML' or 'JSON'\n " pckg.export_package(filename, PACKAGE_DATA, DATA_COMMANDS, format=format)
Write package specification to the given file. Parameters ---------- filename: string Name of the output file format: string, optional One of 'YAML' or 'JSON'
vizier/engine/packages/vizual/data.py
export_package
VizierDB/web-api-async
2
python
def export_package(filename: str, format: str='YAML'): "Write package specification to the given file.\n\n Parameters\n ----------\n filename: string\n Name of the output file\n format: string, optional\n One of 'YAML' or 'JSON'\n " pckg.export_package(filename, PACKAGE_DATA, DATA_COMMANDS, format=format)
def export_package(filename: str, format: str='YAML'): "Write package specification to the given file.\n\n Parameters\n ----------\n filename: string\n Name of the output file\n format: string, optional\n One of 'YAML' or 'JSON'\n " pckg.export_package(filename, PACKAGE_DATA, DATA_COMMANDS, format=format)<|docstring|>Write package specification to the given file. Parameters ---------- filename: string Name of the output file format: string, optional One of 'YAML' or 'JSON'<|endoftext|>
e22869638c87f8ec4372c27815aa1684ca75269e5b67130552b5e3e45c2965f7
def forward_train(self, img, img_metas): "\n Args:\n img (tensor): Input images of shape (N, C, H, W).\n Typically these should be mean centered and std scaled.\n img_metas (list[dict]): A list of image info dict where each dict\n contains: 'img_shape', 'filename', and may also contain\n 'ori_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n :class:`mmdet.datasets.pipelines.Collect`.\n\n Returns:\n dict[str, tensor]: A dictionary of loss components.\n " feat = self.extract_feat(img) feat = feat[(- 1)] gt_labels = [img_meta['text'] for img_meta in img_metas] targets_dict = self.label_convertor.str2tensor(gt_labels) out_enc = None if (self.encoder is not None): out_enc = self.encoder(feat) out_dec = self.decoder(feat, out_enc, targets_dict, img_metas, train_mode=True) loss_inputs = (out_dec, targets_dict, img_metas) losses = self.loss(*loss_inputs) return losses
Args: img (tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): A list of image info dict where each dict contains: 'img_shape', 'filename', and may also contain 'ori_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. Returns: dict[str, tensor]: A dictionary of loss components.
mmocr/models/textrecog/recognizer/master.py
forward_train
yanlieting/TableMASTER-mmocr
206
python
def forward_train(self, img, img_metas): "\n Args:\n img (tensor): Input images of shape (N, C, H, W).\n Typically these should be mean centered and std scaled.\n img_metas (list[dict]): A list of image info dict where each dict\n contains: 'img_shape', 'filename', and may also contain\n 'ori_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n :class:`mmdet.datasets.pipelines.Collect`.\n\n Returns:\n dict[str, tensor]: A dictionary of loss components.\n " feat = self.extract_feat(img) feat = feat[(- 1)] gt_labels = [img_meta['text'] for img_meta in img_metas] targets_dict = self.label_convertor.str2tensor(gt_labels) out_enc = None if (self.encoder is not None): out_enc = self.encoder(feat) out_dec = self.decoder(feat, out_enc, targets_dict, img_metas, train_mode=True) loss_inputs = (out_dec, targets_dict, img_metas) losses = self.loss(*loss_inputs) return losses
def forward_train(self, img, img_metas): "\n Args:\n img (tensor): Input images of shape (N, C, H, W).\n Typically these should be mean centered and std scaled.\n img_metas (list[dict]): A list of image info dict where each dict\n contains: 'img_shape', 'filename', and may also contain\n 'ori_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n :class:`mmdet.datasets.pipelines.Collect`.\n\n Returns:\n dict[str, tensor]: A dictionary of loss components.\n " feat = self.extract_feat(img) feat = feat[(- 1)] gt_labels = [img_meta['text'] for img_meta in img_metas] targets_dict = self.label_convertor.str2tensor(gt_labels) out_enc = None if (self.encoder is not None): out_enc = self.encoder(feat) out_dec = self.decoder(feat, out_enc, targets_dict, img_metas, train_mode=True) loss_inputs = (out_dec, targets_dict, img_metas) losses = self.loss(*loss_inputs) return losses<|docstring|>Args: img (tensor): Input images of shape (N, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): A list of image info dict where each dict contains: 'img_shape', 'filename', and may also contain 'ori_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. Returns: dict[str, tensor]: A dictionary of loss components.<|endoftext|>
3e5b11584e365c3d7053795d17047a93b18ac04bae0bc9d5718eeab3c418f67b
def simple_test(self, img, img_metas, **kwargs): 'Test function with test time augmentation.\n\n Args:\n imgs (torch.Tensor): Image input tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n list[str]: Text label result of each image.\n ' feat = self.extract_feat(img) feat = feat[(- 1)] out_enc = None if (self.encoder is not None): out_enc = self.encoder(feat) out_dec = self.decoder(feat, out_enc, None, img_metas, train_mode=False) (label_indexes, label_scores) = self.label_convertor.tensor2idx(out_dec, img_metas) label_strings = self.label_convertor.idx2str(label_indexes) results = [] for (string, score) in zip(label_strings, label_scores): results.append(dict(text=string, score=score)) return results
Test function with test time augmentation. Args: imgs (torch.Tensor): Image input tensor. img_metas (list[dict]): List of image information. Returns: list[str]: Text label result of each image.
mmocr/models/textrecog/recognizer/master.py
simple_test
yanlieting/TableMASTER-mmocr
206
python
def simple_test(self, img, img_metas, **kwargs): 'Test function with test time augmentation.\n\n Args:\n imgs (torch.Tensor): Image input tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n list[str]: Text label result of each image.\n ' feat = self.extract_feat(img) feat = feat[(- 1)] out_enc = None if (self.encoder is not None): out_enc = self.encoder(feat) out_dec = self.decoder(feat, out_enc, None, img_metas, train_mode=False) (label_indexes, label_scores) = self.label_convertor.tensor2idx(out_dec, img_metas) label_strings = self.label_convertor.idx2str(label_indexes) results = [] for (string, score) in zip(label_strings, label_scores): results.append(dict(text=string, score=score)) return results
def simple_test(self, img, img_metas, **kwargs): 'Test function with test time augmentation.\n\n Args:\n imgs (torch.Tensor): Image input tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n list[str]: Text label result of each image.\n ' feat = self.extract_feat(img) feat = feat[(- 1)] out_enc = None if (self.encoder is not None): out_enc = self.encoder(feat) out_dec = self.decoder(feat, out_enc, None, img_metas, train_mode=False) (label_indexes, label_scores) = self.label_convertor.tensor2idx(out_dec, img_metas) label_strings = self.label_convertor.idx2str(label_indexes) results = [] for (string, score) in zip(label_strings, label_scores): results.append(dict(text=string, score=score)) return results<|docstring|>Test function with test time augmentation. Args: imgs (torch.Tensor): Image input tensor. img_metas (list[dict]): List of image information. Returns: list[str]: Text label result of each image.<|endoftext|>
cff9a58ebd2239cd2c8e382bd4e90703080ba5f4af11967846ecd78553f8e88c
@task def create_temp_dir(name: str) -> Path: 'Creates a dir in the local temp folder\n\n Parameters\n ----------\n name: str\n Name of the folder to be created\n\n Returns\n -------\n path: Path\n The path to the temp folder.\n ' try: path = Path(mkdtemp(prefix=name)) if (not (path.exists() and path.is_dir())): path.mkdir(parents=True) return path except TypeError as error: print(f'Error trying to find {path}: {error!s}') return None
Creates a dir in the local temp folder Parameters ---------- name: str Name of the folder to be created Returns ------- path: Path The path to the temp folder.
nl_open_data/tasks.py
create_temp_dir
dataverbinders/nl-open-data
0
python
@task def create_temp_dir(name: str) -> Path: 'Creates a dir in the local temp folder\n\n Parameters\n ----------\n name: str\n Name of the folder to be created\n\n Returns\n -------\n path: Path\n The path to the temp folder.\n ' try: path = Path(mkdtemp(prefix=name)) if (not (path.exists() and path.is_dir())): path.mkdir(parents=True) return path except TypeError as error: print(f'Error trying to find {path}: {error!s}') return None
@task def create_temp_dir(name: str) -> Path: 'Creates a dir in the local temp folder\n\n Parameters\n ----------\n name: str\n Name of the folder to be created\n\n Returns\n -------\n path: Path\n The path to the temp folder.\n ' try: path = Path(mkdtemp(prefix=name)) if (not (path.exists() and path.is_dir())): path.mkdir(parents=True) return path except TypeError as error: print(f'Error trying to find {path}: {error!s}') return None<|docstring|>Creates a dir in the local temp folder Parameters ---------- name: str Name of the folder to be created Returns ------- path: Path The path to the temp folder.<|endoftext|>
ab4ab57d7eed936ee972fd010b03b76f34df0a0fd65aae2e95f965f1c08e5b8c
@task def create_dir(path: Union[(Path, str)]) -> Path: 'Checks whether a path exists and is a directory, and creates it if not.\n\n Parameters\n ----------\n path: Path\n A path to the directory.\n\n Returns\n -------\n path: Path\n The same input path, to new or existing directory.\n ' try: path = Path(path) if (not (path.exists() and path.is_dir())): path.mkdir(parents=True) return path except TypeError as error: print(f'Error trying to find {path}: {error!s}') return None
Checks whether a path exists and is a directory, and creates it if not. Parameters ---------- path: Path A path to the directory. Returns ------- path: Path The same input path, to new or existing directory.
nl_open_data/tasks.py
create_dir
dataverbinders/nl-open-data
0
python
@task def create_dir(path: Union[(Path, str)]) -> Path: 'Checks whether a path exists and is a directory, and creates it if not.\n\n Parameters\n ----------\n path: Path\n A path to the directory.\n\n Returns\n -------\n path: Path\n The same input path, to new or existing directory.\n ' try: path = Path(path) if (not (path.exists() and path.is_dir())): path.mkdir(parents=True) return path except TypeError as error: print(f'Error trying to find {path}: {error!s}') return None
@task def create_dir(path: Union[(Path, str)]) -> Path: 'Checks whether a path exists and is a directory, and creates it if not.\n\n Parameters\n ----------\n path: Path\n A path to the directory.\n\n Returns\n -------\n path: Path\n The same input path, to new or existing directory.\n ' try: path = Path(path) if (not (path.exists() and path.is_dir())): path.mkdir(parents=True) return path except TypeError as error: print(f'Error trying to find {path}: {error!s}') return None<|docstring|>Checks whether a path exists and is a directory, and creates it if not. Parameters ---------- path: Path A path to the directory. Returns ------- path: Path The same input path, to new or existing directory.<|endoftext|>
cc512cbfb6aab8aaef6c2c02627b8be901f7de98aeb1386af881ca9d023a982d
@task(log_stdout=True) def curl_cmd(url: str, filepath: Union[(str, Path)], limit_retries: bool=True, std_out: bool=False, **kwargs) -> str: 'Template for curl command to download file.\n\n Uses `curl -fL -o` that fails silently and follows redirects.\n\n Parameters\n ----------\n url : str\n Url to download\n filepath : str or Path\n File for saving fecthed url\n **kwargs\n Keyword arguments passed to Task constructor\n\n Returns\n -------\n str\n curl command\n\n Raises\n ------\n SKIP\n if filepath exists\n\n Example\n -------\n ```\n from pathlib import Path\n\n from prefect import Parameter, Flow\n from prefect.tasks.shell import ShellTask\n\n curl_download = ShellTask(name=\'curl_download\')\n\n with Flow(\'test\') as flow:\n filepath = Parameter("filepath", required=True)\n curl_command = curl_cmd("https://some/url", filepath)\n curl_download = curl_download(command=curl_command)\n\n flow.run(parameters={\'filepath\': Path.home() / \'test.zip\'})\n ```\n ' if Path(filepath).exists(): raise SKIP(f'File {filepath} already exists.') cmd = (f"curl -fL '{url}' -o '{filepath}'" if limit_retries else f"curl --max-redirs -1 -fL '{url}' -o '{filepath}'") if std_out: print(cmd) return cmd
Template for curl command to download file. Uses `curl -fL -o` that fails silently and follows redirects. Parameters ---------- url : str Url to download filepath : str or Path File for saving fecthed url **kwargs Keyword arguments passed to Task constructor Returns ------- str curl command Raises ------ SKIP if filepath exists Example ------- ``` from pathlib import Path from prefect import Parameter, Flow from prefect.tasks.shell import ShellTask curl_download = ShellTask(name='curl_download') with Flow('test') as flow: filepath = Parameter("filepath", required=True) curl_command = curl_cmd("https://some/url", filepath) curl_download = curl_download(command=curl_command) flow.run(parameters={'filepath': Path.home() / 'test.zip'}) ```
nl_open_data/tasks.py
curl_cmd
dataverbinders/nl-open-data
0
python
@task(log_stdout=True) def curl_cmd(url: str, filepath: Union[(str, Path)], limit_retries: bool=True, std_out: bool=False, **kwargs) -> str: 'Template for curl command to download file.\n\n Uses `curl -fL -o` that fails silently and follows redirects.\n\n Parameters\n ----------\n url : str\n Url to download\n filepath : str or Path\n File for saving fecthed url\n **kwargs\n Keyword arguments passed to Task constructor\n\n Returns\n -------\n str\n curl command\n\n Raises\n ------\n SKIP\n if filepath exists\n\n Example\n -------\n ```\n from pathlib import Path\n\n from prefect import Parameter, Flow\n from prefect.tasks.shell import ShellTask\n\n curl_download = ShellTask(name=\'curl_download\')\n\n with Flow(\'test\') as flow:\n filepath = Parameter("filepath", required=True)\n curl_command = curl_cmd("https://some/url", filepath)\n curl_download = curl_download(command=curl_command)\n\n flow.run(parameters={\'filepath\': Path.home() / \'test.zip\'})\n ```\n ' if Path(filepath).exists(): raise SKIP(f'File {filepath} already exists.') cmd = (f"curl -fL '{url}' -o '{filepath}'" if limit_retries else f"curl --max-redirs -1 -fL '{url}' -o '{filepath}'") if std_out: print(cmd) return cmd
@task(log_stdout=True) def curl_cmd(url: str, filepath: Union[(str, Path)], limit_retries: bool=True, std_out: bool=False, **kwargs) -> str: 'Template for curl command to download file.\n\n Uses `curl -fL -o` that fails silently and follows redirects.\n\n Parameters\n ----------\n url : str\n Url to download\n filepath : str or Path\n File for saving fecthed url\n **kwargs\n Keyword arguments passed to Task constructor\n\n Returns\n -------\n str\n curl command\n\n Raises\n ------\n SKIP\n if filepath exists\n\n Example\n -------\n ```\n from pathlib import Path\n\n from prefect import Parameter, Flow\n from prefect.tasks.shell import ShellTask\n\n curl_download = ShellTask(name=\'curl_download\')\n\n with Flow(\'test\') as flow:\n filepath = Parameter("filepath", required=True)\n curl_command = curl_cmd("https://some/url", filepath)\n curl_download = curl_download(command=curl_command)\n\n flow.run(parameters={\'filepath\': Path.home() / \'test.zip\'})\n ```\n ' if Path(filepath).exists(): raise SKIP(f'File {filepath} already exists.') cmd = (f"curl -fL '{url}' -o '{filepath}'" if limit_retries else f"curl --max-redirs -1 -fL '{url}' -o '{filepath}'") if std_out: print(cmd) return cmd<|docstring|>Template for curl command to download file. Uses `curl -fL -o` that fails silently and follows redirects. Parameters ---------- url : str Url to download filepath : str or Path File for saving fecthed url **kwargs Keyword arguments passed to Task constructor Returns ------- str curl command Raises ------ SKIP if filepath exists Example ------- ``` from pathlib import Path from prefect import Parameter, Flow from prefect.tasks.shell import ShellTask curl_download = ShellTask(name='curl_download') with Flow('test') as flow: filepath = Parameter("filepath", required=True) curl_command = curl_cmd("https://some/url", filepath) curl_download = curl_download(command=curl_command) flow.run(parameters={'filepath': Path.home() / 'test.zip'}) ```<|endoftext|>
5ae7f9e175b9de469f543048b614ba62bb64804848a29a7fd9386e93d06b89e9
@task() def create_linked_dataset(dataset_name: str, gcs_uris: list, config: Box, gcp_env: str='dev', prod_env: str=None, **kwargs): "Creates a BQ dataset and nests tables linked to GCS parquet files.\n\n Parameters\n ----------\n dataset_name : str\n Name of dataset in BQ\n gcs_uris : list of str\n List contiaing the gsutil URIS to parquet file to be linked as tables\n config : Config\n Config object\n gcp_env: str, default='dev'\n determines which GCP configuration to use from config.gcp. Options: ['dev', 'test', 'prod']\n prod_env : str, default=None\n Determines which production environmnet to use, if using gcp_env='prod'\n\n Returns\n -------\n tables\n [description]\n " gcp = nlu.set_gcp(config=config, gcp_env=gcp_env, prod_env=prod_env) dataset_id = dataset_name if nlu.check_bq_dataset(dataset_id=dataset_id, gcp=gcp): nlu.delete_bq_dataset(dataset_id=dataset_id, gcp=gcp) dataset_id = nlu.create_bq_dataset(name=dataset_name, gcp=gcp, **kwargs) tables = nlu.create_linked_tables(gcs_uris, gcp, dataset_id) return tables
Creates a BQ dataset and nests tables linked to GCS parquet files. Parameters ---------- dataset_name : str Name of dataset in BQ gcs_uris : list of str List contiaing the gsutil URIS to parquet file to be linked as tables config : Config Config object gcp_env: str, default='dev' determines which GCP configuration to use from config.gcp. Options: ['dev', 'test', 'prod'] prod_env : str, default=None Determines which production environmnet to use, if using gcp_env='prod' Returns ------- tables [description]
nl_open_data/tasks.py
create_linked_dataset
dataverbinders/nl-open-data
0
python
@task() def create_linked_dataset(dataset_name: str, gcs_uris: list, config: Box, gcp_env: str='dev', prod_env: str=None, **kwargs): "Creates a BQ dataset and nests tables linked to GCS parquet files.\n\n Parameters\n ----------\n dataset_name : str\n Name of dataset in BQ\n gcs_uris : list of str\n List contiaing the gsutil URIS to parquet file to be linked as tables\n config : Config\n Config object\n gcp_env: str, default='dev'\n determines which GCP configuration to use from config.gcp. Options: ['dev', 'test', 'prod']\n prod_env : str, default=None\n Determines which production environmnet to use, if using gcp_env='prod'\n\n Returns\n -------\n tables\n [description]\n " gcp = nlu.set_gcp(config=config, gcp_env=gcp_env, prod_env=prod_env) dataset_id = dataset_name if nlu.check_bq_dataset(dataset_id=dataset_id, gcp=gcp): nlu.delete_bq_dataset(dataset_id=dataset_id, gcp=gcp) dataset_id = nlu.create_bq_dataset(name=dataset_name, gcp=gcp, **kwargs) tables = nlu.create_linked_tables(gcs_uris, gcp, dataset_id) return tables
@task() def create_linked_dataset(dataset_name: str, gcs_uris: list, config: Box, gcp_env: str='dev', prod_env: str=None, **kwargs): "Creates a BQ dataset and nests tables linked to GCS parquet files.\n\n Parameters\n ----------\n dataset_name : str\n Name of dataset in BQ\n gcs_uris : list of str\n List contiaing the gsutil URIS to parquet file to be linked as tables\n config : Config\n Config object\n gcp_env: str, default='dev'\n determines which GCP configuration to use from config.gcp. Options: ['dev', 'test', 'prod']\n prod_env : str, default=None\n Determines which production environmnet to use, if using gcp_env='prod'\n\n Returns\n -------\n tables\n [description]\n " gcp = nlu.set_gcp(config=config, gcp_env=gcp_env, prod_env=prod_env) dataset_id = dataset_name if nlu.check_bq_dataset(dataset_id=dataset_id, gcp=gcp): nlu.delete_bq_dataset(dataset_id=dataset_id, gcp=gcp) dataset_id = nlu.create_bq_dataset(name=dataset_name, gcp=gcp, **kwargs) tables = nlu.create_linked_tables(gcs_uris, gcp, dataset_id) return tables<|docstring|>Creates a BQ dataset and nests tables linked to GCS parquet files. Parameters ---------- dataset_name : str Name of dataset in BQ gcs_uris : list of str List contiaing the gsutil URIS to parquet file to be linked as tables config : Config Config object gcp_env: str, default='dev' determines which GCP configuration to use from config.gcp. Options: ['dev', 'test', 'prod'] prod_env : str, default=None Determines which production environmnet to use, if using gcp_env='prod' Returns ------- tables [description]<|endoftext|>
5f174afbc5eb36cf0b0c166cfe931ad86d285ce8fc3a680e7082a8bc0b1aa325
def create_rl_agents(self, discount, memory_capacity, hidden_sizes, learning_rate): '\n Create DQN agents for both mutation and crossover.\n ' state_space_size = len(self.dims) action_space_size = (len(self.dims) + 1) mutation_agent = DQNAgent('mutate', device, state_space_size, action_space_size, discount=discount, eps=self.epsilon, memory_capacity=memory_capacity, hidden_sizes=hidden_sizes, learning_rate=learning_rate) state_space_size = (len(self.dims) * 2) action_space_size = (len(self.dims) - 2) crossover_agent = DQNAgent('crossover', device, state_space_size, action_space_size, discount=discount, eps=self.epsilon, memory_capacity=memory_capacity, hidden_sizes=hidden_sizes, learning_rate=learning_rate) return (mutation_agent, crossover_agent)
Create DQN agents for both mutation and crossover.
rl_tuner/ga_dqn_tuner.py
create_rl_agents
lhutton1/benchmark-tvm
1
python
def create_rl_agents(self, discount, memory_capacity, hidden_sizes, learning_rate): '\n \n ' state_space_size = len(self.dims) action_space_size = (len(self.dims) + 1) mutation_agent = DQNAgent('mutate', device, state_space_size, action_space_size, discount=discount, eps=self.epsilon, memory_capacity=memory_capacity, hidden_sizes=hidden_sizes, learning_rate=learning_rate) state_space_size = (len(self.dims) * 2) action_space_size = (len(self.dims) - 2) crossover_agent = DQNAgent('crossover', device, state_space_size, action_space_size, discount=discount, eps=self.epsilon, memory_capacity=memory_capacity, hidden_sizes=hidden_sizes, learning_rate=learning_rate) return (mutation_agent, crossover_agent)
def create_rl_agents(self, discount, memory_capacity, hidden_sizes, learning_rate): '\n \n ' state_space_size = len(self.dims) action_space_size = (len(self.dims) + 1) mutation_agent = DQNAgent('mutate', device, state_space_size, action_space_size, discount=discount, eps=self.epsilon, memory_capacity=memory_capacity, hidden_sizes=hidden_sizes, learning_rate=learning_rate) state_space_size = (len(self.dims) * 2) action_space_size = (len(self.dims) - 2) crossover_agent = DQNAgent('crossover', device, state_space_size, action_space_size, discount=discount, eps=self.epsilon, memory_capacity=memory_capacity, hidden_sizes=hidden_sizes, learning_rate=learning_rate) return (mutation_agent, crossover_agent)<|docstring|>Create DQN agents for both mutation and crossover.<|endoftext|>
8ec57cb3f6608be1e0d55c58de13d270bf69ed321509ee1da101d803fa81c5a7
def has_next(self): '\n Return true to continue tuning, false if not.\n ' return (len(self.visited) < len(self.space))
Return true to continue tuning, false if not.
rl_tuner/ga_dqn_tuner.py
has_next
lhutton1/benchmark-tvm
1
python
def has_next(self): '\n \n ' return (len(self.visited) < len(self.space))
def has_next(self): '\n \n ' return (len(self.visited) < len(self.space))<|docstring|>Return true to continue tuning, false if not.<|endoftext|>
e7cb2f06952ff95ac63814bbc1d7a0359ba0289a58087f5aa7ef130c16bf0102
def reserve_elites(self): '\n Swap elite genes with elite genes from previous population.\n ' scores = [t.score for t in self.population] elite_indexes = np.argpartition(scores, (- self.elite_num))[(- self.elite_num):] self.elite_population = [] for idx in elite_indexes: self.elite_population.append(self.population[idx])
Swap elite genes with elite genes from previous population.
rl_tuner/ga_dqn_tuner.py
reserve_elites
lhutton1/benchmark-tvm
1
python
def reserve_elites(self): '\n \n ' scores = [t.score for t in self.population] elite_indexes = np.argpartition(scores, (- self.elite_num))[(- self.elite_num):] self.elite_population = [] for idx in elite_indexes: self.elite_population.append(self.population[idx])
def reserve_elites(self): '\n \n ' scores = [t.score for t in self.population] elite_indexes = np.argpartition(scores, (- self.elite_num))[(- self.elite_num):] self.elite_population = [] for idx in elite_indexes: self.elite_population.append(self.population[idx])<|docstring|>Swap elite genes with elite genes from previous population.<|endoftext|>
882e7b403ab901089ec48cfadf21d71fd30d396028f0444e25a73b53aa71c3ab
def normalise_state(self, state, pad=False): '\n Normalise a state to within 0-1 range. This improves training as it\n removes bias from larger values.\n ' if ((not pad) and (len(state) == len(self.dims))): return np.divide(state, self.dims) if (len(state) == len(self.dims)): normalised = np.divide(state, self.dims) return np.pad(normalised, (0, len(state)), 'constant', constant_values=0) dims = (self.dims + self.dims) return np.divide(state, dims)
Normalise a state to within 0-1 range. This improves training as it removes bias from larger values.
rl_tuner/ga_dqn_tuner.py
normalise_state
lhutton1/benchmark-tvm
1
python
def normalise_state(self, state, pad=False): '\n Normalise a state to within 0-1 range. This improves training as it\n removes bias from larger values.\n ' if ((not pad) and (len(state) == len(self.dims))): return np.divide(state, self.dims) if (len(state) == len(self.dims)): normalised = np.divide(state, self.dims) return np.pad(normalised, (0, len(state)), 'constant', constant_values=0) dims = (self.dims + self.dims) return np.divide(state, dims)
def normalise_state(self, state, pad=False): '\n Normalise a state to within 0-1 range. This improves training as it\n removes bias from larger values.\n ' if ((not pad) and (len(state) == len(self.dims))): return np.divide(state, self.dims) if (len(state) == len(self.dims)): normalised = np.divide(state, self.dims) return np.pad(normalised, (0, len(state)), 'constant', constant_values=0) dims = (self.dims + self.dims) return np.divide(state, dims)<|docstring|>Normalise a state to within 0-1 range. This improves training as it removes bias from larger values.<|endoftext|>
ecf3b0de312cae55000b7e09411670a861206d6a321a0806baacd14f1bc594a5
def calculate_reward(self, score): '\n Calculate reward based on reward function chosen.\n ' scale = 1000000000.0 reward_multiplier = 5 if (self.reward_function == RewardFunction.R1): return ((self.initial_score - score) / scale) elif (self.reward_function == RewardFunction.R2): reward = (score if (score >= self.best_flops) else 0) return (reward / scale) elif (self.reward_function == RewardFunction.R3): if (score >= self.best_flops): reward = (score * reward_multiplier) elif (self.prev_fitness < score <= self.best_flops): reward = score elif (self.prev_fitness == score): reward = 0 else: reward = (score * (- 1)) return (reward / scale)
Calculate reward based on reward function chosen.
rl_tuner/ga_dqn_tuner.py
calculate_reward
lhutton1/benchmark-tvm
1
python
def calculate_reward(self, score): '\n \n ' scale = 1000000000.0 reward_multiplier = 5 if (self.reward_function == RewardFunction.R1): return ((self.initial_score - score) / scale) elif (self.reward_function == RewardFunction.R2): reward = (score if (score >= self.best_flops) else 0) return (reward / scale) elif (self.reward_function == RewardFunction.R3): if (score >= self.best_flops): reward = (score * reward_multiplier) elif (self.prev_fitness < score <= self.best_flops): reward = score elif (self.prev_fitness == score): reward = 0 else: reward = (score * (- 1)) return (reward / scale)
def calculate_reward(self, score): '\n \n ' scale = 1000000000.0 reward_multiplier = 5 if (self.reward_function == RewardFunction.R1): return ((self.initial_score - score) / scale) elif (self.reward_function == RewardFunction.R2): reward = (score if (score >= self.best_flops) else 0) return (reward / scale) elif (self.reward_function == RewardFunction.R3): if (score >= self.best_flops): reward = (score * reward_multiplier) elif (self.prev_fitness < score <= self.best_flops): reward = score elif (self.prev_fitness == score): reward = 0 else: reward = (score * (- 1)) return (reward / scale)<|docstring|>Calculate reward based on reward function chosen.<|endoftext|>
d5d6f4492a550a413552c01ed75ac754c9b09374e3392ace1457382675530158
def rl_mutate(self, transitions): '\n Mutate genes using DQN to suggest which knob to randomly mutate.\n Mutations happen inplace on the "transitions" that are input.\n ' for (i, transition) in enumerate(transitions): gene = transition.gene next_gene = gene.copy() if (len(self.visited) < len(self.space)): action = self.mutation_agent.get_action(gene) if (action != 0): next_gene[(action - 1)] = np.random.randint(self.dims[(action - 1)]) while (knob2point(next_gene, self.dims) in self.visited): action = np.random.randint(len(self.dims)) next_gene[action] = np.random.randint(self.dims[action]) transitions[i] = Transition(self.normalise_state(gene), self.normalise_state(next_gene), action, next_gene) self.visited.add(knob2point(gene, self.dims)) else: break
Mutate genes using DQN to suggest which knob to randomly mutate. Mutations happen inplace on the "transitions" that are input.
rl_tuner/ga_dqn_tuner.py
rl_mutate
lhutton1/benchmark-tvm
1
python
def rl_mutate(self, transitions): '\n Mutate genes using DQN to suggest which knob to randomly mutate.\n Mutations happen inplace on the "transitions" that are input.\n ' for (i, transition) in enumerate(transitions): gene = transition.gene next_gene = gene.copy() if (len(self.visited) < len(self.space)): action = self.mutation_agent.get_action(gene) if (action != 0): next_gene[(action - 1)] = np.random.randint(self.dims[(action - 1)]) while (knob2point(next_gene, self.dims) in self.visited): action = np.random.randint(len(self.dims)) next_gene[action] = np.random.randint(self.dims[action]) transitions[i] = Transition(self.normalise_state(gene), self.normalise_state(next_gene), action, next_gene) self.visited.add(knob2point(gene, self.dims)) else: break
def rl_mutate(self, transitions): '\n Mutate genes using DQN to suggest which knob to randomly mutate.\n Mutations happen inplace on the "transitions" that are input.\n ' for (i, transition) in enumerate(transitions): gene = transition.gene next_gene = gene.copy() if (len(self.visited) < len(self.space)): action = self.mutation_agent.get_action(gene) if (action != 0): next_gene[(action - 1)] = np.random.randint(self.dims[(action - 1)]) while (knob2point(next_gene, self.dims) in self.visited): action = np.random.randint(len(self.dims)) next_gene[action] = np.random.randint(self.dims[action]) transitions[i] = Transition(self.normalise_state(gene), self.normalise_state(next_gene), action, next_gene) self.visited.add(knob2point(gene, self.dims)) else: break<|docstring|>Mutate genes using DQN to suggest which knob to randomly mutate. Mutations happen inplace on the "transitions" that are input.<|endoftext|>
3b562879837bed4d1d7faa526b800a444825d105408f53282791874bc333220d
def rl_crossover(self, probabilities, indices, batch_size): '\n Crossover genes using DQN to suggest the crossover point.\n ' tmp_genes = [] for i in range(batch_size): (p1, p2) = np.random.choice(indices, size=2, replace=False, p=probabilities) (p1, p2) = (self.population[p1].gene, self.population[p2].gene) state = (p1 + p2) point = self.crossover_agent.get_action(state) next_gene = (p1[:point] + p2[point:]) tmp_genes.append(Transition(self.normalise_state(state), self.normalise_state(next_gene, pad=True), point, next_gene)) return tmp_genes
Crossover genes using DQN to suggest the crossover point.
rl_tuner/ga_dqn_tuner.py
rl_crossover
lhutton1/benchmark-tvm
1
python
def rl_crossover(self, probabilities, indices, batch_size): '\n \n ' tmp_genes = [] for i in range(batch_size): (p1, p2) = np.random.choice(indices, size=2, replace=False, p=probabilities) (p1, p2) = (self.population[p1].gene, self.population[p2].gene) state = (p1 + p2) point = self.crossover_agent.get_action(state) next_gene = (p1[:point] + p2[point:]) tmp_genes.append(Transition(self.normalise_state(state), self.normalise_state(next_gene, pad=True), point, next_gene)) return tmp_genes
def rl_crossover(self, probabilities, indices, batch_size): '\n \n ' tmp_genes = [] for i in range(batch_size): (p1, p2) = np.random.choice(indices, size=2, replace=False, p=probabilities) (p1, p2) = (self.population[p1].gene, self.population[p2].gene) state = (p1 + p2) point = self.crossover_agent.get_action(state) next_gene = (p1[:point] + p2[point:]) tmp_genes.append(Transition(self.normalise_state(state), self.normalise_state(next_gene, pad=True), point, next_gene)) return tmp_genes<|docstring|>Crossover genes using DQN to suggest the crossover point.<|endoftext|>
36f65a6e7263035bd847b82acda5753192a55547d366259c7c15af8f98caa90b
def mutate_update(self, n_parallel, measure_batch, callbacks): '\n Perform RL mutation on the population.\n ' if (self.step_count >= self.pop_size): for i in range(ceil((self.pop_size / self.train_frequency))): batch_size = min(self.train_frequency, (self.pop_size - (i * self.train_frequency))) transitions_offset = ((i * self.train_frequency) - 1) transitions = self.population[transitions_offset:(transitions_offset + batch_size)] self.rl_mutate(transitions) self.measure_configs(transitions, n_parallel, measure_batch, callbacks) for (j, transition) in enumerate(transitions): reward = self.calculate_reward(transition.score) self.mutation_agent.memory.store_experience([transition.prev_state, transition.action, transition.state, reward]) self.population[(transitions_offset + j)] = transition if ((self.mutation_step_count > 0) and (((self.mutation_step_count + j) % self.update_frequency) == 0)): self.mutation_agent.increment_target() self.mutation_step_count += batch_size if (self.mutation_step_count > self.learn_start): self.mutation_agent.train(self.agent_batch_size) self.mutation_agent.reduce_epsilon() self.prev_fitness = np.mean(self.scores[(- self.pop_size):])
Perform RL mutation on the population.
rl_tuner/ga_dqn_tuner.py
mutate_update
lhutton1/benchmark-tvm
1
python
def mutate_update(self, n_parallel, measure_batch, callbacks): '\n \n ' if (self.step_count >= self.pop_size): for i in range(ceil((self.pop_size / self.train_frequency))): batch_size = min(self.train_frequency, (self.pop_size - (i * self.train_frequency))) transitions_offset = ((i * self.train_frequency) - 1) transitions = self.population[transitions_offset:(transitions_offset + batch_size)] self.rl_mutate(transitions) self.measure_configs(transitions, n_parallel, measure_batch, callbacks) for (j, transition) in enumerate(transitions): reward = self.calculate_reward(transition.score) self.mutation_agent.memory.store_experience([transition.prev_state, transition.action, transition.state, reward]) self.population[(transitions_offset + j)] = transition if ((self.mutation_step_count > 0) and (((self.mutation_step_count + j) % self.update_frequency) == 0)): self.mutation_agent.increment_target() self.mutation_step_count += batch_size if (self.mutation_step_count > self.learn_start): self.mutation_agent.train(self.agent_batch_size) self.mutation_agent.reduce_epsilon() self.prev_fitness = np.mean(self.scores[(- self.pop_size):])
def mutate_update(self, n_parallel, measure_batch, callbacks): '\n \n ' if (self.step_count >= self.pop_size): for i in range(ceil((self.pop_size / self.train_frequency))): batch_size = min(self.train_frequency, (self.pop_size - (i * self.train_frequency))) transitions_offset = ((i * self.train_frequency) - 1) transitions = self.population[transitions_offset:(transitions_offset + batch_size)] self.rl_mutate(transitions) self.measure_configs(transitions, n_parallel, measure_batch, callbacks) for (j, transition) in enumerate(transitions): reward = self.calculate_reward(transition.score) self.mutation_agent.memory.store_experience([transition.prev_state, transition.action, transition.state, reward]) self.population[(transitions_offset + j)] = transition if ((self.mutation_step_count > 0) and (((self.mutation_step_count + j) % self.update_frequency) == 0)): self.mutation_agent.increment_target() self.mutation_step_count += batch_size if (self.mutation_step_count > self.learn_start): self.mutation_agent.train(self.agent_batch_size) self.mutation_agent.reduce_epsilon() self.prev_fitness = np.mean(self.scores[(- self.pop_size):])<|docstring|>Perform RL mutation on the population.<|endoftext|>