text stringlengths 0 1.05M | meta dict |
|---|---|
"""Asynchronous Advantage Actor-Critic (A3C) algorithm for reinforcement learning."""
from deepchem.models import TensorGraph
from deepchem.models.tensorgraph import TFWrapper
from deepchem.models.tensorgraph.layers import Feature, Weights, Label, Layer
import numpy as np
import tensorflow as tf
import collections
import copy
import multiprocessing
import os
import re
import threading
class A3CLoss(Layer):
  """This layer computes the loss function for A3C.

  The loss is the sum of three terms: the policy loss (which maximizes the
  advantage-weighted log-probability of the actions actually taken), the value
  loss (squared error between the value estimate and the discounted reward),
  and an entropy bonus that encourages exploration.
  """

  def __init__(self, value_weight, entropy_weight, **kwargs):
    """Create the loss layer.

    Parameters
    ----------
    value_weight: float
      scale factor for the value loss term
    entropy_weight: float
      scale factor for the entropy term
    """
    super(A3CLoss, self).__init__(**kwargs)
    self.value_weight = value_weight
    self.entropy_weight = entropy_weight

  def create_tensor(self, **kwargs):
    reward, action, prob, value, advantage = [
        layer.out_tensor for layer in self.in_layers
    ]
    # Add a tiny epsilon so tf.log never sees an exact zero probability.
    prob = prob + np.finfo(np.float32).eps
    log_prob = tf.log(prob)
    # Sum over the action axis (axis=1) to get the log-probability of the
    # chosen action for each sample.  Without axis=1 the sum collapses over
    # the whole batch to a scalar, mis-scaling the policy gradient.
    policy_loss = -tf.reduce_mean(
        advantage * tf.reduce_sum(action * log_prob, axis=1))
    value_loss = tf.reduce_mean(tf.square(reward - value))
    entropy = -tf.reduce_mean(tf.reduce_sum(prob * log_prob, axis=1))
    self.out_tensor = policy_loss + self.value_weight * value_loss - self.entropy_weight * entropy
    return self.out_tensor
class A3C(object):
  """
  Implements the Asynchronous Advantage Actor-Critic (A3C) algorithm for reinforcement learning.

  The algorithm is described in Mnih et al, "Asynchronous Methods for Deep Reinforcement Learning"
  (https://arxiv.org/abs/1602.01783). This class requires the policy to output two quantities:
  a vector giving the probability of taking each action, and an estimate of the value function for
  the current state. It optimizes both outputs at once using a loss that is the sum of three terms:

  1. The policy loss, which seeks to maximize the discounted reward for each action.
  2. The value loss, which tries to make the value estimate match the actual discounted reward
     that was attained at each step.
  3. An entropy term to encourage exploration.

  This class only supports environments with discrete action spaces, not continuous ones. The
  "action" argument passed to the environment is an integer, giving the index of the action to perform.

  This class supports Generalized Advantage Estimation as described in Schulman et al., "High-Dimensional
  Continuous Control Using Generalized Advantage Estimation" (https://arxiv.org/abs/1506.02438).
  This is a method of trading off bias and variance in the advantage estimate, which can sometimes
  improve the rate of convergance. Use the advantage_lambda parameter to adjust the tradeoff.

  This class supports Hindsight Experience Replay as described in Andrychowicz et al., "Hindsight
  Experience Replay" (https://arxiv.org/abs/1707.01495). This is a method that can enormously
  accelerate learning when rewards are very rare. It requires that the environment state contains
  information about the goal the agent is trying to achieve. Each time it generates a rollout, it
  processes that rollout twice: once using the actual goal the agent was pursuing while generating
  it, and again using the final state of that rollout as the goal. This guarantees that half of
  all rollouts processed will be ones that achieved their goals, and hence received a reward.

  To use this feature, specify use_hindsight=True to the constructor. The environment must have
  a method defined as follows:

  def apply_hindsight(self, states, actions, goal):
    ...
    return new_states, rewards

  The method receives the list of states generated during the rollout, the action taken for each one,
  and a new goal state. It should generate a new list of states that are identical to the input ones,
  except specifying the new goal. It should return that list of states, and the rewards that would
  have been received for taking the specified actions from those states.
  """

  def __init__(self,
               env,
               policy,
               max_rollout_length=20,
               discount_factor=0.99,
               advantage_lambda=0.98,
               value_weight=1.0,
               entropy_weight=0.01,
               optimizer=None,
               model_dir=None,
               use_hindsight=False):
    """Create an object for optimizing a policy.

    Parameters
    ----------
    env: Environment
      the Environment to interact with
    policy: Policy
      the Policy to optimize. Its create_layers() method must return a map containing the
      keys 'action_prob' and 'value', corresponding to the action probabilities and value estimate
    max_rollout_length: int
      the maximum length of rollouts to generate
    discount_factor: float
      the discount factor to use when computing rewards
    advantage_lambda: float
      the parameter for trading bias vs. variance in Generalized Advantage Estimation
    value_weight: float
      a scale factor for the value loss term in the loss function
    entropy_weight: float
      a scale factor for the entropy term in the loss function
    optimizer: TFWrapper
      a callable object that creates the optimizer to use. If None, a default optimizer is used.
    model_dir: str
      the directory in which the model will be saved. If None, a temporary directory will be created.
    use_hindsight: bool
      if True, use Hindsight Experience Replay
    """
    self._env = env
    self._policy = policy
    self.max_rollout_length = max_rollout_length
    self.discount_factor = discount_factor
    self.advantage_lambda = advantage_lambda
    self.value_weight = value_weight
    self.entropy_weight = entropy_weight
    self.use_hindsight = use_hindsight
    # collections.Sequence was a deprecated alias that was removed in
    # Python 3.10; the abc version is the supported spelling.
    self._state_is_list = isinstance(env.state_shape[0],
                                     collections.abc.Sequence)
    if optimizer is None:
      self._optimizer = TFWrapper(
          tf.train.AdamOptimizer, learning_rate=0.001, beta1=0.9, beta2=0.999)
    else:
      self._optimizer = optimizer
    (self._graph, self._features, self._rewards, self._actions,
     self._action_prob, self._value, self._advantages) = self._build_graph(
         None, 'global', model_dir)
    with self._graph._get_tf("Graph").as_default():
      self._session = tf.Session()
      self._rnn_states = self._graph.rnn_zero_states

  def _build_graph(self, tf_graph, scope, model_dir):
    """Construct a TensorGraph containing the policy and loss calculations."""
    state_shape = self._env.state_shape
    if not self._state_is_list:
      state_shape = [state_shape]
    features = [Feature(shape=[None] + list(s)) for s in state_shape]
    policy_layers = self._policy.create_layers(features)
    action_prob = policy_layers['action_prob']
    value = policy_layers['value']
    rewards = Weights(shape=(None,))
    advantages = Weights(shape=(None,))
    actions = Label(shape=(None, self._env.n_actions))
    loss = A3CLoss(
        self.value_weight,
        self.entropy_weight,
        in_layers=[rewards, actions, action_prob, value, advantages])
    graph = TensorGraph(
        batch_size=self.max_rollout_length,
        use_queue=False,
        graph=tf_graph,
        model_dir=model_dir)
    for f in features:
      graph._add_layer(f)
    graph.add_output(action_prob)
    graph.add_output(value)
    graph.set_loss(loss)
    graph.set_optimizer(self._optimizer)
    with graph._get_tf("Graph").as_default():
      # Build inside a variable scope so worker copies ('worker0', ...) can be
      # distinguished from the shared 'global' copy of the variables.
      with tf.variable_scope(scope):
        graph.build()
    return graph, features, rewards, actions, action_prob, value, advantages

  def fit(self,
          total_steps,
          max_checkpoints_to_keep=5,
          checkpoint_interval=600,
          restore=False):
    """Train the policy.

    Parameters
    ----------
    total_steps: int
      the total number of time steps to perform on the environment, across all rollouts
      on all threads
    max_checkpoints_to_keep: int
      the maximum number of checkpoint files to keep. When this number is reached, older
      files are deleted.
    checkpoint_interval: float
      the time interval at which to save checkpoints, measured in seconds
    restore: bool
      if True, restore the model from the most recent checkpoint and continue training
      from there. If False, retrain the model from scratch.
    """
    with self._graph._get_tf("Graph").as_default():
      step_count = [0]
      workers = []
      threads = []
      for i in range(multiprocessing.cpu_count()):
        workers.append(_Worker(self, i))
      self._session.run(tf.global_variables_initializer())
      if restore:
        self.restore()
      for worker in workers:
        # Pass the worker through Thread's args rather than closing over the
        # loop variable: a plain "lambda: worker.run(...)" late-binds
        # "worker", so several threads could end up running the same (last)
        # worker instance.
        thread = threading.Thread(
            name=worker.scope,
            target=worker.run,
            args=(step_count, total_steps))
        threads.append(thread)
        thread.start()
      variables = tf.get_collection(
          tf.GraphKeys.GLOBAL_VARIABLES, scope='global')
      saver = tf.train.Saver(variables, max_to_keep=max_checkpoints_to_keep)
      checkpoint_index = 0
      while True:
        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # supported name.
        threads = [t for t in threads if t.is_alive()]
        if len(threads) > 0:
          # join() with a timeout doubles as the checkpoint timer.
          threads[0].join(checkpoint_interval)
        checkpoint_index += 1
        saver.save(
            self._session, self._graph.save_file, global_step=checkpoint_index)
        if len(threads) == 0:
          break

  def predict(self, state, use_saved_states=True, save_states=True):
    """Compute the policy's output predictions for a state.

    If the policy involves recurrent layers, this method can preserve their internal
    states between calls. Use the use_saved_states and save_states arguments to specify
    how it should behave.

    Parameters
    ----------
    state: array
      the state of the environment for which to generate predictions
    use_saved_states: bool
      if True, the states most recently saved by a previous call to predict() or select_action()
      will be used as the initial states. If False, the internal states of all recurrent layers
      will be set to all zeros before computing the predictions.
    save_states: bool
      if True, the internal states of all recurrent layers at the end of the calculation
      will be saved, and any previously saved states will be discarded. If False, the
      states at the end of the calculation will be discarded, and any previously saved
      states will be kept.

    Returns
    -------
    the array of action probabilities, and the estimated value function
    """
    if not self._state_is_list:
      state = [state]
    with self._graph._get_tf("Graph").as_default():
      feed_dict = self._create_feed_dict(state, use_saved_states)
      tensors = [self._action_prob.out_tensor, self._value.out_tensor]
      if save_states:
        tensors += self._graph.rnn_final_states
      results = self._session.run(tensors, feed_dict=feed_dict)
      if save_states:
        self._rnn_states = results[2:]
      return results[:2]

  def select_action(self,
                    state,
                    deterministic=False,
                    use_saved_states=True,
                    save_states=True):
    """Select an action to perform based on the environment's state.

    If the policy involves recurrent layers, this method can preserve their internal
    states between calls. Use the use_saved_states and save_states arguments to specify
    how it should behave.

    Parameters
    ----------
    state: array
      the state of the environment for which to select an action
    deterministic: bool
      if True, always return the best action (that is, the one with highest probability).
      If False, randomly select an action based on the computed probabilities.
    use_saved_states: bool
      if True, the states most recently saved by a previous call to predict() or select_action()
      will be used as the initial states. If False, the internal states of all recurrent layers
      will be set to all zeros before computing the predictions.
    save_states: bool
      if True, the internal states of all recurrent layers at the end of the calculation
      will be saved, and any previously saved states will be discarded. If False, the
      states at the end of the calculation will be discarded, and any previously saved
      states will be kept.

    Returns
    -------
    the index of the selected action
    """
    if not self._state_is_list:
      state = [state]
    with self._graph._get_tf("Graph").as_default():
      feed_dict = self._create_feed_dict(state, use_saved_states)
      tensors = [self._action_prob.out_tensor]
      if save_states:
        tensors += self._graph.rnn_final_states
      results = self._session.run(tensors, feed_dict=feed_dict)
      probabilities = results[0]
      if save_states:
        self._rnn_states = results[1:]
      if deterministic:
        return probabilities.argmax()
      else:
        return np.random.choice(
            np.arange(self._env.n_actions), p=probabilities[0])

  def restore(self):
    """Reload the model parameters from the most recent checkpoint file."""
    last_checkpoint = tf.train.latest_checkpoint(self._graph.model_dir)
    if last_checkpoint is None:
      raise ValueError('No checkpoint found')
    with self._graph._get_tf("Graph").as_default():
      variables = tf.get_collection(
          tf.GraphKeys.GLOBAL_VARIABLES, scope='global')
      saver = tf.train.Saver(variables)
      saver.restore(self._session, last_checkpoint)

  def _create_feed_dict(self, state, use_saved_states):
    """Create a feed dict for use by predict() or select_action()."""
    # A batch dimension of 1 is prepended since the placeholders expect a
    # batch of states.
    feed_dict = dict((f.out_tensor, np.expand_dims(s, axis=0))
                     for f, s in zip(self._features, state))
    if use_saved_states:
      rnn_states = self._rnn_states
    else:
      rnn_states = self._graph.rnn_zero_states
    for (placeholder, value) in zip(self._graph.rnn_initial_states, rnn_states):
      feed_dict[placeholder] = value
    return feed_dict
class _Worker(object):
  """A Worker object is created for each training thread.

  Each worker holds its own copy of the environment and its own copy of the
  policy graph (built in a per-worker variable scope inside the shared
  TensorFlow graph).  It repeatedly syncs its local variables from the
  'global' copy, generates a rollout, and applies the resulting gradients
  to the global variables.
  """

  def __init__(self, a3c, index):
    # a3c: the owning A3C object (shared session, global graph, hyperparams).
    # index: integer id of this worker; determines its variable scope name.
    self.a3c = a3c
    self.index = index
    self.scope = 'worker%d' % index
    # Each worker needs an independent environment it can step freely.
    self.env = copy.deepcopy(a3c._env)
    self.env.reset()
    # Build a local copy of the policy/loss graph in this worker's scope,
    # inside the same tf.Graph as the global copy.
    self.graph, self.features, self.rewards, self.actions, self.action_prob, self.value, self.advantages = a3c._build_graph(
        a3c._graph._get_tf('Graph'), self.scope, None)
    self.rnn_states = self.graph.rnn_zero_states
    with a3c._graph._get_tf("Graph").as_default():
      local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     self.scope)
      global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                      'global')
      # Gradients are computed with respect to the LOCAL variables but
      # applied to the GLOBAL ones; this pairing relies on the two
      # collections listing corresponding variables in the same order.
      gradients = tf.gradients(self.graph.loss.out_tensor, local_vars)
      grads_and_vars = list(zip(gradients, global_vars))
      self.train_op = a3c._graph._get_tf('Optimizer').apply_gradients(
          grads_and_vars)
      # Op that copies the current global parameters into the local copy.
      self.update_local_variables = tf.group(
          * [tf.assign(v1, v2) for v1, v2 in zip(local_vars, global_vars)])

  def run(self, step_count, total_steps):
    """Main loop of the training thread.

    Parameters
    ----------
    step_count: list of one int
      shared mutable counter of environment steps taken across all workers
    total_steps: int
      stop once step_count reaches this many steps
    """
    with self.graph._get_tf("Graph").as_default():
      while step_count[0] < total_steps:
        # Sync local parameters from the global copy before each rollout.
        self.a3c._session.run(self.update_local_variables)
        # Remember the RNN state at the start of the rollout so training can
        # replay the rollout from the same initial recurrent state.
        initial_rnn_states = self.rnn_states
        states, actions, rewards, values = self.create_rollout()
        self.process_rollout(states, actions, rewards, values,
                             initial_rnn_states)
        if self.a3c.use_hindsight:
          self.process_rollout_with_hindsight(states, actions,
                                              initial_rnn_states)
        # NOTE: increments are not synchronized; with GIL-protected list item
        # assignment this is a benign approximate counter across threads.
        step_count[0] += len(actions)

  def create_rollout(self):
    """Generate a rollout.

    Returns the visited states, the actions taken, the per-step rewards, and
    the value estimates.  The returned values array has one extra entry: a
    (discounted) estimate of the reward obtainable after the final step, or
    0 if the episode terminated.
    """
    n_actions = self.env.n_actions
    session = self.a3c._session
    states = []
    actions = []
    rewards = []
    values = []

    # Generate the rollout.
    for i in range(self.a3c.max_rollout_length):
      if self.env.terminated:
        break
      state = self.env.state
      states.append(state)
      feed_dict = self.create_feed_dict(state)
      results = session.run(
          [self.action_prob.out_tensor, self.value.out_tensor] +
          self.graph.rnn_final_states,
          feed_dict=feed_dict)
      probabilities, value = results[:2]
      # Carry the recurrent state forward to the next step of the rollout.
      self.rnn_states = results[2:]
      # Sample an action from the policy's probability distribution.
      action = np.random.choice(np.arange(n_actions), p=probabilities[0])
      actions.append(action)
      values.append(float(value))
      rewards.append(self.env.step(action))

    # Compute an estimate of the reward for the rest of the episode.
    if not self.env.terminated:
      feed_dict = self.create_feed_dict(self.env.state)
      # Already discounted by one step so process_rollout can add it
      # directly to the last reward.
      final_value = self.a3c.discount_factor * float(
          session.run(self.value.out_tensor, feed_dict))
    else:
      final_value = 0.0
    values.append(final_value)
    if self.env.terminated:
      self.env.reset()
      self.rnn_states = self.graph.rnn_zero_states
    return states, actions, np.array(rewards), np.array(values)

  def process_rollout(self, states, actions, rewards, values,
                      initial_rnn_states):
    """Train the network based on a rollout."""

    # Compute the discounted rewards and advantages.  The advantage uses
    # Generalized Advantage Estimation: one-step TD errors accumulated
    # backward with weight (discount_factor * advantage_lambda).
    discounted_rewards = rewards.copy()
    discounted_rewards[-1] += values[-1]
    advantages = rewards - values[:-1] + self.a3c.discount_factor * np.array(
        values[1:])
    for j in range(len(rewards) - 1, 0, -1):
      discounted_rewards[j -
                         1] += self.a3c.discount_factor * discounted_rewards[j]
      advantages[
          j -
          1] += self.a3c.discount_factor * self.a3c.advantage_lambda * advantages[
              j]

    # Convert the actions to one-hot.
    n_actions = self.env.n_actions
    actions_matrix = []
    for action in actions:
      a = np.zeros(n_actions)
      a[action] = 1.0
      actions_matrix.append(a)

    # Rearrange the states into the proper set of arrays: one array per
    # feature, each containing that feature's value for every step.
    if self.a3c._state_is_list:
      state_arrays = [[] for i in range(len(self.features))]
      for state in states:
        for j in range(len(state)):
          state_arrays[j].append(state[j])
    else:
      state_arrays = [states]

    # Build the feed dict and apply gradients.
    feed_dict = {}
    for placeholder, value in zip(self.graph.rnn_initial_states,
                                  initial_rnn_states):
      feed_dict[placeholder] = value
    for f, s in zip(self.features, state_arrays):
      feed_dict[f.out_tensor] = s
    feed_dict[self.rewards.out_tensor] = discounted_rewards
    feed_dict[self.actions.out_tensor] = actions_matrix
    feed_dict[self.advantages.out_tensor] = advantages
    self.a3c._session.run(self.train_op, feed_dict=feed_dict)

  def process_rollout_with_hindsight(self, states, actions, initial_rnn_states):
    """Create a new rollout by applying hindsight to an existing one, then train the network."""
    # Re-label the rollout with its own final state as the goal; the
    # environment decides what the states and rewards would have been.
    hindsight_states, rewards = self.env.apply_hindsight(
        states, actions, states[-1])
    if self.a3c._state_is_list:
      state_arrays = [[] for i in range(len(self.features))]
      for state in hindsight_states:
        for j in range(len(state)):
          state_arrays[j].append(state[j])
    else:
      state_arrays = [hindsight_states]
    feed_dict = {}
    for placeholder, value in zip(self.graph.rnn_initial_states,
                                  initial_rnn_states):
      feed_dict[placeholder] = value
    for f, s in zip(self.features, state_arrays):
      feed_dict[f.out_tensor] = s
    # Recompute value estimates for the re-labeled states; append 0.0 as the
    # final value since the hindsight rollout ends at its goal.
    values = self.a3c._session.run(self.value.out_tensor, feed_dict=feed_dict)
    values = np.append(values.flatten(), 0.0)
    self.process_rollout(hindsight_states, actions,
                         np.array(rewards), np.array(values),
                         initial_rnn_states)

  def create_feed_dict(self, state):
    """Create a feed dict for use during a rollout."""
    if not self.a3c._state_is_list:
      state = [state]
    # Prepend a batch dimension of 1 for the single current state.
    feed_dict = dict((f.out_tensor, np.expand_dims(s, axis=0))
                     for f, s in zip(self.features, state))
    for (placeholder, value) in zip(self.graph.rnn_initial_states,
                                    self.rnn_states):
      feed_dict[placeholder] = value
    return feed_dict
| {
"repo_name": "rbharath/deepchem",
"path": "deepchem/rl/a3c.py",
"copies": "1",
"size": "20119",
"license": "mit",
"hash": -1300909854354987500,
"line_mean": 40.1431492843,
"line_max": 124,
"alpha_frac": 0.6594761171,
"autogenerated": false,
"ratio": 3.8402366863905324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9961011816868319,
"avg_score": 0.007740197324442681,
"num_lines": 489
} |
"""Asynchronous Advantage Actor-Critic (A3C) algorithm for reinforcement learning."""
from deepchem.models import TensorGraph
from deepchem.models.tensorgraph.optimizers import Adam
from deepchem.models.tensorgraph.layers import Feature, Weights, Label, Layer
import numpy as np
import tensorflow as tf
import collections
import copy
import multiprocessing
import os
import re
import threading
class A3CLossDiscrete(Layer):
  """This layer computes the loss function for A3C with discrete action spaces.

  Combines the advantage-weighted policy loss, the squared-error value loss,
  and an entropy bonus into a single scalar.
  """

  def __init__(self, value_weight, entropy_weight, **kwargs):
    """Record the weights applied to the value and entropy terms."""
    super(A3CLossDiscrete, self).__init__(**kwargs)
    self.value_weight = value_weight
    self.entropy_weight = entropy_weight

  def create_tensor(self, **kwargs):
    """Assemble the loss tensor from the five input layers."""
    tensors = [layer.out_tensor for layer in self.in_layers]
    reward, action, prob, value, advantage = tensors
    # Shift probabilities away from zero so the logarithm is well defined.
    safe_prob = prob + np.finfo(np.float32).eps
    log_prob = tf.log(safe_prob)
    # Log-probability of the action actually taken (actions are one-hot).
    chosen_log_prob = tf.reduce_sum(action * log_prob, axis=1)
    policy_loss = -tf.reduce_mean(advantage * chosen_log_prob)
    value_loss = tf.reduce_mean(tf.square(reward - value))
    entropy = -tf.reduce_mean(tf.reduce_sum(safe_prob * log_prob, axis=1))
    total = policy_loss + self.value_weight * value_loss - self.entropy_weight * entropy
    self.out_tensor = total
    return total
class A3CLossContinuous(Layer):
  """This layer computes the loss function for A3C with continuous action spaces.

  The policy outputs a Normal distribution (mean and standard deviation) over
  each action element; the loss combines the advantage-weighted log-likelihood
  of the actions taken, the squared-error value loss, and an entropy bonus.
  """

  def __init__(self, value_weight, entropy_weight, **kwargs):
    """Record the weights applied to the value and entropy terms."""
    super(A3CLossContinuous, self).__init__(**kwargs)
    self.value_weight = value_weight
    self.entropy_weight = entropy_weight

  def create_tensor(self, **kwargs):
    """Assemble the loss tensor from the six input layers."""
    tensors = [layer.out_tensor for layer in self.in_layers]
    reward, action, mean, std, value, advantage = tensors
    distrib = tf.distributions.Normal(mean, std)
    # Sum log-densities over every non-batch axis so each sample gets a
    # single log-probability for its whole action array.
    non_batch_axes = list(range(1, len(action.shape)))
    log_prob = tf.reduce_sum(distrib.log_prob(action), non_batch_axes)
    policy_loss = -tf.reduce_mean(advantage * log_prob)
    value_loss = tf.reduce_mean(tf.square(reward - value))
    entropy = tf.reduce_mean(distrib.entropy())
    total = policy_loss + self.value_weight * value_loss - self.entropy_weight * entropy
    self.out_tensor = total
    return total
class A3C(object):
  """
  Implements the Asynchronous Advantage Actor-Critic (A3C) algorithm for reinforcement learning.

  The algorithm is described in Mnih et al, "Asynchronous Methods for Deep Reinforcement Learning"
  (https://arxiv.org/abs/1602.01783). This class supports environments with both discrete and
  continuous action spaces. For discrete action spaces, the "action" argument passed to the
  environment is an integer giving the index of the action to perform. The policy must output
  a vector called "action_prob" giving the probability of taking each action. For continous
  action spaces, the action is an array where each element is chosen independently from a
  normal distribution. The policy must output two arrays of the same shape: "action_mean"
  gives the mean value for each element, and "action_std" gives the standard deviation for
  each element. In either case, the policy must also output a scalar called "value" which
  is an estimate of the value function for the current state.

  The algorithm optimizes all outputs at once using a loss that is the sum of three terms:

  1. The policy loss, which seeks to maximize the discounted reward for each action.
  2. The value loss, which tries to make the value estimate match the actual discounted reward
     that was attained at each step.
  3. An entropy term to encourage exploration.

  This class supports Generalized Advantage Estimation as described in Schulman et al., "High-Dimensional
  Continuous Control Using Generalized Advantage Estimation" (https://arxiv.org/abs/1506.02438).
  This is a method of trading off bias and variance in the advantage estimate, which can sometimes
  improve the rate of convergance. Use the advantage_lambda parameter to adjust the tradeoff.

  This class supports Hindsight Experience Replay as described in Andrychowicz et al., "Hindsight
  Experience Replay" (https://arxiv.org/abs/1707.01495). This is a method that can enormously
  accelerate learning when rewards are very rare. It requires that the environment state contains
  information about the goal the agent is trying to achieve. Each time it generates a rollout, it
  processes that rollout twice: once using the actual goal the agent was pursuing while generating
  it, and again using the final state of that rollout as the goal. This guarantees that half of
  all rollouts processed will be ones that achieved their goals, and hence received a reward.

  To use this feature, specify use_hindsight=True to the constructor. The environment must have
  a method defined as follows:

  def apply_hindsight(self, states, actions, goal):
    ...
    return new_states, rewards

  The method receives the list of states generated during the rollout, the action taken for each one,
  and a new goal state. It should generate a new list of states that are identical to the input ones,
  except specifying the new goal. It should return that list of states, and the rewards that would
  have been received for taking the specified actions from those states.
  """

  def __init__(self,
               env,
               policy,
               max_rollout_length=20,
               discount_factor=0.99,
               advantage_lambda=0.98,
               value_weight=1.0,
               entropy_weight=0.01,
               optimizer=None,
               model_dir=None,
               use_hindsight=False):
    """Create an object for optimizing a policy.

    Parameters
    ----------
    env: Environment
      the Environment to interact with
    policy: Policy
      the Policy to optimize. Its create_layers() method must return a dict containing the
      keys 'action_prob' and 'value' (for discrete action spaces) or 'action_mean', 'action_std',
      and 'value' (for continuous action spaces)
    max_rollout_length: int
      the maximum length of rollouts to generate
    discount_factor: float
      the discount factor to use when computing rewards
    advantage_lambda: float
      the parameter for trading bias vs. variance in Generalized Advantage Estimation
    value_weight: float
      a scale factor for the value loss term in the loss function
    entropy_weight: float
      a scale factor for the entropy term in the loss function
    optimizer: Optimizer
      the optimizer to use. If None, a default optimizer is used.
    model_dir: str
      the directory in which the model will be saved. If None, a temporary directory will be created.
    use_hindsight: bool
      if True, use Hindsight Experience Replay
    """
    self._env = env
    self._policy = policy
    self.max_rollout_length = max_rollout_length
    self.discount_factor = discount_factor
    self.advantage_lambda = advantage_lambda
    self.value_weight = value_weight
    self.entropy_weight = entropy_weight
    self.use_hindsight = use_hindsight
    # collections.Sequence was a deprecated alias that was removed in
    # Python 3.10; the abc version is the supported spelling.
    self._state_is_list = isinstance(env.state_shape[0],
                                     collections.abc.Sequence)
    if optimizer is None:
      self._optimizer = Adam(learning_rate=0.001, beta1=0.9, beta2=0.999)
    else:
      self._optimizer = optimizer
    # _build_graph() sets self.continuous based on which outputs the policy
    # provides, so it must run before we inspect that flag.
    fields = self._build_graph(None, 'global', model_dir)
    if self.continuous:
      (self._graph, self._features, self._rewards, self._actions,
       self._action_mean, self._action_std, self._value,
       self._advantages) = fields
    else:
      (self._graph, self._features, self._rewards, self._actions,
       self._action_prob, self._value, self._advantages) = fields
    with self._graph._get_tf("Graph").as_default():
      self._session = tf.Session()
      self._rnn_states = self._graph.rnn_zero_states

  def _build_graph(self, tf_graph, scope, model_dir):
    """Construct a TensorGraph containing the policy and loss calculations.

    Also sets self.continuous: True if the policy outputs 'action_mean' and
    'action_std', False if it outputs 'action_prob'.
    """
    state_shape = self._env.state_shape
    state_dtype = self._env.state_dtype
    if not self._state_is_list:
      state_shape = [state_shape]
      state_dtype = [state_dtype]
    features = []
    for s, d in zip(state_shape, state_dtype):
      features.append(Feature(shape=[None] + list(s), dtype=tf.as_dtype(d)))
    policy_layers = self._policy.create_layers(features)
    value = policy_layers['value']
    rewards = Weights(shape=(None,))
    advantages = Weights(shape=(None,))
    graph = TensorGraph(
        batch_size=self.max_rollout_length,
        use_queue=False,
        graph=tf_graph,
        model_dir=model_dir)
    for f in features:
      graph._add_layer(f)
    if 'action_prob' in policy_layers:
      self.continuous = False
      action_prob = policy_layers['action_prob']
      actions = Label(shape=(None, self._env.n_actions))
      loss = A3CLossDiscrete(
          self.value_weight,
          self.entropy_weight,
          in_layers=[rewards, actions, action_prob, value, advantages])
      graph.add_output(action_prob)
    else:
      self.continuous = True
      action_mean = policy_layers['action_mean']
      action_std = policy_layers['action_std']
      actions = Label(shape=[None] + list(self._env.action_shape))
      loss = A3CLossContinuous(
          self.value_weight,
          self.entropy_weight,
          in_layers=[
              rewards, actions, action_mean, action_std, value, advantages
          ])
      graph.add_output(action_mean)
      graph.add_output(action_std)
    graph.add_output(value)
    graph.set_loss(loss)
    graph.set_optimizer(self._optimizer)
    with graph._get_tf("Graph").as_default():
      # Build inside a variable scope so worker copies ('worker0', ...) can
      # be distinguished from the shared 'global' copy of the variables.
      with tf.variable_scope(scope):
        graph.build()
    if self.continuous:
      return graph, features, rewards, actions, action_mean, action_std, value, advantages
    else:
      return graph, features, rewards, actions, action_prob, value, advantages

  def fit(self,
          total_steps,
          max_checkpoints_to_keep=5,
          checkpoint_interval=600,
          restore=False):
    """Train the policy.

    Parameters
    ----------
    total_steps: int
      the total number of time steps to perform on the environment, across all rollouts
      on all threads
    max_checkpoints_to_keep: int
      the maximum number of checkpoint files to keep. When this number is reached, older
      files are deleted.
    checkpoint_interval: float
      the time interval at which to save checkpoints, measured in seconds
    restore: bool
      if True, restore the model from the most recent checkpoint and continue training
      from there. If False, retrain the model from scratch.
    """
    with self._graph._get_tf("Graph").as_default():
      step_count = [0]
      workers = []
      threads = []
      for i in range(multiprocessing.cpu_count()):
        workers.append(_Worker(self, i))
      self._session.run(tf.global_variables_initializer())
      if restore:
        self.restore()
      for worker in workers:
        # Pass the worker through Thread's args rather than closing over the
        # loop variable: a plain "lambda: worker.run(...)" late-binds
        # "worker", so several threads could end up running the same (last)
        # worker instance.
        thread = threading.Thread(
            name=worker.scope,
            target=worker.run,
            args=(step_count, total_steps))
        threads.append(thread)
        thread.start()
      variables = tf.get_collection(
          tf.GraphKeys.GLOBAL_VARIABLES, scope='global')
      saver = tf.train.Saver(variables, max_to_keep=max_checkpoints_to_keep)
      checkpoint_index = 0
      while True:
        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # supported name.
        threads = [t for t in threads if t.is_alive()]
        if len(threads) > 0:
          # join() with a timeout doubles as the checkpoint timer.
          threads[0].join(checkpoint_interval)
        checkpoint_index += 1
        saver.save(
            self._session, self._graph.save_file, global_step=checkpoint_index)
        if len(threads) == 0:
          break

  def predict(self, state, use_saved_states=True, save_states=True):
    """Compute the policy's output predictions for a state.

    If the policy involves recurrent layers, this method can preserve their internal
    states between calls. Use the use_saved_states and save_states arguments to specify
    how it should behave.

    Parameters
    ----------
    state: array
      the state of the environment for which to generate predictions
    use_saved_states: bool
      if True, the states most recently saved by a previous call to predict() or select_action()
      will be used as the initial states. If False, the internal states of all recurrent layers
      will be set to all zeros before computing the predictions.
    save_states: bool
      if True, the internal states of all recurrent layers at the end of the calculation
      will be saved, and any previously saved states will be discarded. If False, the
      states at the end of the calculation will be discarded, and any previously saved
      states will be kept.

    Returns
    -------
    for discrete action spaces, the array of action probabilities and the estimated value
    function; for continuous action spaces, the action means, the action standard deviations,
    and the estimated value function
    """
    if self.continuous:
      outputs = [self._action_mean, self._action_std, self._value]
    else:
      outputs = [self._action_prob, self._value]
    return self._predict_outputs(outputs, state, use_saved_states, save_states)

  def select_action(self,
                    state,
                    deterministic=False,
                    use_saved_states=True,
                    save_states=True):
    """Select an action to perform based on the environment's state.

    If the policy involves recurrent layers, this method can preserve their internal
    states between calls. Use the use_saved_states and save_states arguments to specify
    how it should behave.

    Parameters
    ----------
    state: array
      the state of the environment for which to select an action
    deterministic: bool
      if True, always return the best action (that is, the one with highest probability).
      If False, randomly select an action based on the computed probabilities.
    use_saved_states: bool
      if True, the states most recently saved by a previous call to predict() or select_action()
      will be used as the initial states. If False, the internal states of all recurrent layers
      will be set to all zeros before computing the predictions.
    save_states: bool
      if True, the internal states of all recurrent layers at the end of the calculation
      will be saved, and any previously saved states will be discarded. If False, the
      states at the end of the calculation will be discarded, and any previously saved
      states will be kept.

    Returns
    -------
    the index of the selected action (for discrete action spaces) or the selected action
    array (for continuous action spaces)
    """
    if self.continuous:
      tensors = [self._action_mean, self._action_std]
    else:
      tensors = [self._action_prob]
    outputs = self._predict_outputs(tensors, state, use_saved_states,
                                    save_states)
    return self._select_action_from_outputs(outputs, deterministic)

  def restore(self):
    """Reload the model parameters from the most recent checkpoint file."""
    last_checkpoint = tf.train.latest_checkpoint(self._graph.model_dir)
    if last_checkpoint is None:
      raise ValueError('No checkpoint found')
    with self._graph._get_tf("Graph").as_default():
      variables = tf.get_collection(
          tf.GraphKeys.GLOBAL_VARIABLES, scope='global')
      saver = tf.train.Saver(variables)
      saver.restore(self._session, last_checkpoint)

  def _create_feed_dict(self, state, use_saved_states):
    """Create a feed dict for use by predict() or select_action()."""
    # A batch dimension of 1 is prepended since the placeholders expect a
    # batch of states.
    feed_dict = dict((f.out_tensor, np.expand_dims(s, axis=0))
                     for f, s in zip(self._features, state))
    if use_saved_states:
      rnn_states = self._rnn_states
    else:
      rnn_states = self._graph.rnn_zero_states
    for (placeholder, value) in zip(self._graph.rnn_initial_states, rnn_states):
      feed_dict[placeholder] = value
    return feed_dict

  def _predict_outputs(self, outputs, state, use_saved_states, save_states):
    """Compute a set of outputs for a state, optionally saving RNN states."""
    if not self._state_is_list:
      state = [state]
    with self._graph._get_tf("Graph").as_default():
      feed_dict = self._create_feed_dict(state, use_saved_states)
      if save_states:
        tensors = outputs + self._graph.rnn_final_states
      else:
        tensors = outputs
      results = self._session.run(tensors, feed_dict=feed_dict)
      if save_states:
        self._rnn_states = results[len(outputs):]
      return results[:len(outputs)]

  def _select_action_from_outputs(self, outputs, deterministic):
    """Given the policy outputs, select an action to perform."""
    if self.continuous:
      action_mean, action_std = outputs
      if deterministic:
        return action_mean[0]
      else:
        # Sample each action element independently from its Normal
        # distribution.
        return np.random.normal(action_mean[0], action_std[0])
    else:
      action_prob = outputs[0]
      if deterministic:
        return action_prob.argmax()
      else:
        action_prob = action_prob.flatten()
        return np.random.choice(np.arange(len(action_prob)), p=action_prob)
class _Worker(object):
  """A Worker object is created for each training thread.

  Each worker owns a private (deep) copy of the environment and a local copy
  of the policy/value network built under its own variable scope.  Gradients
  are computed from the local network but applied to the shared 'global'
  network, which is the asynchronous update at the heart of A3C.
  """

  def __init__(self, a3c, index):
    # a3c: the parent A3C object (shared graph, session, hyperparameters)
    # index: integer id of this worker, used to name its variable scope
    self.a3c = a3c
    self.index = index
    self.scope = 'worker%d' % index
    self.env = copy.deepcopy(a3c._env)
    self.env.reset()
    # Build a local copy of the network under this worker's scope.
    fields = a3c._build_graph(a3c._graph._get_tf('Graph'), self.scope, None)
    if a3c.continuous:
      self.graph, self.features, self.rewards, self.actions, self.action_mean, self.action_std, self.value, self.advantages = fields
    else:
      self.graph, self.features, self.rewards, self.actions, self.action_prob, self.value, self.advantages = fields
    self.rnn_states = self.graph.rnn_zero_states
    with a3c._graph._get_tf("Graph").as_default():
      local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     self.scope)
      global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                      'global')
      # Gradients are taken w.r.t. the local variables but zipped with (and
      # applied to) the corresponding global variables.
      gradients = tf.gradients(self.graph.loss.out_tensor, local_vars)
      grads_and_vars = list(zip(gradients, global_vars))
      self.train_op = a3c._graph._get_tf('Optimizer').apply_gradients(
          grads_and_vars)
      # Op that copies the global parameters into this worker's network.
      self.update_local_variables = tf.group(
          *[tf.assign(v1, v2) for v1, v2 in zip(local_vars, global_vars)])
      self.global_step = self.graph.get_global_step()

  def run(self, step_count, total_steps):
    # step_count: one-element mutable list shared across workers holding the
    #   total number of environment steps taken so far
    # total_steps: stop once step_count[0] reaches this value
    with self.graph._get_tf("Graph").as_default():
      while step_count[0] < total_steps:
        # Sync the local network with the global one, then train on a
        # freshly generated rollout.
        self.a3c._session.run(self.update_local_variables)
        initial_rnn_states = self.rnn_states
        states, actions, rewards, values = self.create_rollout()
        self.process_rollout(states, actions, rewards, values,
                             initial_rnn_states, step_count[0])
        if self.a3c.use_hindsight:
          self.process_rollout_with_hindsight(states, actions,
                                              initial_rnn_states,
                                              step_count[0])
        step_count[0] += len(actions)

  def create_rollout(self):
    """Generate a rollout.

    Returns (states, actions, rewards, values); values carries one extra
    trailing element, the discounted bootstrap value estimate for the state
    after the rollout (0.0 if the episode terminated).
    """
    # NOTE(review): n_actions is unused in this method (process_rollout
    # recomputes it where it is actually needed).
    n_actions = self.env.n_actions
    session = self.a3c._session
    states = []
    actions = []
    rewards = []
    values = []
    # Generate the rollout.
    for i in range(self.a3c.max_rollout_length):
      if self.env.terminated:
        break
      state = self.env.state
      states.append(state)
      feed_dict = self.create_feed_dict(state)
      if self.a3c.continuous:
        tensors = [self.action_mean, self.action_std, self.value]
      else:
        tensors = [self.action_prob, self.value]
      results = session.run(
          tensors + self.graph.rnn_final_states, feed_dict=feed_dict)
      # The value estimate is the last policy tensor; everything after it is
      # the recurrent state to carry into the next step.
      value = results[len(tensors) - 1]
      self.rnn_states = results[len(tensors):]
      action = self.a3c._select_action_from_outputs(results[:len(tensors) - 1],
                                                    False)
      actions.append(action)
      values.append(float(value))
      rewards.append(self.env.step(action))
    # Compute an estimate of the reward for the rest of the episode.
    if not self.env.terminated:
      feed_dict = self.create_feed_dict(self.env.state)
      final_value = self.a3c.discount_factor * float(
          session.run(self.value.out_tensor, feed_dict))
    else:
      final_value = 0.0
    values.append(final_value)
    if self.env.terminated:
      self.env.reset()
      self.rnn_states = self.graph.rnn_zero_states
    return states, actions, np.array(
        rewards, dtype=np.float32), np.array(
            values, dtype=np.float32)

  def process_rollout(self, states, actions, rewards, values,
                      initial_rnn_states, step_count):
    """Train the network based on a rollout."""
    # Compute the discounted rewards and advantages.
    discounted_rewards = rewards.copy()
    discounted_rewards[-1] += values[-1]
    # One-step TD errors; the backward pass below accumulates them with a
    # discount_factor * advantage_lambda weighting (generalized advantage
    # estimation).
    advantages = rewards - values[:-1] + self.a3c.discount_factor * np.array(
        values[1:])
    for j in range(len(rewards) - 1, 0, -1):
      discounted_rewards[j - 1] += self.a3c.discount_factor * discounted_rewards[j]
      advantages[j - 1] += self.a3c.discount_factor * self.a3c.advantage_lambda * advantages[j]
    # Record the actions, converting to one-hot if necessary.
    actions_matrix = []
    if self.a3c.continuous:
      for action in actions:
        actions_matrix.append(action)
    else:
      n_actions = self.env.n_actions
      for action in actions:
        a = np.zeros(n_actions)
        a[action] = 1.0
        actions_matrix.append(a)
    # Rearrange the states into the proper set of arrays.
    if self.a3c._state_is_list:
      state_arrays = [[] for i in range(len(self.features))]
      for state in states:
        for j in range(len(state)):
          state_arrays[j].append(state[j])
    else:
      state_arrays = [states]
    # Build the feed dict and apply gradients.
    feed_dict = {}
    for placeholder, value in zip(self.graph.rnn_initial_states,
                                  initial_rnn_states):
      feed_dict[placeholder] = value
    for f, s in zip(self.features, state_arrays):
      feed_dict[f] = s
    feed_dict[self.rewards] = discounted_rewards
    feed_dict[self.actions] = actions_matrix
    feed_dict[self.advantages] = advantages
    feed_dict[self.global_step] = step_count
    self.a3c._session.run(self.train_op, feed_dict=feed_dict)

  def process_rollout_with_hindsight(self, states, actions, initial_rnn_states,
                                     step_count):
    """Create a new rollout by applying hindsight to an existing one, then train the network."""
    # Relabel the rollout as if the final state had been the goal.
    hindsight_states, rewards = self.env.apply_hindsight(
        states, actions, states[-1])
    if self.a3c._state_is_list:
      state_arrays = [[] for i in range(len(self.features))]
      for state in hindsight_states:
        for j in range(len(state)):
          state_arrays[j].append(state[j])
    else:
      state_arrays = [hindsight_states]
    feed_dict = {}
    for placeholder, value in zip(self.graph.rnn_initial_states,
                                  initial_rnn_states):
      feed_dict[placeholder] = value
    for f, s in zip(self.features, state_arrays):
      feed_dict[f.out_tensor] = s
    # Recompute value estimates for the relabeled states and append 0.0 as
    # the terminal bootstrap value expected by process_rollout.
    values = self.a3c._session.run(self.value.out_tensor, feed_dict=feed_dict)
    values = np.append(values.flatten(), 0.0)
    self.process_rollout(hindsight_states, actions, np.array(rewards),
                         np.array(values), initial_rnn_states, step_count)

  def create_feed_dict(self, state):
    """Create a feed dict for use during a rollout."""
    if not self.a3c._state_is_list:
      state = [state]
    # Add a batch dimension of size 1 to each feature array.
    feed_dict = dict((f.out_tensor, np.expand_dims(s, axis=0))
                     for f, s in zip(self.features, state))
    for (placeholder, value) in zip(self.graph.rnn_initial_states,
                                    self.rnn_states):
      feed_dict[placeholder] = value
    return feed_dict
| {
"repo_name": "Agent007/deepchem",
"path": "deepchem/rl/a3c.py",
"copies": "1",
"size": "23920",
"license": "mit",
"hash": -4710104663599846000,
"line_mean": 40.5277777778,
"line_max": 132,
"alpha_frac": 0.6591137124,
"autogenerated": false,
"ratio": 3.834562359730683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9953729965503422,
"avg_score": 0.007989221325452201,
"num_lines": 576
} |
'''Asynchronous application for serving requests
on sockets. This is the base class of :class:`.WSGIServer`.
All is needed by a :class:`.SocketServer` application is a callable
which build a :class:`.ProtocolConsumer` for each new client request
received.
This is an example of a script for an Echo server::
import pulsar
from pulsar.apps.socket import SocketServer
class EchoServerProtocol(pulsar.ProtocolConsumer):
...
if __name__ == '__main__':
SocketServer(EchoServerProtocol).start()
Check the :ref:`echo server example <tutorials-writing-clients>` for detailed
implementation of the ``EchoServerProtocol`` class.
.. _socket-server-settings:
Socket Server Settings
==============================
All standard :ref:`application settings <settings>` can be applied to a
:class:`SocketServer`. In addition, the following are
specific to sockets and can be used to fine tune your application:
bind
------
To specify the address to bind the server to::
python script.py --bind 127.0.0.1:8070
This will listen for both ipv4 and ipv6 sockets on all hosts on port 8080::
python script.py --bind :8080
backlog
---------
To specify the maximum number of queued connections you can use the
:ref:`backlog <setting-backlog>` settings. For example::
python script.py --backlog 1000
This setting is rarely used.
keep_alive
---------------
To control how long a server :class:`.Connection` is kept alive after the
last read from the remote client, one can use the
:ref:`keep-alive <setting-keep_alive>` setting::
python script.py --keep-alive 10
will close client connections which have been idle for 10 seconds.
.. _socket-server-ssl:
TLS/SSL support
------------------------
Transport Layer Security (often known as Secure Sockets Layer) is handled by
the :ref:`cert-file <setting-cert_file>` and :ref:`key-file <setting-key_file>`
settings::
python script.py --cert-file server.crt --key-file server.key
.. _socket-server-concurrency:
Concurrency
==================
When running a :class:`SocketServer` in multi-process mode (default),
the application creates a listening socket in the parent (Arbiter) process
and then spawn several process-based actors which listen on the
same shared socket.
This is how pre-forking servers operate.
When running a :class:`SocketServer` in threading mode::
python script.py --concurrency thread
the number of :class:`.Actor` serving the application is set
to ``0`` so that the application is actually served by the
arbiter event-loop (we refer to this as a single-process server).
This configuration is used when debugging, testing, benchmarking or on small
load servers.
In addition, a :class:`SocketServer` in multi-process mode is only available
for:
* Posix systems.
* Windows running python 3.2 or above (python 2 on windows does not support
the creation of sockets from file descriptors).
Check the :meth:`SocketServer.monitor_start` method for implementation details.
'''
import os
import socket
from math import log
from random import lognormvariate
from functools import partial
import pulsar
from pulsar import (asyncio, TcpServer, DatagramServer, Connection,
ImproperlyConfigured)
from pulsar.utils.internet import parse_address, SSLContext
from pulsar.utils.config import pass_through
class SocketSetting(pulsar.Setting):
    """Base class for settings specific to socket applications."""
    virtual = True              # not itself a setting, only a base class
    app = 'socket'              # applies to the 'socket' application
    section = "Socket Servers"  # section shown in the settings documentation
class Bind(SocketSetting):
    """Setting for the address the server binds to."""
    name = "bind"
    flags = ["-b", "--bind"]
    meta = "ADDRESS"
    default = "127.0.0.1:{0}".format(pulsar.DEFAULT_PORT)
    desc = """\
        The socket to bind.
        A string of the form: ``HOST``, ``HOST:PORT``, ``unix:PATH``.
        An IP is a valid HOST.
        """
class KeepAlive(SocketSetting):
    """Setting for how long an idle client connection is kept open."""
    name = "keep_alive"
    flags = ["--keep-alive"]
    validator = pulsar.validate_pos_int
    type = int
    default = 15
    desc = """\
        The number of seconds to keep an idle client connection
        open."""
class Backlog(SocketSetting):
    """Setting for the maximum number of queued connections on a socket."""
    name = "backlog"
    flags = ["--backlog"]
    validator = pulsar.validate_pos_int
    type = int
    default = 2048
    desc = """\
        The maximum number of queued connections in a socket.
        This refers to the number of clients that can be waiting to be served.
        Exceeding this number results in the client getting an error when
        attempting to connect. It should only affect servers under significant
        load.
        Must be a positive integer. Generally set in the 64-2048 range.
        """
class KeyFile(SocketSetting):
    """Setting for the SSL private-key file path."""
    name = "key_file"
    flags = ["--key-file"]
    meta = "FILE"
    default = None
    desc = """\
        SSL key file
        """
class CertFile(SocketSetting):
    """Setting for the SSL certificate file path."""
    name = "cert_file"
    flags = ["--cert-file"]
    meta = "FILE"
    default = None
    desc = """\
        SSL certificate file
        """
class WrapTransport:
    """Capture a transport's socket and class so an equivalent transport can
    be re-created later in a different event loop (used to hand a datagram
    socket from the monitor to worker actors)."""

    def __init__(self, transport):
        self.extra = transport._extra
        # Detach the socket from the transport's extra info and keep it.
        self.sock = self.extra.pop('socket')
        self.transport = transport.__class__
        # For some reason, if we don't delete the _sock from the
        # transport, it gets closed by the python garbage collector
        # on python 3.4.3 mac os x
        del transport._sock

    def __call__(self, loop, protocol):
        # Rebuild a transport of the original class around the saved socket.
        return self.transport(loop, self.sock, protocol, extra=self.extra)
class SocketServer(pulsar.Application):
    '''A :class:`.Application` which serves applications on a socket.

    It binds a socket to a given address and listens for requests. The
    request handler is constructed from the callable passed during
    initialisation.

    .. attribute:: address

        The socket address, available once the application has started.
    '''
    name = 'socket'
    cfg = pulsar.Config(apps=['socket'])

    def protocol_factory(self):
        '''Factory of :class:`.ProtocolConsumer` used by the server.

        By default it returns the :meth:`.Application.callable`.
        '''
        # Each new client connection wraps the application callable in a
        # Connection object.
        return partial(Connection, self.cfg.callable)

    def monitor_start(self, monitor):
        '''Create the socket listening to the ``bind`` address.

        If the platform does not support multiprocessing sockets set the
        number of workers to 0.

        This method is a generator-based coroutine (note the ``yield from``).
        '''
        cfg = self.cfg
        loop = monitor._loop
        # Serve from the arbiter event-loop when process-based workers
        # cannot share the listening socket.
        if (not pulsar.platform.has_multiProcessSocket or
                cfg.concurrency == 'thread'):
            cfg.set('workers', 0)
        if not cfg.address:
            raise ImproperlyConfigured('Could not open a socket. '
                                       'No address to bind to')
        ssl = None
        if cfg.cert_file or cfg.key_file:
            # Validate that the configured SSL files exist before building
            # the SSL context.
            if cfg.cert_file and not os.path.exists(cfg.cert_file):
                raise ImproperlyConfigured('cert_file "%s" does not exist' %
                                           cfg.cert_file)
            if cfg.key_file and not os.path.exists(cfg.key_file):
                raise ImproperlyConfigured('key_file "%s" does not exist' %
                                           cfg.key_file)
            ssl = SSLContext(keyfile=cfg.key_file, certfile=cfg.cert_file)
        address = parse_address(self.cfg.address)
        # First create the sockets
        try:
            server = yield from loop.create_server(asyncio.Protocol, *address)
        except socket.error as e:
            raise ImproperlyConfigured(e)
        else:
            addresses = []
            sockets = []
            for sock in server.sockets:
                addresses.append(sock.getsockname())
                sockets.append(sock)
                # Detach the socket from the monitor loop: the workers will
                # do the actual serving.
                loop.remove_reader(sock.fileno())
            monitor.sockets = sockets
            monitor.ssl = ssl
            cfg.addresses = addresses

    def actorparams(self, monitor, params):
        # Hand the shared listening sockets and SSL context to each worker.
        params.update({'sockets': monitor.sockets, 'ssl': monitor.ssl})

    def worker_start(self, worker, exc=None):
        '''Start the worker by invoking the :meth:`create_server` method.
        '''
        if not exc:
            server = self.create_server(worker)
            # Stop the worker when its server stops.
            server.bind_event('stop', lambda _, **kw: worker.stop())
            worker.servers[self.name] = server

    def worker_stopping(self, worker, exc=None):
        # Close this application's server, if one was started.
        server = worker.servers.get(self.name)
        if server:
            return server.close()

    def worker_info(self, worker, info):
        # Add this server's statistics to the worker info dictionary.
        server = worker.servers.get(self.name)
        if server:
            info['%sserver' % self.name] = server.info()
        return info

    def server_factory(self, *args, **kw):
        '''Create a :class:`.TcpServer`.
        '''
        return TcpServer(*args, **kw)

    #    INTERNALS
    def create_server(self, worker):
        '''Create the Server which will listen for requests.

        :return: a :class:`.TcpServer`.
        '''
        sockets = worker.sockets
        cfg = self.cfg
        max_requests = cfg.max_requests
        if max_requests:
            # Randomize max_requests (log-normal jitter) so workers do not
            # all restart at the same time.
            max_requests = int(lognormvariate(log(max_requests), 0.2))
        server = self.server_factory(self.protocol_factory(),
                                     worker._loop,
                                     sockets=sockets,
                                     max_requests=max_requests,
                                     keep_alive=cfg.keep_alive,
                                     name=self.name,
                                     logger=self.logger)
        # Forward user-configured event hooks to the server.
        for event in ('connection_made', 'pre_request', 'post_request',
                      'connection_lost'):
            callback = getattr(cfg, event)
            if callback != pass_through:
                server.bind_event(event, callback)
        server.start_serving(cfg.backlog, sslcontext=worker.ssl)
        return server
class UdpSocketServer(SocketServer):
    '''A :class:`.SocketServer` which serves applications on UDP sockets.

    It binds a socket to a given address and listens for requests. The
    request handler is constructed from the callable passed during
    initialisation.

    .. attribute:: address

        The socket address, available once the application has started.
    '''
    name = 'udpsocket'
    cfg = pulsar.Config(apps=['socket'])

    def protocol_factory(self):
        '''Return the :class:`.DatagramProtocol` factory.
        '''
        return self.cfg.callable

    def monitor_start(self, monitor):
        '''Create the socket listening to the ``bind`` address.

        If the platform does not support multiprocessing sockets set the
        number of workers to 0.

        This method is a generator-based coroutine (note the ``yield from``).
        '''
        cfg = self.cfg
        loop = monitor._loop
        if (not pulsar.platform.has_multiProcessSocket or
                cfg.concurrency == 'thread'):
            cfg.set('workers', 0)
        if not cfg.address:
            raise pulsar.ImproperlyConfigured('Could not open a socket. '
                                              'No address to bind to')
        address = parse_address(self.cfg.address)
        # First create the sockets
        t, _ = yield from loop.create_datagram_endpoint(
            asyncio.DatagramProtocol, address)
        sock = t.get_extra_info('socket')
        # Detach the socket from the monitor loop; workers re-create a
        # transport around it via WrapTransport.
        # NOTE(review): this assert has a side effect (remove_reader); it is
        # skipped entirely when python runs with -O.
        assert loop.remove_reader(sock.fileno())
        cfg.addresses = [sock.getsockname()]
        monitor.sockets = [WrapTransport(t)]

    def actorparams(self, monitor, params):
        # Hand the wrapped transport(s) to each spawned worker.
        params.update({'sockets': monitor.sockets})

    def server_factory(self, *args, **kw):
        '''By default returns a new :class:`.DatagramServer`.
        '''
        return DatagramServer(*args, **kw)

    #    INTERNALS
    def create_server(self, worker):
        '''Create the Server which will listen for requests.

        :return: the server obtained from :meth:`server_factory`.
        '''
        cfg = self.cfg
        max_requests = cfg.max_requests
        if max_requests:
            # Randomize max_requests so workers do not all restart together.
            max_requests = int(lognormvariate(log(max_requests), 0.2))
        server = self.server_factory(self.protocol_factory(),
                                     worker._loop,
                                     sockets=worker.sockets,
                                     max_requests=max_requests,
                                     name=self.name,
                                     logger=self.logger)
        server.bind_event('stop', lambda _, **kw: worker.stop())
        # Forward user-configured event hooks to the server.
        for event in ('pre_request', 'post_request'):
            callback = getattr(cfg, event)
            if callback != pass_through:
                server.bind_event(event, callback)
        server.create_endpoint()
        return server
| {
"repo_name": "inirudebwoy/pulsar",
"path": "pulsar/apps/socket/__init__.py",
"copies": "5",
"size": "12565",
"license": "bsd-3-clause",
"hash": -5193187080484450000,
"line_mean": 32.2407407407,
"line_max": 79,
"alpha_frac": 0.6127337843,
"autogenerated": false,
"ratio": 4.359819569743234,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7472553354043234,
"avg_score": null,
"num_lines": null
} |
"""Asynchronous client for swaggerpy
Copyright (c) 2014, Matthew Jordan
Matthew Jordan <mjordan@digium.com>
This program is free software, distributed under the terms of
the MIT License (MIT)
This module provides an asynchronous client for SwaggerPy. The normal
SwaggerClient - unfortunately - assumes that the HTTP client is synchronous,
as it loads all resources in its constructor. This is especially odd
given that the http_client is pluggable... but Loader assumes that all
http_client instances return an object from the requests library.
Refactoring this is not an easy task without breaking existing consumers.
We work around this in this module by querying for the resources
asynchronously using our asynchronous HTTP client, storing them to a file,
then processing things in the SwaggerClient. Our asynchronous Swagger client
does not inherit from SwaggerClient; rather, it encapsulates it.
"""
import urlparse
import json
import logging
import tempfile
import shutil
import os
from swaggerpy.client import SwaggerClient
from twisted.python.failure import Failure
from twisted.internet.defer import Deferred, DeferredList
from twisted.internet.protocol import Protocol
LOGGER = logging.getLogger(__name__)
class JSONBodyReceiver(Protocol):
    """Twisted protocol that accumulates a response body and parses it as JSON.

    When the connection closes, the parsed JSON is passed to the supplied
    deferred's callback; a JSON parse failure fires its errback instead.
    """

    def __init__(self, finished_deferred):
        # finished_deferred: Deferred fired with the parsed JSON body (or a
        # ValueError failure if the body is not valid JSON)
        self.finished_deferred = finished_deferred
        self.received = ""

    def dataReceived(self, bytes):
        # Accumulate each chunk of the response body as it arrives.
        self.received += bytes

    def connectionLost(self, reason):
        # The full body has arrived; parse it and fire the deferred.
        try:
            json_body = json.loads(self.received)
            self.finished_deferred.callback(json_body)
        except ValueError as value_error:
            self.finished_deferred.errback(value_error)
class AsyncSwaggerClient(object):
def __init__(self, base_url):
self._swagger_client = None
self._base_url = base_url
self._temp_dir = tempfile.mkdtemp()
os.mkdir(os.path.join(self._temp_dir, 'api-docs'))
def load_resources(self, http_client):
"""Load the resources for this Swagger client
This routine will load the resources located at base_url. It does this
in the following way:
(1) Retrieve resources.json, saving it to a temporary file
(2) For each API in resources.json, retrieve the API and save it to a
temporary file.
(3) If all succeed, pass the resource files to the underlying SwaggerPy
client
Args:
http_client: The twisted HTTP client to use to get the resources
Returns:
A twisted.internet.defer.Deferred that is called on success or failure
"""
def write_resource_file(json_body):
"""Writes a Swagger resource out to disk
Args:
json_body: The JSON representation of the resource
"""
if 'basePath' in json_body:
json_body['basePath'] = 'file:///{0}'.format(self._temp_dir)
if 'resourcePath' in json_body:
api_file = str(json_body['resourcePath'])
api_file = api_file.replace('{format}', 'json')
api_file.strip('/')
full_path = '{}/{}'.format(self._temp_dir, api_file)
else:
full_path = '{}/{}'.format(self._temp_dir, 'resources.json')
with open(full_path, 'w') as resource_file:
resource_file.write(json.dumps(json_body))
def create_http_failure(url, response):
"""Create a twisted.python.failure.Failure from a bad HTTP response
Args:
url: The url that failed
response: The twisted.web.client.Response object
Returns:
A twisted.python.failure.Failure representing the failure
"""
msg = 'While requesting {0}: {1} - {2}'.format(url,
response.code,
response.phrase)
fail = Failure(Exception(msg))
return fail
url = urlparse.urljoin(self._base_url, "ari/api-docs/resources.json")
resource_finished = Deferred()
def on_error(failure):
"""Generic deferred error callback
This ensures that our top most deferred gets called if any nested
deferred errors out
Args:
failure: The twisted.python.failure.Failure object
Returns:
failure
"""
shutil.rmtree(self._temp_dir)
resource_finished.errback(failure)
return failure
def on_resource_finished(response):
"""Success callback for when resources.json is parsed
Args:
response: The twisted.web.client.Response for the HTTP request
Returns:
response if the request was successful
A twisted.python.failure.Failure object if the request failed
"""
if response.code / 100 != 2:
fail = self._create_http_failure(url, response)
resource_finished.errback(fail)
return fail
finished_deferred = Deferred()
def on_resource_body_read(resource_json_body):
"""Success callback for reading the body of resources.json
Args:
resource_json_body: The JSON body of resources.json
Returns:
A twisted.internet.defer.Deferred that is fired when all API
resources are processed on success
A twisted.python.failure.Failure on error
"""
write_resource_file(resource_json_body)
def on_api_finished(response, url):
"""Success callback when an API response is received
Args:
response: The twisted.web.client.Response for the HTTP
request
url: The url for this API request
Returns:
A twisted.internet.defer.Deferred that is fired when the API
body is processed
A twisted.python.failure.Failure on error
"""
if response.code / 100 != 2:
return self._create_http_failure(url, response)
api_finished_deferred = Deferred()
def on_api_body_read(api_json_body):
"""Success callback for reading the body of an API
Args:
api_json_body: The JSON body for the API
Returns:
api_json_body
"""
write_resource_file(api_json_body)
return api_json_body
api_finished_deferred.addCallbacks(on_api_body_read,
on_error)
response.deliverBody(JSONBodyReceiver(api_finished_deferred))
return api_finished_deferred
api_deferreds = []
for api in resource_json_body.get('apis'):
path = api.get('path').replace('{format}', 'json')
api_url = urlparse.urljoin(self._base_url + '/', 'ari')
api_url = urlparse.urljoin(api_url + '/', path.strip('/'))
try:
api_deferred = http_client.request('GET', api_url)
api_deferred.addCallback(on_api_finished, api_url)
api_deferred.addErrback(on_error)
api_deferreds.append(api_deferred)
except Exception as e:
fail = Failure(e)
resource_finished.errback(fail)
return fail
def apis_processed(results):
"""Callback called when all API resources are processed
Args:
results: The list of (success, result) tuples returned from
the API request callbacks
Returns:
results on success
twisted.python.failure.Failure on error
"""
if any([result for result in results if not result[0]]):
msg = "Failed to process all API resources"
fail = Failure(Exception(msg))
finished_deferred.errback(fail)
return fail
resource_finished.callback(None)
return results
apis_finished = DeferredList(api_deferreds)
apis_finished.addCallback(apis_processed)
return apis_finished
finished_deferred.addCallbacks(on_resource_body_read, on_error)
response.deliverBody(JSONBodyReceiver(finished_deferred))
return finished_deferred
http_client.request('GET', url).addCallbacks(on_resource_finished,
on_error)
def on_resources_finished(result):
resource_file = 'file://{}/resources.json'.format(self._temp_dir)
self._swagger_client = SwaggerClient(resource_file)
print self._swagger_client
shutil.rmtree(self._temp_dir)
print "NIFTY"
resource_finished.addCallback(on_resources_finished)
return resource_finished
def __repr__(self):
return self._swagger_client.__repr__()
def __getattr__(self, item):
"""Promote resource objects to be client fields.
:param item: Name of the attribute to get.
:return: Resource object.
"""
return self._swagger_client.__getattr__(item)
def close(self):
"""Close the SwaggerClient, and underlying resources.
"""
self._swagger_client.close()
def get_resource(self, name):
"""Gets a Swagger resource by name.
:param name: Name of the resource to get
:rtype: Resource
:return: Resource, or None if not found.
"""
return self._swagger_client.resources.get(name)
| {
"repo_name": "matt-jordan/twisted-ari",
"path": "twisted_ari/swagger_client.py",
"copies": "1",
"size": "10209",
"license": "mit",
"hash": -2983559699464332000,
"line_mean": 35.4607142857,
"line_max": 81,
"alpha_frac": 0.5693995494,
"autogenerated": false,
"ratio": 4.891710589362722,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5961110138762723,
"avg_score": null,
"num_lines": null
} |
# Asynchronous file example. Works with Linux, OS X and other Unix
# variants, but not Windows, as in Windows sockets don't support file
# I/O for asynchronous I/O.
# The file descriptor is associated with socket. The client sends data
# in chunks and server reads lines from the data received from
# client. Both compute checksum to check that data is received
# correctly.
# argv[1] must be a text file
import socket, hashlib, sys, os
import asyncoro
import asyncoro.asyncfile
def client_proc(host, port, input, coro=None):
    # client coroutine: reads 'input' file and sends its contents to the
    # server in chunks, computing a sha1 checksum of everything sent so it
    # can be compared with the checksum computed by the server
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock = asyncoro.AsyncSocket(sock)
    yield sock.connect((host, port))
    # data can be written to this asynchronous socket; however, for
    # illustration, convert its file descriptor to asynchronous file
    # and write to that instead
    afd = asyncoro.asyncfile.AsyncFile(sock)
    csum = hashlib.sha1()
    # use a context manager so the input file is always closed (the
    # original code leaked the file object)
    with open(input) as in_file:
        while True:
            data = os.read(in_file.fileno(), 16 * 1024)
            if not data:
                break
            csum.update(data)
            # full=True keeps writing until all of 'data' has been sent
            yield afd.write(data, full=True)
    afd.close()
    print('client sha1 csum: %s' % csum.hexdigest())
def server_proc(conn, coro=None):
    # server coroutine: 'conn' is a synchronous socket (obtained from a
    # synchronous 'accept'); its file descriptor is converted to an
    # asynchronous file so lines can be read from it, checksummed and counted
    async_file = asyncoro.asyncfile.AsyncFile(conn)
    checksum = hashlib.sha1()
    line_count = 0
    while True:
        # read lines from data
        line = yield async_file.readline()
        if not line:
            break
        checksum.update(line)
        line_count += 1
    async_file.close()
    print('server sha1 csum: %s' % (checksum.hexdigest()))
    print('lines: %s' % (line_count))
asyncoro.logger.setLevel(asyncoro.Logger.DEBUG)

# Create a listening socket on an OS-assigned port.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(5)
host, port = sock.getsockname()
print('host: %s, port: %s' % (host, port))

# Start the client coroutine; it sends argv[1] (or, if no argument is
# given, this script itself) to the server.
asyncoro.Coro(client_proc, host, port, sys.argv[1] if len(sys.argv) > 1 else sys.argv[0])
# Accept the client's connection (synchronously) and serve it in a coroutine.
conn, addr = sock.accept()
asyncoro.Coro(server_proc, conn)
| {
"repo_name": "pgiri/asyncoro",
"path": "examples/socket_afile.py",
"copies": "1",
"size": "2169",
"license": "mit",
"hash": -4630182158640078000,
"line_mean": 32.890625,
"line_max": 89,
"alpha_frac": 0.6763485477,
"autogenerated": false,
"ratio": 3.3732503888024885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9540701842370065,
"avg_score": 0.0017794188264844946,
"num_lines": 64
} |
"""Asynchronous HTTP client for swaggerpy
Copyright (c) 2014, Matthew Jordan
Matthew Jordan <mjordan@digium.com>
This program is free software, distributed under the terms of
the MIT License (MIT)
This module provides an asynchronous HTTP client for SwaggerPy, along with
supporting classes. The asynchronous HTTP client uses a twisted HTTP agent
for HTTP requests, and an Autobahn websocket for the ARI events. Both the
agent and the websocket are handled by the Asynchronous HTTP client.
Generally, users of this module should:
(1) Implement the IARIEventReceiver interface in some class
(2) Create an instance of AsyncHTTPClient for SwaggerPy, providing it an
instance of IARIEventReceiver
(3) Process and handle events as needed; query ARI through the instance of
AsyncHTTPClient
"""
import json
import urllib
import logging
from base64 import b64encode
from zope.interface import Interface, implements
from twisted.internet import reactor
from twisted.internet.defer import succeed
from twisted.web.client import Agent, HTTPConnectionPool
from twisted.web.http_headers import Headers
from twisted.web.iweb import IBodyProducer
from swaggerpy.http_client import HttpClient, Authenticator
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, connectWS
LOGGER = logging.getLogger(__name__)
class Request(object):
    """A working object for an HTTP or WS request

    Attributes:
        method: The HTTP verb to use for this request
        url: The base URL for the request
        params: Parameters for the request (unencoded)
        headers: The twisted.web.http_headers HTTP headers object
        body_producer: A twisted.web.iweb IBodyProducer. Assign if needed.
    """

    def __init__(self, method, url, params=None):
        """Constructor

        Args:
            method: The HTTP verb to use for this request
            url: The base url for the request
            params: Optional. Parameters for the request. Defaults to an
                empty dict.
        """
        self.method = method
        self.url = url
        self.params = params or {}
        self.headers = Headers()
        self.body_producer = None

    def build_url(self):
        """Build the URL from this object

        This will encode the parameters and append them to the URL

        Returns:
            A string representation of the URL
        """
        # No parameters: the base URL is already complete (idiomatic
        # emptiness check instead of len(...) == 0)
        if not self.params:
            return self.url
        encoded_params = urllib.urlencode(self.params)
        return "%s?%s" % (self.url, encoded_params)
class JSONBodyProducer(object):
    """A twisted.web.iweb.IBodyProducer for application/json content types

    Attributes:
        body: A string representation of the JSON body
        length: The length of body
    """
    implements(IBodyProducer)

    def __init__(self, body):
        """Constructor

        Args:
            body: The JSON to produce for the HTTP request
        """
        self.body = json.dumps(body, separators=(',', ':'))
        # BUGFIX: length must be the size of the serialized string that
        # startProducing actually writes; len(body) measured the original
        # object (e.g. the number of keys in a dict)
        self.length = len(self.body)

    #pylint: disable=C0103
    def startProducing(self, consumer):
        """IBodyProducer.startProducing override

        This will write the JSON body to the consumer

        Args:
            consumer: The consumer to write the body to

        Returns:
            A twisted.internet.defer.succeed
        """
        consumer.write(self.body)
        return succeed(None)

    #pylint: disable=C0103,C0111
    def pauseProducing(self):
        pass

    #pylint: disable=C0103,C0111
    def stopProducing(self):
        pass
class BasicAuthenticator(Authenticator):
    """A swaggerpy.Authenticator that performs basic HTTP authentication

    Attributes:
        userpass: Base 64 encoded username/password pair
    """

    def __init__(self, host, username, password):
        """Constructor

        Args:
            host: The host to authenticate for
            username: The user's name
            password: The user's password
        """
        super(BasicAuthenticator, self).__init__(host)
        credentials = b'%s:%s' % (username, password)
        self.userpass = b64encode(credentials)

    def apply(self, request):
        """Apply the authentication to the Request object

        Adds an Authorization header carrying the base 64 encoded
        username/password pair

        Args:
            request: The request to apply authentication to
        """
        header_value = b'Basic ' + self.userpass
        request.headers.addRawHeader(name=b'Authorization',
                                     value=header_value)
class ApiKeyAuthenticator(Authenticator):
    """A swaggerpy.Authenticator for API key authentication (query param)

    Attributes:
        param_name: The name of the query parameter
        api_key: The API key that the query parameter will specify
    """

    def __init__(self, host, api_key, param_name='api_key'):
        """Constructor

        Args:
            host: The host to provide authentication for
            api_key: The API key to use
            param_name: Optional. The parameter name for the API key.
                        Defaults to 'api_key'.
        """
        super(ApiKeyAuthenticator, self).__init__(host)
        self.param_name = param_name
        self.api_key = api_key

    def apply(self, request):
        """Inject the API key into the request's query parameters

        Args:
            request: The request to apply authentication to
        """
        request.params[self.param_name] = self.api_key
class IARIEventReceiver(Interface):
    """An event receiver used with the ARI protocol

    An implementation of this interface is passed to an ARI protocol factory,
    and will be used with whatever instances of the protocol are made. The
    protocol, in turn, will call the implementation when events occur in the
    protocol, such as when an event is received from Asterisk.
    """
    # NOTE: zope.interface method declarations intentionally omit `self`.

    def on_open(protocol):
        """Callback called when the ARI protocol connects

        Args:
            protocol: An instance of the ARI protocol communicating with Asterisk
        """

    def on_close(protocol, was_clean, code, reason):
        """Callback called when the ARI protocol disconnects

        Args:
            protocol: An instance of the ARI protocol communicating with Asterisk
            was_clean: True iff the WebSocket was closed cleanly
            code: Close status code, as sent by the WebSocket peer
            reason: Close reason, as sent by the WebSocket peer
        """

    def on_event(protocol, event_obj):
        """Callback called when an ARI event is received from Asterisk

        Args:
            protocol: An instance of the ARI protocol communicating with Asterisk
            event_obj: The ARI event object received from Asterisk
        """
class ARIWebSocketClientFactory(WebSocketClientFactory):
    """Twisted WebSocket client protocol factory for ARI

    This is a relatively simple wrapper around WebSocketClientFactory that
    produces instances of ARIWebSocketClientProtocol.

    Attributes:
        receiver: An instance of IARIEventReceiver
    """

    def __init__(self, receiver, url, debug=True):
        """Constructor

        Args:
            receiver: The instance of IARIEventReceiver that will receive updates
            url: The URL to connect the WebSocket to
            debug: Optional. Enable greater debugging in WebSocketClientFactory.
                   Defaults to True.
        """
        WebSocketClientFactory.__init__(self, url, debug=debug,
                                        protocols=['ari'])
        self.receiver = receiver

    #pylint: disable=C0103
    def buildProtocol(self, addr):
        """Build an ARIWebSocketClientProtocol instance

        Args:
            addr: Address of the connecting peer (unused)

        Returns:
            An instance of ARIWebSocketClientProtocol
        """
        # Bug fix: ARIWebSocketClientProtocol.__init__ takes only the
        # receiver; also passing `self` raised a TypeError whenever a
        # connection was established.
        return ARIWebSocketClientProtocol(self.receiver)

    def connect(self):
        """Connect the client factory to the WebSocket server

        Returns:
            An instance of twisted.internet.interfaces.IConnector
        """
        return connectWS(self)
#pylint: disable=R0904
class ARIWebSocketClientProtocol(WebSocketClientProtocol):
    """Twisted WebSocket client protocol for ARI

    Attributes:
        receiver: Our IARIEventReceiver receiver object
    """

    def __init__(self, receiver):
        """Constructor

        Args:
            receiver: Our IARIEventReceiver receiver object
        """
        self.receiver = receiver

    #pylint: disable=C0103
    def onOpen(self):
        """Callback called when the WebSocket connection is made"""
        self.receiver.on_open(self)

    #pylint: disable=C0103
    def onClose(self, wasClean, code, reason):
        """Callback called when the WebSocket connection is closed

        Args:
            wasClean: True iff the WebSocket was closed cleanly
            code: Close status code, as sent by the WebSocket peer
            reason: Close reason, as sent by the WebSocket peer
        """
        self.receiver.on_close(self, wasClean, code, reason)

    #pylint: disable=C0103,W0613
    def onMessage(self, msg, isBinary):
        """Callback called when the WebSocket receives a message

        Args:
            msg: Message payload received from the WebSocket
            isBinary: True iff msg is binary
        """
        # Ignore binary messages - we can't understand those!
        if isBinary:
            return
        try:
            event = json.loads(msg)
        except ValueError:
            LOGGER.warn('Received invalid JSON from server: %s', msg)
            return
        # Bug fix: IARIEventReceiver.on_event is declared as
        # on_event(protocol, event_obj) and the sibling on_open/on_close
        # calls pass `self`; the protocol argument was missing here.
        self.receiver.on_event(self, event)
class AsyncHTTPClient(HttpClient):
    """An asynchronous swaggerpy.HttpClient using twisted

    Attributes:
        receiver: An instance of IARIEventReceiver
        http_pool: A twisted.web.client.HTTPConnectionPool, used with agent
        agent: A twisted.web.client.Agent, for sending HTTP requests
        authenticator: The authenticator used with agent and ws_conn
        ws_conn: A twisted.internet.interfaces.IConnector representing the
                 WebSocket connection
    """

    def __init__(self, receiver):
        """Constructor

        Args:
            receiver: An instance of IARIEventReceiver
        """
        super(AsyncHTTPClient, self).__init__()
        self.receiver = receiver
        self.http_pool = HTTPConnectionPool(reactor)
        self.agent = Agent(reactor, pool=self.http_pool)
        self.authenticator = None
        self.ws_conn = None

    def close(self):
        """Close the HTTP persistent connections and the WebSocket connection
        """
        self.http_pool.closeCachedConnections()
        if self.ws_conn is not None:
            self.ws_conn.disconnect()

    def set_basic_auth(self, host, username, password):
        """Set up a SwaggerPy basic authenticator

        Args:
            host: The host to authenticate
            username: The user's name
            password: The user's password
        """
        self.authenticator = BasicAuthenticator(host=host,
                                                username=username,
                                                password=password)

    def set_api_key(self, host, api_key, param_name='api_key'):
        """Set up a SwaggerPy API key authenticator

        Args:
            host: The host to authenticate
            api_key: The API key
            param_name: The query parameter for api_key
        """
        self.authenticator = ApiKeyAuthenticator(host=host,
                                                 api_key=api_key,
                                                 param_name=param_name)

    def apply_authentication(self, req):
        """Apply authentication to a request, if one is configured

        Args:
            req: The Request instance to apply authentication to
        """
        auth = self.authenticator
        if auth is not None and auth.matches(req.url):
            auth.apply(req)

    def request(self, method, url, params=None, data=None):
        """Perform an HTTP request

        Args:
            method: The HTTP verb to use for the request
            url: The base URL of the request
            params: Optional. Query parameters to use with the request.
            data: A JSON body to encode and provide with the request

        Returns:
            twisted.Deferred that will be called when the request completes.
            On success, the callback will be called; on failure, the errback
            will be called.
        """
        req = Request(method, url, params=params)
        self.apply_authentication(req)
        if data:
            # Only requests carrying a body get a Content-Type and producer.
            req.headers.addRawHeader('Content-Type', 'application/json')
            req.body_producer = JSONBodyProducer(data)
        return self.agent.request(req.method, req.build_url(),
                                  req.headers, req.body_producer)

    def ws_connect(self, url, params=None):
        """Websocket-client based implementation.

        Args:
            url: The base url of the request
            params: Optional. Query parameters to use with the request.

        Note that the connection object returned by this function should
        generally not be used to close the WebSocket connection. The close
        method should be used instead, as that will close both the WebSocket
        as well as the persistent HTTP connection.

        Returns:
            An instance of twisted.internet.interfaces.IConnector
        """
        req = Request('GET', url, params=params)
        self.apply_authentication(req)
        factory = ARIWebSocketClientFactory(self.receiver, req.build_url())
        self.ws_conn = factory.connect()
        return self.ws_conn
| {
"repo_name": "matt-jordan/twisted-ari",
"path": "twisted_ari/http_client.py",
"copies": "1",
"size": "13525",
"license": "mit",
"hash": 6020989052415412000,
"line_mean": 30.6744730679,
"line_max": 79,
"alpha_frac": 0.6508687616,
"autogenerated": false,
"ratio": 4.590970807875085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0031574613185447113,
"num_lines": 427
} |
"""Asynchronous IO library for python using greenlet based coroutines.
Copyright (c) 2009, Erik Gorset
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY ERIK GORSET, AND CONTRIBUTORS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Here's an implementation of a scheduler/Channel library which can be used to
implement servers using coroutines and asynchronous IO. It provides a
SocketServer mix-in which can be combined with BaseHTTPServer to implement a
comet enabled server which can support a high number of concurrent connections.
To demonstrate the capabilities of the library, an example of a handler for
BaseHTTPServer is shown to tackle the c10k problem, and to be an excellent
comet enabled server.
Please note that there's nothing strange with the handler implementation. By
providing a thread or fork compatible implementation of Channel, it should be
possible to run it with the builtin forking or threading SocketServer mixins.
"""
import BaseHTTPServer
class TestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Comet-style demo handler: /wait parks a client, /notify wakes all."""

    # Shared across all handler instances: channels of clients parked on /wait.
    waiters = []

    def do_GET(self):
        # send headers
        self.send_response(200)
        self.send_header('Content-type', 'text/plain')
        self.end_headers()
        if self.path == '/wait':
            # Park this client on a fresh channel until a notify arrives.
            channel = Channel()
            self.waiters.append(channel)
            count = channel.read()
        elif self.path == '/notify':
            # Wake every parked client, telling each how many were waiting.
            count = len(self.waiters)
            for waiter in self.waiters:
                waiter.write(count)
            del self.waiters[:]
        else:
            # Default: report the current number of waiters.
            count = len(self.waiters)
        self.wfile.write('%s' % count)

    def log_message(self, *args, **vargs):
        # Silence the default per-request logging.
        pass
"""
Then we need a mix-in to handle scheduler/channel activities. This is where we
put the magic, and is comperable to ThreadingMixIn and ForkingMixIn.
"""
class ScheduledMixIn:
    "Mix-in class to handle each request in a new coroutine"
    def process_request(self, request, client_address):
        # the BaseHTTPServer framework uses only the "file protocol" for file
        # descriptors, so we put the request in an object which will wrap all
        # IO calls using kqueue/epoll and schedule/Channel.
        request = ScheduledFile.fromSocket(request)
        # Run the handler in its own coroutine so the accept loop keeps going.
        @go
        def runner():
            self.finish_request(request, client_address)
            self.close_request(request)
    def handle_request(self):
        # Accepting never blocks: get_request() reads from the accept channel.
        return self._handle_request_noblock()
    def serve_forever(self):
        while True:
            self.handle_request()
    def server_activate(self):
        self.socket.listen(self.request_queue_size)
        self.socket.setblocking(False)
        self.acceptStream = Channel() # use a channel for new connections
        def runner(n, eof):
            # IO-core callback: accept up to n pending connections and feed
            # them into the channel.  Returning `runner` keeps us registered.
            for i in xrange(n): # kqueue will provide the number of connections waiting
                try:
                    client = self.socket.accept()
                except socket.error, e:
                    if e.errno == errno.EAGAIN: # either epoll, a kernel bug or a race condition
                        break
                    # FIXME: more error handling?
                    raise
                self.acceptStream.write(client)
            if not eof:
                return runner
        _goRead(self.socket.fileno(), runner)
    def get_request(self):
        # Blocks the current coroutine until a connection is available.
        return self.acceptStream.read()
"""
To test this we will first start the server, create N clients that will
connect and wait, then finally connect with a client that notifies everyone. At
the same time we will continuously connect a client to get the status.
"""
def testScheduledServer(n):
    "test http server with n clients"
    # End-to-end exercise: start a server, park n clients on /wait, then hit
    # /notify and verify everyone was woken with the right count.
    class ScheduledHTTPServer(ScheduledMixIn, BaseHTTPServer.HTTPServer):
        pass
    # start web server at a random port
    httpd = ScheduledHTTPServer(('', 0), TestHandler)
    address = httpd.server_name, httpd.server_port
    go(httpd.serve_forever)
    def httpClientGet(client, path):
        "super simple http client"
        try:
            client.write('GET %s HTTP/1.0\r\n\r\n' % path)
            data = ''.join(client)
            pos = data.find('\r\n\r\n')
            # Split the raw response into (headers, body).
            return data[:pos], data[pos+4:]
        finally:
            client.close()
    # spin up a few clients
    for i in xrange(n):
        def runner(client):
            header, body = httpClientGet(client, '/wait')
            assert int(body) == n
        # Connect eagerly here; the coroutine then blocks on /wait.
        go(partial(runner, ScheduledFile.connectTcp(address)))
    # wait until all clients are ready
    count = 0
    while count != n:
        header, body = httpClientGet(ScheduledFile.connectTcp(address), '/')
        count = int(body)
    # notify everyone
    header, body = httpClientGet(ScheduledFile.connectTcp(address), '/notify')
    assert int(body) == n
    # wait for everyone to finish
    count = -1
    while count:
        header, body = httpClientGet(ScheduledFile.connectTcp(address), '/')
        count = int(body)
"""
Example run of testScheduledServer on a mbp 13" 2.53 GHz:
% python naglfar/core.py 10000
done 10000
Time spent in user mode (CPU seconds) : 10.567s
Time spent in kernel mode (CPU seconds) : 3.344s
Total time : 0:15.67s
CPU utilisation (percentage) : 88.7%
Even though it's only 10k clients, they are all running in the same
process/thread as the server, which makes it 20k sockets. The amount of time
spent in user mode vs kernel mode tells us that python makes up for about 75%
of the cpu usage.
The next step would perhaps be to look into optimizing BaseHTTPServer. A faster
language or implementation should make it possible to get closer to the 3.344s
theoretical limit using the same design. It's also likely that the kernel itself
could be optimized for this kind of work. The only tuning performed was
increasing the maximum number of descriptors.
Finally, the code to make it all work:
"""
import os
import errno
import select
import socket
import traceback
from greenlet import greenlet, getcurrent
from functools import partial
from collections import deque, namedtuple
# This is just a job queue which we routinely pop to do more work. There's no
# "switch thread after N time" mechanism, so each job needs to behave.
queue = deque()
def go(callable, *args, **vargs):
    "Create a new coroutine for callable(*args, **vargs)"
    def trampoline():
        callable(*args, **vargs)
        # Hand control back to the scheduler once the coroutine finishes.
        scheduler.switch()
    # The scheduler greenlet must be the parent, so completion (and any
    # uncaught exception) lands back in the scheduler loop.
    coro = greenlet(trampoline, scheduler)
    queue.append(coro.switch)
def scheduler():
    # Main trampoline: drain the job queue until empty.  Each job is a
    # greenlet switch callable.  Any exception escaping a job aborts the
    # process, since coroutine state is unrecoverable at that point.
    try:
        while queue:
            queue.popleft()()
    except Exception, e:
        traceback.print_exc()
        os._exit(1)
# Replace the function with a greenlet running it; coroutines hand control
# back via scheduler.switch().
scheduler = greenlet(scheduler)
class Channel(object):
    "An asynchronous channel"

    def __init__(self):
        self.q = deque()   # buffered messages, FIFO order
        self.waiting = []  # switch callables of coroutines blocked in wait()

    def write(self, msg):
        "Write to the channel"
        self.q.append(msg)
        # Re-schedule every coroutine currently blocked on this channel.
        queue.extend(self.waiting)
        self.waiting = []

    def wait(self):
        # Block the current coroutine until at least one message is queued.
        while not self.q:
            self.waiting.append(getcurrent().switch)
            scheduler.switch()

    def read(self):
        "Read from the channel, blocking if it's empty"
        self.wait()
        return self.q.popleft()

    def readWaiting(self, block=False):
        # Drain everything currently queued; optionally block until at
        # least one message is available.
        if block:
            self.wait()
        drained = list(self.q)
        self.q.clear()
        return drained

    def iterateWaiting(self):
        # Endless iterator over drained batches, blocking between batches.
        while True:
            yield self.readWaiting(True)

    def __iter__(self):
        # Endless iterator over individual messages.
        while True:
            yield self.read()
def goRead(fd, n=None):
    "Read n bytes, or the next chunk if n is None"
    # Returns a callable which blocks until the data is available and then
    # yields it as a str ('' signals eof).
    c = Channel()
    buffer = bytearray()
    def reader(bytesReady, eof):
        # IO-core callback: bytesReady is a hint of how much is readable.
        # Returning `reader` keeps the registration alive for more data.
        if bytesReady:
            # read maximum or the bytes remaining
            try:
                data = os.read(fd, bytesReady if n is None else min(bytesReady, n - len(buffer)))
            except OSError, e:
                if e.errno == errno.EAGAIN: # potential race condition
                    return reader
                # NOTE(review): all other read errors are treated as eof —
                # confirm that silent truncation is intended.
                data = ''
            eof = not data
            buffer.extend(data)
        if not eof and n is not None and len(buffer) < n:
            return reader
        c.write(str(buffer))
    _goRead(fd, reader)
    return c.read
def goWrite(fd, data):
    "Write data to fd and return the number of bytes written"
    # Returns a callable which blocks until the write finishes and yields
    # the number of bytes written (0 indicates an immediate error/eof).
    o = dict(offset=0)  # mutable cell: py2 closures cannot rebind outer names
    c = Channel()
    def writer(bytesReady, eof):
        offset = o['offset']
        if not eof:
            try:
                offset += os.write(fd, str(data[offset:offset+bytesReady]))
            except OSError, e:
                if e.errno == errno.EAGAIN:
                    pass
                # NOTE(review): eof is set even on EAGAIN — the `pass` above
                # looks like it was meant to retry instead; confirm.
                eof = True # treat all other errors as eof
        if not eof and offset < len(data):
            o['offset'] = offset
            return writer
        c.write(offset)
    _goWrite(fd, writer)
    return c.read
def goSendfile(fdFile, fd, offset, nbytes):
    # Send nbytes from file fdFile to socket fd starting at offset via the
    # sendfile(2) syscall.  Returns a callable that blocks until done and
    # yields the number of bytes actually sent.
    assert type(fd) == int
    assert nbytes > 0
    o = dict(offset=offset, nbytes=nbytes)  # mutable cell for the closure
    c = Channel()
    def writer(bytesReady, eof):
        if bytesReady and not eof:
            try:
                n = sendfile(fdFile, fd, o['offset'], min(bytesReady, o['nbytes']))
            except OSError, e:
                if e.errno == errno.EAGAIN:
                    return writer
                # NOTE(review): on any other error we return without ever
                # writing to the channel, leaving the caller blocked
                # forever — confirm this is intended.
                return # do more here?
            o['offset'] += n
            o['nbytes'] -= n
            assert o['nbytes'] >= 0
            if n and o['nbytes']:
                return writer
        c.write(o['offset'] - offset)
    _goWrite(fd, writer)
    return c.read
def goClose(fd):
    "Close the fd and do kqueue cleanup"
    assert fd is not None and fd != -1
    # Drop any pending IO registrations first: the kernel recycles
    # descriptor numbers immediately, so stale callbacks would otherwise
    # fire for an unrelated fd.
    _goClose(fd)
    os.close(fd)
# Platform-specific IO core.  Each branch defines the same four names:
#   _ioCore()      - run one poll cycle; returns True while registrations remain
#   _goRead(fd, m) - register callback m for readability of fd
#   _goWrite(fd, m)- register callback m for writability of fd
#   _goClose(fd)   - drop any registrations for fd before it is closed
# Callbacks receive (bytesReady, eof) and stay registered by returning
# themselves.
if hasattr(select, 'epoll'):
    # Linux: epoll.  `io` maps (fd, eventmask) -> callback; `ioState` tracks
    # the mask currently registered with epoll for each fd.
    epoll = select.epoll()
    io = {}
    ioState = {}
    def _ioCore():
        # Poll without blocking while the scheduler queue has work; block
        # indefinitely otherwise (-1).
        for fd, eventmask in epoll.poll(0 if queue else -1):
            assert not eventmask & select.EPOLLPRI
            removeMask = 0
            for mask in (select.EPOLLIN, select.EPOLLOUT):
                key = fd, mask
                if eventmask & mask:
                    # 32768 is the chunk-size hint; epoll gives no byte count.
                    callback = io.pop(key)(32768, bool(eventmask & (select.EPOLLHUP | select.EPOLLERR)))
                    if callback:
                        assert key not in io
                        io[key] = callback
                    else:
                        removeMask |= mask
            if removeMask:
                # The callback is done with this direction; shrink the mask.
                ioState[fd] ^= removeMask
                epoll.modify(fd, ioState[fd])
        return bool(io)
    def _goEpoll(ident, mask, m):
        # Register (or extend) interest in `mask` for descriptor `ident`.
        if ident not in ioState:
            ioState[ident] = mask
            epoll.register(ident, mask)
        else:
            ioState[ident] = eventmask = ioState[ident] | mask
            epoll.modify(ident, eventmask)
        io[ident, mask] = m
        _ioRunner.activate()
    _goWrite = lambda fd, m:_goEpoll(fd, select.EPOLLOUT, m)
    _goRead = lambda fd, m:_goEpoll(fd, select.EPOLLIN, m)
    def _goClose(fd):
        if fd in ioState:
            del ioState[fd]
        for key in (fd, select.EPOLLIN), (fd, select.EPOLLOUT):
            if key in io:
                del io[key]
elif hasattr(select, 'kqueue'):
    # BSD/OSX: kqueue.  Changes are batched in `ioChanges` and submitted on
    # the next control() call.
    import patch_kqueue # kqueue is broken in python <=2.6.4. This will fix it using ctypes
    kq = select.kqueue()
    io = {}
    ioChanges = {}
    def _ioCore():
        "Add changes and poll for events, blocking if scheduler queue is empty"
        changes = ioChanges.values()
        ioChanges.clear()
        for event in kq.control(changes, len(io), 0 if queue else None):
            assert not event.flags & select.KQ_EV_ERROR
            key = event.ident, event.filter
            # kqueue reports the exact number of bytes ready in event.data.
            callback = io.pop(key)(event.data, bool(event.flags & select.KQ_EV_EOF))
            if callback:
                assert key not in io
                io[key] = callback
            else:
                # Callback finished: schedule removal of the kernel filter.
                ioChanges[key] = select.kevent(event.ident, event.filter, select.KQ_EV_DELETE)
        return bool(io)
    def _goRead(fd, m):
        ioChanges[fd, select.KQ_FILTER_READ] = select.kevent(fd, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
        io[fd, select.KQ_FILTER_READ] = m
        _ioRunner.activate()
    def _goWrite(fd, m):
        ioChanges[fd, select.KQ_FILTER_WRITE] = select.kevent(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
        io[fd, select.KQ_FILTER_WRITE] = m
        _ioRunner.activate()
    def _goClose(fd):
        for key in (fd, select.KQ_FILTER_WRITE), (fd, select.KQ_FILTER_READ):
            if key in io:
                del io[key]
            if key in ioChanges:
                del ioChanges[key]
else: # fallback to select
    # Portable fallback: one dict per direction, fd -> callback.
    ioRead = {}
    ioWrite = {}
    def _ioCore():
        x, y, z = select.select(list(ioRead), list(ioWrite), [], 0 if queue else None)
        for fds, l in ((x, ioRead), (y, ioWrite)):
            for fd in fds:
                # select gives no byte count either; 32768 is a hint, and
                # eof detection is left to the callback (os.read == '').
                callback = l.pop(fd)(32768, False)
                if callback:
                    assert fd not in l
                    l[fd] = callback
        return bool(ioRead or ioWrite)
    def _goRead(fd, m):
        ioRead[fd] = m
        _ioRunner.activate()
    def _goWrite(fd, m):
        ioWrite[fd] = m
        _ioRunner.activate()
    def _goClose(fd):
        if fd in ioWrite:
            del ioWrite[fd]
        if fd in ioRead:
            del ioRead[fd]
from sendfile import sendfile
def _ioRunner():
    # Scheduler job driving the IO core.  It re-queues itself while any IO
    # registrations remain, and goes dormant otherwise until _ioActivate()
    # wakes it again.
    try:
        pending = _ioCore()
    except:
        traceback.print_exc()
        os._exit(2)
    if not pending:
        _ioRunner.active = False
    else:
        queue.append(_ioRunner)
_ioRunner.active = False


def _ioActivate():
    # Idempotently (re)schedule the IO runner.
    if _ioRunner.active:
        return
    _ioRunner.active = True
    queue.append(_ioRunner)
_ioRunner.activate = _ioActivate
"""
BaseHTTPServer/SocketServer expect to work with file objects. All IO operations
needs to be wrapped by using the scheduler enabled goRead, goWrite and goClose.
This will ensure that other coroutines can do work while the file object is
waiting for IO.
"""
class ScheduledFile(object):
    "A file object using the scheduler/Channel framework to do asynchronous nonblocking IO"
    def __init__(self, fd, autoflush=False, bufferSize=2**16):
        # fd: raw descriptor owned by this object (released via close()).
        # autoflush: flush in the background whenever data is written.
        # bufferSize: outgoing-buffer threshold that forces a blocking flush.
        self.fd = fd
        self.autoflush = autoflush
        self.bufferSize = bufferSize
        self.incoming = bytearray()   # bytes read from fd but not yet consumed
        self.outgoing = bytearray()   # bytes written but not yet flushed
        self._flushers = None         # None = no flusher coroutine running
        self.nwrite = self.nread = 0  # running byte totals, for statistics
    @classmethod
    def fromSocket(cls, sock, *args, **vargs):
        "Use an existing socket to make instance"
        # python closes the socket under deallocation, so we use dup to make sure
        # we can close the fd independently.
        return cls(os.dup(sock.fileno()), *args, **vargs)
    @classmethod
    def connectTcp(cls, address):
        # Open a non-blocking TCP connection to address=(host, port) and
        # return a ScheduledFile wrapping it.
        sock = socket.socket()
        try:
            sock.setblocking(False)
            try:
                sock.connect(address)
            except socket.error, e:
                if e.errno != errno.EINPROGRESS: # means we need to wait for the socket to become writable
                    raise
            s = cls.fromSocket(sock, autoflush=True)
            goWrite(s.fd, '')() # we need to make sure it's writable before doing anything
            return s
        finally:
            sock.close()
    @property
    def closed(self):
        return self.fd is None
    def _flusher(self):
        # Background coroutine: drain `outgoing` to the fd, then wake every
        # coroutine blocked in flush().
        while self.outgoing and self.fd is not None:
            n = goWrite(self.fd, self.outgoing)()
            if n == 0:
                # Write error/eof: poison the buffer so later writes raise.
                self.outgoing = None
            else:
                del self.outgoing[:n]
                self.nwrite += n
        for i in self._flushers:
            i.write(True)
        self._flushers = None
    def flush(self, block=True):
        # Start a flusher coroutine if none is running; optionally block
        # until the current flush completes.
        if self._flushers is None:
            self._flushers = []
            go(self._flusher)
        if block:
            c = Channel()
            self._flushers.append(c)
            c.read()
    def write(self, data):
        if None in (self.fd, self.outgoing):
            raise ValueError('closed')
        self.outgoing.extend(data)
        if self.autoflush:
            # Backpressure: only block once the buffer is over the limit.
            self.flush(block=len(self.outgoing) > self.bufferSize)
        elif len(self.outgoing) > self.bufferSize:
            self.flush()
    def _read(self, n=None):
        # Blocking raw read of n bytes (or the next chunk when n is None).
        assert self.fd is not None
        chunk = goRead(self.fd, n)()
        self.nread += len(chunk)
        return chunk
    def readline(self, n=Ellipsis, separator='\n'):
        "Read a whole line, until eof or maximum n bytes"
        # NOTE(review): n defaults to Ellipsis so `len(line) >= n` is always
        # False under CPython 2's mixed-type ordering (i.e. "no limit").
        # This relies on implementation-defined comparison behavior.
        line = bytearray()
        for chunk in self.readUntil(separator):
            assert chunk
            line += chunk
            if len(line) >= n:
                break
        if len(line) == n:
            return str(line)
        elif len(line) > n:
            # Keep the surplus buffered for the next read.
            assert not self.incoming
            self.incoming = line[n:]
            return str(line[:n])
        else:
            return str(line)
    def read(self, n=-1):
        "read n bytes or until eof"
        if n == -1:
            # Slurp everything until eof.
            while True:
                chunk = self._read()
                if not chunk:
                    break
                self.incoming += chunk
        else:
            # Fill the buffer until we have n bytes or hit eof.
            while n > len(self.incoming):
                chunk = self._read()
                self.incoming += chunk
                if not chunk:
                    break
        data = str(self.incoming[:n if n != -1 else len(self.incoming)])
        del self.incoming[:len(data)]
        return data
    def readUntil(self, separator, includingTxt=True):
        "read until separator or eof"
        def reader():
            # Serve anything already buffered first, then read from the fd.
            if self.incoming:
                chunk = str(self.incoming)
                del self.incoming[:]
                yield chunk
            while True:
                chunk = self._read()
                yield chunk
                if not chunk:
                    break
        # The module-level readUntil pushes back everything from the
        # separator onwards into self.incoming.
        for chunk in readUntil(reader().next, self.incoming.extend, separator):
            yield chunk
        if includingTxt:
            if self.incoming:
                assert self.incoming.startswith(separator)
                del self.incoming[:len(separator)]
            yield separator
    def close(self, flush=True):
        if self.fd is None:
            return  # already closed
        if flush and self.outgoing:
            self.flush()
        goClose(self.fd)
        self.fd = None
    def __iter__(self):
        # Iterate over lines until eof.
        while True:
            line = self.readline()
            if not line:
                break
            yield line
    def makefile(self, *args, **vargs):
        # SocketServer compatibility: we already behave like a file.
        return self
    def sendfile(self, fd, offset=0, nbytes=0):
        # Flush buffered output first so byte ordering is preserved.
        self.flush()
        return goSendfile(fd, self.fd, offset, nbytes)()
def readUntil(next, pushback, separator):
    # Generator yielding chunks of data up to (but not including) the
    # separator.  `next` supplies more data ('' at eof); `pushback` receives
    # any bytes read past the match point.  Note the pushed-back data starts
    # with the separator itself — the caller strips it.
    result = bytearray()
    while True:
        pos = result.find(separator)
        if pos != -1:
            # Found it: push back separator + remainder, yield the prefix.
            rest = result[pos:]
            if rest:
                pushback(str(rest))
            del result[pos:]
            if result:
                yield str(result)
            return
        elif len(separator) < len(result):
            # Flush everything that cannot contain the separator, keeping
            # len(separator) bytes of overlap in case a match straddles
            # two chunks.
            yield str(result[:-len(separator)])
            del result[:-len(separator)]
        chunk = next()
        if not chunk:
            # eof: yield whatever is left without a separator.
            if result:
                yield str(result)
            return
        result += chunk
if __name__ == "__main__":
import sys
n = int(sys.argv[1])
testScheduledServer(n)
print 'done', n
| {
"repo_name": "gorset/naglfar",
"path": "naglfar/core.py",
"copies": "1",
"size": "21065",
"license": "bsd-2-clause",
"hash": -7462094646752746000,
"line_mean": 31.3579109063,
"line_max": 129,
"alpha_frac": 0.5863280323,
"autogenerated": false,
"ratio": 4.0847391894512315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5171067221751231,
"avg_score": null,
"num_lines": null
} |
"""Asynchronous msgpack-rpc handling in the event loop pipeline."""
import logging
from traceback import format_exc
logger = logging.getLogger(__name__)
debug, info, warn = (logger.debug, logger.info, logger.warning,)


class AsyncSession(object):
    """Asynchronous msgpack-rpc layer that wraps a msgpack stream.

    This wraps the msgpack stream interface for reading/writing msgpack
    documents and exposes an interface for sending and receiving msgpack-rpc
    requests and notifications.
    """

    def __init__(self, msgpack_stream):
        """Wrap `msgpack_stream` on a msgpack-rpc interface."""
        self._msgpack_stream = msgpack_stream
        self._next_request_id = 1
        self._pending_requests = {}
        self._request_cb = self._notification_cb = None
        # Dispatch table keyed on the msgpack-rpc message-type tag.
        self._handlers = {
            0: self._on_request,
            1: self._on_response,
            2: self._on_notification
        }

    def threadsafe_call(self, fn):
        """Wrapper around `MsgpackStream.threadsafe_call`."""
        self._msgpack_stream.threadsafe_call(fn)

    def request(self, method, args, response_cb):
        """Send a msgpack-rpc request to Nvim.

        A msgpack-rpc request with method `method` and argument `args` is
        sent to Nvim; `response_cb` is invoked once the matching response
        arrives.
        """
        request_id = self._next_request_id
        self._next_request_id += 1
        self._msgpack_stream.send([0, request_id, method, args])
        self._pending_requests[request_id] = response_cb

    def notify(self, method, args):
        """Send a msgpack-rpc notification to Nvim.

        Like a request, but fire-and-forget: no response will be received.
        """
        self._msgpack_stream.send([2, method, args])

    def run(self, request_cb, notification_cb):
        """Run the event loop to receive requests and notifications from Nvim.

        While the event loop is running, `request_cb` and `notification_cb`
        are invoked for incoming requests and notifications respectively.
        """
        self._request_cb = request_cb
        self._notification_cb = notification_cb
        self._msgpack_stream.run(self._on_message)
        self._request_cb = None
        self._notification_cb = None

    def stop(self):
        """Stop the event loop."""
        self._msgpack_stream.stop()

    def _on_message(self, msg):
        # Route on the message-type tag.  Any failure is reported back to
        # the peer as an error response so the session stays alive.
        try:
            handler = self._handlers.get(msg[0], self._on_invalid_message)
            handler(msg)
        except Exception:
            err_str = format_exc(5)
            warn(err_str)
            self._msgpack_stream.send([1, 0, err_str, None])

    def _on_request(self, msg):
        # msg = [0, msgid, method, args]
        debug('received request: %s, %s', msg[2], msg[3])
        self._request_cb(msg[2], msg[3],
                         Response(self._msgpack_stream, msg[1]))

    def _on_response(self, msg):
        # msg = [1, msgid, error, result]
        debug('received response: %s, %s', msg[2], msg[3])
        response_cb = self._pending_requests.pop(msg[1])
        response_cb(msg[2], msg[3])

    def _on_notification(self, msg):
        # msg = [2, event, args]
        debug('received notification: %s, %s', msg[1], msg[2])
        self._notification_cb(msg[1], msg[2])

    def _on_invalid_message(self, msg):
        error = 'Received invalid message %s' % msg
        warn(error)
        self._msgpack_stream.send([1, 0, error, None])
class Response(object):
    """Response to a msgpack-rpc request that came from Nvim.

    When Nvim sends a msgpack-rpc request, an instance of this class is
    created for remembering state required to send a response.
    """

    def __init__(self, msgpack_stream, request_id):
        """Initialize the Response instance."""
        self._msgpack_stream = msgpack_stream
        self._request_id = request_id

    def send(self, value, error=False):
        """Send the response.

        If `error` is True, it will be sent as an error.
        """
        # msgpack-rpc response layout: [type=1, msgid, error, result]
        err_field, result_field = (value, None) if error else (None, value)
        resp = [1, self._request_id, err_field, result_field]
        debug('sending response to request %d: %s', self._request_id, resp)
        self._msgpack_stream.send(resp)
| {
"repo_name": "bfredl/python-client",
"path": "neovim/msgpack_rpc/async_session.py",
"copies": "3",
"size": "4629",
"license": "apache-2.0",
"hash": 7260319515561505000,
"line_mean": 33.5447761194,
"line_max": 78,
"alpha_frac": 0.5938647656,
"autogenerated": false,
"ratio": 3.893187552565181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.598705231816518,
"avg_score": null,
"num_lines": null
} |
"""Asynchronous msgpack-rpc handling in the event loop pipeline."""
import threading
from traceback import format_exc
class AsyncSession(object):
    """Asynchronous msgpack-rpc layer that wraps a msgpack stream.

    This wraps the msgpack stream interface for reading/writing msgpack
    documents and exposes an interface for sending and receiving msgpack-rpc
    requests and notifications.
    """

    def __init__(self, msgpack_stream):
        """Wrap `msgpack_stream` on a msgpack-rpc interface."""
        self._msgpack_stream = msgpack_stream
        self._next_request_id = 1
        self._pending_requests = {}
        self._request_cb = self._notification_cb = None
        # Protects _next_request_id and _pending_requests, which may be
        # touched from multiple threads.
        self._lock = threading.Lock()
        self._handlers = {
            0: self._on_request,
            1: self._on_response,
            2: self._on_notification
        }

    def threadsafe_call(self, fn):
        """Wrapper around `MsgpackStream.threadsafe_call`."""
        self._msgpack_stream.threadsafe_call(fn)

    def request(self, method, args, response_cb):
        """Send a msgpack-rpc request to Nvim.

        A msgpack-rpc request with method `method` and argument `args` is
        sent to Nvim. The `response_cb` function is called when the response
        is available.
        """
        with self._lock:
            request_id = self._next_request_id
            self._next_request_id = request_id + 1
            # Register the callback before sending so a fast response can
            # never race ahead of the registration.
            self._pending_requests[request_id] = response_cb
        # Send outside the lock: holding it during stream I/O would
        # needlessly serialize unrelated callers.
        self._msgpack_stream.send([0, request_id, method, args])

    def notify(self, method, args):
        """Send a msgpack-rpc notification to Nvim.

        A msgpack-rpc notification with method `method` and argument `args`
        is sent to Nvim. This has the same effect as a request, but no
        response will be received.
        """
        self._msgpack_stream.send([2, method, args])

    def run(self, request_cb, notification_cb):
        """Run the event loop to receive requests and notifications from Nvim.

        While the event loop is running, `request_cb` and `notification_cb`
        will be called whenever requests or notifications are respectively
        available.
        """
        self._request_cb = request_cb
        self._notification_cb = notification_cb
        self._msgpack_stream.run(self._on_message)
        self._request_cb = None
        self._notification_cb = None

    def stop(self):
        """Stop the event loop."""
        self._msgpack_stream.stop()

    def _on_message(self, msg):
        # Route on the message-type tag; failures are reported back to the
        # peer as an error response so the session stays alive.
        try:
            self._handlers.get(msg[0], self._on_invalid_message)(msg)
        except Exception:
            err_str = format_exc(5)
            self._msgpack_stream.send([1, 0, err_str, None])

    def _on_request(self, msg):
        # request
        # - msg[1]: id
        # - msg[2]: method name
        # - msg[3]: arguments
        self._request_cb(msg[2], msg[3], Response(self._msgpack_stream,
                                                  msg[1]))

    def _on_response(self, msg):
        # response to a previous request:
        # - msg[1]: the id
        # - msg[2]: error(if any)
        # - msg[3]: result(if not errored)
        with self._lock:
            response_cb = self._pending_requests.pop(msg[1])
        # Bug fix: invoke the callback OUTSIDE the lock.  The lock is not
        # reentrant, so a callback that issued a new request() while we
        # still held it would deadlock.
        response_cb(msg[2], msg[3])

    def _on_notification(self, msg):
        # notification/event
        # - msg[1]: event name
        # - msg[2]: arguments
        self._notification_cb(msg[1], msg[2])

    def _on_invalid_message(self, msg):
        error = 'Received invalid message %s' % msg
        self._msgpack_stream.send([1, 0, error, None])
class Response(object):
    """Handle for replying to a msgpack-rpc request received from Nvim.

    Remembers the stream and the request id so the eventual reply can be
    matched to the original request.
    """

    def __init__(self, msgpack_stream, request_id):
        """Remember the stream and the request id for the eventual reply."""
        self._msgpack_stream = msgpack_stream
        self._request_id = request_id

    def send(self, value, error=False):
        """Send `value` back to Nvim, as an error payload if `error` is True."""
        if error:
            payload = [1, self._request_id, value, None]
        else:
            payload = [1, self._request_id, None, value]
        self._msgpack_stream.send(payload)
| {
"repo_name": "lunixbochs/actualvim",
"path": "lib/neovim/msgpack_rpc/async_session.py",
"copies": "1",
"size": "4333",
"license": "mit",
"hash": -5201657606713233000,
"line_mean": 33.1181102362,
"line_max": 78,
"alpha_frac": 0.5908146781,
"autogenerated": false,
"ratio": 3.986200551977921,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.507701523007792,
"avg_score": null,
"num_lines": null
} |
"""asynchronous networking"""
import aiohttp
import asyncio
import hashlib
from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl
# Headers merged into every request; note that in `urlfetch` they take
# precedence over caller-supplied headers (update order).
DEFAULT_HEADERS = {"User-Agent": "Mozilla/5.0"}
def url_concat(url, **args):
    """Return `url` with the keyword arguments appended as query parameters.

    Existing query parameters in `url` are preserved (including blank
    values, via keep_blank_values=True); new ones are appended after them.

    :param str url: base URL, possibly already carrying a query string.
    :param args: query parameters to append.
    :rtype: str
    """
    # Fix: `args` comes from **kwargs, so it is always a dict -- the old
    # `args is None` early-return and the non-dict TypeError branch were
    # unreachable dead code and have been removed.
    parsed_url = urlparse(url)
    parsed_query = parse_qsl(parsed_url.query, keep_blank_values=True)
    parsed_query.extend(args.items())
    return urlunparse(
        (
            parsed_url.scheme,
            parsed_url.netloc,
            parsed_url.path,
            parsed_url.params,
            urlencode(parsed_query),
            parsed_url.fragment,
        )
    )
async def urlfetch(url="", headers=None, params=None, payload=None,
                   method="GET", loop=None):
    """Fetch `url` and return the response body decoded as ISO-8859-1.

    :param url: request URL; an empty URL short-circuits and returns None.
    :param headers: extra request headers.  DEFAULT_HEADERS still win on
        conflicts, matching the original `update` order.
    :param params: query parameters passed to aiohttp.
    :param payload: request body passed to aiohttp as `data`.
    :param method: HTTP method name, resolved to an aiohttp session method.
    :param loop: event loop forwarded to aiohttp.ClientSession.
    """
    if not url:
        return
    # Fix: the old signature used mutable default arguments
    # (headers={}, params={}, payload={}) and mutated the caller's dict in
    # place via headers.update(DEFAULT_HEADERS).  Build a fresh dict instead.
    merged_headers = dict(headers) if headers else {}
    merged_headers.update(DEFAULT_HEADERS)
    if params is None:
        params = {}
    if payload is None:
        payload = {}
    async with aiohttp.ClientSession(loop=loop, headers=merged_headers) as session:
        requester = getattr(session, method.lower())
        async with requester(
            url, params=params, data=payload, allow_redirects=True
        ) as resp:
            return await resp.text(encoding="ISO-8859-1")
| {
"repo_name": "sourcepirate/data-style",
"path": "data/requests.py",
"copies": "1",
"size": "1488",
"license": "bsd-2-clause",
"hash": 8997965741691401000,
"line_mean": 28.1764705882,
"line_max": 87,
"alpha_frac": 0.6001344086,
"autogenerated": false,
"ratio": 4.05449591280654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022281639928698754,
"num_lines": 51
} |
"""Asynchronous nng socket interface."""
__all__ = [
'Context',
'Socket',
]
import ctypes
from g1.asyncs import kernels
from g1.asyncs.bases import locks
from g1.bases.assertions import ASSERT
from . import _nng
from . import bases
from . import errors
from . import messages
class Dialer(bases.DialerBase):
    """Dialer whose start is non-blocking."""

    def start(self):
        # NNG_FLAG_NONBLOCK makes nng establish (and retry) the connection
        # in the background instead of failing fast.
        self._start(flags=_nng.nng_flag_enum.NNG_FLAG_NONBLOCK)
class Socket(bases.SocketBase):
    """nng socket exposing awaitable send/recv coroutines."""

    _dialer_type = Dialer

    def dial(self, url, *, create_only=False):
        """Dial `url` without blocking (nng retries in the background)."""
        nonblock = _nng.nng_flag_enum.NNG_FLAG_NONBLOCK
        return self._dial(url, flags=nonblock, create_only=create_only)

    async def send(self, data):
        """Send a bytes payload, wrapped in an nng message."""
        with messages.Message(ASSERT.isinstance(data, bytes)) as message:
            return await self.sendmsg(message)

    async def recv(self):
        """Receive a message and return a copy of its body as bytes."""
        with await self.recvmsg() as message:
            return message.body.copy()

    async def sendmsg(self, message):
        """Send an nng message object over this socket."""
        return await AioSender(message).run(self._handle)

    async def recvmsg(self):
        """Receive and return an nng message object (caller owns it)."""
        return await AioReceiver().run(self._handle)
class Context(bases.ContextBase):
    """nng context exposing awaitable send/recv coroutines."""

    async def send(self, data):
        """Send a bytes payload, wrapped in an nng message."""
        with messages.Message(ASSERT.isinstance(data, bytes)) as message:
            return await self.sendmsg(message)

    async def recv(self):
        """Receive a message and return a copy of its body as bytes."""
        with await self.recvmsg() as message:
            return message.body.copy()

    async def sendmsg(self, message):
        """Send an nng message object through this context."""
        return await ContextSender(message).run(self._handle)

    async def recvmsg(self):
        """Receive and return an nng message object (caller owns it)."""
        return await ContextReceiver().run(self._handle)
class AsyncTransceiverBase:
    """Template for a one-shot async operation driven through an nng AIO.

    ``run`` owns the full AIO lifecycle: allocate, start the operation
    (``transceive``), wait for completion, check the result, extract it
    (``make_result``) and release everything (``cleanup`` + free).  The
    statement order in ``run`` is deliberate -- do not reorder.
    """

    async def run(self, handle):
        # Completion is signalled back onto our kernel: the nng callback
        # fires on an nng worker thread and posts ``event.set`` to the
        # kernel, where this coroutine is awaiting the event.
        event = locks.Event()
        kernel = ASSERT.not_none(kernels.get_kernel())
        callback = _nng.nng_aio_callback(
            lambda _: kernel.post_callback(event.set)
        )
        aio_p = _nng.nng_aio_p()
        errors.check(_nng.F.nng_aio_alloc(ctypes.byref(aio_p), callback, None))
        try:
            # Strangely, the default is not ``NNG_DURATION_DEFAULT`` but
            # ``NNG_DURATION_INFINITE``; let's make default the default.
            _nng.F.nng_aio_set_timeout(aio_p, _nng.NNG_DURATION_DEFAULT)
            self.transceive(handle, aio_p)
            try:
                await event.wait()
            except BaseException:
                # Cancelled or errored while waiting: ask nng to abort the
                # in-flight operation before re-raising.
                _nng.F.nng_aio_cancel(aio_p)
                raise
            errors.check(_nng.F.nng_aio_result(aio_p))
            return self.make_result(aio_p)
        finally:
            # Call ``nng_aio_wait`` to ensure that AIO is completed and
            # we may safely read its result or free it (in case we are
            # here due to an exception).
            _nng.F.nng_aio_wait(aio_p)
            self.cleanup(aio_p)
            _nng.F.nng_aio_free(aio_p)

    def transceive(self, handle, aio_p):
        # Subclass hook: start the send/receive on ``aio_p``.
        raise NotImplementedError

    def make_result(self, aio_p):
        # Subclass hook: extract the operation's result from ``aio_p``.
        raise NotImplementedError

    def cleanup(self, aio_p):
        # Subclass hook: release per-operation state before the AIO is freed.
        raise NotImplementedError
class AioSender(AsyncTransceiverBase):
    """One-shot async send over a socket handle via an nng AIO."""

    def __init__(self, message):
        super().__init__()
        self.__message = message

    def transceive(self, handle, aio_p):
        # Attach the message to the AIO, then kick off the socket send.
        _nng.F.nng_aio_set_msg(aio_p, self.__message._get())
        _nng.F.nng_send_aio(handle, aio_p)

    def make_result(self, aio_p):
        # A successful send produces no result payload.
        return None

    def cleanup(self, aio_p):
        # nng takes ownership of the message only when the send succeeded.
        if _nng.F.nng_aio_result(aio_p) == 0:
            self.__message.disown()
class AioReceiver(AsyncTransceiverBase):
    """One-shot async receive over a socket handle via an nng AIO."""

    def transceive(self, handle, aio_p):
        _nng.F.nng_recv_aio(handle, aio_p)

    def make_result(self, aio_p):
        # Wrap the received message pointer in a Message object.
        return messages.Message(msg_p=_nng.F.nng_aio_get_msg(aio_p))

    def cleanup(self, aio_p):
        # No per-operation state to release here.
        pass
class ContextSender(AsyncTransceiverBase):
    """One-shot async send through an nng context via an nng AIO."""

    def __init__(self, message):
        super().__init__()
        self.__message = message

    def transceive(self, handle, aio_p):
        # Attach the message to the AIO, then kick off the context send.
        _nng.F.nng_aio_set_msg(aio_p, self.__message._get())
        _nng.F.nng_ctx_send(handle, aio_p)

    def make_result(self, aio_p):
        # A successful send produces no result payload.
        return None

    def cleanup(self, aio_p):
        # nng takes ownership of the message only when the send succeeded.
        if _nng.F.nng_aio_result(aio_p) == 0:
            self.__message.disown()
class ContextReceiver(AsyncTransceiverBase):
    """One-shot async receive through an nng context via an nng AIO."""

    def transceive(self, handle, aio_p):
        _nng.F.nng_ctx_recv(handle, aio_p)

    def make_result(self, aio_p):
        # Wrap the received message pointer in a Message object.
        return messages.Message(msg_p=_nng.F.nng_aio_get_msg(aio_p))

    def cleanup(self, aio_p):
        # No per-operation state to release here.
        pass
| {
"repo_name": "clchiou/garage",
"path": "py/g1/third-party/nng/nng/asyncs.py",
"copies": "1",
"size": "4565",
"license": "mit",
"hash": -3988898277664032000,
"line_mean": 24.6460674157,
"line_max": 79,
"alpha_frac": 0.6059145674,
"autogenerated": false,
"ratio": 3.2794540229885056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43853685903885053,
"avg_score": null,
"num_lines": null
} |
# Asynchronous pipe example using chained Popen
import sys, subprocess, traceback, platform
import asyncoro
import asyncoro.asyncfile
def writer(apipe, inp, coro=None):
    """Feed the lines of file `inp` into the pipe's stdin, then close stdin.

    Runs as an asyncoro coroutine: each ``yield`` hands the pending write
    to the scheduler.
    """
    # Fix: the file object was previously opened and never closed (resource
    # leak); a `with` block guarantees it is closed even on error.
    with open(inp) as fd:
        for line in fd:
            yield apipe.stdin.write(line.encode())
    apipe.stdin.close()
def line_reader(apipe, coro=None):
    """Print decoded lines read from the async pipe.

    Runs as an asyncoro coroutine; the number of lines read is delivered
    to the joiner as the coroutine's result value.
    """
    nlines = 0
    while True:
        try:
            line = yield apipe.readline()
        except:
            # Best-effort: log the failure and stop reading.  (Bare except
            # kept deliberately -- asyncoro may deliver arbitrary errors.)
            asyncoro.logger.debug('read failed')
            asyncoro.logger.debug(traceback.format_exc())
            break
        if not line:
            break
        # Fix: the empty EOF read used to be counted too, inflating the
        # reported line count by one.
        nlines += 1
        print(line.decode())
    # Fix: ``raise StopIteration(nlines)`` inside a generator is converted
    # to RuntimeError under PEP 479 (Python 3.7+); ``return`` delivers the
    # value via StopIteration the same way, safely.
    return nlines
# asyncoro.logger.setLevel(asyncoro.Logger.DEBUG)
# Equivalent of ``grep -i error <input> | wc`` built from two chained
# processes, fed and drained by the writer/line_reader coroutines above.
if platform.system() == 'Windows':
    # asyncfile.Popen must be used instead of subprocess.Popen
    p1 = asyncoro.asyncfile.Popen([r'\cygwin64\bin\grep.exe', '-i', 'error'],
                                  stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    p2 = asyncoro.asyncfile.Popen([r'\cygwin64\bin\wc.exe'], stdin=p1.stdout, stdout=subprocess.PIPE)
    async_pipe = asyncoro.asyncfile.AsyncPipe(p1, p2)
    asyncoro.Coro(writer, async_pipe, r'\tmp\grep.inp')
    asyncoro.Coro(line_reader, async_pipe)
else:
    p1 = subprocess.Popen(['grep', '-i', 'error'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    p2 = subprocess.Popen(['wc'], stdin=p1.stdout, stdout=subprocess.PIPE)
    async_pipe = asyncoro.asyncfile.AsyncPipe(p1, p2)
    asyncoro.Coro(writer, async_pipe, '/var/log/syslog')
    asyncoro.Coro(line_reader, async_pipe)
# alternate example:
# p1 = subprocess.Popen(['tail', '-f', '/var/log/kern.log'], stdin=None, stdout=subprocess.PIPE)
# p2 = subprocess.Popen(['grep', '--line-buffered', '-i', 'error'],
#                       stdin=p1.stdout, stdout=subprocess.PIPE)
# async_pipe = asyncoro.asyncfile.AsyncPipe(p2)
# asyncoro.Coro(line_reader, async_pipe)
| {
"repo_name": "pgiri/asyncoro",
"path": "examples/pipe_grep.py",
"copies": "1",
"size": "1985",
"license": "mit",
"hash": 3178352326215165400,
"line_mean": 36.4528301887,
"line_max": 101,
"alpha_frac": 0.6377833753,
"autogenerated": false,
"ratio": 3.2540983606557377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9352755373632083,
"avg_score": 0.0078252724647309,
"num_lines": 53
} |
# Asynchronous pipe example with "communicate" method that is similar
# to Popen's "communicate". Same example is used to show how custom
# write/read processes can be provided to feed / read from the
# asynchronous pipe
# argv[1] must be a text file
import sys, os, traceback, subprocess, platform
import asyncoro
import asyncoro.asyncfile
def communicate(input, coro=None):
    """Hash the file named by `input` with sha1sum via AsyncPipe.communicate.

    Runs as an asyncoro coroutine (note the ``yield``).
    """
    if platform.system() == 'Windows':
        # asyncfile.Popen must be used instead of subprocess.Popen
        pipe = asyncoro.asyncfile.Popen([r'\cygwin64\bin\sha1sum.exe'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    else:
        pipe = subprocess.Popen(['sha1sum'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # convert pipe to asynchronous version
    async_pipe = asyncoro.asyncfile.AsyncPipe(pipe)
    # 'communicate' takes either the data or file descriptor with data
    # (if file is too large to read in full) as input
    # NOTE(review): the file object opened here is never closed explicitly;
    # presumably AsyncPipe.communicate takes ownership -- confirm.
    input = open(input)
    stdout, stderr = yield async_pipe.communicate(input)
    print('communicate sha1sum: %s' % stdout)
def custom_feeder(input, coro=None):
    """Hash the file named by `input` with sha1sum, using hand-written
    writer/reader coroutines instead of AsyncPipe.communicate.
    """

    def write_proc(fin, pipe, coro=None):
        # Feed the pipe in 8 KB chunks; full=True makes the write yield
        # until the whole chunk has been accepted.
        while True:
            data = yield os.read(fin.fileno(), 8*1024)
            if not data:
                break
            n = yield pipe.write(data, full=True)
            assert n == len(data)
        fin.close()
        pipe.stdin.close()

    def read_proc(pipe, coro=None):
        # output from sha1sum is small, so read until EOF
        data = yield pipe.stdout.read()
        pipe.stdout.close()
        raise StopIteration(data)

    if platform.system() == 'Windows':
        # asyncfile.Popen must be used instead of subprocess.Popen
        pipe = asyncoro.asyncfile.Popen([r'\cygwin64\bin\sha1sum.exe'],
                                        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    else:
        pipe = subprocess.Popen(['sha1sum'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    async_pipe = asyncoro.asyncfile.AsyncPipe(pipe)
    reader = asyncoro.Coro(read_proc, async_pipe)
    writer = asyncoro.Coro(write_proc, open(input), async_pipe)
    stdout = yield reader.finish()
    print(' feeder sha1sum: %s' % stdout)
# asyncoro.logger.setLevel(asyncoro.Logger.DEBUG)
# simpler version using 'communicate'
# Default to hashing this script itself when no input file is given.
coro = asyncoro.Coro(communicate, sys.argv[1] if len(sys.argv) > 1 else sys.argv[0])
coro.value() # wait for it to finish
# alternate version with custom read and write processes
asyncoro.Coro(custom_feeder, sys.argv[1] if len(sys.argv) > 1 else sys.argv[0])
| {
"repo_name": "pgiri/asyncoro",
"path": "examples/pipe_csum.py",
"copies": "1",
"size": "2553",
"license": "mit",
"hash": -2837535440552661500,
"line_mean": 39.5238095238,
"line_max": 118,
"alpha_frac": 0.6705836271,
"autogenerated": false,
"ratio": 3.565642458100559,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47362260852005583,
"avg_score": null,
"num_lines": null
} |
"""Asynchronous request engine."""
__author__ = 'vovanec@gmail.com'
from tornado import curl_httpclient
from tornado import gen
from tornado import httpclient
from .base import BaseRequestEngine
from .errors import ClientError
from .errors import CommunicationError
from .errors import MalformedResponse
from .errors import ServerError
# Configure async engine to use CURL client whenever possible.
# NOTE: module-level side effect -- this changes the default for every
# AsyncHTTPClient created after this module is imported.
httpclient.AsyncHTTPClient.configure(curl_httpclient.CurlAsyncHTTPClient)
class AsyncRequestEngine(BaseRequestEngine):
    """Asynchronous request engine.

    Uses Tornado asynchronous client to make HTTP requests.
    """

    def __init__(self, api_base_url, connect_timeout, request_timeout,
                 conn_retries, username=None, password=None,
                 client_cert=None, client_key=None, verify_cert=True,
                 ca_certs=None):
        """Constructor.

        :param str api_base_url: API base URL.
        :param int connect_timeout: connection timeout.
        :param int request_timeout: request timeout.
        :param int|None conn_retries: The number of retries on connection
               error. If None - no retries.
        :param str|None username: auth username.
        :param str|None password: auth password.
        :param str|None client_cert: client certificate.
        :param str|None client_key: client key.
        :param bool verify_cert: whether to verify server cert.
        :param str|None ca_certs: path to CA certificate chain.
        """
        super().__init__(
            api_base_url, connect_timeout, request_timeout, conn_retries,
            username=username, password=password,
            client_cert=client_cert, client_key=client_key,
            verify_cert=verify_cert, ca_certs=ca_certs)
        self._client = httpclient.AsyncHTTPClient()

    def _request(self, url, *,
                 method='GET', headers=None, data=None, result_callback=None):
        """Perform asynchronous request.

        :param str url: request URL.
        :param str method: request method.
        :param dict headers: request headers.
        :param object data: JSON-encodable object.
        :param object -> object result_callback: result callback.
        :rtype: dict
        :raise: APIError
        """
        # NOTE(review): this is a yield-based Tornado coroutine but carries
        # no visible @gen.coroutine decorator here -- presumably the base
        # class or the caller wraps it; confirm.
        request = self._prepare_request(url, method, headers, data)
        retries_left = self._conn_retries
        while True:
            try:
                response = yield self._client.fetch(request)
                try:
                    if result_callback:
                        return result_callback(response.body)
                except (ValueError, TypeError) as err:
                    raise MalformedResponse(err) from None
                return response.body
            except httpclient.HTTPError as err:
                resp_body = err.response.body \
                    if err.response is not None else None
                # 599 is Tornado's synthetic code for connection-level
                # failures (timeout, refused) where no HTTP response exists.
                if err.code == 599:
                    if self._conn_retries is None or retries_left <= 0:
                        raise CommunicationError(err) from None
                    else:
                        retries_left -= 1
                        # Linearly increasing backoff: 2s, 4s, 6s, ...
                        retry_in = (self._conn_retries - retries_left) * 2
                        self._log.warning('Server communication error: %s. '
                                          'Retrying in %s seconds.', err,
                                          retry_in)
                        yield gen.sleep(retry_in)
                        continue
                elif 400 <= err.code < 500:
                    raise ClientError(err.code, resp_body) from None
                raise ServerError(err.code, resp_body) from None

    def _prepare_request(self, url, method, headers, data):
        """Prepare HTTP request.

        :param str url: request URL.
        :param str method: request method.
        :param dict headers: request headers.
        :param object data: JSON-encodable object.
        :rtype: httpclient.HTTPRequest
        """
        request = httpclient.HTTPRequest(
            url=url, method=method, headers=headers, body=data,
            connect_timeout=self._connect_timeout,
            request_timeout=self._request_timeout,
            auth_username=self._username, auth_password=self._password,
            client_cert=self._client_cert, client_key=self._client_key,
            ca_certs=self._ca_certs, validate_cert=self._verify_cert)
        return request
| {
"repo_name": "vovanec/httputil",
"path": "httputil/request_engines/async.py",
"copies": "1",
"size": "4479",
"license": "mit",
"hash": 6376839361730604000,
"line_mean": 35.1209677419,
"line_max": 78,
"alpha_frac": 0.5916499219,
"autogenerated": false,
"ratio": 4.6080246913580245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 124
} |
"""Asynchronous request parser. Compatible with Python>=3.5."""
import asyncio
import functools
import inspect
import typing
from collections.abc import Mapping
from marshmallow import Schema, ValidationError
from marshmallow.fields import Field
import marshmallow as ma
from marshmallow.utils import missing
from webargs import core
# Type aliases shared by the parser methods below.
Request = typing.TypeVar("Request")
ArgMap = typing.Union[Schema, typing.Mapping[str, Field]]
Validate = typing.Union[typing.Callable, typing.Iterable[typing.Callable]]
class AsyncParser(core.Parser):
    """Asynchronous variant of `webargs.core.Parser`, where parsing methods may be
    either coroutines or regular methods.
    """

    async def _parse_request(
        self, schema: Schema, req: Request, locations: typing.Iterable
    ) -> typing.Union[dict, list]:
        # Returns a list when schema.many is set (bulk JSON), a dict otherwise.
        if schema.many:
            assert (
                "json" in locations
            ), "schema.many=True is only supported for JSON location"
            # The ad hoc Nested field is more like a workaround or a helper,
            # and it serves its purpose fine. However, if somebody has a desire
            # to re-design the support of bulk-type arguments, go ahead.
            parsed = await self.parse_arg(
                name="json",
                field=ma.fields.Nested(schema, many=True),
                req=req,
                locations=locations,
            )
            if parsed is missing:
                parsed = []
        else:
            argdict = schema.fields
            parsed = {}
            for argname, field_obj in argdict.items():
                if core.MARSHMALLOW_VERSION_INFO[0] < 3:
                    parsed_value = await self.parse_arg(
                        argname, field_obj, req, locations
                    )
                    # If load_from is specified on the field, try to parse from that key
                    if parsed_value is missing and field_obj.load_from:
                        parsed_value = await self.parse_arg(
                            field_obj.load_from, field_obj, req, locations
                        )
                        argname = field_obj.load_from
                else:
                    # marshmallow 3 renamed load_from to data_key.
                    argname = field_obj.data_key or argname
                    parsed_value = await self.parse_arg(
                        argname, field_obj, req, locations
                    )
                if parsed_value is not missing:
                    parsed[argname] = parsed_value
        return parsed

    # TODO: Lots of duplication from core.Parser here. Rethink.
    async def parse(
        self,
        argmap: ArgMap,
        req: Request = None,
        locations: typing.Iterable = None,
        validate: Validate = None,
        error_status_code: typing.Union[int, None] = None,
        error_headers: typing.Union[typing.Mapping[str, str], None] = None,
    ) -> typing.Union[typing.Mapping, None]:
        """Coroutine variant of `webargs.core.Parser`.

        Receives the same arguments as `webargs.core.Parser.parse`.
        """
        self.clear_cache()  # in case someone used `parse_*()`
        req = req if req is not None else self.get_default_request()
        assert req is not None, "Must pass req object"
        data = None
        validators = core._ensure_list_of_callables(validate)
        schema = self._get_schema(argmap, req)
        try:
            parsed = await self._parse_request(
                schema=schema, req=req, locations=locations or self.locations
            )
            result = schema.load(parsed)
            data = result.data if core.MARSHMALLOW_VERSION_INFO[0] < 3 else result
            self._validate_arguments(data, validators)
        except ma.exceptions.ValidationError as error:
            # NOTE: if the error handler does not raise, `data` stays None.
            await self._on_validation_error(
                error, req, schema, error_status_code, error_headers
            )
        return data

    async def _on_validation_error(
        self,
        error: ValidationError,
        req: Request,
        schema: Schema,
        error_status_code: typing.Union[int, None],
        error_headers: typing.Union[typing.Mapping[str, str], None] = None,
    ) -> None:
        # Prefer a user-registered error callback over the default handler.
        error_handler = self.error_callback or self.handle_error
        await error_handler(error, req, schema, error_status_code, error_headers)

    def use_args(
        self,
        argmap: ArgMap,
        req: typing.Optional[Request] = None,
        locations: typing.Iterable = None,
        as_kwargs: bool = False,
        validate: Validate = None,
        error_status_code: typing.Optional[int] = None,
        error_headers: typing.Union[typing.Mapping[str, str], None] = None,
    ) -> typing.Callable[..., typing.Callable]:
        """Decorator that injects parsed arguments into a view function or method.

        Receives the same arguments as `webargs.core.Parser.use_args`.
        """
        locations = locations or self.locations
        request_obj = req
        # Optimization: If argmap is passed as a dictionary, we only need
        # to generate a Schema once
        if isinstance(argmap, Mapping):
            argmap = core.dict2schema(argmap, self.schema_class)()

        def decorator(func: typing.Callable) -> typing.Callable:
            req_ = request_obj
            if inspect.iscoroutinefunction(func):

                @functools.wraps(func)
                async def wrapper(*args, **kwargs):
                    req_obj = req_
                    if not req_obj:
                        req_obj = self.get_request_from_view_args(func, args, kwargs)
                    # NOTE: At this point, argmap may be a Schema, callable, or dict
                    parsed_args = await self.parse(
                        argmap,
                        req=req_obj,
                        locations=locations,
                        validate=validate,
                        error_status_code=error_status_code,
                        error_headers=error_headers,
                    )
                    if as_kwargs:
                        kwargs.update(parsed_args or {})
                        return await func(*args, **kwargs)
                    else:
                        # Add parsed_args after other positional arguments
                        new_args = args + (parsed_args,)
                        return await func(*new_args, **kwargs)

            else:

                @functools.wraps(func)  # type: ignore
                def wrapper(*args, **kwargs):
                    req_obj = req_
                    if not req_obj:
                        req_obj = self.get_request_from_view_args(func, args, kwargs)
                    # NOTE: At this point, argmap may be a Schema, callable, or dict
                    parsed_args = yield from self.parse(  # type: ignore
                        argmap,
                        req=req_obj,
                        locations=locations,
                        validate=validate,
                        error_status_code=error_status_code,
                        error_headers=error_headers,
                    )
                    if as_kwargs:
                        # NOTE(review): unlike the async branch above, there
                        # is no `or {}` guard here; parse() may return None.
                        kwargs.update(parsed_args)
                        return func(*args, **kwargs)  # noqa: B901
                    else:
                        # Add parsed_args after other positional arguments
                        new_args = args + (parsed_args,)
                        return func(*new_args, **kwargs)

            return wrapper

        return decorator

    def use_kwargs(self, *args, **kwargs) -> typing.Callable:
        """Decorator that injects parsed arguments into a view function or method.

        Receives the same arguments as `webargs.core.Parser.use_kwargs`.
        """
        return super().use_kwargs(*args, **kwargs)

    async def parse_arg(
        self, name: str, field: Field, req: Request, locations: typing.Iterable = None
    ) -> typing.Any:
        # A per-field "location" in the metadata overrides the parser-level
        # list of locations to search.
        location = field.metadata.get("location")
        if location:
            locations_to_check = self._validated_locations([location])
        else:
            locations_to_check = self._validated_locations(locations or self.locations)
        for location in locations_to_check:
            value = await self._get_value(name, field, req=req, location=location)
            # Found the value; validate and return it
            if value is not core.missing:
                return value
        return core.missing

    async def _get_value(
        self, name: str, argobj: Field, req: Request, location: str
    ) -> typing.Any:
        # Location handlers may be plain functions or coroutines; await
        # only the latter.
        function = self._get_handler(location)
        if asyncio.iscoroutinefunction(function):
            value = await function(req, name, argobj)
        else:
            value = function(req, name, argobj)
        return value
| {
"repo_name": "sloria/webargs",
"path": "src/webargs/asyncparser.py",
"copies": "1",
"size": "8815",
"license": "mit",
"hash": 3099429554414004000,
"line_mean": 39.0681818182,
"line_max": 88,
"alpha_frac": 0.5513329552,
"autogenerated": false,
"ratio": 4.532133676092545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007530037946843659,
"num_lines": 220
} |
'''
Generate the errno module by extracting the values from CPython.
Better windows error messages could be derived by parsing:
http://www.sockets.com/err_lst1.htm#DetailedErrorDescriptions
http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winsock/errors_2036.asp
'''
import os
if os.name == 'java':
raise ImportError('this script must be run by CPython')
import errno, sys
# Hand-written overrides for codes whose os.strerror() text is unhelpful
# on the generating platform (these are Winsock-range codes).
error_messages = {
    10035 : "The socket operation could not complete without blocking",
    10022 : "Invalid argument",
}
def gen_python(fp=None):
    # NOTE: Python 2 only -- relies on the ``print >> fp`` statement form
    # (fp=None writes to stdout).  Emits a Python module mirroring
    # CPython's errno constants plus the ``errorcode`` reverse mapping.
    print >> fp, "'''"
    print >> fp, "This file was autogenerated using:"
    print >> fp, " Python %s" % (" ".join(sys.version.split()))
    print >> fp, "'''"
    print >> fp, ""
    for a in errno.errorcode.keys():
        #if a < 1000:
        print >> fp, "%-25s = %d" % (errno.errorcode[a], a)
        if hasattr(fp, "flush"): fp.flush()
    print >> fp, ""
    print >> fp, "errorcode = {}"
    for a in errno.errorcode.keys():
        #if a < 1000:
        print >> fp, "errorcode[%s] = '%s'" % (errno.errorcode[a], errno.errorcode[a])
        if hasattr(fp, "flush"): fp.flush()
def gen_java(fp=None):
    # NOTE: Python 2 only -- uses ``print >>`` statements and the removed
    # ``cmp`` comparator idiom.  Emits a Java source file exposing the same
    # errno constants to Jython.
    print >> fp, "package org.python.modules;"
    print >> fp, ""
    print >> fp, "import org.python.core.*;"
    print >> fp, ""
    print >> fp, "/**"
    print >> fp, " * This file contains autogenerated error codes from:<br/>"
    print >> fp, " * <b>Python %s</b>" % (" ".join(sys.version.split()))
    print >> fp, " *"
    print >> fp, " * @author brian zimmer"
    print >> fp, " * @version %s" % (".".join(map(lambda x: str(x), sys.version_info[:3])))
    print >> fp, " * @copyright 2002 brian zimmer"
    print >> fp, " */"
    print >> fp, "public final class errno implements ClassDictInit {"
    print >> fp, ""
    print >> fp, " private errno() {}"
    print >> fp, ""
    # Collect all E* constants and sort them by numeric code.
    errors = [(e, getattr(errno, e)) for e in dir(errno) if e[0] == 'E']
    errors.sort(lambda x, y: cmp(x[1], y[1]))
    for name, code in errors:
        # Codes above 1000 are skipped in the generated Java module.
        if code > 1000: continue
        print >> fp, " public static final int %-25s = %d;" % (name, code)
        if hasattr(fp, "flush"): fp.flush()
    print >> fp, ""
    print >> fp, " public static final PyObject errorcode = new PyDictionary();"
    print >> fp, " private static final PyObject strerror = new PyDictionary();"
    print >> fp, ""
    print >> fp, " public static void classDictInit(PyObject dict) throws PyIgnoreMethodTag {"
    for name, code in errors:
        if code > 1000: continue
        msg = error_messages.get(code, os.strerror(code));
        print >> fp, ' addcode(dict, %s, "%s", "%s");' % (name, name, msg)
        if hasattr(fp, "flush"): fp.flush()
    print >> fp, ""
    print >> fp, " }"
    print >> fp, ""
    print >> fp, " public static PyObject strerror(PyObject error) {"
    print >> fp, " return strerror.__getitem__(error);"
    print >> fp, " }"
    print >> fp, ""
    print >> fp, " private static void addcode(PyObject dict, int errno,"
    print >> fp, " String err, String msg) {"
    print >> fp, " PyObject errno_o = Py.newInteger(errno);"
    print >> fp, " PyObject err_o = Py.newString(err);"
    print >> fp, " strerror.__setitem__(errno_o, Py.newString(msg));"
    print >> fp, " errorcode.__setitem__(errno_o, err_o);"
    print >> fp, " dict.__setitem__(err_o, errno_o);"
    print >> fp, " }"
    print >> fp, "}"
if __name__ == '__main__':
    # fp=None makes the ``print >>`` statements write to stdout.
    gen_java(None)
| {
"repo_name": "nelmiux/CarnotKE",
"path": "jyhton/Misc/make_errno.py",
"copies": "7",
"size": "3525",
"license": "apache-2.0",
"hash": -8992240812756177000,
"line_mean": 33.900990099,
"line_max": 95,
"alpha_frac": 0.5815602837,
"autogenerated": false,
"ratio": 3.260869565217391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7342429848917391,
"avg_score": null,
"num_lines": null
} |
"""Asynchronous support for garage.threads.actors."""
__all__ = [
'StubAdapter',
]
from garage.asyncs import futures
class StubAdapter:
    """Adapt a threads-based actor stub for use from async code.

    Every public method call on the wrapped stub has its returned future
    wrapped in a FutureAdapter; underscore-prefixed attributes are passed
    through untouched.  Simple and foolproof, though not covering every
    corner case.
    """

    def __init__(self, stub):
        self._stub = stub

    def __getattr__(self, name):
        attr = getattr(self._stub, name)
        # Simple foolproof detection of non-message-sending access.
        if name.startswith('_'):
            return attr

        def call(*args, **kwargs):
            return futures.FutureAdapter(attr(*args, **kwargs))

        return call

    def _get_future(self):
        return futures.FutureAdapter(self._stub._get_future())

    def _send_message(self, func, args, kwargs):
        """Enqueue a message into the actor's queue without blocking.

        May raise Full when the message queue is full.
        """
        future = self._stub._send_message(func, args, kwargs, block=False)
        return futures.FutureAdapter(future)

    async def _kill_and_join(self, graceful=True):
        self._kill(graceful=graceful)
        await self._get_future().result()
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/asyncs/actors.py",
"copies": "1",
"size": "1247",
"license": "mit",
"hash": -8308930582010044000,
"line_mean": 28.6904761905,
"line_max": 74,
"alpha_frac": 0.6311146752,
"autogenerated": false,
"ratio": 4.061889250814332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 42
} |
"""Asynchronous tasks."""
import snowfloat.request
import snowfloat.result
class Task(object):
    """Asynchronous task sent to the server.

    Attributes:
        uuid (str): Task UUID.
        uri (str): Task URI.
        operation (str): Task operation: map.
        task_filter (dict): Query filter.
        spatial (dict): Query spatial operation.
        state (str): Task state: running, succeed...
        extras (dict): Optional task parameters.
        reason (str): Task error reason.
        date_created (str): Creation date in ISO format.
        date_modified (str): Modification date in ISO format.
    """
    uuid = None
    uri = None
    operation = None
    # Class-level dict defaults kept for interface compatibility; fresh
    # per-instance copies are created in __init__ (see below).
    task_filter = {}
    spatial = {}
    state = None
    extras = {}
    reason = None
    date_created = None
    date_modified = None

    def __init__(self, **kwargs):
        # Bug fix: the class-level dicts above were shared by every
        # instance that did not receive the attribute as a kwarg, so a
        # mutation on one Task leaked into all others.  Give each instance
        # its own containers before applying the kwargs.
        self.task_filter = {}
        self.spatial = {}
        self.extras = {}
        for key, val in kwargs.items():
            # getattr validates the attribute name: unknown keyword
            # arguments raise AttributeError, as before.
            getattr(self, key)
            setattr(self, key, val)

    def get_results(self):
        """Returns the task results.

        Returns:
            generator. Yields Result objects.
        """
        uri = '%s/results' % (self.uri)
        data = {}
        for res in snowfloat.request.get(uri, data):
            # convert list of json results to Result objects
            results = snowfloat.result.parse_results(res['results'])
            for result in results:
                yield result

    def __str__(self):
        return '%s: uuid=%s, uri=%s, ' \
            'date_created=%s, date_modified=%s, ' \
            'operation=%s, ' \
            'task_filter=%s, spatial=%s, ' \
            'state=%s, extras=%s, ' \
            'reason=%s' \
            % (self.__class__.__name__,
               self.uuid, self.uri, self.date_created, self.date_modified,
               self.operation,
               self.task_filter, self.spatial,
               self.state, self.extras, self.reason)

    def __repr__(self):
        return '%s(uuid=%r, uri=%r, ' \
            'date_created=%r, date_modified=%r, ' \
            'operation=%r, ' \
            'task_filter=%r, spatial=%r, ' \
            'state=%r, extras=%r, ' \
            'reason=%r)' \
            % (self.__class__.__name__,
               self.uuid, self.uri, self.date_created, self.date_modified,
               self.operation,
               self.task_filter, self.spatial,
               self.state, self.extras, self.reason)
def parse_tasks(tasks):
    """Convert task dictionaries to Task objects.

    Args:
        tasks (list): List of task dictionaries.

    Returns:
        list: List of Task objects.
    """
    fields = ('operation', 'uuid', 'uri', 'task_filter', 'spatial',
              'extras', 'state', 'reason', 'date_created', 'date_modified')
    return [Task(**{name: entry[name] for name in fields})
            for entry in tasks]
| {
"repo_name": "snowfloat/snowfloat-python",
"path": "snowfloat/task.py",
"copies": "1",
"size": "3073",
"license": "bsd-3-clause",
"hash": -1352192416161451300,
"line_mean": 27.9905660377,
"line_max": 74,
"alpha_frac": 0.5056947608,
"autogenerated": false,
"ratio": 4.169606512890095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5175301273690095,
"avg_score": null,
"num_lines": null
} |
'''Asynchronous WSGI_ Remote Procedure Calls middleware. It implements a
JSON-RPC_ server and client. Check out the
:ref:`json-rpc tutorial <tutorials-calculator>` if you want to get started
quickly with a working example.
To quickly setup a server::
class MyRpc(rpc.JSONRPC):
def rpc_ping(self, request):
return 'pong'
class Wsgi(wsgi.LazyWsgi):
def handler(self, environ=None):
app = wsgi.Router('/',
post=MyRpc(),
response_content_types=['application/json'])
return wsgi.WsgiHandler([app])
if __name__ == '__main__':
wsgi.WSGIServer(Wsgi()).start()
* The ``MyRpc`` handles the requests
* Routing is delegated to the :class:`.Router` which handle only ``post``
requests with content type ``application/json``.
API
===========
.. module:: pulsar.apps.rpc.handlers
RpcHandler
~~~~~~~~~~~~~~
.. autoclass:: RpcHandler
:members:
:member-order: bysource
rpc method decorator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: rpc_method
.. module:: pulsar.apps.rpc.jsonrpc
JSON RPC
~~~~~~~~~~~~~~~~
.. autoclass:: JSONRPC
:members:
:member-order: bysource
JsonProxy
~~~~~~~~~~~~~~~~
.. autoclass:: JsonProxy
:members:
:member-order: bysource
.. module:: pulsar.apps.rpc.mixins
Server Commands
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: PulsarServerCommands
:members:
:member-order: bysource
.. _JSON-RPC: http://www.jsonrpc.org/specification
.. _WSGI: http://www.python.org/dev/peps/pep-3333/
'''
from .handlers import * # noqa
from .jsonrpc import * # noqa
from .mixins import * # noqa
| {
"repo_name": "dejlek/pulsar",
"path": "pulsar/apps/rpc/__init__.py",
"copies": "1",
"size": "1683",
"license": "bsd-3-clause",
"hash": 3749502652103921700,
"line_mean": 19.7777777778,
"line_max": 74,
"alpha_frac": 0.5971479501,
"autogenerated": false,
"ratio": 3.7483296213808464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4845477571480846,
"avg_score": null,
"num_lines": null
} |
"""Asynchronous WSGI_ Remote Procedure Calls middleware. It implements a
JSON-RPC_ server and client. Check out the
:ref:`json-rpc tutorial <tutorials-calculator>` if you want to get started
quickly with a working example.
To quickly setup a server::
class MyRpc(rpc.JSONRPC):
def rpc_ping(self, request):
return 'pong'
class Wsgi(wsgi.LazyWsgi):
def handler(self, environ=None):
app = wsgi.Router('/',
post=MyRpc(),
response_content_types=['application/json'])
return wsgi.WsgiHandler([app])
if __name__ == '__main__':
wsgi.WSGIServer(Wsgi()).start()
* The ``MyRpc`` handles the requests
* Routing is delegated to the :class:`.Router` which handles only ``post``
requests with content type ``application/json``.
API
===========
.. module:: pulsar.apps.rpc.handlers
RpcHandler
~~~~~~~~~~~~~~
.. autoclass:: RpcHandler
:members:
:member-order: bysource
rpc method decorator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: rpc_method
.. module:: pulsar.apps.rpc.jsonrpc
JSON RPC
~~~~~~~~~~~~~~~~
.. autoclass:: JSONRPC
:members:
:member-order: bysource
JsonProxy
~~~~~~~~~~~~~~~~
.. autoclass:: JsonProxy
:members:
:member-order: bysource
.. module:: pulsar.apps.rpc.mixins
Server Commands
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: PulsarServerCommands
:members:
:member-order: bysource
.. _JSON-RPC: http://www.jsonrpc.org/specification
.. _WSGI: http://www.python.org/dev/peps/pep-3333/
"""
from .handlers import (
RpcHandler, rpc_method, InvalidRequest, InvalidParams,
NoSuchFunction, InternalError
)
from .jsonrpc import JSONRPC, JsonProxy, JsonBatchProxy
from .mixins import PulsarServerCommands
# Explicit public API of this package, mirroring the names imported above
# from .handlers, .jsonrpc, and .mixins.
__all__ = [
'RpcHandler',
'rpc_method',
'InvalidRequest',
'InvalidParams',
'NoSuchFunction',
'InternalError',
'JSONRPC',
'JsonProxy',
'JsonBatchProxy',
'PulsarServerCommands'
]
| {
"repo_name": "quantmind/pulsar",
"path": "pulsar/apps/rpc/__init__.py",
"copies": "1",
"size": "2013",
"license": "bsd-3-clause",
"hash": -7648460218246820000,
"line_mean": 19.5408163265,
"line_max": 74,
"alpha_frac": 0.6169895678,
"autogenerated": false,
"ratio": 3.7346938775510203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.985168344535102,
"avg_score": 0,
"num_lines": 98
} |
"""Asynchronous WSGI Server/Gateway implementation."""
# Names exported to ``from ... import *`` consumers: the PEP 3333 file
# wrapper and the per-connection session handler.
__all__ = [
'FileWrapper',
'HttpSession',
]
import collections
import enum
import http
import io
import itertools
import logging
import os
import re
import socket
from g1.asyncs.bases import locks
from g1.asyncs.bases import queues
from g1.asyncs.bases import streams
from g1.asyncs.bases import tasks
from g1.asyncs.bases import timers
from g1.bases.assertions import ASSERT
LOG = logging.getLogger(__name__)
class _SessionExit(Exception):
"""Exit a HTTP session (not necessary due to errors)."""
class FileWrapper:
    """PEP 3333 ``wsgi.file_wrapper`` stand-in that owns a file object.

    The ``block_size`` parameter exists only for interface compatibility
    with the spec; this implementation ignores it.
    """

    def __init__(self, file, block_size=8192):
        # Signature dictated by PEP 3333; block_size is intentionally unused.
        del block_size  # Unused.
        self._file = file

    def _transfer(self):
        """Hand the wrapped file over to the caller and forget it."""
        moved = self._file
        self._file = None
        return moved

    def close(self):
        """Close the wrapped file unless ownership was transferred away."""
        wrapped = self._file
        if wrapped is not None:
            wrapped.close()
class HttpSession:
"""Serve one HTTP/1.1 connection: a loop of request/response pairs.

Each request is handled by running the WSGI application and the
response sender as two concurrent tasks that communicate through an
_ApplicationContext.
"""
# TODO: Make these configurable.
_KEEP_ALIVE_IDLE_TIMEOUT = 8
# A session may stay longer even when the number of requests exceeds
# this number if the WSGI application explicitly set Keep-Alive in
# response headers.
#
# TODO: Make these configurable.
_MAX_NUM_REQUESTS_PER_SESSION = 1024
_KEEP_ALIVE = (b'Connection', b'keep-alive')
_NOT_KEEP_ALIVE = (b'Connection', b'close')
# Exception types that end the session loop without being logged as
# crashes.
_EXIT_EXC_TYPES = (
_SessionExit,
socket.timeout,
ConnectionResetError,
BrokenPipeError,
)
def __init__(self, sock, application, base_environ):
self._sock = sock
self._application = application
self._request_queue = _RequestQueue(self._sock, base_environ)
self._response_queue = _ResponseQueue(self._sock)
async def __call__(self):
# Disable Nagle's algorithm so small responses are not delayed.
self._sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
with self._sock:
try:
for num_requests in itertools.count(1):
await self._handle_request(
await self._get_request(),
num_requests < self._MAX_NUM_REQUESTS_PER_SESSION,
)
except self._EXIT_EXC_TYPES as exc:
LOG.debug('exit session due to: %r', exc)
async def _get_request(self):
# Read the next request environ, translating parse errors and
# idle timeouts into session exit.
try:
with timers.timeout_after(self._KEEP_ALIVE_IDLE_TIMEOUT):
environ = await self._request_queue.get()
except _RequestError as exc:
LOG.debug('invalid request: %s %s', exc.status, exc)
await self._put_short_response(exc.status, False)
raise _SessionExit from None
except timers.Timeout:
LOG.debug('keep-alive idle timeout')
raise _SessionExit from None
if environ is None:
raise _SessionExit
return environ
async def _handle_request(self, environ, keep_alive):
# Check if client disables Keep-Alive explicitly.
connection = environ.get('HTTP_CONNECTION')
if connection is not None:
keep_alive = 'keep-alive' in connection.lower()
# At the moment we do not check any expectations (except those
# already in _RequestQueue), and just return HTTP 100 here.
if environ.get('HTTP_EXPECT', '').lower() == '100-continue':
await self._put_short_response(
http.HTTPStatus.CONTINUE, keep_alive
)
if not keep_alive:
raise _SessionExit
return
# Run the WSGI application and the response sender concurrently;
# whichever finishes (or crashes) first is examined below.
context = _ApplicationContext()
async with tasks.joining(
tasks.spawn(self._send_response(context, environ, keep_alive)),
always_cancel=True,
log_error=False, # We handle and log error below.
) as send_task, tasks.joining(
tasks.spawn(self._run_application(context, environ)),
always_cancel=True,
log_error=False, # We handle and log error below.
) as run_task:
async for task in tasks.as_completed([send_task, run_task]):
try:
task.get_result_nonblocking()
except self._EXIT_EXC_TYPES:
raise
except Exception:
if self._response_queue.has_begun():
LOG.exception(
'task crash after response starts sending: %r',
task,
)
raise _SessionExit from None
LOG.warning(
'task crash before response starts sending: %r',
task,
exc_info=True,
)
await self._put_short_response(
http.HTTPStatus.INTERNAL_SERVER_ERROR, keep_alive
)
if not keep_alive:
raise _SessionExit from None
break
async def _run_application(self, context, environ):
# Drive the WSGI application and feed its body into the context.
body = await self._application(environ, context.start_response)
try:
if isinstance(body, FileWrapper):
# TODO: Implement PEP 333's requirement of falling back
# to normal iterable handling loop below when body._file
# is not a regular file.
context.sendfile(body._transfer())
# To unblock _send_response task.
context.end_body_chunks()
return
if hasattr(body, '__aiter__'):
async for chunk in body:
await context.put_body_chunk(chunk)
else:
for chunk in body:
await context.put_body_chunk(chunk)
#
# Only call `end_body_chunks` on success. We do this to fix
# this corner case:
#
# * Let us assume that:
# 1. self._application has not yet called start_response.
# 2. self._application further spawns a handler task that
# will eventually call start_response.
#
# * When `body` iterator errs out, or _run_application task
# gets cancelled, if end_body_chunks is called (which
# should not), then _send_response task is unblocked and
# calls context.commit.
#
# * Eventually, the handler task calls start_response.
# Because context.commit has been called, start_response
# errs out, causing the handler task to err out.
#
# This corner case produces very confusing handler task
# errors, sometimes **lots** of them when the process is
# shutting down and tasks are getting cancelled.
#
# NOTE: This will NOT cause _send_response task being
# blocked on get_body_chunk forever because _handle_request
# cancels _send_response when _run_application errs out.
#
context.end_body_chunks()
finally:
if hasattr(body, 'close'):
body.close()
async def _send_response(self, context, environ, keep_alive):
try:
return await self._do_send_response(context, environ, keep_alive)
except timers.Timeout:
LOG.debug('send/sendfile timeout')
raise _SessionExit from None
finally:
if context.file is not None:
context.file.close()
async def _do_send_response(self, context, environ, keep_alive):
# Start sending status and headers after we receive the first
# chunk so that user has a chance to call start_response again
# to reset status and headers.
chunks = [await context.get_body_chunk()]
context.commit()
has_connection_header = False
content_length = None
for key, value in context.headers:
if key.lower() == b'connection':
has_connection_header = True
keep_alive = b'keep-alive' in value.lower()
if key.lower() == b'content-length':
content_length = int(value)
if not has_connection_header:
context.headers.append(
self._KEEP_ALIVE if keep_alive else self._NOT_KEEP_ALIVE
)
# If the application did not set Content-Length, compute it by
# draining all chunks (or stat-ing the file) before sending.
if content_length is None:
if context.file is None:
while chunks[-1]:
chunks.append(await context.get_body_chunk())
body_size = sum(map(len, chunks))
else:
body_size = os.fstat(context.file.fileno()).st_size
context.headers.append((
b'Content-Length',
b'%d' % body_size,
))
else:
body_size = len(chunks[0])
omit_body = self._should_omit_body(context.status, environ)
if omit_body:
chunks.clear()
await self._response_queue.begin(context.status, context.headers)
# TODO: When body chunks or context.file is actually larger than
# Content-Length provided by the caller, we will still send the
# extra data to the client, and then err out. Maybe,
# alternatively, we should not send the extra data (but still
# err out)?
if context.file is None:
for chunk in chunks:
if not omit_body:
await self._response_queue.put_body_chunk(chunk)
chunks.clear()
while True:
chunk = await context.get_body_chunk()
if not chunk:
break
if not omit_body:
await self._response_queue.put_body_chunk(chunk)
body_size += len(chunk)
else:
if not omit_body:
body_size = await self._response_queue.sendfile(context.file)
self._response_queue.end()
if (
not omit_body and content_length is not None
and content_length != body_size
):
LOG.error(
'Content-Length set to %d but body size is %d: environ=%r',
content_length,
body_size,
environ,
)
raise _SessionExit
if not keep_alive:
raise _SessionExit
@staticmethod
def _should_omit_body(status, environ):
"""Return true if response body should be omitted.
It is omitted for these cases:
* RFC7230: 3.3. 1xx, 204 No Content, 304 Not Modified.
* RFC7231: 6.3.6. 205 Reset Content.
* HEAD method.
"""
return (\
100 <= status < 200 or
status in (
http.HTTPStatus.NO_CONTENT,
http.HTTPStatus.RESET_CONTENT,
http.HTTPStatus.NOT_MODIFIED,
) or
environ.get('REQUEST_METHOD') == 'HEAD'
)
async def _put_short_response(self, status, keep_alive):
# Send a header-only response (used for 100, errors, etc.).
await self._response_queue.begin(
status,
[self._KEEP_ALIVE if keep_alive else self._NOT_KEEP_ALIVE],
)
self._response_queue.end()
class _RequestError(Exception):
"""Raised by _RequestQueue or _RequestBuffer."""
def __init__(self, status, message):
super().__init__(message)
self.status = status
class _RequestQueue:
"""Parse HTTP/1.1 requests from a socket into WSGI environ dicts."""
def __init__(self, sock, base_environ):
self._request_buffer = _RequestBuffer(sock)
self._base_environ = base_environ
# Guard against header floods; see the check in get().
_MAX_NUM_HEADERS = 128
async def get(self):
"""Return the next request or None at the end."""
line = await self._request_buffer.readline_decoded()
if not line:
return None
# WSGI requires that the environ argument must be a built-in
# Python dictionary.
environ = dict(self._base_environ)
self._parse_request_line(line, environ)
headers = collections.defaultdict(list)
while True:
line = await self._request_buffer.readline_decoded()
if line in ('', '\n', '\r\n'):
break
if len(headers) == self._MAX_NUM_HEADERS:
raise _RequestError(
http.HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,
'number of request headers exceeds %d' %
self._MAX_NUM_HEADERS,
)
name, value = self._parse_request_header(line)
if name is not None:
headers[name].append(value)
# Repeated header fields are joined with commas per RFC 7230.
for name, values in headers.items():
environ[name] = ','.join(values)
content_length = environ.get('CONTENT_LENGTH')
if content_length is not None:
try:
content_length = int(content_length, base=10)
except ValueError:
raise _RequestError(
http.HTTPStatus.BAD_REQUEST,
'invalid request Content-Length: %r' % content_length,
) from None
request_body = streams.BytesStream()
if content_length is not None:
# TODO: Set the limit to 64K for now, but we should rewrite
# this to NOT load the entire request body into the memory.
if content_length > 65536:
raise _RequestError(
http.HTTPStatus.BAD_REQUEST,
'Content-Length exceeds limit: %d' % content_length,
)
await self._request_buffer.read_into(request_body, content_length)
request_body.close()
environ['wsgi.input'] = request_body
return environ
# Matches "METHOD PATH HTTP-VERSION" with a trailing newline.
_REQUEST_LINE_PATTERN = re.compile(
r'\s*([^\s]+)\s+([^\s]+)\s+([^\s]+)\s*\r?\n',
re.IGNORECASE,
)
def _parse_request_line(self, line, environ):
# Fill REQUEST_METHOD, PATH_INFO, and QUERY_STRING into environ.
match = self._REQUEST_LINE_PATTERN.fullmatch(line)
if not match:
raise _RequestError(
http.HTTPStatus.BAD_REQUEST,
'invalid request line: %r' % line,
)
method, path, http_version = match.groups()
if http_version.upper() != 'HTTP/1.1':
LOG.debug('request is not HTTP/1.1 but %s', http_version)
environ['REQUEST_METHOD'] = method.upper()
i = path.find('?')
if i < 0:
environ['PATH_INFO'] = path
environ['QUERY_STRING'] = ''
else:
environ['PATH_INFO'] = path[:i]
environ['QUERY_STRING'] = path[i + 1:]
_HEADER_PATTERN = re.compile(r'\s*([^\s]+)\s*:\s*(.*[^\s])\s*\r?\n')
_HEADER_NAME_PATTERN = re.compile(r'[a-zA-Z0-9_-]+')
def _parse_request_header(self, line):
# Return the WSGI-style (NAME, value) pair for one header line,
# or (None, None) when the header name is malformed.
match = self._HEADER_PATTERN.fullmatch(line)
if not match:
raise _RequestError(
http.HTTPStatus.BAD_REQUEST,
'invalid request header: %r' % line,
)
name, value = match.groups()
if not self._HEADER_NAME_PATTERN.fullmatch(name):
LOG.debug('ignore malformed request header: %r', line)
return None, None
# CGI/WSGI convention: upper-case, dashes to underscores, and an
# HTTP_ prefix except for the two content headers.
name = name.upper().replace('-', '_')
if name not in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
name = 'HTTP_' + name
return name, value
class _RequestBuffer:
"""Buffered line/byte reader over a socket for request parsing."""
def __init__(self, sock):
self._sock = sock
# List of received byte chunks not yet consumed; _size is the
# total length of those chunks.
self._buffer = []
self._size = 0
# True once the peer has closed its end (recv returned b'').
self._ended = False
async def readline_decoded(self, limit=65536):
# HTTP/1.1 header bytes are decoded as Latin-1 per the spec.
line = await self._readline(limit=limit)
try:
return line.decode('iso-8859-1')
except UnicodeDecodeError:
raise _RequestError(
http.HTTPStatus.BAD_REQUEST,
'incorrectly encoded request line: %r' % line,
)
async def _readline(self, limit=65536):
"""Read one line from the socket.
It errs out when line length exceeds the limit.
"""
if self._buffer:
ASSERT.equal(len(self._buffer), 1)
line = self._search_line(0)
if line is not None:
return line
while not self._ended and self._size <= limit:
data = await self._sock.recv(limit + 1)
if not data:
self._ended = True
break
self._buffer.append(data)
self._size += len(data)
# Only the newly appended chunk can contain the newline.
line = self._search_line(-1)
if line is not None:
ASSERT.in_(len(self._buffer), (0, 1))
return line
if self._size > limit:
raise _RequestError(
http.HTTPStatus.REQUEST_URI_TOO_LONG,
'request line length exceeds %d' % limit,
)
# EOF without a newline: return whatever remains (may be b'').
if self._buffer:
remaining = b''.join(self._buffer)
self._buffer.clear()
self._size = 0
return remaining
else:
return b''
def _search_line(self, i):
# Look for a newline in chunk i; when found, splice the full line
# (spanning chunks 0..i) out of the buffer and return it, else
# return None.  Negative i counts from the end.
if i < 0:
i += len(self._buffer)
j = self._buffer[i].find(b'\n')
if j < 0:
return None
j += 1
if i == 0:
if j == len(self._buffer[0]):
line = self._buffer.pop(0)
else:
line = self._buffer[0][:j]
self._buffer[0] = self._buffer[0][j:]
else:
if j == len(self._buffer[i]):
parts = self._buffer[:i + 1]
del self._buffer[:i + 1]
else:
parts = self._buffer[:i]
parts.append(self._buffer[i][:j])
self._buffer[i] = self._buffer[i][j:]
del self._buffer[:i]
line = b''.join(parts)
self._size -= len(line)
return line
async def read_into(self, stream, size):
# Copy up to `size` bytes (buffered data first, then the socket)
# into `stream`; stops early at EOF.
while size > 0:
if self._buffer:
if size < len(self._buffer[0]):
data = self._buffer[0][:size]
self._buffer[0] = self._buffer[0][size:]
else:
data = self._buffer.pop(0)
self._size -= len(data)
elif self._ended:
break
else:
data = await self._sock.recv(size)
if not data:
self._ended = True
break
size -= len(data)
stream.write_nonblocking(data)
@enum.unique
class _SendMechanisms(enum.Enum):
"""How a response body is transmitted; fixed after first use."""
# auto() assigns values in declaration order; do not reorder.
UNDECIDED = enum.auto()
SEND = enum.auto()
SENDFILE = enum.auto()
class _ApplicationContext:
"""Mediate between the WSGI application task and the response sender.

Holds the status/headers set via start_response and a bounded queue
of body chunks consumed by _send_response.
"""
def __init__(self):
# Once committed, start_response may no longer replace
# status/headers (it must re-raise exc_info instead).
self._is_committed = False
self._status = None
self._headers = []
self._send_mechanism = _SendMechanisms.UNDECIDED
# Set capacity to 1 to prevent excessive buffering.
self._chunks = queues.Queue(capacity=1)
# File to transmit via sendfile, if the application chose that path.
self.file = None
def start_response(self, status, response_headers, exc_info=None):
# PEP 3333 start_response: with exc_info, re-raise after commit;
# without it, the response must not have been committed yet.
if exc_info:
try:
if self._is_committed:
exc = exc_info[1]
if exc is None:
exc = exc_info[0]()
if exc.__traceback__ is not exc_info[2]:
exc.with_traceback(exc_info[2])
raise exc
finally:
exc_info = None # Avoid dangling cyclic ref.
else:
ASSERT.false(self._is_committed)
# Get the status code from status line like "200 OK".
self._status = http.HTTPStatus(int(status.split(maxsplit=1)[0]))
self._headers = [
(name.encode('iso-8859-1'), value.encode('iso-8859-1'))
for name, value in response_headers
]
return self.write
def commit(self):
"""Commit the status and headers.
This effectively "locks" the context from further changing
status or headers via `start_response`.
"""
self._is_committed = True
@property
def status(self):
# It is unsafe to read status before the context is committed.
ASSERT.true(self._is_committed)
return ASSERT.not_none(self._status)
@property
def headers(self):
# It is unsafe to read headers before the context is committed.
ASSERT.true(self._is_committed)
return self._headers
async def get_body_chunk(self):
# b'' signals end-of-body to the consumer.
try:
return await self._chunks.get()
except queues.Closed:
return b''
async def put_body_chunk(self, chunk):
# put and sendfile are mutually exclusive for one response.
ASSERT.is_not(self._send_mechanism, _SendMechanisms.SENDFILE)
self._send_mechanism = _SendMechanisms.SEND
if chunk:
await self._chunks.put(chunk)
# According to WSGI spec, `write` is only intended for maintaining
# backward compatibility.
async def write(self, data):
await self.put_body_chunk(data)
return len(data)
def end_body_chunks(self):
self._chunks.close()
def sendfile(self, file):
# sendfile can be called only once.
ASSERT.is_(self._send_mechanism, _SendMechanisms.UNDECIDED)
ASSERT.not_none(file)
self._send_mechanism = _SendMechanisms.SENDFILE
self.file = file
class _ResponseQueue:
"""Serialize one HTTP/1.1 response (status line, headers, body) to a socket."""
# These timeouts are for preventing a client who refuses to receive
# data blocking send/sendfile forever.
#
# TODO: Make these configurable.
_SEND_TIMEOUT = 2
_SENDFILE_TIMEOUT = 8
# Pre-encoded reason phrases for every known status code.
_ENCODED_REASONS = {
status: status.phrase.encode('iso-8859-1')
for status in http.HTTPStatus
}
def __init__(self, sock):
self._sock = sock
self._has_begun = False
self._headers_sent = locks.Event()
self._send_mechanism = _SendMechanisms.UNDECIDED
async def begin(self, status, headers):
# Send the status line and headers; body methods wait on the
# _headers_sent event before writing.
ASSERT.false(self._has_begun)
self._has_begun = True
buffer = io.BytesIO()
buffer.write(
b'HTTP/1.1 %d %s\r\n' % (status, self._ENCODED_REASONS[status])
)
for key, value in headers:
buffer.write(b'%s: %s\r\n' % (key, value))
buffer.write(b'\r\n')
await self._send_all(buffer.getvalue())
self._headers_sent.set()
def has_begun(self):
return self._has_begun
async def put_body_chunk(self, chunk):
ASSERT.true(self._has_begun)
# put and sendfile are mutually exclusive for one response.
ASSERT.is_not(self._send_mechanism, _SendMechanisms.SENDFILE)
self._send_mechanism = _SendMechanisms.SEND
await self._headers_sent.wait()
if chunk:
await self._send_all(chunk)
async def sendfile(self, file):
ASSERT.true(self._has_begun)
# sendfile can be called only once.
ASSERT.is_(self._send_mechanism, _SendMechanisms.UNDECIDED)
ASSERT.not_none(file)
self._send_mechanism = _SendMechanisms.SENDFILE
await self._headers_sent.wait()
with timers.timeout_after(self._SENDFILE_TIMEOUT):
return await self._sock.sendfile(file)
def end(self):
# Reset state so the queue can serve the next response on this
# keep-alive connection.
ASSERT.true(self._has_begun)
ASSERT.true(self._headers_sent.is_set())
self._has_begun = False
self._headers_sent.clear()
self._send_mechanism = _SendMechanisms.UNDECIDED
async def _send_all(self, data):
# Loop until every byte is written; each send is individually
# bounded by _SEND_TIMEOUT.
data = memoryview(data)
num_sent = 0
while num_sent < len(data):
with timers.timeout_after(self._SEND_TIMEOUT):
num_sent += await self._sock.send(data[num_sent:])
| {
"repo_name": "clchiou/garage",
"path": "py/g1/http/http1_servers/g1/http/http1_servers/wsgi.py",
"copies": "1",
"size": "23561",
"license": "mit",
"hash": 4457118436835384000,
"line_mean": 33.1959361393,
"line_max": 78,
"alpha_frac": 0.5416153814,
"autogenerated": false,
"ratio": 4.223915381857297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5265530763257297,
"avg_score": null,
"num_lines": null
} |
"""Asynchronous WSGI Server/Gateway implementation.
This implements a asynchronous-variant of WSGI server. It handles one
incoming HTTP/2 session at a time.
At the moment it does not implements HTTP/2 Push.
"""
__all__ = [
'HttpSession',
]
import collections
import ctypes
import functools
import logging
import socket
import ssl
import sys
import urllib.parse
from g1.asyncs.bases import locks
from g1.asyncs.bases import servers
from g1.asyncs.bases import streams
from g1.asyncs.bases import tasks
from g1.asyncs.bases import timers
from g1.bases import classes
from g1.bases.assertions import ASSERT
from g1.bases.ctypes import (
c_blob,
deref_py_object_p,
)
from . import nghttp2 as ng
LOG = logging.getLogger(__name__)
#
# Helper for defining callbacks.
#
CALLBACK_NAMES = []
def define_callback(func):
CALLBACK_NAMES.append(func.__name__)
return as_callback(func)
def as_callback(func):
"""Convert a Python function into nghttp2 callback function.
The convention of nghttp2 callback is that the first argument is a
pointer to C session struct, and the last argument is a pointer to
user data.
We use the user data to pass the ``HttpSession`` object to callback
functions.
"""
name = func.__name__
@functools.wraps(func)
def trampoline(raw_session, *args):
try:
*args, session = args
session = deref_py_object_p(session)
except Exception:
addr = ctypes.addressof(raw_session.contents)
LOG.exception('%s: session=%#x: trampoline error', name, addr)
return ng.nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE
try:
return func(session, *args)
except Exception:
LOG.exception('%s: %r: callback error', name, session)
return ng.nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE
return ng.C['nghttp2_%s_callback' % name](trampoline)
#
# HTTP/2 session object.
#
INCOMING_BUFFER_SIZE = 65536 # As TCP packet is no bigger than 64KB.
ENCODING = 'iso-8859-1'
MAX_CONCURRENT_STREAMS = 100
INITIAL_WINDOW_SIZE = 1 << 20
MAX_HEADER_LIST_SIZE = 16384
SETTINGS_TIMEOUT = 5 # Unit: seconds.
_nghttp2_session_p = ctypes.POINTER(ng.nghttp2_session)
class HttpSession:
"""HTTP/2 session.
A session is further divided into streams, which are basically a
request-response pair.
"""
def __init__(self, sock, address, application, environ):
self._sock = sock
self._address = address
self._queue = tasks.CompletionQueue()
self._outgoing_gate = locks.Gate()
self._incoming_handler = None
self._cancel_settings_timer = None
self._application = application
self._environ = environ
self._streams = {}
# Own ``py_object`` object to prevent it from being freed.
self._user_data = ctypes.py_object(self)
self._session = _nghttp2_session_p()
ng.F.nghttp2_session_server_new(
ctypes.byref(self._session),
CALLBACKS,
ctypes.byref(self._user_data),
)
__repr__ = classes.make_repr(
'{self._address} session={session} streams={streams}',
session=lambda self: \
ctypes.addressof(self._session.contents) if self._session else 0,
streams=lambda self: len(self._streams),
)
async def serve(self):
ASSERT.not_none(self._session)
self._prepare()
try:
self._incoming_handler = self._queue.spawn(self._handle_incoming)
await servers.supervise_server(
self._queue,
(
self._incoming_handler,
self._queue.spawn(self._handle_outgoing),
),
)
finally:
self._cleanup()
def _prepare(self):
self._sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
settings = (ng.nghttp2_settings_entry * 3)()
settings[0].settings_id = \
ng.nghttp2_settings_id.NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS
settings[0].value = MAX_CONCURRENT_STREAMS
settings[1].settings_id = \
ng.nghttp2_settings_id.NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE
settings[1].value = INITIAL_WINDOW_SIZE
settings[2].settings_id = \
ng.nghttp2_settings_id.NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE
settings[2].value = MAX_HEADER_LIST_SIZE
ng.F.nghttp2_submit_settings(
self._session,
ng.nghttp2_flag.NGHTTP2_FLAG_NONE,
settings,
len(settings),
)
async def _handle_incoming(self):
error_code = ng.nghttp2_error_code.NGHTTP2_INTERNAL_ERROR
try:
while ng.F.nghttp2_session_want_read(self._session):
data = await self._sock.recv(INCOMING_BUFFER_SIZE)
LOG.debug('serve: %r: recv %d bytes', self, len(data))
if not data:
break
# In the current nghttp2 implementation,
# nghttp2_session_mem_recv always tries to processes all
# input data on success.
ASSERT.equal(
ng.F.nghttp2_session_mem_recv(
self._session, data, len(data)
),
len(data),
)
self._outgoing_gate.unblock()
error_code = ng.nghttp2_error_code.NGHTTP2_NO_ERROR
except timers.Timeout:
LOG.warning('serve: %r: settings timeout', self)
self._cancel_settings_timer = None
error_code = ng.nghttp2_error_code.NGHTTP2_SETTINGS_TIMEOUT
except ConnectionResetError:
LOG.debug('serve: %r: connection reset by client', self)
except OSError as exc:
LOG.warning('serve: %r: sock.recv error', self, exc_info=exc)
except ng.Nghttp2Error as exc:
if (
exc.error_code == \
ng.nghttp2_error.NGHTTP2_ERR_BAD_CLIENT_MAGIC
):
LOG.warning('serve: %r: bad client magic', self, exc_info=exc)
else:
raise
finally:
# NOTE: I have read the docs but am still not sure where and
# when should we call ``nghttp2_session_terminate_session``.
# For now it seems to be fine to make the call here.
ng.F.nghttp2_session_terminate_session(self._session, error_code)
self._outgoing_gate.unblock()
async def _handle_outgoing(self):
# Sadly SSLSocket disallows scatter/gather sendmsg.
if isinstance(self._sock.target, ssl.SSLSocket):
send_all = self._send_all
else:
ASSERT.isinstance(self._sock.target, socket.socket)
send_all = self._sendmsg_all
try:
while (
ng.F.nghttp2_session_want_read(self._session)
or ng.F.nghttp2_session_want_write(self._session)
):
buffers = []
total_length = 0
while True:
buffer = c_blob()
length = ng.F.nghttp2_session_mem_send(
self._session,
ctypes.byref(buffer),
)
if length == 0:
break
buffers.append(ctypes.string_at(buffer, length))
total_length += length
if not buffers:
await self._outgoing_gate.wait()
continue
LOG.debug(
'serve: %r: send %d bytes in %d pieces',
self,
total_length,
len(buffers),
)
await send_all(buffers)
except BrokenPipeError:
LOG.debug('serve: %r: connection closed by client', self)
except OSError as exc:
LOG.warning('serve: %r: sock.send error', self, exc_info=exc)
async def _send_all(self, buffers):
output = b''.join(buffers)
num_sent = 0
while num_sent < len(output):
num_sent += await self._sock.send(output[num_sent:])
async def _sendmsg_all(self, buffers):
while buffers:
num_sent = await self._sock.sendmsg(buffers)
while buffers:
if len(buffers[0]) <= num_sent:
num_sent -= len(buffers.pop(0))
else:
buffers[0] = buffers[0][num_sent:]
break
def _cleanup(self):
self._cancel_settings_timer = None
self._streams = None
ng.F.nghttp2_session_del(self._session)
self._session = None
self._user_data = None
self._sock.close()
def _start_settings_timer(self):
# This should start a timer on the ``_handle_incoming`` task.
if not self._cancel_settings_timer:
LOG.debug('start settings timeout: %r', self)
self._cancel_settings_timer = timers.timeout_after(
SETTINGS_TIMEOUT,
task=self._incoming_handler,
)
def _stop_settings_timer(self):
if self._cancel_settings_timer:
LOG.debug('stop settings timeout: %r', self)
self._cancel_settings_timer()
self._cancel_settings_timer = None
def _rst_stream_if_not_closed(self, stream_id):
if ng.F.nghttp2_session_get_stream_remote_close(
self._session, stream_id
):
return 0
else:
return self._rst_stream(
stream_id, ng.nghttp2_error_code.NGHTTP2_NO_ERROR
)
def _rst_stream(self, stream_id, error_code):
LOG.debug(
'rst_stream: %r: stream_id=%d, error_code=%d',
self,
stream_id,
error_code,
)
return ng.F.nghttp2_submit_rst_stream(
self._session,
ng.nghttp2_flag.NGHTTP2_FLAG_NONE,
stream_id,
error_code,
)
#
# Callbacks.
#
@define_callback
def on_frame_recv(self, frame):
frame = frame.contents
LOG.debug(
'on_frame_recv: %r: type=%d, stream_id=%d',
self,
frame.hd.type,
frame.hd.stream_id,
)
if frame.hd.type == ng.nghttp2_frame_type.NGHTTP2_SETTINGS:
if frame.hd.flags & ng.nghttp2_flag.NGHTTP2_FLAG_ACK:
self._stop_settings_timer()
elif frame.hd.type == ng.nghttp2_frame_type.NGHTTP2_HEADERS:
if (
frame.headers.cat == \
ng.nghttp2_headers_category.NGHTTP2_HCAT_REQUEST
):
stream = self._streams.get(frame.hd.stream_id)
if not stream:
return 0
if frame.hd.flags & ng.nghttp2_flag.NGHTTP2_FLAG_END_HEADERS:
stream.end_request_headers()
if frame.hd.flags & ng.nghttp2_flag.NGHTTP2_FLAG_END_STREAM:
stream.end_request()
elif frame.hd.type == ng.nghttp2_frame_type.NGHTTP2_DATA:
if frame.hd.flags & ng.nghttp2_flag.NGHTTP2_FLAG_END_STREAM:
stream = self._streams.get(frame.hd.stream_id)
if not stream:
return 0
stream.end_request()
return 0
@define_callback
def on_begin_headers(self, frame):
frame = frame.contents
LOG.debug(
'on_begin_headers: %r: type=%d, stream_id=%d',
self,
frame.hd.type,
frame.hd.stream_id,
)
if frame.hd.type == ng.nghttp2_frame_type.NGHTTP2_HEADERS:
if (
frame.headers.cat == \
ng.nghttp2_headers_category.NGHTTP2_HCAT_REQUEST
):
stream_id = ASSERT.not_in(frame.hd.stream_id, self._streams)
LOG.debug('make stream: %r: stream_id=%d', self, stream_id)
self._streams[stream_id] = HttpStream(self, stream_id)
return 0
@define_callback
def on_header(self, frame, name, namelen, value, valuelen, flags):
frame = frame.contents
LOG.debug(
'on_header: %r: type=%d, stream_id=%d, flags=%#x, %r=%r',
self,
frame.hd.type,
frame.hd.stream_id,
flags,
name,
value,
)
ASSERT.equal(len(name), namelen)
ASSERT.equal(len(value), valuelen)
stream = self._streams.get(frame.hd.stream_id)
if not stream:
return 0
stream.set_header(name, value)
return 0
@define_callback
def on_data_chunk_recv(self, flags, stream_id, data, length):
LOG.debug(
'on_data_chunk_recv: %r, stream_id=%d, flags=%#x, length=%d',
self,
stream_id,
flags,
length,
)
stream = self._streams.get(stream_id)
if not stream:
return 0
stream.write_request_body(ctypes.string_at(data, length))
return 0
@define_callback
def on_frame_send(self, frame):
frame = frame.contents
LOG.debug(
'on_frame_send: %r: type=%d, stream_id=%d',
self,
frame.hd.type,
frame.hd.stream_id,
)
# TODO: Support frame.hd.type == NGHTTP2_PUSH_PROMISE.
if frame.hd.type == ng.nghttp2_frame_type.NGHTTP2_SETTINGS:
if frame.hd.flags & ng.nghttp2_flag.NGHTTP2_FLAG_ACK:
return 0
self._start_settings_timer()
elif frame.hd.type == ng.nghttp2_frame_type.NGHTTP2_HEADERS:
if frame.hd.flags & ng.nghttp2_flag.NGHTTP2_FLAG_END_STREAM:
return self._rst_stream_if_not_closed(frame.hd.stream_id)
return 0
@define_callback
def on_frame_not_send(self, frame, error_code):
frame = frame.contents
LOG.debug(
'on_frame_not_send: %r: type=%d, stream_id=%d, error_code=%d',
self,
frame.hd.type,
frame.hd.stream_id,
error_code,
)
# TODO: Support frame.hd.type == NGHTTP2_PUSH_PROMISE.
return 0
@define_callback
def on_stream_close(self, stream_id, error_code):
LOG.debug(
'on_stream_close: %r: stream_id=%d, error_code=%d',
self,
stream_id,
error_code,
)
stream = self._streams.pop(stream_id, None)
if not stream:
return 0
stream.close()
return 0
#
# Other callbacks.
#
@as_callback
def data_source_read(self, stream_id, buf, length, data_flags, source):
LOG.debug(
'data_source_read: %r, stream_id=%d, length=%d',
self,
stream_id,
length,
)
del source # Unused.
stream = self._streams[stream_id]
data = stream.read_response_body(ASSERT.greater(length, 0))
if data is None:
return ng.nghttp2_error.NGHTTP2_ERR_DEFERRED
if data:
ctypes.memmove(buf, data, len(data))
else:
data_flags[0] = ng.nghttp2_data_flag.NGHTTP2_DATA_FLAG_EOF
self._rst_stream_if_not_closed(stream_id)
return len(data)
CALLBACKS = ctypes.POINTER(ng.nghttp2_session_callbacks)()
ng.F.nghttp2_session_callbacks_new(ctypes.byref(CALLBACKS))
# pylint: disable=expression-not-assigned
[
ng.F['nghttp2_session_callbacks_set_%s_callback' % name](
CALLBACKS,
getattr(HttpSession, name),
) for name in CALLBACK_NAMES
]
# pylint: enable=expression-not-assigned
DATA_PROVIDER = ng.nghttp2_data_provider()
DATA_PROVIDER.read_callback = HttpSession.data_source_read
class HttpStream:
    """HTTP/2 request-response pair.

    This class is closely coupled with ``HttpSession``, and this class
    accesses its private fields (through a weak pointer).
    """

    def __init__(self, session, stream_id):
        self._session = session
        self._stream_id = stream_id
        # Task running the WSGI application; None until headers complete.
        self._task = None
        self._request_headers = collections.defaultdict(list)
        self._request_body = streams.BytesStream()
        self._response_headers_sent = False
        self._response_body = streams.BytesStream()
        # True while nghttp2 has deferred reading our response body.
        self._response_body_deferred = False

    __repr__ = classes.make_repr(
        'session={self._session!r} stream={self._stream_id}'
    )

    #
    # WSGI interface.
    #

    def _get_first_header(self, name):
        # First value of a header that must be present.
        return ASSERT.getitem(self._request_headers, name)[0]

    def _maybe_get_first_header(self, name):
        # First value of a header, or None when absent.
        return self._request_headers.get(name, (None, ))[0]

    def _start_wsgi_task(self):
        ASSERT.none(self._task)
        self._task = self._session._queue.spawn(self._run_wsgi)

    async def _run_wsgi(self):
        """Drive the WSGI application and stream its output."""
        log_args = (
            self._session._address,
            self._get_first_header(':method'),
            self._get_first_header(':scheme'),
            self._maybe_get_first_header('host')
            or self._maybe_get_first_header(':authority'),
            self._get_first_header(':path'),
        )
        LOG.debug('wsgi app starts: %s %s %s://%s%s', *log_args)
        try:
            app = await self._session._application(
                self._make_environ(),
                self._start_response,
            )
            try:
                # The app body may be an async or a plain iterable.
                if hasattr(app, '__aiter__'):
                    async for data in app:
                        self._write(data)
                else:
                    for data in app:
                        self._write(data)
            finally:
                if hasattr(app, 'close'):
                    await app.close()
        except Exception:
            # Abort the HTTP/2 stream on any app failure, then re-raise.
            LOG.exception('wsgi app error: %s %s %s://%s%s', *log_args)
            self._session._rst_stream(
                self._stream_id, ng.nghttp2_error_code.NGHTTP2_INTERNAL_ERROR
            )
            raise
        finally:
            self._response_body.close()
            # In case self._write is never called, but the outgoing
            # handler was already started and is being blocked on
            # response body data, this unblocks the outgoing handler.
            self._session._outgoing_gate.unblock()
        LOG.debug('wsgi app completes: %s %s %s://%s%s', *log_args)

    def _make_environ(self):
        """Build the per-request WSGI environ from the HTTP/2 headers."""
        environ = self._session._environ.copy()
        environ['wsgi.input'] = self._request_body
        # Should we wrap ``sys.stderr`` in an async adapter?
        environ['wsgi.errors'] = sys.stderr
        environ['REQUEST_METHOD'] = self._get_first_header(':method').upper()
        parsed_path = urllib.parse.urlsplit(self._get_first_header(':path'))
        environ['SCRIPT_NAME'] = ''
        environ['PATH_INFO'] = parsed_path.path
        environ['QUERY_STRING'] = parsed_path.query
        for name, values in self._request_headers.items():
            if name == ':authority':
                name = 'host'
            elif name.startswith(':'):
                continue  # Skip other HTTP/2 pseudo-headers.
            name = name.upper().replace('-', '_')
            if name not in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
                name = 'HTTP_' + name
            environ[name] = ','.join(values)
        return environ

    def _start_response(self, status, response_headers, exc_info=None):
        """WSGI ``start_response``; submits the HTTP/2 response headers."""
        if exc_info:
            try:
                if self._response_headers_sent:
                    # Headers already on the wire: re-raise per WSGI spec.
                    raise exc_info[1].with_traceback(exc_info[2])
            finally:
                exc_info = None  # Avoid dangling cyclic ref.
        else:
            ASSERT.false(self._response_headers_sent)
        # Get the status code from status line like "200 OK".
        status_code = status.split(maxsplit=1)[0]
        nvlen = 1 + len(response_headers)
        nva = (ng.nghttp2_nv * nvlen)()
        self._set_nv(nva[0], b':status', status_code.encode(ENCODING))
        for i, (name, value) in enumerate(response_headers):
            name = name.encode(ENCODING)
            value = value.encode(ENCODING)
            self._set_nv(nva[i + 1], name, value)
        ng.F.nghttp2_submit_response(
            self._session._session,
            self._stream_id,
            nva,
            nvlen,
            ctypes.byref(DATA_PROVIDER),
        )
        self._response_headers_sent = True
        return self._write

    @staticmethod
    def _set_nv(nv, name, value):
        # Fill one nghttp2 name/value entry in place.
        nv.name = ctypes.c_char_p(name)
        nv.namelen = len(name)
        nv.value = ctypes.c_char_p(value)
        nv.valuelen = len(value)
        nv.flags = ng.nghttp2_nv_flag.NGHTTP2_NV_FLAG_NONE

    # According to WSGI spec, ``write`` is only intended for maintaining
    # backward compatibility; so let's declare it as not ``async`` for
    # the ease of use.
    def _write(self, data):
        ASSERT.true(self._response_headers_sent)
        self._response_body.write_nonblocking(data)
        if self._response_body_deferred:
            # nghttp2 deferred the data source earlier (data_source_read
            # returned NGHTTP2_ERR_DEFERRED); resume it now.
            self._response_body_deferred = False
            ng.F.nghttp2_session_resume_data(
                self._session._session,
                self._stream_id,
            )
        self._session._outgoing_gate.unblock()

    #
    # Stream life-cycle.
    #

    def set_header(self, name, value):
        """Record one request header; values may be NUL-joined."""
        ASSERT.none(self._task)
        self._request_headers[name.decode(ENCODING)].extend(
            v.decode(ENCODING) for v in value.split(b'\x00')
        )

    def end_request_headers(self):
        self._start_wsgi_task()

    def write_request_body(self, data):
        self._request_body.write_nonblocking(data)

    def end_request(self):
        self._request_body.close()
        # Start the WSGI app if end_request_headers was never called.
        if not self._task:
            self._start_wsgi_task()

    def read_response_body(self, length):
        """Read up to ``length`` bytes of response body; None means defer."""
        ASSERT.not_none(self._task)
        data = self._response_body.read_nonblocking(length)
        if data is None:
            self._response_body_deferred = True
        return data

    def close(self):
        if self._task:
            self._task.cancel()
| {
"repo_name": "clchiou/garage",
"path": "py/g1/http/http2_servers/g1/http/http2_servers/wsgi.py",
"copies": "1",
"size": "22326",
"license": "mit",
"hash": -8699270819437327000,
"line_mean": 29.3755102041,
"line_max": 78,
"alpha_frac": 0.5524948491,
"autogenerated": false,
"ratio": 3.8021117166212535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9853849301139104,
"avg_score": 0.00015145291642987494,
"num_lines": 735
} |
"""Async HTTP client with bonus features!
- Support caching via upstream 304 with ETag, Last-Modified
- Log request timings for profiling
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import hashlib
import pickle
import time
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.log import app_log
from tornado import gen
from nbviewer.utils import time_block
#-----------------------------------------------------------------------------
# Async HTTP Client
#-----------------------------------------------------------------------------
# Map upstream *response* headers to the conditional *request* headers
# they should be replayed under: a value remembered from a cached
# response is sent back so the upstream can answer 304 Not Modified.
cache_headers = {
    'ETag': 'If-None-Match',
    'Last-Modified': 'If-Modified-Since',
}
class NBViewerAsyncHTTPClient(object):
    """Subclass of AsyncHTTPClient with bonus logging and caching!

    If upstream servers support 304 cache replies with the following headers:

    - ETag : If-None-Match
    - Last-Modified : If-Modified-Since

    Upstream requests are still made every time,
    but resources and rate limits may be saved by 304 responses.

    Currently, responses are cached for a non-configurable two hours.
    """

    # Async cache backend (set externally); caching is disabled while None.
    cache = None
    # Cache entry lifetime in seconds (two hours).
    expiry = 7200

    def fetch_impl(self, request, callback):
        """Entry point called by tornado; defers to the coroutine below."""
        self.io_loop.add_callback(lambda: self._fetch_impl(request, callback))

    @gen.coroutine
    def _fetch_impl(self, request, callback):
        """Fetch with cache lookup, conditional headers and timing logs."""
        tic = time.time()
        if request.user_agent is None:
            request.user_agent = 'Tornado-Async-Client'

        # when logging, use the URL without params
        name = request.url.split('?')[0]
        cached_response = None
        app_log.debug("Fetching %s", name)
        cache_key = hashlib.sha256(request.url.encode('utf8')).hexdigest()

        with time_block("Upstream cache get %s" % name):
            cached_response = yield self._get_cached_response(cache_key, name)

        if cached_response:
            app_log.debug("Upstream cache hit %s", name)
            # add cache headers, if any, so the upstream can reply 304
            for resp_key, req_key in cache_headers.items():
                value = cached_response.headers.get(resp_key)
                if value:
                    request.headers[req_key] = value
        else:
            app_log.debug("Upstream cache miss %s", name)

        response = yield gen.Task(
            super(NBViewerAsyncHTTPClient, self).fetch_impl, request)
        dt = time.time() - tic
        # Slow fetches get promoted to INFO for visibility.
        log = app_log.info if dt > 1 else app_log.debug
        if response.code == 304 and cached_response:
            log("Upstream 304 on %s in %.2f ms", name, 1e3 * dt)
            response = self._update_cached_response(response, cached_response)
            callback(response)
        else:
            if not response.error:
                log("Fetched %s in %.2f ms", name, 1e3 * dt)
            callback(response)
            if not response.error:
                yield self._cache_response(cache_key, name, response)

    def _update_cached_response(self, three_o_four, cached_response):
        """Apply any changes to the cached response from the 304

        Return the HTTPResponse to be used.

        Currently this hardcodes more recent GitHub rate limit headers,
        and that's it.

        Is there a better way for this to be in the right place?
        """
        # Copy GitHub rate-limit headers from 304 to the cached response
        # So we don't log stale rate limits.
        for key, value in three_o_four.headers.items():
            if key.lower().startswith('x-ratelimit-'):
                cached_response.headers[key] = value
        return cached_response

    @gen.coroutine
    def _get_cached_response(self, cache_key, name):
        """Get the cached response, if any"""
        if not self.cache:
            return
        try:
            cached_pickle = yield self.cache.get(cache_key)
            if cached_pickle:
                # NOTE: pickle is only safe here because the cache backend
                # stores our own trusted payloads (see _cache_response).
                raise gen.Return(pickle.loads(cached_pickle))
        except gen.Return:
            raise  # FIXME: remove gen.Return when we drop py2 support
        except Exception:
            app_log.error("Upstream cache get failed %s", name, exc_info=True)

    @gen.coroutine
    def _cache_response(self, cache_key, name, response):
        """Cache the response, if any cache headers we understand are present."""
        if not self.cache:
            return
        if not any(response.headers.get(key) for key in cache_headers):
            # no cache headers, no point in caching the response
            return
        with time_block("Upstream cache set %s" % name):
            # cache the response if there are any cache headers (use cache expiry?)
            try:
                pickle_response = pickle.dumps(response, pickle.HIGHEST_PROTOCOL)
                yield self.cache.set(
                    cache_key,
                    pickle_response,
                    int(time.time() + self.expiry),
                )
            except Exception:
                # Lazy %-args (was eager "%" formatting), consistent with
                # the cache-get failure log above.
                app_log.error(
                    "Upstream cache set failed %s", name, exc_info=True)
class NBViewerSimpleAsyncHTTPClient(NBViewerAsyncHTTPClient, SimpleAsyncHTTPClient):
    # Caching/logging mixin applied to tornado's pure-Python client.
    pass


try:
    from tornado.curl_httpclient import CurlAsyncHTTPClient
except ImportError:
    # pycurl is optional; the curl-backed variant is only defined when
    # the import succeeds.
    pass
else:
    class NBViewerCurlAsyncHTTPClient(NBViewerAsyncHTTPClient, CurlAsyncHTTPClient):
        # Caching/logging mixin applied to the libcurl-backed client.
        pass
| {
"repo_name": "christophelec/nbviewer",
"path": "nbviewer/providers/url/client.py",
"copies": "1",
"size": "5538",
"license": "bsd-3-clause",
"hash": 7150160330944261000,
"line_mean": 34.7290322581,
"line_max": 91,
"alpha_frac": 0.6063560852,
"autogenerated": false,
"ratio": 4.343529411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5449885496964706,
"avg_score": null,
"num_lines": null
} |
"""Async I/O backend support utilities."""
from __future__ import absolute_import, unicode_literals
import socket
import threading
from collections import deque
from time import sleep
from weakref import WeakKeyDictionary
from kombu.utils.compat import detect_environment
from kombu.utils.objects import cached_property
from celery import states
from celery.exceptions import TimeoutError
from celery.five import Empty, monotonic
from celery.utils.threads import THREAD_TIMEOUT_MAX
__all__ = (
'AsyncBackendMixin', 'BaseResultConsumer', 'Drainer',
'register_drainer',
)
#: Registry of drainer implementations, keyed by environment name.
drainers = {}


def register_drainer(name):
    """Decorator used to register a new result drainer type."""
    def _register(cls):
        drainers[name] = cls
        return cls
    return _register
@register_drainer('default')
class Drainer(object):
    """Result draining service."""

    def __init__(self, result_consumer):
        self.result_consumer = result_consumer

    def start(self):
        # The default drainer waits inline; no background machinery.
        pass

    def stop(self):
        pass

    def drain_events_until(self, p, timeout=None, on_interval=None, wait=None):
        """Generator: drain result events until promise ``p`` is ready.

        Yields after each bounded wait so the caller can interleave work;
        raises ``socket.timeout`` once the overall ``timeout`` elapses.
        """
        wait = wait or self.result_consumer.drain_events
        time_start = monotonic()

        while 1:
            # Total time spent may exceed a single call to wait()
            if timeout and monotonic() - time_start >= timeout:
                raise socket.timeout()
            try:
                # Bounded one-second wait; a timeout here is not fatal.
                yield self.wait_for(p, wait, timeout=1)
            except socket.timeout:
                pass
            if on_interval:
                on_interval()
            if p.ready:  # got event on the wanted channel.
                break

    def wait_for(self, p, wait, timeout=None):
        # Single bounded wait; subclasses override for greenlet variants.
        wait(timeout=timeout)
class greenletDrainer(Drainer):
    """Drainer that runs the drain loop on a spawned greenlet."""

    # Callable used to spawn the drain greenlet (set by subclasses).
    spawn = None
    _g = None

    def __init__(self, *args, **kwargs):
        super(greenletDrainer, self).__init__(*args, **kwargs)
        self._started = threading.Event()
        self._stopped = threading.Event()
        self._shutdown = threading.Event()

    def run(self):
        """Drain events repeatedly until stop() is requested."""
        self._started.set()
        while not self._stopped.is_set():
            try:
                self.result_consumer.drain_events(timeout=1)
            except socket.timeout:
                pass
        self._shutdown.set()

    def start(self):
        # Idempotent: spawn the drain loop only once, then wait until
        # it has signalled that it is running.
        if not self._started.is_set():
            self._g = self.spawn(self.run)
            self._started.wait()

    def stop(self):
        self._stopped.set()
        # Wait for the drain loop to acknowledge shutdown.
        self._shutdown.wait(THREAD_TIMEOUT_MAX)

    def wait_for(self, p, wait, timeout=None):
        self.start()
        if not p.ready:
            # Yield control; the spawned greenlet does the actual waiting.
            sleep(0)
@register_drainer('eventlet')
class eventletDrainer(greenletDrainer):
    """Drainer whose drain loop runs on an eventlet-spawned greenlet."""

    @cached_property
    def spawn(self):
        # Imported lazily so eventlet is only required when selected.
        import eventlet
        return eventlet.spawn
@register_drainer('gevent')
class geventDrainer(greenletDrainer):
    """Drainer whose drain loop runs on a gevent-spawned greenlet."""

    @cached_property
    def spawn(self):
        # Imported lazily so gevent is only required when selected.
        import gevent
        return gevent.spawn
class AsyncBackendMixin(object):
    """Mixin for backends that enables the async API."""

    def _collect_into(self, result, bucket):
        """Route messages consumed for ``result`` into ``bucket``."""
        self.result_consumer.buckets[result] = bucket

    def iter_native(self, result, no_ack=True, **kwargs):
        """Generator yielding ``(task_id, meta)`` pairs as results arrive."""
        self._ensure_not_eager()

        results = result.results
        if not results:
            # PEP 479: ``raise StopIteration()`` inside a generator becomes
            # RuntimeError on Python 3.7+; a plain return is the correct
            # way to end the generator early.
            return

        # we tell the result consumer to put consumed results
        # into these buckets.
        bucket = deque()
        for node in results:
            if node._cache:
                # Result already cached: deliver without waiting.
                bucket.append(node)
            else:
                self._collect_into(node, bucket)

        for _ in self._wait_for_pending(result, no_ack=no_ack, **kwargs):
            while bucket:
                node = bucket.popleft()
                yield node.id, node._cache
        while bucket:
            node = bucket.popleft()
            yield node.id, node._cache

    def add_pending_result(self, result, weak=False, start_drainer=True):
        """Register ``result`` so its message will be consumed.

        Resolves immediately from the message buffer when the result
        already arrived out of band.
        """
        if start_drainer:
            self.result_consumer.drainer.start()
        try:
            self._maybe_resolve_from_buffer(result)
        except Empty:
            self._add_pending_result(result.id, result, weak=weak)
        return result

    def _maybe_resolve_from_buffer(self, result):
        # Raises Empty when no buffered message exists for this id.
        result._maybe_set_cache(self._pending_messages.take(result.id))

    def _add_pending_result(self, task_id, result, weak=False):
        concrete, weak_ = self._pending_results
        if task_id not in weak_ and result.id not in concrete:
            (weak_ if weak else concrete)[task_id] = result
            self.result_consumer.consume_from(task_id)

    def add_pending_results(self, results, weak=False):
        """Register many results at once, starting the drainer only once."""
        self.result_consumer.drainer.start()
        return [self.add_pending_result(result, weak=weak, start_drainer=False)
                for result in results]

    def remove_pending_result(self, result):
        self._remove_pending_result(result.id)
        self.on_result_fulfilled(result)
        return result

    def _remove_pending_result(self, task_id):
        # ``mapping`` avoids shadowing the builtin ``map``.
        for mapping in self._pending_results:
            mapping.pop(task_id, None)

    def on_result_fulfilled(self, result):
        self.result_consumer.cancel_for(result.id)

    def wait_for_pending(self, result,
                         callback=None, propagate=True, **kwargs):
        """Block until ``result`` is ready, then return/raise its outcome."""
        self._ensure_not_eager()
        for _ in self._wait_for_pending(result, **kwargs):
            pass
        return result.maybe_throw(callback=callback, propagate=propagate)

    def _wait_for_pending(self, result,
                          timeout=None, on_interval=None, on_message=None,
                          **kwargs):
        return self.result_consumer._wait_for_pending(
            result, timeout=timeout,
            on_interval=on_interval, on_message=on_message,
        )

    @property
    def is_async(self):
        return True
class BaseResultConsumer(object):
    """Manager responsible for consuming result messages."""

    def __init__(self, backend, app, accept,
                 pending_results, pending_messages):
        self.backend = backend
        self.app = app
        self.accept = accept
        self._pending_results = pending_results
        self._pending_messages = pending_messages
        self.on_message = None
        # result -> deque buckets used by iter_native to hand over results.
        self.buckets = WeakKeyDictionary()
        # Select the drainer registered for the current environment
        # (default/eventlet/gevent).
        self.drainer = drainers[detect_environment()](self)

    def start(self, initial_task_id, **kwargs):
        raise NotImplementedError()

    def stop(self):
        pass

    def drain_events(self, timeout=None):
        raise NotImplementedError()

    def consume_from(self, task_id):
        raise NotImplementedError()

    def cancel_for(self, task_id):
        raise NotImplementedError()

    def _after_fork(self):
        # Buckets hold parent-process state; reset them in the child.
        self.buckets.clear()
        self.buckets = WeakKeyDictionary()
        self.on_message = None
        self.on_after_fork()

    def on_after_fork(self):
        pass

    def drain_events_until(self, p, timeout=None, on_interval=None):
        return self.drainer.drain_events_until(
            p, timeout=timeout, on_interval=on_interval)

    def _wait_for_pending(self, result,
                          timeout=None, on_interval=None, on_message=None,
                          **kwargs):
        """Generator draining events until ``result`` is ready.

        Temporarily installs ``on_message`` as the message callback and
        converts a low-level socket timeout into ``TimeoutError``.
        """
        self.on_wait_for_pending(result, timeout=timeout, **kwargs)
        prev_on_m, self.on_message = self.on_message, on_message
        try:
            for _ in self.drain_events_until(
                    result.on_ready, timeout=timeout,
                    on_interval=on_interval):
                yield
                sleep(0)
        except socket.timeout:
            raise TimeoutError('The operation timed out.')
        finally:
            # Always restore the previous callback.
            self.on_message = prev_on_m

    def on_wait_for_pending(self, result, timeout=None, **kwargs):
        pass

    def on_out_of_band_result(self, message):
        self.on_state_change(message.payload, message)

    def _get_pending_result(self, task_id):
        # Search both mappings held in _pending_results.
        for mapping in self._pending_results:
            try:
                return mapping[task_id]
            except KeyError:
                pass
        raise KeyError(task_id)

    def on_state_change(self, meta, message):
        """Handle a result message; deliver ready results to waiters."""
        if self.on_message:
            self.on_message(meta)
        if meta['status'] in states.READY_STATES:
            task_id = meta['task_id']
            try:
                result = self._get_pending_result(task_id)
            except KeyError:
                # send to buffer in case we received this result
                # before it was added to _pending_results.
                self._pending_messages.put(task_id, meta)
            else:
                result._maybe_set_cache(meta)
                buckets = self.buckets
                try:
                    # remove bucket for this result, since it's fulfilled
                    bucket = buckets.pop(result)
                except KeyError:
                    pass
                else:
                    # send to waiter via bucket
                    bucket.append(result)
        sleep(0)
| {
"repo_name": "cloudera/hue",
"path": "desktop/core/ext-py/celery-4.2.1/celery/backends/async.py",
"copies": "2",
"size": "9103",
"license": "apache-2.0",
"hash": -5849780360650487000,
"line_mean": 29.4448160535,
"line_max": 79,
"alpha_frac": 0.5873887729,
"autogenerated": false,
"ratio": 4.1264732547597465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 299
} |
"""Async I/O backend support utilities."""
from __future__ import absolute_import, unicode_literals
import socket
import threading
from collections import deque
from time import sleep
from weakref import WeakKeyDictionary
from kombu.utils.compat import detect_environment
from kombu.utils.objects import cached_property
from celery import states
from celery.exceptions import TimeoutError
from celery.five import Empty, monotonic
from celery.utils.threads import THREAD_TIMEOUT_MAX
__all__ = [
'AsyncBackendMixin', 'BaseResultConsumer', 'Drainer',
'register_drainer',
]
#: Mapping of environment name -> drainer class.
drainers = {}


def register_drainer(name):
    """Decorator used to register a new result drainer type."""
    def decorator(drainer_cls):
        drainers[name] = drainer_cls
        return drainer_cls
    return decorator
@register_drainer('default')
class Drainer(object):
    """Result draining service."""

    def __init__(self, result_consumer):
        self.result_consumer = result_consumer

    def start(self):
        # Nothing to start for the inline (blocking) drainer.
        pass

    def stop(self):
        pass

    def drain_events_until(self, p, timeout=None, on_interval=None, wait=None):
        """Generator draining result events until promise ``p`` is ready.

        Raises ``socket.timeout`` when the total ``timeout`` is exceeded.
        """
        wait = wait or self.result_consumer.drain_events
        time_start = monotonic()

        while 1:
            # Total time spent may exceed a single call to wait()
            if timeout and monotonic() - time_start >= timeout:
                raise socket.timeout()
            try:
                yield self.wait_for(p, wait, timeout=1)
            except socket.timeout:
                # Per-iteration timeout only; keep looping.
                pass
            if on_interval:
                on_interval()
            if p.ready:  # got event on the wanted channel.
                break

    def wait_for(self, p, wait, timeout=None):
        # One bounded wait; overridden by greenlet-based drainers.
        wait(timeout=timeout)
class greenletDrainer(Drainer):
    """Drainer that performs the drain loop on a spawned greenlet."""

    # Spawn callable supplied by concrete subclasses.
    spawn = None
    _g = None

    def __init__(self, *args, **kwargs):
        super(greenletDrainer, self).__init__(*args, **kwargs)
        self._started = threading.Event()
        self._stopped = threading.Event()
        self._shutdown = threading.Event()

    def run(self):
        """Drain loop body: run until stop() sets the stopped event."""
        self._started.set()
        while not self._stopped.is_set():
            try:
                self.result_consumer.drain_events(timeout=1)
            except socket.timeout:
                pass
        self._shutdown.set()

    def start(self):
        # Spawn at most once; block until the loop reports it is running.
        if not self._started.is_set():
            self._g = self.spawn(self.run)
            self._started.wait()

    def stop(self):
        self._stopped.set()
        # Give the drain loop a chance to acknowledge shutdown.
        self._shutdown.wait(THREAD_TIMEOUT_MAX)

    def wait_for(self, p, wait, timeout=None):
        self.start()
        if not p.ready:
            # Hand control to the greenlet doing the actual waiting.
            sleep(0)
@register_drainer('eventlet')
class eventletDrainer(greenletDrainer):
    """greenletDrainer variant backed by eventlet's spawn."""

    @cached_property
    def spawn(self):
        # Lazy import: only pay for eventlet when this drainer is used.
        import eventlet
        return eventlet.spawn
@register_drainer('gevent')
class geventDrainer(greenletDrainer):
    """greenletDrainer variant backed by gevent's spawn."""

    @cached_property
    def spawn(self):
        # Lazy import: only pay for gevent when this drainer is used.
        import gevent
        return gevent.spawn
class AsyncBackendMixin(object):
    """Mixin for backends that enables the async API."""

    def _collect_into(self, result, bucket):
        """Direct messages consumed for ``result`` into ``bucket``."""
        self.result_consumer.buckets[result] = bucket

    def iter_native(self, result, no_ack=True, **kwargs):
        """Generator yielding ``(task_id, meta)`` pairs as results arrive."""
        self._ensure_not_eager()

        results = result.results
        if not results:
            # PEP 479 fix: ``raise StopIteration()`` inside a generator is
            # converted to RuntimeError on Python 3.7+; returning ends the
            # generator cleanly instead.
            return

        # we tell the result consumer to put consumed results
        # into these buckets.
        bucket = deque()
        for node in results:
            if node._cache:
                # Already cached: deliverable without waiting.
                bucket.append(node)
            else:
                self._collect_into(node, bucket)

        for _ in self._wait_for_pending(result, no_ack=no_ack, **kwargs):
            while bucket:
                node = bucket.popleft()
                yield node.id, node._cache
        while bucket:
            node = bucket.popleft()
            yield node.id, node._cache

    def add_pending_result(self, result, weak=False, start_drainer=True):
        """Register ``result`` for consumption, resolving from the
        out-of-band message buffer when possible."""
        if start_drainer:
            self.result_consumer.drainer.start()
        try:
            self._maybe_resolve_from_buffer(result)
        except Empty:
            self._add_pending_result(result.id, result, weak=weak)
        return result

    def _maybe_resolve_from_buffer(self, result):
        # take() raises Empty when nothing is buffered for this id.
        result._maybe_set_cache(self._pending_messages.take(result.id))

    def _add_pending_result(self, task_id, result, weak=False):
        concrete, weak_ = self._pending_results
        if task_id not in weak_ and result.id not in concrete:
            (weak_ if weak else concrete)[task_id] = result
            self.result_consumer.consume_from(task_id)

    def add_pending_results(self, results, weak=False):
        """Register many results, starting the drainer only once."""
        self.result_consumer.drainer.start()
        return [self.add_pending_result(result, weak=weak, start_drainer=False)
                for result in results]

    def remove_pending_result(self, result):
        self._remove_pending_result(result.id)
        self.on_result_fulfilled(result)
        return result

    def _remove_pending_result(self, task_id):
        # ``mapping`` instead of the original ``map`` (builtin shadowing).
        for mapping in self._pending_results:
            mapping.pop(task_id, None)

    def on_result_fulfilled(self, result):
        self.result_consumer.cancel_for(result.id)

    def wait_for_pending(self, result,
                         callback=None, propagate=True, **kwargs):
        """Block until ``result`` is ready, then return/raise its outcome."""
        self._ensure_not_eager()
        for _ in self._wait_for_pending(result, **kwargs):
            pass
        return result.maybe_throw(callback=callback, propagate=propagate)

    def _wait_for_pending(self, result,
                          timeout=None, on_interval=None, on_message=None,
                          **kwargs):
        return self.result_consumer._wait_for_pending(
            result, timeout=timeout,
            on_interval=on_interval, on_message=on_message,
        )

    @property
    def is_async(self):
        return True
class BaseResultConsumer(object):
    """Manager responsible for consuming result messages."""

    def __init__(self, backend, app, accept,
                 pending_results, pending_messages):
        self.backend = backend
        self.app = app
        self.accept = accept
        self._pending_results = pending_results
        self._pending_messages = pending_messages
        self.on_message = None
        # Buckets used by iter_native to hand over consumed results.
        self.buckets = WeakKeyDictionary()
        # Drainer is chosen per execution environment (see register_drainer).
        self.drainer = drainers[detect_environment()](self)

    def start(self, initial_task_id, **kwargs):
        raise NotImplementedError()

    def stop(self):
        pass

    def drain_events(self, timeout=None):
        raise NotImplementedError()

    def consume_from(self, task_id):
        raise NotImplementedError()

    def cancel_for(self, task_id):
        raise NotImplementedError()

    def _after_fork(self):
        # Discard parent-process state in the forked child.
        self.buckets.clear()
        self.buckets = WeakKeyDictionary()
        self.on_message = None
        self.on_after_fork()

    def on_after_fork(self):
        pass

    def drain_events_until(self, p, timeout=None, on_interval=None):
        return self.drainer.drain_events_until(
            p, timeout=timeout, on_interval=on_interval)

    def _wait_for_pending(self, result,
                          timeout=None, on_interval=None, on_message=None,
                          **kwargs):
        """Generator draining events until ``result`` is ready;
        raises ``TimeoutError`` when the drainer times out."""
        self.on_wait_for_pending(result, timeout=timeout, **kwargs)
        # Swap in the caller's on_message callback for the duration.
        prev_on_m, self.on_message = self.on_message, on_message
        try:
            for _ in self.drain_events_until(
                    result.on_ready, timeout=timeout,
                    on_interval=on_interval):
                yield
                sleep(0)
        except socket.timeout:
            raise TimeoutError('The operation timed out.')
        finally:
            self.on_message = prev_on_m

    def on_wait_for_pending(self, result, timeout=None, **kwargs):
        pass

    def on_out_of_band_result(self, message):
        self.on_state_change(message.payload, message)

    def _get_pending_result(self, task_id):
        # Check every mapping held in _pending_results.
        for mapping in self._pending_results:
            try:
                return mapping[task_id]
            except KeyError:
                pass
        raise KeyError(task_id)

    def on_state_change(self, meta, message):
        """Handle one result message, delivering ready results to waiters."""
        if self.on_message:
            self.on_message(meta)
        if meta['status'] in states.READY_STATES:
            task_id = meta['task_id']
            try:
                result = self._get_pending_result(task_id)
            except KeyError:
                # send to buffer in case we received this result
                # before it was added to _pending_results.
                self._pending_messages.put(task_id, meta)
            else:
                result._maybe_set_cache(meta)
                buckets = self.buckets
                try:
                    # remove bucket for this result, since it's fulfilled
                    bucket = buckets.pop(result)
                except KeyError:
                    pass
                else:
                    # send to waiter via bucket
                    bucket.append(result)
        sleep(0)
| {
"repo_name": "ammarkhann/FinalSeniorCode",
"path": "lib/python2.7/site-packages/celery/backends/async.py",
"copies": "2",
"size": "9104",
"license": "mit",
"hash": -1765712084561805000,
"line_mean": 29.3466666667,
"line_max": 79,
"alpha_frac": 0.5873242531,
"autogenerated": false,
"ratio": 4.125056637970095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5712380891070096,
"avg_score": null,
"num_lines": null
} |
"""Asyncio backports for Python 3.4.3 compatibility."""
import concurrent.futures
from asyncio import coroutines
from asyncio.futures import Future
try:
    from asyncio import ensure_future
except ImportError:
    # Python 3.4.3 and earlier only ship this helper under the name
    # ``async``.  ``async`` became a keyword in Python 3.7, so a literal
    # ``from asyncio import async`` is a SyntaxError there; fetch the
    # legacy attribute dynamically instead to keep this module parseable.
    import asyncio
    # pylint: disable=no-member
    ensure_future = getattr(asyncio, 'async')
def _set_result_unless_cancelled(fut, result):
    """Set *result* on *fut*, doing nothing when it was already cancelled."""
    if not fut.cancelled():
        fut.set_result(result)
def _set_concurrent_future_state(concurr, source):
    """Copy state from a future to a concurrent.futures.Future."""
    assert source.done()
    if source.cancelled():
        concurr.cancel()
    if not concurr.set_running_or_notify_cancel():
        # Destination was cancelled in the meantime; nothing to copy.
        return
    exception = source.exception()
    if exception is None:
        concurr.set_result(source.result())
    else:
        concurr.set_exception(exception)
def _copy_future_state(source, dest):
    """Internal helper to copy state from another Future.

    The other Future may be a concurrent.futures.Future.
    """
    assert source.done()
    if dest.cancelled():
        return
    assert not dest.done()
    if source.cancelled():
        dest.cancel()
        return
    exception = source.exception()
    if exception is None:
        dest.set_result(source.result())
    else:
        dest.set_exception(exception)
def _chain_future(source, destination):
    """Chain two futures so that when one completes, so does the other.

    The result (or exception) of source will be copied to destination.
    If destination is cancelled, source gets cancelled too.
    Compatible with both asyncio.Future and concurrent.futures.Future.
    """
    if not isinstance(source, (Future, concurrent.futures.Future)):
        raise TypeError('A future is required for source argument')
    if not isinstance(destination, (Future, concurrent.futures.Future)):
        raise TypeError('A future is required for destination argument')
    # pylint: disable=protected-access
    # Event loops (None for concurrent futures) are needed so state can be
    # marshalled onto the owning thread via call_soon_threadsafe.
    source_loop = source._loop if isinstance(source, Future) else None
    dest_loop = destination._loop if isinstance(destination, Future) else None

    def _set_state(future, other):
        # Copy completion state in the direction matching the future type.
        if isinstance(future, Future):
            _copy_future_state(other, future)
        else:
            _set_concurrent_future_state(future, other)

    def _call_check_cancel(destination):
        # Propagate destination cancellation back to source, thread-safely.
        if destination.cancelled():
            if source_loop is None or source_loop is dest_loop:
                source.cancel()
            else:
                source_loop.call_soon_threadsafe(source.cancel)

    def _call_set_state(source):
        # Copy source completion over to destination, thread-safely.
        if dest_loop is None or dest_loop is source_loop:
            _set_state(destination, source)
        else:
            dest_loop.call_soon_threadsafe(_set_state, destination, source)

    destination.add_done_callback(_call_check_cancel)
    source.add_done_callback(_call_set_state)
def run_coroutine_threadsafe(coro, loop):
    """Submit a coroutine object to a given event loop.

    Return a concurrent.futures.Future to access the result.
    """
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required')
    future = concurrent.futures.Future()

    def callback():
        """Callback to call the coroutine."""
        try:
            # pylint: disable=deprecated-method
            _chain_future(ensure_future(coro, loop=loop), future)
        except Exception as exc:
            # Report scheduling failures on the returned future, then
            # re-raise so the loop's handler also sees the error.
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            raise

    loop.call_soon_threadsafe(callback)
    return future
def fire_coroutine_threadsafe(coro, loop):
    """Submit a coroutine object to a given event loop.

    This method does not provide a way to retrieve the result and
    is intended for fire-and-forget use. This reduces the
    work involved to fire the function on the loop.

    Raises TypeError when *coro* is not a coroutine object.
    """
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required: %s' % coro)

    def callback():
        """Callback to fire coroutine."""
        # pylint: disable=deprecated-method
        ensure_future(coro, loop=loop)

    # The redundant trailing ``return`` was removed; the function
    # implicitly returns None after scheduling the callback.
    loop.call_soon_threadsafe(callback)
def run_callback_threadsafe(loop, callback, *args):
    """Submit a callback object to a given event loop.

    Return a concurrent.futures.Future to access the result.
    """
    future = concurrent.futures.Future()

    def _proxy():
        """Invoke *callback* and publish its outcome on the future."""
        try:
            result = callback(*args)
        except Exception as exc:
            # Hand the failure to any waiter, then re-raise for the loop.
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            raise
        else:
            future.set_result(result)

    loop.call_soon_threadsafe(_proxy)
    return future
| {
"repo_name": "jawilson/home-assistant",
"path": "homeassistant/util/async.py",
"copies": "2",
"size": "4940",
"license": "mit",
"hash": -1834255470542732000,
"line_mean": 31.0779220779,
"line_max": 78,
"alpha_frac": 0.6562753036,
"autogenerated": false,
"ratio": 4.207836456558773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00046620046620046615,
"num_lines": 154
} |
"""Asyncio backports for Python 3.4.3 compatibility."""
import concurrent.futures
import threading
from asyncio import coroutines
from asyncio.futures import Future
try:
    from asyncio import ensure_future
except ImportError:
    # Python 3.4.3 and earlier expose this helper only as ``async``.
    # ``async`` is a keyword from Python 3.7 on, making a literal
    # ``from asyncio import async`` a SyntaxError; look the legacy
    # attribute up dynamically so the module always parses.
    import asyncio
    # pylint: disable=no-member
    ensure_future = getattr(asyncio, 'async')
def _set_result_unless_cancelled(fut, result):
    """Helper setting the result only if the future was not cancelled."""
    if not fut.cancelled():
        fut.set_result(result)
def _set_concurrent_future_state(concurr, source):
    """Mirror a finished future's state onto a concurrent.futures.Future."""
    assert source.done()
    if source.cancelled():
        concurr.cancel()
    if not concurr.set_running_or_notify_cancel():
        # The concurrent future was cancelled meanwhile; stop here.
        return
    exc = source.exception()
    if exc is not None:
        concurr.set_exception(exc)
    else:
        concurr.set_result(source.result())
def _copy_future_state(source, dest):
    """Copy completion state from *source* onto the unfinished *dest*.

    The source Future may be a concurrent.futures.Future.
    """
    assert source.done()
    if dest.cancelled():
        return
    assert not dest.done()
    if source.cancelled():
        dest.cancel()
        return
    exc = source.exception()
    if exc is not None:
        dest.set_exception(exc)
    else:
        dest.set_result(source.result())
def _chain_future(source, destination):
    """Chain two futures so that when one completes, so does the other.

    The result (or exception) of source will be copied to destination.
    If destination is cancelled, source gets cancelled too.
    Compatible with both asyncio.Future and concurrent.futures.Future.
    """
    if not isinstance(source, (Future, concurrent.futures.Future)):
        raise TypeError('A future is required for source argument')
    if not isinstance(destination, (Future, concurrent.futures.Future)):
        raise TypeError('A future is required for destination argument')
    # pylint: disable=protected-access
    # Loops (None for concurrent futures) let state be marshalled onto
    # the owning thread with call_soon_threadsafe.
    source_loop = source._loop if isinstance(source, Future) else None
    dest_loop = destination._loop if isinstance(destination, Future) else None

    def _set_state(future, other):
        # Pick the copy direction appropriate to the future type.
        if isinstance(future, Future):
            _copy_future_state(other, future)
        else:
            _set_concurrent_future_state(future, other)

    def _call_check_cancel(destination):
        # Cancel source when destination gets cancelled, thread-safely.
        if destination.cancelled():
            if source_loop is None or source_loop is dest_loop:
                source.cancel()
            else:
                source_loop.call_soon_threadsafe(source.cancel)

    def _call_set_state(source):
        # Propagate source completion to destination, thread-safely.
        if dest_loop is None or dest_loop is source_loop:
            _set_state(destination, source)
        else:
            dest_loop.call_soon_threadsafe(_set_state, destination, source)

    destination.add_done_callback(_call_check_cancel)
    source.add_done_callback(_call_set_state)
def run_coroutine_threadsafe(coro, loop):
    """Schedule *coro* on *loop* from another thread.

    Return a concurrent.futures.Future to access the result.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required')

    result_future = concurrent.futures.Future()

    def _schedule():
        """Wrap the coroutine in a task and chain it to the result future."""
        try:
            _chain_future(ensure_future(coro, loop=loop), result_future)
        except Exception as exc:
            if result_future.set_running_or_notify_cancel():
                result_future.set_exception(exc)
            raise

    loop.call_soon_threadsafe(_schedule)
    return result_future
def fire_coroutine_threadsafe(coro, loop):
    """Submit a coroutine object to a given event loop.

    This method does not provide a way to retrieve the result and
    is intended for fire-and-forget use. This reduces the
    work involved to fire the function on the loop.

    Raises RuntimeError when called from the loop's own thread and
    TypeError when *coro* is not a coroutine object.
    """
    ident = loop.__dict__.get("_thread_ident")
    if ident is not None and ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required: %s' % coro)

    def callback():
        """Callback to fire coroutine."""
        ensure_future(coro, loop=loop)

    loop.call_soon_threadsafe(callback)
    # Fix: dropped the redundant trailing bare ``return`` — the function
    # implicitly returns None.
def run_callback_threadsafe(loop, callback, *args):
    """Schedule *callback(*args)* on *loop* from another thread.

    Return a concurrent.futures.Future to access the result.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')

    future = concurrent.futures.Future()

    def _invoke():
        """Execute the callback and record its result or failure."""
        try:
            future.set_result(callback(*args))
        except Exception as exc:
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            raise

    loop.call_soon_threadsafe(_invoke)
    return future
| {
"repo_name": "varunr047/homefile",
"path": "homeassistant/util/async.py",
"copies": "1",
"size": "5506",
"license": "mit",
"hash": -4671917813682454000,
"line_mean": 31.9700598802,
"line_max": 78,
"alpha_frac": 0.6574645841,
"autogenerated": false,
"ratio": 4.235384615384615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5392849199484615,
"avg_score": null,
"num_lines": null
} |
"""Asyncio backports for Python 3.4.3 compatibility."""
import concurrent.futures
import threading
import logging
from asyncio import coroutines
from asyncio.events import AbstractEventLoop
from asyncio.futures import Future
import asyncio
from asyncio import ensure_future
from typing import Any, Union, Coroutine, Callable, Generator, TypeVar, \
Awaitable
_LOGGER = logging.getLogger(__name__)
# Prefer the native asyncio.run (Python 3.7+); otherwise fall back to a
# minimal local re-implementation with the same observable behavior.
if hasattr(asyncio, "run"):
    # pylint: disable=invalid-name
    asyncio_run = asyncio.run  # type: ignore
else:
    _T = TypeVar('_T')

    def asyncio_run(main: Awaitable[_T], *, debug: bool = False) -> _T:
        """Minimal re-implementation of asyncio.run (since 3.7)."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.set_debug(debug)
        try:
            return loop.run_until_complete(main)
        finally:
            asyncio.set_event_loop(None)
            loop.close()
def _set_result_unless_cancelled(fut: Future, result: Any) -> None:
"""Set the result only if the Future was not cancelled."""
if fut.cancelled():
return
fut.set_result(result)
def _set_concurrent_future_state(
concurr: concurrent.futures.Future,
source: Union[concurrent.futures.Future, Future]) -> None:
"""Copy state from a future to a concurrent.futures.Future."""
assert source.done()
if source.cancelled():
concurr.cancel()
if not concurr.set_running_or_notify_cancel():
return
exception = source.exception()
if exception is not None:
concurr.set_exception(exception)
else:
result = source.result()
concurr.set_result(result)
def _copy_future_state(source: Union[concurrent.futures.Future, Future],
dest: Union[concurrent.futures.Future, Future]) -> None:
"""Copy state from another Future.
The other Future may be a concurrent.futures.Future.
"""
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(exception)
else:
result = source.result()
dest.set_result(result)
def _chain_future(
        source: Union[concurrent.futures.Future, Future],
        destination: Union[concurrent.futures.Future, Future]) -> None:
    """Chain two futures so that when one completes, so does the other.

    The result (or exception) of source will be copied to destination.
    If destination is cancelled, source gets cancelled too.
    Compatible with both asyncio.Future and concurrent.futures.Future.
    """
    if not isinstance(source, (Future, concurrent.futures.Future)):
        raise TypeError('A future is required for source argument')
    if not isinstance(destination, (Future, concurrent.futures.Future)):
        raise TypeError('A future is required for destination argument')
    # Only asyncio futures are bound to an event loop; for concurrent
    # futures the loop is recorded as None.
    # pylint: disable=protected-access
    if isinstance(source, Future):
        source_loop = source._loop  # type: ignore
    else:
        source_loop = None
    if isinstance(destination, Future):
        dest_loop = destination._loop  # type: ignore
    else:
        dest_loop = None

    def _set_state(future: Union[concurrent.futures.Future, Future],
                   other: Union[concurrent.futures.Future, Future]) -> None:
        # Copy completion state into `future`, dispatching on its flavor.
        if isinstance(future, Future):
            _copy_future_state(other, future)
        else:
            _set_concurrent_future_state(future, other)

    def _call_check_cancel(
            destination: Union[concurrent.futures.Future, Future]) -> None:
        # Runs when destination finishes: propagate cancellation backwards.
        if destination.cancelled():
            if source_loop is None or source_loop is dest_loop:
                source.cancel()
            else:
                # Cross-loop: cancel must run on the source's own loop thread.
                source_loop.call_soon_threadsafe(source.cancel)

    def _call_set_state(
            source: Union[concurrent.futures.Future, Future]) -> None:
        # Runs when source finishes: forward its outcome to destination.
        if dest_loop is None or dest_loop is source_loop:
            _set_state(destination, source)
        else:
            dest_loop.call_soon_threadsafe(_set_state, destination, source)

    destination.add_done_callback(_call_check_cancel)
    source.add_done_callback(_call_set_state)
def run_coroutine_threadsafe(
        coro: Union[Coroutine, Generator],
        loop: AbstractEventLoop) -> concurrent.futures.Future:
    """Schedule *coro* on *loop* from another thread.

    Return a concurrent.futures.Future to access the result.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required')

    result_future = concurrent.futures.Future()  # type: concurrent.futures.Future

    def _schedule() -> None:
        """Wrap the coroutine in a task and chain it to the result future."""
        try:
            _chain_future(ensure_future(coro, loop=loop), result_future)
        except Exception as exc:  # pylint: disable=broad-except
            if result_future.set_running_or_notify_cancel():
                result_future.set_exception(exc)
            else:
                _LOGGER.warning("Exception on lost future: ", exc_info=True)

    loop.call_soon_threadsafe(_schedule)
    return result_future
def fire_coroutine_threadsafe(coro: Coroutine,
                              loop: AbstractEventLoop) -> None:
    """Fire-and-forget *coro* on *loop* from another thread.

    No handle to the result is returned, which keeps the scheduling
    cheaper than run_coroutine_threadsafe.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required: %s' % coro)

    def _spawn() -> None:
        """Turn the coroutine into a task on the target loop."""
        ensure_future(coro, loop=loop)

    loop.call_soon_threadsafe(_spawn)
def run_callback_threadsafe(loop: AbstractEventLoop, callback: Callable,
                            *args: Any) -> concurrent.futures.Future:
    """Schedule *callback(*args)* on *loop* from another thread.

    Return a concurrent.futures.Future to access the result.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')

    future = concurrent.futures.Future()  # type: concurrent.futures.Future

    def _invoke() -> None:
        """Execute the callback and record its result or failure."""
        try:
            future.set_result(callback(*args))
        except Exception as exc:  # pylint: disable=broad-except
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            else:
                _LOGGER.warning("Exception on lost future: ", exc_info=True)

    loop.call_soon_threadsafe(_invoke)
    return future
| {
"repo_name": "aequitas/home-assistant",
"path": "homeassistant/util/async_.py",
"copies": "11",
"size": "7198",
"license": "apache-2.0",
"hash": 7348158618652304000,
"line_mean": 34.4581280788,
"line_max": 79,
"alpha_frac": 0.6433731592,
"autogenerated": false,
"ratio": 4.187318208260617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Asyncio backports for Python 3.4.3 compatibility."""
import concurrent.futures
import threading
import logging
from asyncio import coroutines
from asyncio.futures import Future
try:
    from asyncio import ensure_future
except ImportError:
    # Python 3.4.3 and earlier only exposes this under the name ``async``.
    # ``async`` became a reserved keyword in Python 3.7, so the original
    # ``from asyncio import async`` is a SyntaxError on any modern
    # interpreter and prevented this whole module from importing.
    # Fetch the attribute dynamically instead.
    import asyncio as _asyncio
    ensure_future = getattr(_asyncio, "async")
_LOGGER = logging.getLogger(__name__)
def _set_result_unless_cancelled(fut, result):
"""Helper setting the result only if the future was not cancelled."""
if fut.cancelled():
return
fut.set_result(result)
def _set_concurrent_future_state(concurr, source):
"""Copy state from a future to a concurrent.futures.Future."""
assert source.done()
if source.cancelled():
concurr.cancel()
if not concurr.set_running_or_notify_cancel():
return
exception = source.exception()
if exception is not None:
concurr.set_exception(exception)
else:
result = source.result()
concurr.set_result(result)
def _copy_future_state(source, dest):
"""Internal helper to copy state from another Future.
The other Future may be a concurrent.futures.Future.
"""
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(exception)
else:
result = source.result()
dest.set_result(result)
def _chain_future(source, destination):
    """Chain two futures so that when one completes, so does the other.

    The result (or exception) of source will be copied to destination.
    If destination is cancelled, source gets cancelled too.
    Compatible with both asyncio.Future and concurrent.futures.Future.
    """
    if not isinstance(source, (Future, concurrent.futures.Future)):
        raise TypeError('A future is required for source argument')
    if not isinstance(destination, (Future, concurrent.futures.Future)):
        raise TypeError('A future is required for destination argument')
    # Only asyncio futures are bound to an event loop; for concurrent
    # futures the loop is recorded as None.
    # pylint: disable=protected-access
    source_loop = source._loop if isinstance(source, Future) else None
    dest_loop = destination._loop if isinstance(destination, Future) else None

    def _set_state(future, other):
        # Copy completion state into `future`, dispatching on its flavor.
        if isinstance(future, Future):
            _copy_future_state(other, future)
        else:
            _set_concurrent_future_state(future, other)

    def _call_check_cancel(destination):
        # Runs when destination finishes: propagate cancellation backwards.
        if destination.cancelled():
            if source_loop is None or source_loop is dest_loop:
                source.cancel()
            else:
                # Cross-loop: cancel must run on the source's own loop thread.
                source_loop.call_soon_threadsafe(source.cancel)

    def _call_set_state(source):
        # Runs when source finishes: forward its outcome to destination.
        if dest_loop is None or dest_loop is source_loop:
            _set_state(destination, source)
        else:
            dest_loop.call_soon_threadsafe(_set_state, destination, source)

    destination.add_done_callback(_call_check_cancel)
    source.add_done_callback(_call_set_state)
def run_coroutine_threadsafe(coro, loop):
    """Schedule *coro* on *loop* from another thread.

    Return a concurrent.futures.Future to access the result.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required')

    result_future = concurrent.futures.Future()

    def _schedule():
        """Wrap the coroutine in a task and chain it to the result future."""
        try:
            _chain_future(ensure_future(coro, loop=loop), result_future)
        except Exception as exc:  # pylint: disable=broad-except
            if result_future.set_running_or_notify_cancel():
                result_future.set_exception(exc)
            else:
                _LOGGER.warning("Exception on lost future: ", exc_info=True)

    loop.call_soon_threadsafe(_schedule)
    return result_future
def fire_coroutine_threadsafe(coro, loop):
    """Submit a coroutine object to a given event loop.

    This method does not provide a way to retrieve the result and
    is intended for fire-and-forget use. This reduces the
    work involved to fire the function on the loop.

    Raises RuntimeError when called from the loop's own thread and
    TypeError when *coro* is not a coroutine object.
    """
    ident = loop.__dict__.get("_thread_ident")
    if ident is not None and ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required: %s' % coro)

    def callback():
        """Callback to fire coroutine."""
        ensure_future(coro, loop=loop)

    loop.call_soon_threadsafe(callback)
    # Fix: dropped the redundant trailing bare ``return`` — the function
    # implicitly returns None.
def run_callback_threadsafe(loop, callback, *args):
    """Schedule *callback(*args)* on *loop* from another thread.

    Return a concurrent.futures.Future to access the result.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')

    future = concurrent.futures.Future()

    def _invoke():
        """Execute the callback and record its result or failure."""
        try:
            future.set_result(callback(*args))
        except Exception as exc:  # pylint: disable=broad-except
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            else:
                _LOGGER.warning("Exception on lost future: ", exc_info=True)

    loop.call_soon_threadsafe(_invoke)
    return future
| {
"repo_name": "hexxter/home-assistant",
"path": "homeassistant/util/async.py",
"copies": "8",
"size": "5793",
"license": "mit",
"hash": 7227016433226927000,
"line_mean": 32.1028571429,
"line_max": 78,
"alpha_frac": 0.6551009839,
"autogenerated": false,
"ratio": 4.228467153284671,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8883568137184671,
"avg_score": null,
"num_lines": null
} |
"""Asyncio backports for Python 3.4.3 compatibility."""
import concurrent.futures
import threading
import logging
from asyncio import coroutines
from asyncio.futures import Future
try:
    from asyncio import ensure_future
except ImportError:
    # Python 3.4.3 and earlier only exposes this under the name ``async``.
    # ``async`` became a reserved keyword in Python 3.7, so the original
    # ``from asyncio import async`` is a SyntaxError on any modern
    # interpreter and prevented this whole module from importing.
    # Fetch the attribute dynamically instead.
    import asyncio as _asyncio
    ensure_future = getattr(_asyncio, "async")
_LOGGER = logging.getLogger(__name__)
def _set_result_unless_cancelled(fut, result):
"""Helper setting the result only if the future was not cancelled."""
if fut.cancelled():
return
fut.set_result(result)
def _set_concurrent_future_state(concurr, source):
"""Copy state from a future to a concurrent.futures.Future."""
assert source.done()
if source.cancelled():
concurr.cancel()
if not concurr.set_running_or_notify_cancel():
return
exception = source.exception()
if exception is not None:
concurr.set_exception(exception)
else:
result = source.result()
concurr.set_result(result)
def _copy_future_state(source, dest):
"""Internal helper to copy state from another Future.
The other Future may be a concurrent.futures.Future.
"""
assert source.done()
if dest.cancelled():
return
assert not dest.done()
if source.cancelled():
dest.cancel()
else:
exception = source.exception()
if exception is not None:
dest.set_exception(exception)
else:
result = source.result()
dest.set_result(result)
def _chain_future(source, destination):
    """Chain two futures so that when one completes, so does the other.

    The result (or exception) of source will be copied to destination.
    If destination is cancelled, source gets cancelled too.
    Compatible with both asyncio.Future and concurrent.futures.Future.
    """
    if not isinstance(source, (Future, concurrent.futures.Future)):
        raise TypeError('A future is required for source argument')
    if not isinstance(destination, (Future, concurrent.futures.Future)):
        raise TypeError('A future is required for destination argument')
    # Only asyncio futures are bound to an event loop; for concurrent
    # futures the loop is recorded as None.
    # pylint: disable=protected-access
    source_loop = source._loop if isinstance(source, Future) else None
    dest_loop = destination._loop if isinstance(destination, Future) else None

    def _set_state(future, other):
        # Copy completion state into `future`, dispatching on its flavor.
        if isinstance(future, Future):
            _copy_future_state(other, future)
        else:
            _set_concurrent_future_state(future, other)

    def _call_check_cancel(destination):
        # Runs when destination finishes: propagate cancellation backwards.
        if destination.cancelled():
            if source_loop is None or source_loop is dest_loop:
                source.cancel()
            else:
                # Cross-loop: cancel must run on the source's own loop thread.
                source_loop.call_soon_threadsafe(source.cancel)

    def _call_set_state(source):
        # Runs when source finishes: forward its outcome to destination.
        if dest_loop is None or dest_loop is source_loop:
            _set_state(destination, source)
        else:
            dest_loop.call_soon_threadsafe(_set_state, destination, source)

    destination.add_done_callback(_call_check_cancel)
    source.add_done_callback(_call_set_state)
def run_coroutine_threadsafe(coro, loop):
    """Schedule *coro* on *loop* from another thread.

    Return a concurrent.futures.Future to access the result.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required')

    result_future = concurrent.futures.Future()

    def _schedule():
        """Wrap the coroutine in a task and chain it to the result future."""
        try:
            _chain_future(ensure_future(coro, loop=loop), result_future)
        except Exception as exc:  # pylint: disable=broad-except
            if result_future.set_running_or_notify_cancel():
                result_future.set_exception(exc)
            else:
                _LOGGER.warning("Exception on lost future: ", exc_info=True)

    loop.call_soon_threadsafe(_schedule)
    return result_future
def fire_coroutine_threadsafe(coro, loop):
    """Submit a coroutine object to a given event loop.

    This method does not provide a way to retrieve the result and
    is intended for fire-and-forget use. This reduces the
    work involved to fire the function on the loop.

    Raises RuntimeError when called from the loop's own thread and
    TypeError when *coro* is not a coroutine object.
    """
    ident = loop.__dict__.get("_thread_ident")
    if ident is not None and ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')
    if not coroutines.iscoroutine(coro):
        raise TypeError('A coroutine object is required: %s' % coro)

    def callback():
        """Callback to fire coroutine."""
        ensure_future(coro, loop=loop)

    loop.call_soon_threadsafe(callback)
    # Fix: dropped the redundant trailing bare ``return`` — the function
    # implicitly returns None.
def run_callback_threadsafe(loop, callback, *args):
    """Schedule *callback(*args)* on *loop* from another thread.

    Return a concurrent.futures.Future to access the result.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError('Cannot be called from within the event loop')

    future = concurrent.futures.Future()

    def _invoke():
        """Execute the callback and record its result or failure."""
        try:
            future.set_result(callback(*args))
        except Exception as exc:  # pylint: disable=broad-except
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            else:
                _LOGGER.warning("Exception on lost future: ", exc_info=True)

    loop.call_soon_threadsafe(_invoke)
    return future
| {
"repo_name": "keerts/home-assistant",
"path": "homeassistant/util/async.py",
"copies": "22",
"size": "5833",
"license": "apache-2.0",
"hash": 7468884344073960000,
"line_mean": 32.1420454545,
"line_max": 78,
"alpha_frac": 0.6555803189,
"autogenerated": false,
"ratio": 4.226811594202899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""Asyncio backports for Python 3.6 compatibility."""
from asyncio import coroutines, ensure_future, get_running_loop
from asyncio.events import AbstractEventLoop
import concurrent.futures
import functools
import logging
import threading
from traceback import extract_stack
from typing import Any, Callable, Coroutine, TypeVar
_LOGGER = logging.getLogger(__name__)
T = TypeVar("T")
def fire_coroutine_threadsafe(coro: Coroutine, loop: AbstractEventLoop) -> None:
    """Fire-and-forget *coro* on *loop* from another thread.

    No handle to the result is returned, which keeps the scheduling
    cheaper than run_coroutine_threadsafe.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError("Cannot be called from within the event loop")
    if not coroutines.iscoroutine(coro):
        raise TypeError("A coroutine object is required: %s" % coro)

    def _spawn() -> None:
        """Turn the coroutine into a task on the target loop."""
        ensure_future(coro, loop=loop)

    loop.call_soon_threadsafe(_spawn)
def run_callback_threadsafe(
    loop: AbstractEventLoop, callback: Callable[..., T], *args: Any
) -> "concurrent.futures.Future[T]":
    """Schedule *callback(*args)* on *loop* from another thread.

    Return a concurrent.futures.Future to access the result.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError("Cannot be called from within the event loop")

    future: concurrent.futures.Future = concurrent.futures.Future()

    def _invoke() -> None:
        """Execute the callback and record its result or failure."""
        try:
            future.set_result(callback(*args))
        except Exception as exc:  # pylint: disable=broad-except
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            else:
                _LOGGER.warning("Exception on lost future: ", exc_info=True)

    loop.call_soon_threadsafe(_invoke)
    return future
def check_loop() -> None:
    """Warn if called inside the event loop.

    Walks the call stack looking for an integration frame; logs a warning
    naming the offending integration, or raises RuntimeError when the call
    did not originate from an integration at all.
    """
    try:
        get_running_loop()
        in_loop = True
    except RuntimeError:
        # No running loop in this thread, so we are not inside the event loop.
        in_loop = False
    if not in_loop:
        return
    found_frame = None
    # extract_stack() is ordered oldest-first, so reversed() scans from the
    # most recent frame towards the entry point.
    for frame in reversed(extract_stack()):
        for path in ("custom_components/", "homeassistant/components/"):
            try:
                index = frame.filename.index(path)
                found_frame = frame
                break
            except ValueError:
                continue
        if found_frame is not None:
            break
    # NOTE: `index` and `path` deliberately leak out of the loops above and
    # are used below; they match the frame stored in `found_frame`.
    # Did not source from integration? Hard error.
    if found_frame is None:
        raise RuntimeError(
            "Detected I/O inside the event loop. This is causing stability issues. Please report issue"
        )
    # Extract the integration name: the path component right after the
    # matched prefix, e.g. ".../homeassistant/components/<integration>/...".
    start = index + len(path)
    end = found_frame.filename.index("/", start)
    integration = found_frame.filename[start:end]
    if path == "custom_components/":
        extra = " to the custom component author"
    else:
        extra = ""
    _LOGGER.warning(
        "Detected I/O inside the event loop. This is causing stability issues. Please report issue%s for %s doing I/O at %s, line %s: %s",
        extra,
        integration,
        found_frame.filename[index:],
        found_frame.lineno,
        found_frame.line.strip(),
    )
def protect_loop(func: Callable) -> Callable:
    """Wrap *func* so each call first runs the event-loop guard."""

    @functools.wraps(func)
    def _guarded(*args, **kwargs):  # type: ignore
        check_loop()
        return func(*args, **kwargs)

    return _guarded
| {
"repo_name": "GenericStudent/home-assistant",
"path": "homeassistant/util/async_.py",
"copies": "6",
"size": "3782",
"license": "apache-2.0",
"hash": -5316001510660286000,
"line_mean": 29.7479674797,
"line_max": 138,
"alpha_frac": 0.6380222105,
"autogenerated": false,
"ratio": 4.17439293598234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002370350262768036,
"num_lines": 123
} |
"""Asyncio backports for Python 3.6 compatibility."""
from asyncio import coroutines, ensure_future
from asyncio.events import AbstractEventLoop
import concurrent.futures
import logging
import threading
from typing import Any, Callable, Coroutine
_LOGGER = logging.getLogger(__name__)
def fire_coroutine_threadsafe(coro: Coroutine, loop: AbstractEventLoop) -> None:
    """Fire-and-forget *coro* on *loop* from another thread.

    No handle to the result is returned, which keeps the scheduling
    cheaper than run_coroutine_threadsafe.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError("Cannot be called from within the event loop")
    if not coroutines.iscoroutine(coro):
        raise TypeError("A coroutine object is required: %s" % coro)

    def _spawn() -> None:
        """Turn the coroutine into a task on the target loop."""
        ensure_future(coro, loop=loop)

    loop.call_soon_threadsafe(_spawn)
def run_callback_threadsafe(
    loop: AbstractEventLoop, callback: Callable, *args: Any
) -> concurrent.futures.Future:
    """Schedule *callback(*args)* on *loop* from another thread.

    Return a concurrent.futures.Future to access the result.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError("Cannot be called from within the event loop")

    future: concurrent.futures.Future = concurrent.futures.Future()

    def _invoke() -> None:
        """Execute the callback and record its result or failure."""
        try:
            future.set_result(callback(*args))
        except Exception as exc:  # pylint: disable=broad-except
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            else:
                _LOGGER.warning("Exception on lost future: ", exc_info=True)

    loop.call_soon_threadsafe(_invoke)
    return future
| {
"repo_name": "leppa/home-assistant",
"path": "homeassistant/util/async_.py",
"copies": "3",
"size": "2016",
"license": "apache-2.0",
"hash": -8920435883794269000,
"line_mean": 34.3684210526,
"line_max": 80,
"alpha_frac": 0.6800595238,
"autogenerated": false,
"ratio": 4.1652892561983474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6345348779998347,
"avg_score": null,
"num_lines": null
} |
"""Asyncio backports for Python 3.6 compatibility."""
import concurrent.futures
import threading
import logging
from asyncio import coroutines
from asyncio.events import AbstractEventLoop
import asyncio
from asyncio import ensure_future
from typing import Any, Coroutine, Callable, TypeVar, Awaitable
_LOGGER = logging.getLogger(__name__)
# Prefer the native asyncio.run (Python 3.7+); otherwise fall back to a
# minimal local re-implementation with the same observable behavior.
if hasattr(asyncio, "run"):
    # pylint: disable=invalid-name
    asyncio_run = asyncio.run  # type: ignore
else:
    _T = TypeVar("_T")

    def asyncio_run(main: Awaitable[_T], *, debug: bool = False) -> _T:
        """Minimal re-implementation of asyncio.run (since 3.7)."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.set_debug(debug)
        try:
            return loop.run_until_complete(main)
        finally:
            asyncio.set_event_loop(None)
            loop.close()
def fire_coroutine_threadsafe(coro: Coroutine, loop: AbstractEventLoop) -> None:
    """Fire-and-forget *coro* on *loop* from another thread.

    No handle to the result is returned, which keeps the scheduling
    cheaper than run_coroutine_threadsafe.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError("Cannot be called from within the event loop")
    if not coroutines.iscoroutine(coro):
        raise TypeError("A coroutine object is required: %s" % coro)

    def _spawn() -> None:
        """Turn the coroutine into a task on the target loop."""
        ensure_future(coro, loop=loop)

    loop.call_soon_threadsafe(_spawn)
def run_callback_threadsafe(
    loop: AbstractEventLoop, callback: Callable, *args: Any
) -> concurrent.futures.Future:
    """Schedule *callback(*args)* on *loop* from another thread.

    Return a concurrent.futures.Future to access the result.
    """
    loop_ident = loop.__dict__.get("_thread_ident")
    if loop_ident is not None and loop_ident == threading.get_ident():
        raise RuntimeError("Cannot be called from within the event loop")

    future: concurrent.futures.Future = concurrent.futures.Future()

    def _invoke() -> None:
        """Execute the callback and record its result or failure."""
        try:
            future.set_result(callback(*args))
        except Exception as exc:  # pylint: disable=broad-except
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            else:
                _LOGGER.warning("Exception on lost future: ", exc_info=True)

    loop.call_soon_threadsafe(_invoke)
    return future
| {
"repo_name": "qedi-r/home-assistant",
"path": "homeassistant/util/async_.py",
"copies": "2",
"size": "2598",
"license": "apache-2.0",
"hash": 3253344474153591300,
"line_mean": 32.3076923077,
"line_max": 80,
"alpha_frac": 0.662817552,
"autogenerated": false,
"ratio": 4.03416149068323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.569697904268323,
"avg_score": null,
"num_lines": null
} |
""" asyncio-based rfc2812-compliant IRC Client """
import logging
import asyncio
from . import connection
from . import event
from . import pack
from . import unpack
__all__ = ["Client"]
logger = logging.getLogger(__name__)
class Client(event.EventsMixin):
    """asyncio-based, rfc2812-style IRC client.

    Wires a single Connection to the EventsMixin machinery so that
    handlers registered with ``on`` fire when IRC commands arrive.
    """

    # Connection factory; subclasses may override to swap the transport.
    __conn_cls__ = connection.Connection

    def __init__(self, host, port, encoding='UTF-8', ssl=True):
        """Create a client bound to *host*:*port* (TLS enabled by default)."""
        # It's ok that unpack.parameters isn't cached, since it's only
        # called when adding an event handler (which should __usually__
        # only occur during setup)
        super().__init__(unpack.parameters)
        # trigger events on the client
        self.connection = self.__conn_cls__(host, port, self,
                                            encoding=encoding, ssl=ssl)

    def send(self, command, **kwargs):
        '''
        Send a message to the server.
        Examples
        --------
        client.send('nick', nick='weatherbot')
        client.send('privmsg', target='#python', message="Hello, World!")
        '''
        packed_command = pack.pack_command(command, **kwargs)
        self.connection.send(packed_command)

    # NOTE(review): ``asyncio.coroutine`` and generator-based coroutines were
    # removed in Python 3.11; this class requires an older interpreter.
    @asyncio.coroutine
    def connect(self):
        """Open the underlying connection."""
        yield from self.connection.connect()

    @asyncio.coroutine
    def disconnect(self):
        """Close the underlying connection."""
        yield from self.connection.disconnect()

    @property
    def connected(self):
        # Delegates to the connection's own state flag.
        return self.connection.connected

    @asyncio.coroutine
    def run(self, loop=None):
        ''' Run the client until it disconnects (without reconnecting) '''
        yield from self.connection.run(loop=loop)

    def on(self, command):
        '''
        Decorate a function to be invoked when a :param:`command` occurs.
        '''
        # Commands are normalized to upper case before registration.
        return super().on(command.upper())
| {
"repo_name": "kernelpanic3/IdleBot",
"path": "bottom/__init__.py",
"copies": "3",
"size": "1745",
"license": "bsd-3-clause",
"hash": -3445575103675354000,
"line_mean": 29.0862068966,
"line_max": 74,
"alpha_frac": 0.6114613181,
"autogenerated": false,
"ratio": 4.340796019900497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 58
} |
""" Asyncio client module """
import socket
import asyncio
import asyncio.streams
from smserver.smutils import smconn
class AsyncSocketClient(smconn.StepmaniaConn):
    """StepmaniaConn transport backed by an asyncio reader/writer pair."""

    # Packets are handled as raw bytes; decoding is left to the base class.
    ENCODING = "binary"

    def __init__(self, serv, ip, port, reader, writer, loop):
        smconn.StepmaniaConn.__init__(self, serv, ip, port)
        self.reader = reader  # stream reader provided by asyncio.start_server
        self.writer = writer  # matching stream writer
        self.task = None      # set by the server to this client's run() task
        self.loop = loop

    @asyncio.coroutine
    def run(self):
        """Read loop: reassemble length-prefixed packets and dispatch them.

        Wire format: a 4-byte big-endian length header followed by the
        payload.  ``full_data`` accumulates the current packet (header
        included); ``data_left`` carries bytes belonging to the next packet.
        """
        full_data = b''
        size = None
        data_left = b''
        while True:
            if data_left:
                # Consume carry-over bytes from the previous read first.
                data = data_left
                data_left = b""
            else:
                try:
                    data = yield from self.reader.read(8192)
                except asyncio.CancelledError:
                    break
            if data == b'':
                # EOF: the peer closed the connection.
                break
            if not size:
                if len(data) < 5:
                    # NOTE(review): "to short" typo is in the original
                    # runtime string and is kept as-is here.
                    self.log.info("packet %s drop: to short", data)
                    continue
                full_data = data[:4]
                data = data[4:]
                size = int.from_bytes(full_data[:4], byteorder='big')
            # NOTE(review): the two expressions below look inconsistent for
            # packets spanning multiple reads — the "bytes still missing"
            # count would presumably be size - (len(full_data) - 4), and
            # payload_size adds rather than subtracts the accumulated
            # length.  Packets arriving in a single read are handled
            # correctly; verify the multi-read path against real traffic.
            if len(data) < size - len(full_data):
                full_data += data
                continue
            payload_size = len(full_data) - 4 + size
            full_data += data[:payload_size]
            self._on_data(full_data)
            data_left = data[payload_size:]
            full_data = b""
            size = None
        self.close()

    def send_data(self, data):
        """Queue *data* on the writer and drain asynchronously."""
        self.writer.write(data)
        self.loop.create_task(self.writer.drain())

    def close(self):
        """Notify the server of the disconnect and close the transport."""
        self._serv.on_disconnect(self)
        self.writer.close()
class AsyncSocketServer(smconn.SMThread):
    """SMThread that serves Stepmania clients over an asyncio TCP server."""

    def __init__(self, server, ip, port, loop=None):
        smconn.SMThread.__init__(self, server, ip, port)
        # A dedicated loop is created when none is supplied; run() drives it.
        self.loop = loop or asyncio.new_event_loop()
        self._serv = None   # asyncio server object once start_server() has run
        self.clients = {}   # maps each client's reader task -> client object

    def _accept_client(self, client_reader, client_writer):
        """start_server callback: wrap a new connection in a client task."""
        ip, port = client_writer.get_extra_info("peername")
        client = AsyncSocketClient(self.server, ip, port, client_reader, client_writer, self.loop)
        task = asyncio.Task(client.run(), loop=self.loop)
        client.task = task
        self.clients[client.task] = client
        self.server.add_connection(client)

        def client_done(task):
            # Tear down and forget the client once its read loop finishes.
            self.clients[task].close()
            del self.clients[task]

        client.task.add_done_callback(client_done)

    def run(self):
        """Thread entry point: start the server and spin the event loop."""
        self.start_server()
        self.loop.run_forever()
        self.loop.close()
        smconn.SMThread.run(self)

    def start_server(self):
        """ Start the server in the given loop """
        # NOTE(review): the ``loop`` keyword to start_server/wait_for was
        # deprecated in Python 3.8 and removed in 3.10; this module targets
        # an older asyncio.
        self._serv = self.loop.run_until_complete(asyncio.start_server(
            self._accept_client,
            host=self.ip,
            port=self.port,
            loop=self.loop,
        ))
        return self._serv

    def stop_server(self):
        """ Stop the server in the given loop """
        if self._serv is None:
            return
        if self._serv.sockets:
            for sock in self._serv.sockets:
                try:
                    # Force-close listening sockets; ignore already-closed ones.
                    sock.shutdown(socket.SHUT_RDWR)
                except OSError:
                    pass
        self._serv.close()
        self.loop.run_until_complete(
            asyncio.wait_for(self._serv.wait_closed(), timeout=1, loop=self.loop)
        )
        for task in self.clients:
            task.cancel()
        # Gathering the (cancelled) reader tasks lets their cleanup finish.
        self.loop.run_until_complete(asyncio.gather(*self.clients))

    def stop(self):
        """Stop the thread, the TCP server and finally the loop itself."""
        smconn.SMThread.stop(self)
        self.stop_server()
        self.loop.stop()
| {
"repo_name": "ningirsu/stepmania-server",
"path": "smserver/smutils/smconnections/asynctcpserver.py",
"copies": "1",
"size": "3724",
"license": "mit",
"hash": -9083318885030885000,
"line_mean": 25.9855072464,
"line_max": 98,
"alpha_frac": 0.5308807734,
"autogenerated": false,
"ratio": 4.017259978425027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5048140751825028,
"avg_score": null,
"num_lines": null
} |
"""asyncio-compatible logstash logging handler."""
__version__ = "2.0.0"
import asyncio
import logging
import types
from typing import Any, Mapping
from .tcp_handler import TCPLogstashHandler
__all__ = ("create_tcp_handler",)
async def create_tcp_handler(
    host: str,
    port: int,
    level: int = logging.NOTSET,
    close_timeout: float = 5,
    reconnect_delay: float = 1,
    reconnect_jitter: float = 0.3,
    qsize: int = 10000,
    extra: Mapping[str, Any] = types.MappingProxyType({}),
    **kwargs: Any
) -> logging.Handler:
    """Build a TCPLogstashHandler and connect it immediately.

    On connection failure the handler is fully torn down before the
    OSError is re-raised.
    """
    event_loop = asyncio.get_event_loop()
    frozen_extra = types.MappingProxyType(extra)
    handler = TCPLogstashHandler(
        host=host,
        port=port,
        level=level,
        close_timeout=close_timeout,
        qsize=qsize,
        loop=event_loop,
        reconnect_delay=reconnect_delay,
        reconnect_jitter=reconnect_jitter,
        extra=frozen_extra,
        **kwargs
    )
    try:
        await handler._connect()
    except OSError:
        # tear the handler down completely before propagating
        handler.close()
        await handler.wait_closed()
        raise
    return handler
| {
"repo_name": "wikibusiness/aiologstash",
"path": "aiologstash/__init__.py",
"copies": "1",
"size": "1078",
"license": "mit",
"hash": -2502195581025100000,
"line_mean": 21.4583333333,
"line_max": 58,
"alpha_frac": 0.6224489796,
"autogenerated": false,
"ratio": 3.7301038062283736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48525527858283735,
"avg_score": null,
"num_lines": null
} |
#Asyncio Finite State Machine
import asyncio
import time
from random import randint
@asyncio.coroutine
def StartState():
    """Entry state: flip a coin and transition to State2 (0) or State1 (1)."""
    print("Start State called \n")
    coin = randint(0, 1)
    time.sleep(1)  # simulate work (deliberately blocking in this demo)
    target = State2 if coin == 0 else State1
    outcome = yield from target(coin)
    print("Resume of the Transition : \nStart State calling " + outcome)
@asyncio.coroutine
def State1(transition_value):
    """State 1: randomly transition to State3 (0) or State2 (1)."""
    header = "State 1 with transition value = %s \n" % transition_value
    coin = randint(0, 1)
    time.sleep(1)  # simulate work (deliberately blocking in this demo)
    print("...Evaluating...")
    nxt = State3 if coin == 0 else State2
    outcome = yield from nxt(coin)
    return header + ("State 1 calling " + outcome)
@asyncio.coroutine
def State2(transition_value):
    """State 2: randomly transition to State1 (0) or State3 (1)."""
    header = "State 2 with transition value = %s \n" % transition_value
    coin = randint(0, 1)
    time.sleep(1)  # simulate work (deliberately blocking in this demo)
    print("...Evaluating...")
    nxt = State1 if coin == 0 else State3
    outcome = yield from nxt(coin)
    return header + ("State 2 calling " + outcome)
@asyncio.coroutine
def State3(transition_value):
    """State 3: randomly transition to State1 (0) or the terminal EndState (1)."""
    header = "State 3 with transition value = %s \n" % transition_value
    coin = randint(0, 1)
    time.sleep(1)  # simulate work (deliberately blocking in this demo)
    print("...Evaluating...")
    nxt = State1 if coin == 0 else EndState
    outcome = yield from nxt(coin)
    return header + ("State 3 calling " + outcome)
@asyncio.coroutine
def EndState(transition_value):
    """Terminal state: report the final transition value and stop the walk."""
    summary = "End State with transition value = %s \n" % transition_value
    print("...Stop Computation...")
    return summary
if __name__ == "__main__":
    print("Finite State Machine simulation with Asyncio Coroutine")
    # Run the random state walk from StartState to completion.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(StartState())
| {
"repo_name": "IdiosyncraticDragon/Reading-Notes",
"path": "Python Parallel Programming Cookbook_Code/Chapter 4/Asyncio_coroutine.py",
"copies": "1",
"size": "2282",
"license": "apache-2.0",
"hash": -7897211880351372000,
"line_mean": 28.0263157895,
"line_max": 67,
"alpha_frac": 0.5893952673,
"autogenerated": false,
"ratio": 3.880952380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9892462730391454,
"avg_score": 0.01557698357218546,
"num_lines": 76
} |
"""asyncio (PEP 3156) connector for the XMMS2 Python bindings."""
class AIOXMMSConnector:
    """Bridges an XMMS2 client connection's file descriptor onto an asyncio loop."""

    def __init__(self, loop, xmms):
        self.loop = loop
        self.xmms = xmms
        # True while a writer callback is registered for the xmms fd.
        self.has_writer = False
        self.connect()

    def handle_out(self):
        """Writer callback: flush pending output; deregister once drained."""
        if self.xmms.want_ioout():
            self.xmms.ioout()
        if not self.xmms.want_ioout() and self.has_writer:
            self.loop.remove_writer(self.xmms.get_fd())
            self.has_writer = False

    def need_out(self, i):
        """xmms need-out hook: register the writer callback exactly once."""
        if self.xmms.want_ioout() and not self.has_writer:
            self.loop.add_writer(self.xmms.get_fd(), self.handle_out)
            self.has_writer = True

    def connect(self):
        """Wire the xmms connection into the loop (reader + out-hook)."""
        self.xmms.set_need_out_fun(self.need_out)
        self.loop.add_reader(self.xmms.get_fd(), self.xmms.ioin)
def add_xmms_to_event_loop(loop, xmms):
    """Tells the loop to handle the XMMS2 connection's I/O.

    Returns the AIOXMMSConnector that manages the registration.
    """
    return AIOXMMSConnector(loop, xmms)
def remove_xmms_from_event_loop(loop, xmms):
    """Stops handling the XMMS2 connection's I/O."""
    loop.remove_reader(xmms.get_fd())
    # remove_writer simply returns False when no writer is registered.
    loop.remove_writer(xmms.get_fd())
| {
"repo_name": "chrippa/aioxmmsclient",
"path": "aioxmmsclient.py",
"copies": "1",
"size": "1129",
"license": "mit",
"hash": -6647882112934364000,
"line_mean": 29.5135135135,
"line_max": 69,
"alpha_frac": 0.617360496,
"autogenerated": false,
"ratio": 3.059620596205962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4176981092205962,
"avg_score": null,
"num_lines": null
} |
import utime as time
import utimeq
from uasyncio import *
class PriorityEventLoop(PollEventLoop):
    """uasyncio event loop with three scheduling tiers.

    - high priority: coroutines parked on a `when` predicate (self.hpq),
      polled on every scheduling pass;
    - normal: the inherited time queue (self.q);
    - low priority: `after`/`after_ms` tasks (self.lpq), run only when
      nothing else is due — unless overdue by more than max_overdue_ms.
    """

    def __init__(self, len=42, lpqlen=42, max_overdue_ms=0, hpqlen=0):
        """Create the loop; queue sizes and the LP starvation limit are fixed here."""
        super().__init__(len)
        self._max_overdue_ms = max_overdue_ms
        # Low-priority time queue.
        self.lpq = utimeq.utimeq(lpqlen)
        if hpqlen:
            # Pre-allocated [predicate, callback, args] slots so scheduling
            # a high-priority task needs no allocation.
            self.hpq = [[0,0,0] for _ in range(hpqlen)]
        else:
            self.hpq = None

    def max_overdue_ms(self, t=None):
        """Get (and optionally set) how late an LP task may be before it
        preempts normal tasks."""
        if t is not None:
            self._max_overdue_ms = t
        return self._max_overdue_ms

    def call_after_ms(self, delay, callback, args=()):
        # low priority.
        t = time.ticks_add(self.time(), delay)
        if __debug__ and DEBUG:
            # NOTE(review): this logs the `time` module, not the deadline
            # `t` — probably meant `t`; confirm upstream.
            log.debug("Scheduling LP %s", (time, callback, args))
        self.lpq.push(t, callback, args)

    def call_after(self, delay, callback, *args):
        # low priority.  Seconds-based variant of call_after_ms.
        t = time.ticks_add(self.time(), int(delay * 1000))
        if __debug__ and DEBUG:
            log.debug("Scheduling LP %s", (time, callback, args))
        self.lpq.push(t, callback, args)

    def _schedule_hp(self, func, callback, args=()):
        # Park `callback` until predicate `func()` returns true.
        if self.hpq is None:
            # NOTE(review): stores a flat triple, but run_forever and the
            # branch below iterate hpq expecting a list of triples — this
            # looks like it should be [[func, callback, args]]; confirm.
            self.hpq = [func, callback, args]
        else: # Try to assign without allocation
            for entry in self.hpq:
                if not entry[0]:
                    entry[0] = func
                    entry[1] = callback
                    entry[2] = args
                    break
            else:
                self.hpq.append([func, callback, args])

    def run_forever(self):
        """Main dispatch loop: pick the next task by priority, resume it,
        and re-schedule it according to the syscall it yields."""
        cur_task = [0, 0, 0]
        while True:
            if self.q:
                # wait() may finish prematurely due to I/O completion,
                # and schedule new, earlier than before tasks to run.
                while 1:
                    # Check high priority queue
                    if self.hpq is not None:
                        hp_found = False
                        for entry in self.hpq:
                            # entry[0] is the predicate; a true result
                            # releases the parked coroutine.
                            if entry[0] and entry[0]():
                                hp_found = True
                                entry[0] = 0
                                cur_task[0] = 0
                                cur_task[1] = entry[1] # ??? quick non-allocating copy
                                cur_task[2] = entry[2]
                                break
                        if hp_found:
                            break
                    # Schedule most overdue LP coro
                    tnow = self.time()
                    if self.lpq and self._max_overdue_ms > 0:
                        t = self.lpq.peektime()
                        overdue = -time.ticks_diff(t, tnow)
                        if overdue > self._max_overdue_ms:
                            self.lpq.pop(cur_task)
                            break
                    # Schedule any due normal task
                    t = self.q.peektime()
                    delay = time.ticks_diff(t, tnow)
                    if delay <= 0:
                        # https://github.com/micropython/micropython-lib/pull/201
                        # Always call wait(), to give a chance to I/O scheduling
                        self.wait(0)
                        self.q.pop(cur_task)
                        break
                    # Schedule any due LP task
                    if self.lpq:
                        t = self.lpq.peektime()
                        lpdelay = time.ticks_diff(t, tnow)
                        if lpdelay <= 0:
                            self.lpq.pop(cur_task)
                            break
                        delay = min(delay, lpdelay)
                    self.wait(delay) # superclass
                t = cur_task[0]
                cb = cur_task[1]
                args = cur_task[2]
                if __debug__ and DEBUG:
                    log.debug("Next coroutine to run: %s", (t, cb, args))
                # __main__.mem_info()
            else: # Normal q is empty
                ready = False
                if self.lpq:
                    t = self.lpq.peektime()
                    delay = time.ticks_diff(t, self.time())
                    if delay <= 0:
                        self.lpq.pop(cur_task)
                        t = cur_task[0]
                        cb = cur_task[1]
                        args = cur_task[2]
                        if __debug__ and DEBUG:
                            log.debug("Next coroutine to run: %s", (t, cb, args))
                        ready = True
                if not ready:
                    self.wait(-1)
                    # Assuming IO completion scheduled some tasks
                    continue
            if callable(cb):
                # Plain callback: invoke once, no rescheduling.
                cb(*args)
            else:
                # Generator-based coroutine: resume it and interpret the
                # syscall object it yields.
                delay = 0
                func = None
                priority = True
                try:
                    if __debug__ and DEBUG:
                        log.debug("Coroutine %s send args: %s", cb, args)
                    if args == ():
                        ret = next(cb)
                    else:
                        ret = cb.send(*args)
                    if __debug__ and DEBUG:
                        log.debug("Coroutine %s yield result: %s", cb, ret)
                    if isinstance(ret, SysCall1):
                        arg = ret.arg
                        if isinstance(ret, AfterMs):
                            # AfterMs (and subclass After) demote to the LP queue.
                            priority = False
                        if isinstance(ret, Sleep) or isinstance(ret, After):
                            # Seconds-based syscalls: convert to milliseconds.
                            delay = int(arg * 1000)
                        elif isinstance(ret, When):
                            if callable(arg):
                                func = arg
                            else:
                                assert False, "Argument to 'when' must be a function or method."
                        elif isinstance(ret, SleepMs):
                            delay = arg
                        elif isinstance(ret, IORead):
                            # self.add_reader(ret.obj.fileno(), lambda self, c, f: self.call_soon(c, f), self, cb, ret.obj)
                            # self.add_reader(ret.obj.fileno(), lambda c, f: self.call_soon(c, f), cb, ret.obj)
                            # self.add_reader(arg.fileno(), lambda cb: self.call_soon(cb), cb)
                            self.add_reader(arg, cb)
                            continue
                        elif isinstance(ret, IOWrite):
                            # self.add_writer(arg.fileno(), lambda cb: self.call_soon(cb), cb)
                            self.add_writer(arg, cb)
                            continue
                        elif isinstance(ret, IOReadDone):
                            self.remove_reader(arg)
                        elif isinstance(ret, IOWriteDone):
                            self.remove_writer(arg)
                        elif isinstance(ret, StopLoop):
                            return arg
                        else:
                            assert False, "Unknown syscall yielded: %r (of type %r)" % (ret, type(ret))
                    elif isinstance(ret, type_gen):
                        # Yielded a sub-coroutine: schedule it alongside.
                        self.call_soon(ret)
                    elif isinstance(ret, int):
                        # Delay
                        delay = ret
                    elif ret is None:
                        # Just reschedule
                        pass
                    else:
                        assert False, "Unsupported coroutine yield value: %r (of type %r)" % (ret, type(ret))
                except StopIteration as e:
                    if __debug__ and DEBUG:
                        log.debug("Coroutine finished: %s", cb)
                    continue
                # _schedule_hp() and call_after_ms() accept args as a tuple so should
                # work with syscalls returning data
                if func is not None:
                    self._schedule_hp(func, cb, args)
                else:
                    if priority:
                        # Currently all syscalls don't return anything, so we don't
                        # need to feed anything to the next invocation of coroutine.
                        # If that changes, need to pass that value below.
                        self.call_later_ms(delay, cb)
                    else:
                        self.call_after_ms(delay, cb, args)
class Sleep(SleepMs):
    # Seconds-granularity sleep syscall; run_forever multiplies its arg by 1000.
    pass
# Low priority
class AfterMs(SleepMs):
    # Millisecond delay rescheduled on the low-priority queue
    # (run_forever sets priority=False when it sees this syscall).
    pass
class After(AfterMs):
    # Low-priority delay expressed in seconds (converted to ms in run_forever).
    pass
# High Priority
class When(SleepMs):
    # High-priority syscall: its argument is a callable predicate; the
    # coroutine resumes as soon as the predicate returns true (see hpq polling).
    pass
# Singleton syscall instances for coroutines to yield.
after_ms = AfterMs()
after = After()
when = When()

# Process-wide loop singleton managed by get_event_loop().
_event_loop = None
_event_loop_class = PriorityEventLoop
def get_event_loop(len=42, lpqlen=42, max_overdue_ms=0, hpqlen=0):
    """Return the singleton PriorityEventLoop, creating it on first use.

    The sizing arguments are only honoured on the first call.
    """
    global _event_loop
    if _event_loop is None:
        _event_loop = _event_loop_class(len, lpqlen, max_overdue_ms, hpqlen)
    return _event_loop
| {
"repo_name": "zentropi/python-uzentropi",
"path": "mpylibs/asyncio_priority/__init__.py",
"copies": "1",
"size": "9081",
"license": "apache-2.0",
"hash": 6660208653124494000,
"line_mean": 40.4657534247,
"line_max": 122,
"alpha_frac": 0.4310098007,
"autogenerated": false,
"ratio": 4.511177347242921,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5442187147942921,
"avg_score": null,
"num_lines": null
} |
"""Asyncio protocol implementation for handling telegrams."""
from functools import partial
import asyncio
import logging
from serial_asyncio import create_serial_connection
from dsmr_parser import telegram_specifications
from dsmr_parser.clients.telegram_buffer import TelegramBuffer
from dsmr_parser.exceptions import ParseError, InvalidChecksumError
from dsmr_parser.parsers import TelegramParser
from dsmr_parser.clients.settings import SERIAL_SETTINGS_V2_2, \
SERIAL_SETTINGS_V4, SERIAL_SETTINGS_V5
def create_dsmr_protocol(dsmr_version, telegram_callback, loop=None, **kwargs):
    """Creates a DSMR asyncio protocol.

    dsmr_version: one of '2.2', '4', '5', '5B' (Belgium Fluvius) or
        '5L' (Luxembourg Smarty).
    telegram_callback: invoked with every successfully parsed telegram.
    Returns a (protocol_factory, serial_settings) tuple.
    Raises NotImplementedError for an unknown version.
    """
    if dsmr_version == '2.2':
        specification = telegram_specifications.V2_2
        serial_settings = SERIAL_SETTINGS_V2_2
    elif dsmr_version == '4':
        specification = telegram_specifications.V4
        serial_settings = SERIAL_SETTINGS_V4
    elif dsmr_version == '5':
        specification = telegram_specifications.V5
        serial_settings = SERIAL_SETTINGS_V5
    elif dsmr_version == '5B':
        specification = telegram_specifications.BELGIUM_FLUVIUS
        serial_settings = SERIAL_SETTINGS_V5
    elif dsmr_version == "5L":
        specification = telegram_specifications.LUXEMBOURG_SMARTY
        serial_settings = SERIAL_SETTINGS_V5
    else:
        # BUG FIX: the original passed the version as a second positional
        # argument (logging-style lazy formatting), so the message was
        # never interpolated; format it explicitly.
        raise NotImplementedError(
            "No telegram parser found for version: %s" % dsmr_version)
    protocol = partial(DSMRProtocol, loop, TelegramParser(specification),
                       telegram_callback=telegram_callback, **kwargs)
    return protocol, serial_settings
def create_dsmr_reader(port, dsmr_version, telegram_callback, loop=None):
    """Creates a DSMR asyncio protocol coroutine using serial port.

    port: serial device url/path; dsmr_version: see create_dsmr_protocol.
    Returns the connection coroutine from create_serial_connection.
    """
    # BUG FIX: the original hard-coded loop=None here, silently dropping a
    # caller-supplied event loop.
    protocol, serial_settings = create_dsmr_protocol(
        dsmr_version, telegram_callback, loop=loop)
    serial_settings['url'] = port
    conn = create_serial_connection(loop, protocol, **serial_settings)
    return conn
def create_tcp_dsmr_reader(host, port, dsmr_version,
                           telegram_callback, loop=None,
                           keep_alive_interval=None):
    """Creates a DSMR asyncio protocol coroutine using TCP connection.

    Returns the connection coroutine from loop.create_connection.
    """
    loop = loop or asyncio.get_event_loop()
    protocol_factory, _ = create_dsmr_protocol(
        dsmr_version, telegram_callback, loop=loop,
        keep_alive_interval=keep_alive_interval)
    return loop.create_connection(protocol_factory, host, port)
class DSMRProtocol(asyncio.Protocol):
    """Assemble and handle incoming data into complete DSM telegrams."""

    # asyncio transport, set in connection_made
    transport = None
    # callback invoked with each successfully parsed telegram
    telegram_callback = None

    def __init__(self, loop, telegram_parser,
                 telegram_callback=None, keep_alive_interval=None):
        """Initialize class."""
        self.loop = loop
        self.log = logging.getLogger(__name__)
        self.telegram_parser = telegram_parser
        # callback to call on complete telegram
        self.telegram_callback = telegram_callback
        # buffer to keep incomplete incoming data
        self.telegram_buffer = TelegramBuffer()
        # keep a lock until the connection is closed
        self._closed = asyncio.Event()
        self._keep_alive_interval = keep_alive_interval
        # _active tracks whether any data arrived since the last
        # keep-alive check
        self._active = True

    def connection_made(self, transport):
        """Just logging for now."""
        self.transport = transport
        self.log.debug('connected')
        # reset the activity flag; the first keep_alive() then verifies
        # that at least one chunk of data arrived since connecting
        self._active = False
        if self.loop and self._keep_alive_interval:
            self.loop.call_later(self._keep_alive_interval, self.keep_alive)

    def data_received(self, data):
        """Add incoming data to buffer."""
        data = data.decode('ascii')
        self._active = True
        self.log.debug('received data: %s', data)
        self.telegram_buffer.append(data)
        # hand every now-complete telegram to the parser/callback
        for telegram in self.telegram_buffer.get_all():
            self.handle_telegram(telegram)

    def keep_alive(self):
        """Periodic liveness check: close the transport if no data arrived
        during the last interval; otherwise re-arm the timer."""
        if self._active:
            self.log.debug('keep-alive checked')
            self._active = False
            if self.loop:
                self.loop.call_later(self._keep_alive_interval, self.keep_alive)
        else:
            self.log.warning('keep-alive check failed')
            if self.transport:
                self.transport.close()

    def connection_lost(self, exc):
        """Stop when connection is lost."""
        if exc:
            self.log.exception('disconnected due to exception', exc_info=exc)
        else:
            self.log.info('disconnected because of close/abort.')
        # release anyone blocked in wait_closed()
        self._closed.set()

    def handle_telegram(self, telegram):
        """Send off parsed telegram to handling callback.

        Checksum and parse failures are logged and swallowed so a bad
        telegram never tears down the connection.
        """
        self.log.debug('got telegram: %s', telegram)
        try:
            parsed_telegram = self.telegram_parser.parse(telegram)
        except InvalidChecksumError as e:
            self.log.warning(str(e))
        except ParseError:
            self.log.exception("failed to parse telegram")
        else:
            self.telegram_callback(parsed_telegram)

    async def wait_closed(self):
        """Wait until connection is closed."""
        await self._closed.wait()
| {
"repo_name": "ndokter/dsmr_parser",
"path": "dsmr_parser/clients/protocol.py",
"copies": "1",
"size": "5197",
"license": "mit",
"hash": -7874381210979669000,
"line_mean": 35.8581560284,
"line_max": 80,
"alpha_frac": 0.6478737733,
"autogenerated": false,
"ratio": 4.016228748068007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5164102521368006,
"avg_score": null,
"num_lines": null
} |
"""Asyncio protocol implementation of RFlink."""
# ./.homeassistant/deps/lib/python/site-packages/rflink/protocol.py
# /Library/Frameworks/Python.framework/Versions/3.6//lib/python3.6/site-packages/rflink/protocol.py
import asyncio
import concurrent
import logging
from datetime import timedelta
from fnmatch import fnmatchcase
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
Callable,
Generator,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
overload,
)
import socket
from serial_asyncio import create_serial_connection
from .parser import (
PacketType,
decode_packet,
deserialize_packet_id,
encode_packet,
packet_events,
valid_packet,
)
if TYPE_CHECKING:
    from typing import Coroutine  # not available in 3.4

log = logging.getLogger(__name__)

# File object for raw packet logging; toggled via ProtocolBase.log_all().
rflink_log = None

# How long send_command_ack waits for a gateway acknowledgement.
TIMEOUT = timedelta(seconds=5)

# TCP keepalive tuning applied in ProtocolBase.connection_made().
DEFAULT_TCP_KEEPALIVE_INTERVAL = 20
DEFAULT_TCP_KEEPALIVE_COUNT = 3
class ProtocolBase(asyncio.Protocol):
    """Manage low level rflink protocol.

    Buffers incoming bytes, splits them on CRLF into packets and hands each
    valid one to ``handle_raw_packet`` (implemented by subclasses).
    Optionally applies TCP keepalive settings to the underlying socket.
    """

    transport = None  # type: asyncio.BaseTransport
    keepalive = None  # type: Optional[int]

    def __init__(
        self,
        loop: Optional[asyncio.AbstractEventLoop] = None,
        disconnect_callback: Optional[Callable[[Optional[Exception]], None]] = None,
        keepalive: Optional[int] = None,
        **kwargs: Any
    ) -> None:
        """Initialize class.

        loop: event loop to use; defaults to asyncio.get_event_loop().
        disconnect_callback: invoked with the causing exception (or None)
            when the connection closes.
        keepalive: TCP keepalive idle time in seconds; None disables it.
        """
        if loop:
            self.loop = loop
        else:
            self.loop = asyncio.get_event_loop()
        self.packet = ""
        self.buffer = ""
        self.packet_callback = None  # type: Optional[Callable[[PacketType], None]]
        self.disconnect_callback = disconnect_callback
        self.keepalive = keepalive

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        """Store transport and apply TCP keepalive settings if requested."""
        self.transport = transport
        log.debug("connected")
        sock = transport.get_extra_info("socket")
        # BUG FIX: the original tested `socket is not None` — the imported
        # module, which is always true — instead of the socket object.
        # Transports without a socket (e.g. serial) return None from
        # get_extra_info and setsockopt would have crashed.
        if self.keepalive is not None and sock is not None:
            log.debug(
                "applying TCP KEEPALIVE settings: IDLE={}/INTVL={}/CNT={}".format(
                    self.keepalive,
                    DEFAULT_TCP_KEEPALIVE_INTERVAL,
                    DEFAULT_TCP_KEEPALIVE_COUNT,
                )
            )
            # Each option is platform dependent, so probe for it first.
            if hasattr(socket, "SO_KEEPALIVE"):
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            if hasattr(socket, "TCP_KEEPIDLE"):
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, self.keepalive)
            if hasattr(socket, "TCP_KEEPINTVL"):
                sock.setsockopt(
                    socket.IPPROTO_TCP,
                    socket.TCP_KEEPINTVL,
                    DEFAULT_TCP_KEEPALIVE_INTERVAL,
                )
            if hasattr(socket, "TCP_KEEPCNT"):
                sock.setsockopt(
                    socket.IPPROTO_TCP, socket.TCP_KEEPCNT, DEFAULT_TCP_KEEPALIVE_COUNT
                )

    def data_received(self, data: bytes) -> None:
        """Add incoming data to buffer; drop chunks that fail to decode."""
        try:
            decoded_data = data.decode()
        except UnicodeDecodeError:
            invalid_data = data.decode(errors="replace")
            log.warning("Error during decode of data, invalid data: %s", invalid_data)
        else:
            log.debug("received data: %s", decoded_data.strip())
            self.buffer += decoded_data
            self.handle_lines()

    def handle_lines(self) -> None:
        """Assemble incoming data into per-line packets."""
        while "\r\n" in self.buffer:
            line, self.buffer = self.buffer.split("\r\n", 1)
            if valid_packet(line):
                self.handle_raw_packet(line)
            else:
                log.warning("dropping invalid data: %s", line)

    def handle_raw_packet(self, raw_packet: str) -> None:
        """Handle one raw incoming packet (subclass responsibility)."""
        raise NotImplementedError()

    def send_raw_packet(self, packet: str) -> None:
        """Encode and put packet string onto write buffer."""
        data = packet + "\r\n"
        log.debug("writing data: %s", repr(data))
        # type ignore: transport from create_connection is documented to be
        # implementation specific bidirectional, even though typed as
        # BaseTransport
        self.transport.write(data.encode())  # type: ignore

    def log_all(self, file: Optional[str]) -> None:
        """Log all data received from RFLink to file (None disables)."""
        global rflink_log
        if file is None:
            rflink_log = None
        else:
            log.debug("logging to: %s", file)
            rflink_log = open(file, "a")

    def connection_lost(self, exc: Optional[Exception]) -> None:
        """Log when connection is closed, if needed call callback."""
        if exc:
            # BUG FIX: pass exc_info explicitly — log.exception() outside
            # an `except` block has no active exception and logged
            # "NoneType: None" instead of the traceback (matches the
            # pattern used by DSMRProtocol.connection_lost).
            log.exception("disconnected due to exception", exc_info=exc)
        else:
            log.info("disconnected because of close/abort.")
        if self.disconnect_callback:
            self.disconnect_callback(exc)
class PacketHandling(ProtocolBase):
    """Handle translating rflink packets to/from python primitives."""

    def __init__(
        self,
        *args: Any,
        packet_callback: Optional[Callable[[PacketType], None]] = None,
        **kwargs: Any
    ) -> None:
        """Add packethandling specific initialization.

        packet_callback: called with every complete/valid packet
        received.
        """
        super().__init__(*args, **kwargs)
        if packet_callback:
            self.packet_callback = packet_callback

    def handle_raw_packet(self, raw_packet: str) -> None:
        """Parse raw packet string into packet dict."""
        log.debug("got packet: %s", raw_packet)
        # optionally mirror the raw stream to the log_all() file
        if rflink_log:
            print(raw_packet, file=rflink_log)
            rflink_log.flush()
        packet = None  # type: Optional[PacketType]
        try:
            packet = decode_packet(raw_packet)
        # NOTE(review): BaseException is very broad (also catches
        # KeyboardInterrupt/SystemExit); presumably meant as best-effort
        # "never let a bad packet kill the transport" — consider Exception.
        except BaseException:
            log.exception("failed to parse packet data: %s", raw_packet)
        log.debug("decoded packet: %s", packet)
        if packet:
            if "ok" in packet:
                # handle response packets internally
                log.debug("command response: %s", packet)
                self.handle_response_packet(packet)
            else:
                self.handle_packet(packet)
        else:
            log.warning("no valid packet")

    def handle_packet(self, packet: PacketType) -> None:
        """Process incoming packet dict and optionally call callback."""
        if self.packet_callback:
            # forward to callback
            self.packet_callback(packet)
        else:
            print("packet", packet)

    def handle_response_packet(self, packet: PacketType) -> None:
        """Handle response packet (subclass responsibility)."""
        raise NotImplementedError()

    def send_packet(self, fields: PacketType) -> None:
        """Concat fields and send packet to gateway."""
        self.send_raw_packet(encode_packet(fields))

    def send_command(self, device_id: str, action: str) -> None:
        """Send device command to rflink gateway."""
        command = deserialize_packet_id(device_id)
        command["command"] = action
        log.debug("sending command: %s", command)
        self.send_packet(command)
class CommandSerialization(PacketHandling):
    """Logic for ensuring asynchronous commands are sent in order."""

    def __init__(
        self,
        *args: Any,
        packet_callback: Optional[Callable[[PacketType], None]] = None,
        **kwargs: Any
    ) -> None:
        """Add packethandling specific initialization."""
        super().__init__(*args, **kwargs)
        if packet_callback:
            self.packet_callback = packet_callback
        # _command_ack is set by handle_response_packet when the gateway
        # answers; _ready_to_send serialises concurrent senders.
        # NOTE(review): the loop= keyword was removed from Event/Lock in
        # Python 3.10 — this code targets older asyncio.
        self._command_ack = asyncio.Event(loop=self.loop)
        self._ready_to_send = asyncio.Lock(loop=self.loop)

    def handle_response_packet(self, packet: PacketType) -> None:
        """Handle response packet: record it and release the waiter."""
        self._last_ack = packet
        self._command_ack.set()

    @asyncio.coroutine
    def send_command_ack(
        self, device_id: str, action: str
    ) -> Generator[Any, None, Optional[bool]]:
        """Send command, wait for gateway to respond with acknowledgment.

        Returns True/False for ack/timeout (None only if sending raised).
        """
        # serialize commands
        yield from self._ready_to_send.acquire()
        acknowledgement = None
        try:
            self._command_ack.clear()
            self.send_command(device_id, action)
            log.debug("waiting for acknowledgement")
            try:
                yield from asyncio.wait_for(
                    self._command_ack.wait(), TIMEOUT.seconds, loop=self.loop
                )
                log.debug("packet acknowledged")
            except concurrent.futures._base.TimeoutError:
                acknowledgement = False
                log.warning("acknowledge timeout")
            else:
                acknowledgement = cast(bool, self._last_ack.get("ok", False))
        finally:
            # allow next command
            self._ready_to_send.release()
        return acknowledgement
class EventHandling(PacketHandling):
    """Breaks up packets into individual events with ids'.

    Most packets represent a single event (light on, measured
    temperature), but some contain multiple events (temperature and
    humidity). This class adds logic to convert packets into individual
    events each with their own id based on packet details (protocol,
    switch, etc).
    """

    def __init__(
        self,
        *args: Any,
        event_callback: Optional[Callable[[PacketType], None]] = None,
        ignore: Optional[Sequence[str]] = None,
        **kwargs: Any
    ) -> None:
        """Add eventhandling specific initialization.

        ignore: fnmatch-style patterns of event ids to drop.
        """
        super().__init__(*args, **kwargs)
        self.event_callback = event_callback
        # suppress printing of packets
        if not kwargs.get("packet_callback"):
            self.packet_callback = lambda x: None
        if ignore:
            log.debug("ignoring: %s", ignore)
            self.ignore = ignore
        else:
            self.ignore = []

    def _handle_packet(self, packet: PacketType) -> None:
        """Event specific packet handling logic.

        Break packet into events and fires configured event callback or
        nicely prints events for console.
        """
        events = packet_events(packet)
        for event in events:
            if self.ignore_event(event["id"]):
                log.debug("ignoring event with id: %s", event)
                continue
            log.debug("got event: %s", event)
            if self.event_callback:
                self.event_callback(event)
            else:
                self.handle_event(event)

    def handle_event(self, event: PacketType) -> None:
        """Handle of incoming event (print)."""
        # build a format string matching the event's kind, then render it
        string = "{id:<32} "
        if "command" in event:
            string += "{command}"
        elif "version" in event:
            if "hardware" in event:
                string += "{hardware} {firmware} "
            string += "V{version} R{revision}"
        else:
            string += "{value}"
            if event.get("unit"):
                string += " {unit}"
        print(string.format(**event))

    def handle_packet(self, packet: PacketType) -> None:
        """Apply event specific handling and pass on to packet handling."""
        self._handle_packet(packet)
        super().handle_packet(packet)

    def ignore_event(self, event_id: str) -> bool:
        """Verify event id against list of events to ignore.

        >>> e = EventHandling(ignore=[
        ...     'test1_00',
        ...     'test2_*',
        ... ])
        >>> e.ignore_event('test1_00')
        True
        >>> e.ignore_event('test2_00')
        True
        >>> e.ignore_event('test3_00')
        False
        """
        for ignore in self.ignore:
            if fnmatchcase(event_id, ignore):
                return True
        return False
# MRO: command serialization wraps event handling over the shared base.
class RflinkProtocol(CommandSerialization, EventHandling):
    """Combine preferred abstractions that form complete Rflink interface."""
class InverterProtocol(RflinkProtocol):
    """Invert switch commands received and send them out."""

    def handle_event(self, event: PacketType) -> None:
        """Reply to every switch event with the opposite command."""
        command = event.get("command")
        if command:
            inverted = "off" if command == "on" else "on"
            reply = self.send_command_ack(event["id"], inverted)
            self.loop.create_task(reply)
class RepeaterProtocol(RflinkProtocol):
    """Repeat switch commands received."""

    def handle_event(self, packet: PacketType) -> None:
        """Re-send every switch command seen on the gateway."""
        if not packet.get("command"):
            return
        repeat = self.send_command_ack(packet["id"], packet["command"])
        self.loop.create_task(repeat)
@overload
def create_rflink_connection(
    port: int,
    host: str,
    baud: int = 57600,
    keepalive: Optional[int] = None,
    protocol: Type[ProtocolBase] = RflinkProtocol,
    packet_callback: Optional[Callable[[PacketType], None]] = None,
    event_callback: Optional[Callable[[PacketType], None]] = None,
    disconnect_callback: Optional[Callable[[Optional[Exception]], None]] = None,
    ignore: Optional[Sequence[str]] = None,
    loop: Optional[asyncio.AbstractEventLoop] = None,
) -> "Coroutine[Any, Any, Tuple[asyncio.BaseTransport, ProtocolBase]]":
    """Create Rflink manager class, returns transport coroutine.

    Overload: TCP connection — host is required, port is the TCP port,
    keepalive may be set.
    """
    ...
@overload
def create_rflink_connection(
    port: str,
    host: None = None,
    baud: int = 57600,
    keepalive: None = None,
    protocol: Type[ProtocolBase] = RflinkProtocol,
    packet_callback: Optional[Callable[[PacketType], None]] = None,
    event_callback: Optional[Callable[[PacketType], None]] = None,
    disconnect_callback: Optional[Callable[[Optional[Exception]], None]] = None,
    ignore: Optional[Sequence[str]] = None,
    loop: Optional[asyncio.AbstractEventLoop] = None,
) -> "Coroutine[Any, Any, Tuple[asyncio.BaseTransport, ProtocolBase]]":
    """Create Rflink manager class, returns transport coroutine.

    Overload: serial connection — port names the device, opened at baud;
    keepalive does not apply.
    """
    ...
def create_rflink_connection(
    port: Union[None, str, int] = None,
    host: Optional[str] = None,
    baud: int = 57600,
    keepalive: Optional[int] = None,
    protocol: Type[ProtocolBase] = RflinkProtocol,
    packet_callback: Optional[Callable[[PacketType], None]] = None,
    event_callback: Optional[Callable[[PacketType], None]] = None,
    disconnect_callback: Optional[Callable[[Optional[Exception]], None]] = None,
    ignore: Optional[Sequence[str]] = None,
    loop: Optional[asyncio.AbstractEventLoop] = None,
) -> "Coroutine[Any, Any, Tuple[asyncio.BaseTransport, ProtocolBase]]":
    """Create Rflink manager class, returns transport coroutine.

    With `host` set a TCP connection is made (port is the TCP port);
    otherwise `port` names a serial device opened at `baud`.
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    # use default protocol if not specified
    protocol_factory = partial(
        protocol,
        loop=loop,
        packet_callback=packet_callback,
        event_callback=event_callback,
        disconnect_callback=disconnect_callback,
        ignore=ignore if ignore else [],
        keepalive=keepalive,
    )
    # setup serial connection if no transport specified
    if host:
        conn = loop.create_connection(protocol_factory, host, cast(int, port))
    else:
        conn = create_serial_connection(loop, protocol_factory, port, baud)
    return conn  # type: ignore
| {
"repo_name": "aequitas/python-rflink",
"path": "rflink/protocol.py",
"copies": "1",
"size": "15600",
"license": "mit",
"hash": 8156465502377076000,
"line_mean": 33.2105263158,
"line_max": 99,
"alpha_frac": 0.5992307692,
"autogenerated": false,
"ratio": 4.240282685512367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00023431697782591134,
"num_lines": 456
} |
# asyncio Sample for MyMusicTaste
# SELO77
# 2016-07-24
import asyncio
import time
class SpawnClass():
    """Fan out a list of request objects concurrently and collect results."""

    def __init__(self):
        pass

    @asyncio.coroutine
    def execute(self, request_class_list):
        """Run every request's execute() concurrently; return their results.

        Results are appended in completion order, not input order.
        NOTE: uses generator-based coroutines (@asyncio.coroutine), which
        were removed in Python 3.11.
        """
        response = []
        if request_class_list:
            @asyncio.coroutine
            def run(request_class):
                # await one request and stash its result
                result = yield from request_class.execute()
                response.append(result)
            tasks_list = []
            for each_class in request_class_list:
                tasks_list.append(
                    asyncio.Task(run(each_class))
                )
            # wait until every spawned task has finished
            yield from asyncio.gather(*tasks_list)
        return response
class RequestClass1():
    """Sample request: resolves to a fixed payload after a 4 s async sleep."""

    def __init__(self):
        print('RequestClass1.__init()__')

    @asyncio.coroutine
    def execute(self):
        yield from asyncio.sleep(4.0)
        return "class1_response"
class RequestClass2():
    """Sample request: resolves to a fixed payload after a 2 s async sleep."""

    def __init__(self):
        print('RequestClass2.__init()__')

    @asyncio.coroutine
    def execute(self):
        yield from asyncio.sleep(2.0)
        return "class2_response"
@asyncio.coroutine
def main():
    """Demo driver: spawn the two sample requests and time the concurrent run.

    Because the requests run via gather, the runtime should be close to
    the slowest request (4 s), not the sum.
    """
    request_class_list = [RequestClass1(), RequestClass2()]
    spawnClass = SpawnClass()
    st = time.time()
    final_result = yield from spawnClass.execute(request_class_list)
    print("runtime:%s, final_result:%s"% (time.time() - st, final_result))
# Script entry: run the demo to completion on the default event loop.
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
"repo_name": "SELO77/seloPython",
"path": "seminar/spwan.py",
"copies": "1",
"size": "1468",
"license": "mit",
"hash": -3452122050323272700,
"line_mean": 19.6901408451,
"line_max": 74,
"alpha_frac": 0.5899182561,
"autogenerated": false,
"ratio": 3.978319783197832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00494403071067828,
"num_lines": 71
} |
# asyncio Sample for MyMusicTaste
# SELO77
# 2016-07-24
import asyncio
import traceback
class SpawnClass():
    """Fan out a list of request objects and run their coroutines concurrently."""

    def __init__(self):
        # No state is kept; the class only groups the execute() helper.
        pass

    @asyncio.coroutine  # legacy generator-based coroutine (pre async/await)
    def execute(self, request_class_list):
        """Run every request's execute() concurrently; return collected results.

        Failures in a single request are printed and skipped rather than
        aborting the whole batch.  Results are appended in completion order.
        """
        response = []
        try:
            if request_class_list:
                @asyncio.coroutine
                def run(request_class):
                    try:
                        result = yield from request_class.execute()
                        response.append(result)
                    except Exception:
                        # Catch Exception, not a bare `except:`: a bare except
                        # would also swallow KeyboardInterrupt/SystemExit and,
                        # on modern Python, task cancellation.
                        print(traceback.format_exc())
                tasks_list = []
                for each_class in request_class_list:
                    tasks_list.append(
                        asyncio.Task(run(each_class))
                    )
                yield from asyncio.gather(*tasks_list)
        except Exception:
            # Same reasoning: keep interpreter-exit exceptions propagating.
            print(traceback.format_exc())
        return response
class RequestClass1():
    """Sample request whose coroutine resolves immediately to a string."""

    def __init__(self):
        print('RequestClass1.__init()__')

    @asyncio.coroutine  # legacy generator-based coroutine
    def execute(self):
        return "class1_response"
class RequestClass2():
    """Sample request whose coroutine resolves immediately to a string."""

    def __init__(self):
        print('RequestClass2.__init()__')

    @asyncio.coroutine  # legacy generator-based coroutine
    def execute(self):
        return "class2_response"
@asyncio.coroutine  # legacy generator-based coroutine
def main():
    """Build two sample requests and run them through SpawnClass."""
    try:
        request_class_list = [RequestClass1(), RequestClass2()]
        spawnClass = SpawnClass()
        final_result = yield from spawnClass.execute(request_class_list)
        print("final_result:%s"%final_result)
    except Exception:
        # Catch Exception, not a bare `except:`, so KeyboardInterrupt and
        # SystemExit still propagate to the caller.
        print(traceback.format_exc())
# Drive the sample to completion on the thread's default event loop.
loop = asyncio.get_event_loop()
loop.run_until_complete(main()) | {
"repo_name": "SELO77/seloPython",
"path": "3.X/module/async_gather.py",
"copies": "1",
"size": "1423",
"license": "mit",
"hash": -1631435848732161500,
"line_mean": 19.3428571429,
"line_max": 68,
"alpha_frac": 0.627547435,
"autogenerated": false,
"ratio": 3.8150134048257374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9853367850629453,
"avg_score": 0.017838597839256717,
"num_lines": 70
} |
"""AsyncIO support for zmq
Requires asyncio and Python 3.
"""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
# Derived from Python 3.5.1 selectors._BaseSelectorImpl, used under PSF License
from collections import Mapping
import zmq as _zmq
from zmq.eventloop import future as _future
# TODO: support trollius for Legacy Python? (probably not)
import asyncio
from asyncio import SelectorEventLoop, Future
try:
import selectors
except ImportError:
from asyncio import selectors # py33
# Translation table between selectors-module event bits and zmq poll bits.
_aio2zmq_map = {
    selectors.EVENT_READ: _zmq.POLLIN,
    selectors.EVENT_WRITE: _zmq.POLLOUT,
}
# Mask with every asyncio-level event bit set (union of the table's keys).
_AIO_EVENTS = 0
for aio_evt in _aio2zmq_map:
    _AIO_EVENTS |= aio_evt
def _aio2zmq(aio_evt):
    """Turn AsyncIO event mask into ZMQ event mask"""
    # OR in the zmq bit for every asyncio bit present in the incoming mask.
    z_evt = 0
    for aio_mask in _aio2zmq_map:
        if aio_evt & aio_mask:
            z_evt = z_evt | _aio2zmq_map[aio_mask]
    return z_evt
def _zmq2aio(z_evt):
    """Turn ZMQ event mask into AsyncIO event mask"""
    # OR in the asyncio bit for every zmq bit present in the incoming mask.
    aio_evt = 0
    for aio_mask in _aio2zmq_map:
        if _aio2zmq_map[aio_mask] & z_evt:
            aio_evt = aio_evt | aio_mask
    return aio_evt
class _AsyncIO(object):
    # Mixin supplying the asyncio flavour of Future and event-mask constants
    # to the generic zmq.eventloop.future base classes.
    _Future = Future
    _WRITE = selectors.EVENT_WRITE
    _READ = selectors.EVENT_READ

    def _default_loop(self):
        # Fall back to the current thread's event loop.
        return asyncio.get_event_loop()
def _fileobj_to_fd(fileobj):
    """Return a file descriptor from a file object.

    Parameters:
    fileobj -- file object or file descriptor

    Returns:
    corresponding file descriptor

    Raises:
    ValueError if the object is invalid
    """
    if isinstance(fileobj, int):
        # Already a raw descriptor; just validate it below.
        fd = fileobj
        if fd < 0:
            raise ValueError("Invalid file descriptor: {}".format(fd))
        return fd
    try:
        fd = int(fileobj.fileno())
    except (AttributeError, TypeError, ValueError):
        raise ValueError("Invalid file object: "
                         "{!r}".format(fileobj)) from None
    if fd < 0:
        raise ValueError("Invalid file descriptor: {}".format(fd))
    return fd
class _SelectorMapping(Mapping):
    """Mapping of file objects to selector keys."""

    def __init__(self, selector):
        # Keep a reference to the owning selector; every lookup delegates
        # to its _fd_to_key table.
        self._selector = selector

    def __len__(self):
        return len(self._selector._fd_to_key)

    def __getitem__(self, fileobj):
        try:
            # Resolve the file object to its fd (or zmq socket) first.
            fd = self._selector._fileobj_lookup(fileobj)
            return self._selector._fd_to_key[fd]
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None

    def __iter__(self):
        return iter(self._selector._fd_to_key)
class ZMQSelector(selectors.BaseSelector):
    """zmq_poll-based selector for asyncio"""

    def __init__(self):
        super().__init__()
        # this maps file descriptors to keys
        self._fd_to_key = {}
        # read-only mapping returned by get_map()
        self._map = _SelectorMapping(self)
        # zmq poller that mirrors every registration (fds and zmq sockets)
        self._zmq_poller = _zmq.Poller()

    def _fileobj_lookup(self, fileobj):
        """Return a zmq socket or a file descriptor from a file object.
        This wraps _fileobj_to_fd() to do an exhaustive search in case
        the object is invalid but we still have it in our map. This
        is used by unregister() so we can unregister an object that
        was previously registered even if it is closed. It is also
        used by _SelectorMapping.
        """
        if isinstance(fileobj, _zmq.Socket):
            # zmq sockets are used directly as poller keys, no fd involved.
            return fileobj
        else:
            try:
                return _fileobj_to_fd(fileobj)
            except ValueError:
                # Do an exhaustive search.
                for key in self._fd_to_key.values():
                    if key.fileobj is fileobj:
                        return key.fd
                # Raise ValueError after all.
                raise

    def register(self, fileobj, events, data=None):
        """Register a file object.
        Parameters:
        fileobj -- zmq socket, file object or file descriptor
        events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
        data -- attached data
        Returns:
        SelectorKey instance
        Raises:
        ValueError if events is invalid
        KeyError if fileobj is already registered
        OSError if fileobj is closed or otherwise is unacceptable to
        the underlying system call (if a system call is made)
        Note:
        OSError may or may not be raised
        """
        if (not events) or (events & ~(selectors.EVENT_READ | selectors.EVENT_WRITE)):
            raise ValueError("Invalid events: {!r}".format(events))
        key = selectors.SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
        if key.fd in self._fd_to_key:
            raise KeyError("{!r} (FD {}) is already registered"
                           .format(fileobj, key.fd))
        self._fd_to_key[key.fd] = key
        # Mirror the registration in the zmq poller using zmq event bits.
        self._zmq_poller.register(key.fd, _aio2zmq(events))
        return key

    def unregister(self, fileobj):
        """Unregister a file object.
        Parameters:
        fileobj -- zmq socket, file object or file descriptor
        Returns:
        SelectorKey instance
        Raises:
        KeyError if fileobj is not registered
        Note:
        If fileobj is registered but has since been closed this does
        *not* raise OSError (even if the wrapped syscall does)
        """
        try:
            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        self._zmq_poller.unregister(key.fd)
        return key

    def modify(self, fileobj, events, data=None):
        # Re-register when the event mask changed; otherwise just swap data.
        try:
            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        if events != key.events:
            self.unregister(fileobj)
            key = self.register(fileobj, events, data)
        elif data != key.data:
            # Use a shortcut to update the data.
            key = key._replace(data=data)
            self._fd_to_key[key.fd] = key
        return key

    def select(self, timeout=None):
        """Perform the actual selection, until some monitored file objects are
        ready or a timeout expires.
        Parameters:
        timeout -- if timeout > 0, this specifies the maximum wait time, in
        seconds
        if timeout <= 0, the select() call won't block, and will
        report the currently ready file objects
        if timeout is None, select() will block until a monitored
        file object becomes ready
        Returns:
        list of (key, events) for ready file objects
        `events` is a bitwise mask of EVENT_READ|EVENT_WRITE
        """
        if timeout is not None:
            if timeout < 0:
                timeout = 0
            else:
                # zmq_poll expects milliseconds, selectors API uses seconds.
                timeout = 1e3 * timeout
        fd_event_list = self._zmq_poller.poll(timeout)
        ready = []
        for fd, event in fd_event_list:
            key = self._key_from_fd(fd)
            if key:
                events = _zmq2aio(event)
                ready.append((key, events))
        return ready

    def close(self):
        """Close the selector.
        This must be called to make sure that any underlying resource is freed.
        """
        self._fd_to_key.clear()
        self._map = None
        self._zmq_poller = None

    def get_map(self):
        # Read-only view created in __init__.
        return self._map

    def _key_from_fd(self, fd):
        """Return the key associated to a given file descriptor.
        Parameters:
        fd -- file descriptor
        Returns:
        corresponding key, or None if not found
        """
        try:
            return self._fd_to_key[fd]
        except KeyError:
            return None
class Poller(_AsyncIO, _future._AsyncPoller):
    """Poller returning asyncio.Future for poll results."""
    # All behaviour comes from _AsyncPoller plus the asyncio mixin above.
    pass
class Socket(_AsyncIO, _future._AsyncSocket):
    """Socket returning asyncio Futures for send/recv/poll methods."""
    _poller_class = Poller

    def _add_io_state(self, state):
        """Add io_state to poller."""
        # Only touch the loop when the bit is not already set.
        if not self._state & state:
            self._state = self._state | state
            if state & self._READ:
                # The socket object itself is handed to the loop as the
                # "file" to watch; readability triggers recv handling.
                self.io_loop.add_reader(self, self._handle_recv)
            if state & self._WRITE:
                self.io_loop.add_writer(self, self._handle_send)

    def _drop_io_state(self, state):
        """Stop poller from watching an io_state."""
        if self._state & state:
            self._state = self._state & (~state)
            if state & self._READ:
                self.io_loop.remove_reader(self)
            if state & self._WRITE:
                self.io_loop.remove_writer(self)

    def _init_io_state(self):
        """initialize the ioloop event handler"""
        # No eager setup needed; watchers are added lazily in _add_io_state.
        pass
class Context(_zmq.Context):
    """Context for creating asyncio-compatible Sockets"""
    # Sockets created by this context are the asyncio Socket defined above.
    _socket_class = Socket
class ZMQEventLoop(SelectorEventLoop):
    """AsyncIO eventloop using zmq_poll"""

    def __init__(self, selector=None):
        # Default to the zmq_poll-backed selector when none is supplied.
        if selector is None:
            selector = ZMQSelector()
        super(ZMQEventLoop, self).__init__(selector)
# Singleton loop created by install().
_loop = None


def install():
    """Install and return the global ZMQEventLoop
    registers the loop with asyncio.set_event_loop
    """
    global _loop
    if _loop is None:
        # Create the loop once and make it asyncio's default loop.
        _loop = ZMQEventLoop()
        asyncio.set_event_loop(_loop)
    return _loop
# Public API of this module.
__all__ = [
    'Context',
    'Socket',
    'Poller',
    'ZMQEventLoop',
    'install',
]
| {
"repo_name": "zanph/zanph",
"path": "flaskroulette/venv/lib/python2.7/site-packages/zmq/asyncio.py",
"copies": "6",
"size": "9631",
"license": "mit",
"hash": 5727308518395307000,
"line_mean": 27.9219219219,
"line_max": 89,
"alpha_frac": 0.5859204652,
"autogenerated": false,
"ratio": 4.153083225528245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007184454704597072,
"num_lines": 333
} |
"""AsyncIO support for zmq
Requires asyncio and Python 3.
"""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
import zmq as _zmq
from zmq import _future
# TODO: support trollius for Legacy Python? (probably not)
import asyncio
from asyncio import SelectorEventLoop, Future
try:
import selectors
except ImportError:
from asyncio import selectors # py33
class _AsyncIO(object):
    # Mixin supplying the asyncio flavour of Future and event-mask constants
    # to the generic zmq._future base classes.
    _Future = Future
    _WRITE = selectors.EVENT_WRITE
    _READ = selectors.EVENT_READ

    def _default_loop(self):
        # Fall back to the current thread's event loop.
        return asyncio.get_event_loop()
class Poller(_AsyncIO, _future._AsyncPoller):
    """Poller returning asyncio.Future for poll results."""

    def _watch_raw_socket(self, loop, socket, evt, f):
        """Schedule callback for a raw socket"""
        # add_reader/add_writer invoke f (ignoring their own args) when the
        # descriptor becomes ready in the corresponding direction.
        if evt & self._READ:
            loop.add_reader(socket, lambda *args: f())
        if evt & self._WRITE:
            loop.add_writer(socket, lambda *args: f())

    def _unwatch_raw_sockets(self, loop, *sockets):
        """Unschedule callback for a raw socket"""
        for socket in sockets:
            # Remove both directions regardless of which were registered.
            loop.remove_reader(socket)
            loop.remove_writer(socket)
class Socket(_AsyncIO, _future._AsyncSocket):
    """Socket returning asyncio Futures for send/recv/poll methods."""
    _poller_class = Poller

    def _init_io_state(self):
        """initialize the ioloop event handler"""
        # Wake _handle_events whenever the underlying descriptor is readable.
        self.io_loop.add_reader(self._fd, lambda : self._handle_events(0, 0))

    def _clear_io_state(self):
        """clear any ioloop event handler
        called once at close
        """
        self.io_loop.remove_reader(self._fd)
# Resolve the forward reference: Poller needs the Socket class defined after it.
Poller._socket_class = Socket
class Context(_zmq.Context):
    """Context for creating asyncio-compatible Sockets"""
    # Sockets created by this context are the asyncio Socket defined above.
    _socket_class = Socket
    # avoid sharing instance with base Context class
    _instance = None
class ZMQEventLoop(SelectorEventLoop):
    """DEPRECATED: AsyncIO eventloop using zmq_poll.
    pyzmq sockets should work with any asyncio event loop as of pyzmq 17.
    """

    def __init__(self, selector=None):
        # Emit the one-time deprecation warning before normal construction.
        _deprecated()
        super(ZMQEventLoop, self).__init__(selector)
# Kept for backward compatibility with the old install() API.
_loop = None


def _deprecated():
    # Warn only once per process that ZMQEventLoop/install are obsolete.
    if _deprecated.called:
        return
    _deprecated.called = True
    import warnings
    warnings.warn("ZMQEventLoop and zmq.asyncio.install are deprecated in pyzmq 17. Special eventloop integration is no longer needed.", DeprecationWarning, stacklevel=3)
# Initialize the once-only flag used inside the function above.
_deprecated.called = False
def install():
    """DEPRECATED: No longer needed in pyzmq 17"""
    # Kept only for backward compatibility; merely emits the warning.
    _deprecated()
# Public API of this module.
__all__ = [
    'Context',
    'Socket',
    'Poller',
    'ZMQEventLoop',
    'install',
]
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/zmq/asyncio/__init__.py",
"copies": "1",
"size": "2680",
"license": "mit",
"hash": -1240195530702323200,
"line_mean": 24.5238095238,
"line_max": 170,
"alpha_frac": 0.6593283582,
"autogenerated": false,
"ratio": 3.895348837209302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016157235177635119,
"num_lines": 105
} |
"""AsyncIO TCP Server for Kytos."""
import asyncio
import errno
import logging
from kytos.core.connection import Connection
from kytos.core.events import KytosEvent
LOG = logging.getLogger(__name__)


def exception_handler(loop, context):
    """Exception handler to avoid tracebacks because of network timeouts."""
    exc = context.get('exception')
    transport = context.get('transport')
    # Timeouts and already-closed sockets are routine: log and move on.
    if isinstance(exc, TimeoutError):
        LOG.info('Socket timeout: %r', transport)
        return
    if isinstance(exc, OSError) and exc.errno == errno.EBADF:
        LOG.info('Socket closed: %r', transport)
        return
    # Anything else is unexpected; defer to asyncio's default handling.
    loop.default_exception_handler(context)
class KytosServer:
    """Abstraction of a TCP Server to listen to packages from the network.

    The KytosServer will listen on the specified port
    for any new TCP request from the network and then instantiate the
    specified RequestHandler to handle the new request.
    It creates a new thread for each Handler.
    """

    def __init__(self,  # pylint: disable=too-many-arguments
                 server_address, server_protocol, controller,
                 protocol_name, loop=None):
        """Create the object without starting the server.

        Args:
            server_address (tuple): Address where the server is listening.
                example: ('127.0.0.1', 80)
            server_protocol (asyncio.Protocol):
                Class that will be instantiated to handle each request.
            controller (:class:`~kytos.core.controller.Controller`):
                An instance of Kytos Controller class.
            protocol_name (str): Southbound protocol name that will be used
        """
        self.server_address = server_address
        self.server_protocol = server_protocol
        self.controller = controller
        self.protocol_name = protocol_name
        # This will be an `asyncio.Server` instance after `serve_forever` is
        # called
        self._server = None
        # Here we compose the received `server_protocol` class with a `server`
        # object pointing to this instance
        self.server_protocol.server = self
        self.loop = loop or asyncio.get_event_loop()
        self.loop.set_exception_handler(exception_handler)

    def serve_forever(self):
        """Handle requests until an explicit shutdown() is called."""
        addr, port = self.server_address[0], self.server_address[1]
        self._server = self.loop.create_server(self.server_protocol,
                                               addr, port)
        # Pre-bind `task` so the except block below can safely check it even
        # when create_task() itself is what raised (the original code hit a
        # NameError in that case).
        task = None
        try:
            task = self.loop.create_task(self._server)
            LOG.info("Kytos listening at %s:%s", addr, port)
        except Exception:
            LOG.error('Failed to start Kytos TCP Server at %s:%s', addr, port)
            if task is not None:
                # A Task has no close() method (the original called one);
                # cancel() is the correct cleanup for a scheduled task.
                task.cancel()
            raise

    def shutdown(self):
        """Call .close() on underlying TCP server, closing client sockets."""
        self._server.close()
        # self.loop.run_until_complete(self._server.wait_closed())
class KytosServerProtocol(asyncio.Protocol):
    """Kytos' main request handler.

    It is instantiated once per connection between each switch and the
    controller.
    The setup method will dispatch a KytosEvent (``kytos/core.connection.new``)
    on the controller, that will be processed by a Core App.
    The finish method will close the connection and dispatch a KytosEvent
    (``kytos/core.connection.closed``) on the controller.
    """

    # Well-known southbound ports mapped to their protocol names.
    known_ports = {
        6633: 'openflow',
        6653: 'openflow'
    }

    def __init__(self):
        """Initialize protocol and check if server attribute was set."""
        self._loop = asyncio.get_event_loop()
        self.connection = None
        self.transport = None
        self._rest = b''
        # server attribute is set outside this class, in KytosServer.init()
        # Here we initialize it to None to avoid pylint warnings
        # Bug fix: getattr() needs a default of None here; without it an
        # unset `server` raised AttributeError instead of the intended
        # ValueError below.
        if not getattr(self, 'server', None):
            self.server = None
        # Then we check if it was really set
        if not self.server:
            raise ValueError("server instance must be assigned before init")

    def connection_made(self, transport):
        """Handle new client connection, passing it to the controller.

        Build a new Kytos `Connection` and send a ``kytos/core.connection.new``
        KytosEvent through the app buffer.
        """
        self.transport = transport
        addr, port = transport.get_extra_info('peername')
        _, server_port = transport.get_extra_info('sockname')
        socket = transport.get_extra_info('socket')
        LOG.info("New connection from %s:%s", addr, port)
        self.connection = Connection(addr, port, socket)
        # This allows someone to inherit from KytosServer and start a server
        # on another port to handle a different protocol.
        if self.server.protocol_name:
            self.known_ports[server_port] = self.server.protocol_name
        if server_port in self.known_ports:
            protocol_name = self.known_ports[server_port]
        else:
            # Unknown port: use the zero-padded port number as the name.
            protocol_name = f'{server_port:04d}'
        self.connection.protocol.name = protocol_name
        event_name = f'kytos/core.{protocol_name}.connection.new'
        event = KytosEvent(name=event_name,
                           content={'source': self.connection})
        self._loop.create_task(self.server.controller.buffers.raw.aput(event))

    def data_received(self, data):
        """Handle each request and place its data in the raw event buffer.

        Sends the received binary data in a ``kytos/core.{protocol}.raw.in``
        event on the raw buffer.
        """
        # max_size = 2**16
        # new_data = self.request.recv(max_size)
        data = self._rest + data
        LOG.debug("New data from %s:%s (%s bytes)",
                  self.connection.address, self.connection.port, len(data))
        # LOG.debug("New data from %s:%s (%s bytes): %s", self.addr, self.port,
        #           len(data), binascii.hexlify(data))
        content = {'source': self.connection, 'new_data': data}
        event_name = f'kytos/core.{self.connection.protocol.name}.raw.in'
        event = KytosEvent(name=event_name, content=content)
        self._loop.create_task(self.server.controller.buffers.raw.aput(event))

    def connection_lost(self, exc):
        """Close the connection socket and generate connection lost event.

        Emits a ``kytos/core.{protocol}.connection.lost`` event through the
        App buffer.
        """
        reason = exc or "Request closed by client"
        LOG.info("Connection lost with client %s:%s. Reason: %s",
                 self.connection.address, self.connection.port, reason)
        self.connection.close()
        content = {'source': self.connection}
        if exc:
            content['exception'] = exc
        event_name = \
            f'kytos/core.{self.connection.protocol.name}.connection.lost'
        event = KytosEvent(name=event_name, content=content)
        self._loop.create_task(self.server.controller.buffers.app.aput(event))
| {
"repo_name": "kytos/kytos",
"path": "kytos/core/atcp_server.py",
"copies": "1",
"size": "7116",
"license": "mit",
"hash": 8214921928264685000,
"line_mean": 35.6804123711,
"line_max": 79,
"alpha_frac": 0.6287240022,
"autogenerated": false,
"ratio": 4.188346085932902,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 194
} |
"""AsyncIO TCP Server for Kytos."""
import asyncio
import logging
from kytos.core.connection import Connection
from kytos.core.events import KytosEvent
LOG = logging.getLogger("atcp_server")


def exception_handler(loop, context):
    """Exception handler to avoid tracebacks because of network timeouts."""
    exc = context.get('exception')
    if not isinstance(exc, TimeoutError):
        # Unexpected errors keep asyncio's default treatment.
        loop.default_exception_handler(context)
        return
    # A timed-out socket is routine; log at info level and swallow it.
    LOG.info('Lost connection on socket %r', context['transport'])
class KytosServer:
    """Abstraction of a TCP Server to listen to packages from the network.

    The KytosServer will listen on the specified port
    for any new TCP request from the network and then instantiate the
    specified RequestHandler to handle the new request.
    It creates a new thread for each Handler.
    """

    def __init__(self,  # pylint: disable=too-many-arguments
                 server_address, server_protocol, controller,
                 protocol_name, loop=None):
        """Create the object without starting the server.

        Args:
            server_address (tuple): Address where the server is listening.
                example: ('127.0.0.1', 80)
            server_protocol (asyncio.Protocol):
                Class that will be instantiated to handle each request.
            controller (:class:`~kytos.core.controller.Controller`):
                An instance of Kytos Controller class.
            protocol_name (str): Southbound protocol name that will be used
        """
        self.server_address = server_address
        self.server_protocol = server_protocol
        self.controller = controller
        self.protocol_name = protocol_name
        # This will be an `asyncio.Server` instance after `serve_forever` is
        # called
        self._server = None
        # Here we compose the received `server_protocol` class with a `server`
        # object pointing to this instance
        self.server_protocol.server = self
        self.loop = loop or asyncio.get_event_loop()
        self.loop.set_exception_handler(exception_handler)

    def serve_forever(self):
        """Handle requests until an explicit shutdown() is called."""
        addr, port = self.server_address[0], self.server_address[1]
        self._server = self.loop.create_server(self.server_protocol,
                                               addr, port)
        # Pre-bind `task` so the except block below can safely check it even
        # when create_task() itself is what raised (the original code hit a
        # NameError in that case).
        task = None
        try:
            task = self.loop.create_task(self._server)
            LOG.info("Kytos listening at %s:%s", addr, port)
        except Exception:
            LOG.error('Failed to start Kytos TCP Server at %s:%s', addr, port)
            if task is not None:
                # A Task has no close() method (the original called one);
                # cancel() is the correct cleanup for a scheduled task.
                task.cancel()
            raise

    def shutdown(self):
        """Call .close() on underlying TCP server, closing client sockets."""
        self._server.close()
        # self.loop.run_until_complete(self._server.wait_closed())
class KytosServerProtocol(asyncio.Protocol):
    """Kytos' main request handler.

    It is instantiated once per connection between each switch and the
    controller.
    The setup method will dispatch a KytosEvent (``kytos/core.connection.new``)
    on the controller, that will be processed by a Core App.
    The finish method will close the connection and dispatch a KytosEvent
    (``kytos/core.connection.closed``) on the controller.
    """

    # Well-known southbound ports mapped to their protocol names.
    known_ports = {
        6633: 'openflow',
        6653: 'openflow'
    }

    def __init__(self):
        """Initialize protocol and check if server attribute was set."""
        self._loop = asyncio.get_event_loop()
        self.connection = None
        self.transport = None
        self._rest = b''
        # server attribute is set outside this class, in KytosServer.init()
        # Here we initialize it to None to avoid pylint warnings
        # NOTE(review): getattr() has no default here, so if `server` was
        # never assigned this raises AttributeError instead of reaching the
        # intended ValueError below — consider getattr(self, 'server', None).
        if not getattr(self, 'server'):
            self.server = None
        # Then we check if it was really set
        if not self.server:
            raise ValueError("server instance must be assigned before init")

    def connection_made(self, transport):
        """Handle new client connection, passing it to the controller.

        Build a new Kytos `Connection` and send a ``kytos/core.connection.new``
        KytosEvent through the app buffer.
        """
        self.transport = transport
        addr, port = transport.get_extra_info('peername')
        _, server_port = transport.get_extra_info('sockname')
        socket = transport.get_extra_info('socket')
        LOG.info("New connection from %s:%s", addr, port)
        self.connection = Connection(addr, port, socket)
        # This allows someone to inherit from KytosServer and start a server
        # on another port to handle a different protocol.
        if self.server.protocol_name:
            self.known_ports[server_port] = self.server.protocol_name
        if server_port in self.known_ports:
            protocol_name = self.known_ports[server_port]
        else:
            # Unknown port: use the zero-padded port number as the name.
            protocol_name = f'{server_port:04d}'
        self.connection.protocol.name = protocol_name
        event_name = f'kytos/core.{protocol_name}.connection.new'
        event = KytosEvent(name=event_name,
                           content={'source': self.connection})
        self._loop.create_task(self.server.controller.buffers.raw.aput(event))

    def data_received(self, data):
        """Handle each request and place its data in the raw event buffer.

        Sends the received binary data in a ``kytos/core.{protocol}.raw.in``
        event on the raw buffer.
        """
        # max_size = 2**16
        # new_data = self.request.recv(max_size)
        data = self._rest + data
        LOG.debug("New data from %s:%s (%s bytes)",
                  self.connection.address, self.connection.port, len(data))
        # LOG.debug("New data from %s:%s (%s bytes): %s", self.addr, self.port,
        #           len(data), binascii.hexlify(data))
        content = {'source': self.connection, 'new_data': data}
        event_name = f'kytos/core.{self.connection.protocol.name}.raw.in'
        event = KytosEvent(name=event_name, content=content)
        self._loop.create_task(self.server.controller.buffers.raw.aput(event))

    def connection_lost(self, exc):
        """Close the connection socket and generate connection lost event.

        Emits a ``kytos/core.{protocol}.connection.lost`` event through the
        App buffer.
        """
        reason = exc or "Request closed by client"
        LOG.info("Connection lost with client %s:%s. Reason: %s",
                 self.connection.address, self.connection.port, reason)
        self.connection.close()
        content = {'source': self.connection}
        if exc:
            content['exception'] = exc
        event_name = \
            f'kytos/core.{self.connection.protocol.name}.connection.lost'
        event = KytosEvent(name=event_name, content=content)
        self._loop.create_task(self.server.controller.buffers.app.aput(event))
| {
"repo_name": "kytos/kyco",
"path": "kytos/core/atcp_server.py",
"copies": "1",
"size": "6960",
"license": "mit",
"hash": -6357257682721284000,
"line_mean": 36.0212765957,
"line_max": 79,
"alpha_frac": 0.6288793103,
"autogenerated": false,
"ratio": 4.202898550724638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5331777861024637,
"avg_score": null,
"num_lines": null
} |
"""Asyncio utilities."""
from asyncio import Semaphore, coroutines, ensure_future, gather, get_running_loop
from asyncio.events import AbstractEventLoop
import concurrent.futures
import functools
import logging
import threading
from traceback import extract_stack
from typing import Any, Awaitable, Callable, Coroutine, TypeVar
# Module-level logger for this utility module.
_LOGGER = logging.getLogger(__name__)
# Marker attribute set on a loop once new threadsafe callbacks must be refused.
_SHUTDOWN_RUN_CALLBACK_THREADSAFE = "_shutdown_run_callback_threadsafe"
# Generic result type for run_callback_threadsafe.
T = TypeVar("T")
def fire_coroutine_threadsafe(coro: Coroutine, loop: AbstractEventLoop) -> None:
    """Submit a coroutine object to a given event loop.

    This method does not provide a way to retrieve the result and
    is intended for fire-and-forget use. This reduces the
    work involved to fire the function on the loop.
    """
    # Refuse to be used from the loop's own thread.
    loop_thread = loop.__dict__.get("_thread_ident")
    if loop_thread is not None and loop_thread == threading.get_ident():
        raise RuntimeError("Cannot be called from within the event loop")
    if not coroutines.iscoroutine(coro):
        raise TypeError("A coroutine object is required: %s" % coro)

    def schedule() -> None:
        """Handle the firing of a coroutine."""
        ensure_future(coro, loop=loop)

    # call_soon_threadsafe is the only loop API safe to use cross-thread.
    loop.call_soon_threadsafe(schedule)
def run_callback_threadsafe(
    loop: AbstractEventLoop, callback: Callable[..., T], *args: Any
) -> "concurrent.futures.Future[T]":
    """Submit a callback object to a given event loop.
    Return a concurrent.futures.Future to access the result.
    """
    # Refuse to be used from the loop's own thread: blocking on the returned
    # future there would deadlock the loop.
    ident = loop.__dict__.get("_thread_ident")
    if ident is not None and ident == threading.get_ident():
        raise RuntimeError("Cannot be called from within the event loop")
    future: concurrent.futures.Future = concurrent.futures.Future()
    def run_callback() -> None:
        """Run callback and store result."""
        try:
            future.set_result(callback(*args))
        except Exception as exc:  # pylint: disable=broad-except
            if future.set_running_or_notify_cancel():
                future.set_exception(exc)
            else:
                # The caller cancelled the future; just record the failure.
                _LOGGER.warning("Exception on lost future: ", exc_info=True)
    loop.call_soon_threadsafe(run_callback)
    if hasattr(loop, _SHUTDOWN_RUN_CALLBACK_THREADSAFE):
        #
        # If the final `HomeAssistant.async_block_till_done` in
        # `HomeAssistant.async_stop` has already been called, the callback
        # will never run and, `future.result()` will block forever which
        # will prevent the thread running this code from shutting down which
        # will result in a deadlock when the main thread attempts to shutdown
        # the executor and `.join()` the thread running this code.
        #
        # To prevent this deadlock we do the following on shutdown:
        #
        # 1. Set the _SHUTDOWN_RUN_CALLBACK_THREADSAFE attr on this function
        #    by calling `shutdown_run_callback_threadsafe`
        # 2. Call `hass.async_block_till_done` at least once after shutdown
        #    to ensure all callbacks have run
        # 3. Raise an exception here to ensure `future.result()` can never be
        #    called and hit the deadlock since once `shutdown_run_callback_threadsafe`
        #    we cannot promise the callback will be executed.
        #
        raise RuntimeError("The event loop is in the process of shutting down.")
    return future
def check_loop() -> None:
    """Warn if called inside the event loop."""
    try:
        get_running_loop()
        in_loop = True
    except RuntimeError:
        # No running loop in this thread: nothing to warn about.
        in_loop = False
    if not in_loop:
        return
    found_frame = None
    # Walk the stack innermost-first looking for the first frame that belongs
    # to an integration (custom component or built-in component).
    for frame in reversed(extract_stack()):
        for path in ("custom_components/", "homeassistant/components/"):
            try:
                index = frame.filename.index(path)
                found_frame = frame
                break
            except ValueError:
                continue
        if found_frame is not None:
            break
    # Did not source from integration? Hard error.
    if found_frame is None:
        raise RuntimeError(
            "Detected I/O inside the event loop. This is causing stability issues. Please report issue"
        )
    # NOTE: `index` and `path` deliberately leak out of the loop above and
    # refer to the frame that was found.
    start = index + len(path)
    end = found_frame.filename.index("/", start)
    # The directory right after the matched prefix is the integration name.
    integration = found_frame.filename[start:end]
    if path == "custom_components/":
        extra = " to the custom component author"
    else:
        extra = ""
    _LOGGER.warning(
        "Detected I/O inside the event loop. This is causing stability issues. Please report issue%s for %s doing I/O at %s, line %s: %s",
        extra,
        integration,
        found_frame.filename[index:],
        found_frame.lineno,
        found_frame.line.strip(),
    )
def protect_loop(func: Callable) -> Callable:
    """Protect function from running in event loop."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):  # type: ignore
        # Emit the I/O-in-loop warning (when applicable) before delegating.
        check_loop()
        return func(*args, **kwargs)

    return wrapper
async def gather_with_concurrency(
    limit: int, *tasks: Any, return_exceptions: bool = False
) -> Any:
    """Wrap asyncio.gather to limit the number of concurrent tasks.

    From: https://stackoverflow.com/a/61478547/9127614
    """
    semaphore = Semaphore(limit)

    async def bounded(awaitable: Awaitable[Any]) -> Any:
        # Each wrapped awaitable must acquire a slot before it may run.
        async with semaphore:
            return await awaitable

    wrapped = (bounded(task) for task in tasks)
    return await gather(*wrapped, return_exceptions=return_exceptions)
def shutdown_run_callback_threadsafe(loop: AbstractEventLoop) -> None:
    """Call when run_callback_threadsafe should prevent creating new futures.
    We must finish all callbacks before the executor is shutdown
    or we can end up in a deadlock state where:
    `executor.result()` is waiting for its `._condition`
    and the executor shutdown is trying to `.join()` the
    executor thread.
    This function is considered irreversible and should only ever
    be called when Home Assistant is going to shutdown and
    python is going to exit.
    """
    # run_callback_threadsafe checks this attribute and refuses new work.
    setattr(loop, _SHUTDOWN_RUN_CALLBACK_THREADSAFE, True)
| {
"repo_name": "turbokongen/home-assistant",
"path": "homeassistant/util/async_.py",
"copies": "2",
"size": "6146",
"license": "apache-2.0",
"hash": 1797480535233578000,
"line_mean": 32.7692307692,
"line_max": 138,
"alpha_frac": 0.6537585421,
"autogenerated": false,
"ratio": 4.180952380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5834710923052382,
"avg_score": null,
"num_lines": null
} |
"""Asyncio utilities."""
from __future__ import annotations
from asyncio import Semaphore, coroutines, ensure_future, gather, get_running_loop
from asyncio.events import AbstractEventLoop
from collections.abc import Awaitable, Coroutine
import concurrent.futures
import functools
import logging
import threading
from traceback import extract_stack
from typing import Any, Callable, TypeVar
# Module-level logger for this utility module.
_LOGGER = logging.getLogger(__name__)
# Marker attribute set on a loop once new threadsafe callbacks must be refused.
_SHUTDOWN_RUN_CALLBACK_THREADSAFE = "_shutdown_run_callback_threadsafe"
# Generic result type for run_callback_threadsafe.
T = TypeVar("T")
def fire_coroutine_threadsafe(coro: Coroutine, loop: AbstractEventLoop) -> None:
    """Submit a coroutine object to a given event loop.

    This method does not provide a way to retrieve the result and
    is intended for fire-and-forget use. This reduces the
    work involved to fire the function on the loop.
    """
    # Refuse to be used from the loop's own thread.
    loop_thread = loop.__dict__.get("_thread_ident")
    if loop_thread is not None and loop_thread == threading.get_ident():
        raise RuntimeError("Cannot be called from within the event loop")
    if not coroutines.iscoroutine(coro):
        raise TypeError("A coroutine object is required: %s" % coro)

    def schedule() -> None:
        """Handle the firing of a coroutine."""
        ensure_future(coro, loop=loop)

    # call_soon_threadsafe is the only loop API safe to use cross-thread.
    loop.call_soon_threadsafe(schedule)
def run_callback_threadsafe(
    loop: AbstractEventLoop, callback: Callable[..., T], *args: Any
) -> concurrent.futures.Future[T]:  # pylint: disable=unsubscriptable-object
    """Submit *callback* to *loop* from another thread.

    Returns a concurrent.futures.Future that resolves with the callback's
    result (or exception). Raises RuntimeError when called from the loop's
    own thread, or when the loop has been flagged for shutdown.
    """
    loop_thread = loop.__dict__.get("_thread_ident")
    if loop_thread is not None and loop_thread == threading.get_ident():
        raise RuntimeError("Cannot be called from within the event loop")
    result_future: concurrent.futures.Future = concurrent.futures.Future()

    def _execute() -> None:
        """Invoke the callback on the loop thread and capture its outcome."""
        try:
            result_future.set_result(callback(*args))
        except Exception as err:  # pylint: disable=broad-except
            if result_future.set_running_or_notify_cancel():
                result_future.set_exception(err)
            else:
                _LOGGER.warning("Exception on lost future: ", exc_info=True)

    loop.call_soon_threadsafe(_execute)
    if hasattr(loop, _SHUTDOWN_RUN_CALLBACK_THREADSAFE):
        # The loop has been flagged by shutdown_run_callback_threadsafe().
        # Once `HomeAssistant.async_stop` has run its final
        # `async_block_till_done`, a freshly-scheduled callback may never
        # execute, so `future.result()` would block forever and deadlock the
        # executor shutdown that is trying to `.join()` this thread.
        # Shutdown protocol:
        #   1. shutdown_run_callback_threadsafe() sets the sentinel attribute.
        #   2. `hass.async_block_till_done` is called at least once afterwards
        #      so already-scheduled callbacks finish.
        #   3. We raise here so `future.result()` can never be reached, since
        #      past that point the callback's execution cannot be promised.
        raise RuntimeError("The event loop is in the process of shutting down.")
    return result_future
def check_loop() -> None:
    """Warn and raise if called from within the event loop.

    Walks the call stack to attribute the offending I/O to an integration
    (custom or built-in). Raises RuntimeError in every in-loop case so the
    caller is forced off the event loop; returns None when not in a loop.
    """
    try:
        get_running_loop()
        in_loop = True
    except RuntimeError:
        in_loop = False
    if not in_loop:
        return
    # Search for the innermost frame that belongs to an integration.
    # Capture the matched prefix and its offset explicitly instead of
    # relying on the for-loop variables (`path`, `index`) leaking out of
    # the search loops, which was fragile and lint-hostile.
    found_frame = None
    found_path = ""
    found_index = 0
    for frame in reversed(extract_stack()):
        for path in ("custom_components/", "homeassistant/components/"):
            try:
                found_index = frame.filename.index(path)
            except ValueError:
                continue
            found_frame = frame
            found_path = path
            break
        if found_frame is not None:
            break
    # Did not source from integration? Hard error.
    if found_frame is None:
        raise RuntimeError(
            "Detected I/O inside the event loop. This is causing stability issues. Please report issue"
        )
    # Extract "<integration>" from ".../<prefix><integration>/...".
    start = found_index + len(found_path)
    end = found_frame.filename.index("/", start)
    integration = found_frame.filename[start:end]
    if found_path == "custom_components/":
        extra = " to the custom component author"
    else:
        extra = ""
    _LOGGER.warning(
        "Detected I/O inside the event loop. This is causing stability issues. Please report issue%s for %s doing I/O at %s, line %s: %s",
        extra,
        integration,
        found_frame.filename[found_index:],
        found_frame.lineno,
        found_frame.line.strip(),
    )
    raise RuntimeError(
        f"I/O must be done in the executor; Use `await hass.async_add_executor_job()` "
        f"at {found_frame.filename[found_index:]}, line {found_frame.lineno}: {found_frame.line.strip()}"
    )
def protect_loop(func: Callable) -> Callable:
    """Wrap *func* so it refuses to execute inside the event loop."""

    @functools.wraps(func)
    def guarded(*args, **kwargs):  # type: ignore
        """Run the loop-safety check, then delegate to the wrapped callable."""
        check_loop()
        return func(*args, **kwargs)

    return guarded
async def gather_with_concurrency(
    limit: int, *tasks: Any, return_exceptions: bool = False
) -> Any:
    """Run *tasks* through asyncio.gather, at most *limit* at a time.
    From: https://stackoverflow.com/a/61478547/9127614
    """
    gate = Semaphore(limit)

    async def bounded(task: Awaitable[Any]) -> Any:
        """Await *task* while holding one semaphore slot."""
        async with gate:
            return await task

    return await gather(
        *(bounded(t) for t in tasks), return_exceptions=return_exceptions
    )
def shutdown_run_callback_threadsafe(loop: AbstractEventLoop) -> None:
    """Call when run_callback_threadsafe should prevent creating new futures.

    We must finish all callbacks before the executor is shutdown
    or we can end up in a deadlock state where:
    `executor.result()` is waiting for its `._condition`
    and the executor shutdown is trying to `.join()` the
    executor thread.

    This function is considered irreversible and should only ever
    be called when Home Assistant is going to shutdown and
    python is going to exit.
    """
    # Irreversibly flag the loop: run_callback_threadsafe checks for this
    # attribute and raises instead of handing out futures that might never
    # complete.
    setattr(loop, _SHUTDOWN_RUN_CALLBACK_THREADSAFE, True)
| {
"repo_name": "home-assistant/home-assistant",
"path": "homeassistant/util/async_.py",
"copies": "2",
"size": "6467",
"license": "apache-2.0",
"hash": -6471417228432887000,
"line_mean": 33.2169312169,
"line_max": 138,
"alpha_frac": 0.6559455698,
"autogenerated": false,
"ratio": 4.153500321130379,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005225013135715456,
"num_lines": 189
} |
"""Asyncio utilities."""
from __future__ import annotations
from asyncio import Semaphore, coroutines, ensure_future, gather, get_running_loop
from asyncio.events import AbstractEventLoop
import concurrent.futures
import functools
import logging
import threading
from traceback import extract_stack
from typing import Any, Awaitable, Callable, Coroutine, TypeVar
_LOGGER = logging.getLogger(__name__)
# Sentinel attribute name set on a loop by shutdown_run_callback_threadsafe();
# run_callback_threadsafe refuses to hand out futures once it is present.
_SHUTDOWN_RUN_CALLBACK_THREADSAFE = "_shutdown_run_callback_threadsafe"
# Return type of the callback passed to run_callback_threadsafe.
T = TypeVar("T")
def fire_coroutine_threadsafe(coro: Coroutine, loop: AbstractEventLoop) -> None:
    """Schedule a coroutine on *loop* from another thread, fire-and-forget.

    No handle to the result is returned, which keeps the cross-thread
    hand-off as cheap as possible.

    Raises RuntimeError when invoked from the loop's own thread and
    TypeError when *coro* is not a coroutine object.
    """
    loop_thread = loop.__dict__.get("_thread_ident")
    if loop_thread == threading.get_ident() and loop_thread is not None:
        raise RuntimeError("Cannot be called from within the event loop")
    if not coroutines.iscoroutine(coro):
        raise TypeError("A coroutine object is required: %s" % coro)

    def schedule_it() -> None:
        """Wrap the coroutine in a task once we are on the loop thread."""
        ensure_future(coro, loop=loop)

    loop.call_soon_threadsafe(schedule_it)
def run_callback_threadsafe(
    loop: AbstractEventLoop, callback: Callable[..., T], *args: Any
) -> concurrent.futures.Future[T]:  # pylint: disable=unsubscriptable-object
    """Submit *callback* to *loop* from another thread.

    Returns a concurrent.futures.Future that resolves with the callback's
    result (or exception). Raises RuntimeError when called from the loop's
    own thread, or when the loop has been flagged for shutdown.
    """
    loop_thread = loop.__dict__.get("_thread_ident")
    if loop_thread is not None and loop_thread == threading.get_ident():
        raise RuntimeError("Cannot be called from within the event loop")
    result_future: concurrent.futures.Future = concurrent.futures.Future()

    def _execute() -> None:
        """Invoke the callback on the loop thread and capture its outcome."""
        try:
            result_future.set_result(callback(*args))
        except Exception as err:  # pylint: disable=broad-except
            if result_future.set_running_or_notify_cancel():
                result_future.set_exception(err)
            else:
                _LOGGER.warning("Exception on lost future: ", exc_info=True)

    loop.call_soon_threadsafe(_execute)
    if hasattr(loop, _SHUTDOWN_RUN_CALLBACK_THREADSAFE):
        # The loop has been flagged by shutdown_run_callback_threadsafe().
        # Once `HomeAssistant.async_stop` has run its final
        # `async_block_till_done`, a freshly-scheduled callback may never
        # execute, so `future.result()` would block forever and deadlock the
        # executor shutdown that is trying to `.join()` this thread.
        # Shutdown protocol:
        #   1. shutdown_run_callback_threadsafe() sets the sentinel attribute.
        #   2. `hass.async_block_till_done` is called at least once afterwards
        #      so already-scheduled callbacks finish.
        #   3. We raise here so `future.result()` can never be reached, since
        #      past that point the callback's execution cannot be promised.
        raise RuntimeError("The event loop is in the process of shutting down.")
    return result_future
def check_loop() -> None:
    """Warn and raise if called from within the event loop.

    Walks the call stack to attribute the offending I/O to an integration
    (custom or built-in). Raises RuntimeError in every in-loop case so the
    caller is forced off the event loop; returns None when not in a loop.
    """
    try:
        get_running_loop()
        in_loop = True
    except RuntimeError:
        in_loop = False
    if not in_loop:
        return
    # Search for the innermost frame that belongs to an integration.
    # Capture the matched prefix and its offset explicitly instead of
    # relying on the for-loop variables (`path`, `index`) leaking out of
    # the search loops, which was fragile and lint-hostile.
    found_frame = None
    found_path = ""
    found_index = 0
    for frame in reversed(extract_stack()):
        for path in ("custom_components/", "homeassistant/components/"):
            try:
                found_index = frame.filename.index(path)
            except ValueError:
                continue
            found_frame = frame
            found_path = path
            break
        if found_frame is not None:
            break
    # Did not source from integration? Hard error.
    if found_frame is None:
        raise RuntimeError(
            "Detected I/O inside the event loop. This is causing stability issues. Please report issue"
        )
    # Extract "<integration>" from ".../<prefix><integration>/...".
    start = found_index + len(found_path)
    end = found_frame.filename.index("/", start)
    integration = found_frame.filename[start:end]
    if found_path == "custom_components/":
        extra = " to the custom component author"
    else:
        extra = ""
    _LOGGER.warning(
        "Detected I/O inside the event loop. This is causing stability issues. Please report issue%s for %s doing I/O at %s, line %s: %s",
        extra,
        integration,
        found_frame.filename[found_index:],
        found_frame.lineno,
        found_frame.line.strip(),
    )
    raise RuntimeError(
        f"I/O must be done in the executor; Use `await hass.async_add_executor_job()` "
        f"at {found_frame.filename[found_index:]}, line {found_frame.lineno}: {found_frame.line.strip()}"
    )
def protect_loop(func: Callable) -> Callable:
    """Wrap *func* so it refuses to execute inside the event loop."""

    @functools.wraps(func)
    def guarded(*args, **kwargs):  # type: ignore
        """Run the loop-safety check, then delegate to the wrapped callable."""
        check_loop()
        return func(*args, **kwargs)

    return guarded
async def gather_with_concurrency(
    limit: int, *tasks: Any, return_exceptions: bool = False
) -> Any:
    """Run *tasks* through asyncio.gather, at most *limit* at a time.
    From: https://stackoverflow.com/a/61478547/9127614
    """
    gate = Semaphore(limit)

    async def bounded(task: Awaitable[Any]) -> Any:
        """Await *task* while holding one semaphore slot."""
        async with gate:
            return await task

    return await gather(
        *(bounded(t) for t in tasks), return_exceptions=return_exceptions
    )
def shutdown_run_callback_threadsafe(loop: AbstractEventLoop) -> None:
    """Call when run_callback_threadsafe should prevent creating new futures.

    We must finish all callbacks before the executor is shutdown
    or we can end up in a deadlock state where:
    `executor.result()` is waiting for its `._condition`
    and the executor shutdown is trying to `.join()` the
    executor thread.

    This function is considered irreversible and should only ever
    be called when Home Assistant is going to shutdown and
    python is going to exit.
    """
    # Irreversibly flag the loop: run_callback_threadsafe checks for this
    # attribute and raises instead of handing out futures that might never
    # complete.
    setattr(loop, _SHUTDOWN_RUN_CALLBACK_THREADSAFE, True)
| {
"repo_name": "adrienbrault/home-assistant",
"path": "homeassistant/util/async_.py",
"copies": "3",
"size": "6440",
"license": "mit",
"hash": -6530162100796150000,
"line_mean": 33.2553191489,
"line_max": 138,
"alpha_frac": 0.6549689441,
"autogenerated": false,
"ratio": 4.149484536082475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6304453480182474,
"avg_score": null,
"num_lines": null
} |
import pyb, stm
from machine import SPI, Pin
# define constants
#
# XPT2046 command bytes: control-byte values sent over SPI to select which
# axis/pressure channel to sample and at what resolution.
T_GETX = const(0xd0)    ## 12 bit resolution
T_GETY = const(0x90)    ## 12 bit resolution
T_GETZ1 = const(0xb8)   ## 8 bit resolution
T_GETZ2 = const(0xc8)   ## 8 bit resolution
#
# Plausibility bounds for raw ADC readings; values outside are "no touch".
X_LOW = const(10)       ## lowest reasonable X value from the touchpad
Y_HIGH = const(4090)    ## highest reasonable Y value
class TOUCH:
    """Driver for an XPT2046-style resistive touch controller on SPI.

    Supports synchronous polling via get_touch() or, when constructed with
    asyn=True, a uasyncio background task that keeps the latest debounced
    position available through get_touch_async().
    """
    #
    # Init just sets the PIN's to In / out as required
    # async: set True if asynchronous operation intended
    # confidence: confidence level - number of consecutive touches with a margin smaller than the given level
    #     which the function will sample until it accepts it as a valid touch
    # margin: Distance from mean centre at which touches are considered at the same position
    # delay: Delay between samples in ms. (n/a if asynchronous)
    #
    # Default calibration vector: (xadd_low, xmul_low, xadd_high, xmul_high,
    # yadd_low, ymul_low, yadd_high, ymul_high) — see do_normalize().
    DEFAULT_CAL = (-3917, -0.127, -3923, -0.1267, -3799, -0.07572, -3738, -0.07814)

    def __init__(self, controller="XPT2046", asyn=False, *, confidence=5, margin=50, delay=10, calibration=None, spi = None):
        """Set up the SPI link and sampling parameters; optionally start the
        asynchronous sampling task (requires a running uasyncio loop)."""
        if spi is None:
            # Default wiring for the PyBoard; soft SPI (-1) at 1 MHz.
            self.spi = SPI(-1, baudrate=1000000, sck=Pin("X12"), mosi=Pin("X11"), miso=Pin("Y2"))
        else:
            self.spi = spi
        # 3-byte command/response buffers reused for every transfer.
        self.recv = bytearray(3)
        self.xmit = bytearray(3)
        # set default values
        self.ready = False
        self.touched = False
        self.x = 0
        self.y = 0
        self.buf_length = 0
        cal = TOUCH.DEFAULT_CAL if calibration is None else calibration
        # touch_parameter() refuses changes in asynchronous mode, so set the
        # flag only after the initial configuration has been applied.
        self.asynchronous = False
        self.touch_parameter(confidence, margin, delay, cal)
        if asyn:
            self.asynchronous = True
            import uasyncio as asyncio
            loop = asyncio.get_event_loop()
            loop.create_task(self._main_thread())

    # set parameters for get_touch()
    # res: Resolution in bits of the returned values, default = 10
    # confidence: confidence level - number of consecutive touches with a margin smaller than the given level
    #     which the function will sample until it accepts it as a valid touch
    # margin: Difference from mean centre at which touches are considered at the same position
    # delay: Delay between samples in ms.
    #
    def touch_parameter(self, confidence=5, margin=50, delay=10, calibration=None):
        """Reconfigure debouncing parameters (synchronous mode only)."""
        if not self.asynchronous:  # Ignore attempts to change on the fly.
            # Clamp confidence to [5, 25] samples.
            confidence = max(min(confidence, 25), 5)
            if confidence != self.buf_length:
                self.buff = [[0,0] for x in range(confidence)]
                self.buf_length = confidence
            # Clamp delay to [5, 100] ms and margin to [1, 100] units.
            self.delay = max(min(delay, 100), 5)
            margin = max(min(margin, 100), 1)
            self.margin = margin * margin  # store the square value
            if calibration:
                self.calibration = calibration

    # get_touch(): Synchronous use. get a touch value; Parameters:
    #
    # initital: Wait for a non-touch state before getting a sample.
    #     True = Initial wait for a non-touch state
    #     False = Do not wait for a release
    # wait: Wait for a touch or not?
    #     False: Do not wait for a touch and return immediately
    #     True: Wait until a touch is pressed.
    # raw: Setting whether raw touch coordinates (True) or normalized ones (False) are returned
    #     setting the calibration vector to (0, 1, 0, 1, 0, 1, 0, 1) result in a identity mapping
    # timeout: Longest time (ms, or None = 1 hr) to wait for a touch or release
    #
    # Return (x,y) or None
    #
    def get_touch(self, initial=True, wait=True, raw=False, timeout=None):
        """Return a debounced (x, y) touch position, or None on timeout."""
        if self.asynchronous:
            return None  # Should only be called in synchronous mode
        if timeout is None:
            timeout = 3600000  # set timeout to 1 hour
        #
        if initial:  ## wait for a non-touch state
            sample = True
            while sample and timeout > 0:
                sample = self.raw_touch()
                pyb.delay(self.delay)
                timeout -= self.delay
            if timeout <= 0:  # after timeout, return None
                return None
        #
        buff = self.buff
        buf_length = self.buf_length
        buffptr = 0
        nsamples = 0
        while timeout > 0:
            # Once the ring buffer is full, accept the touch if all samples
            # cluster within the configured margin of their mean.
            if nsamples == buf_length:
                meanx = sum([c[0] for c in buff]) // buf_length
                meany = sum([c[1] for c in buff]) // buf_length
                dev = sum([(c[0] - meanx)**2 + (c[1] - meany)**2 for c in buff]) / buf_length
                if dev <= self.margin:  # got one; compare against the square value
                    if raw:
                        return (meanx, meany)
                    else:
                        return self.do_normalize((meanx, meany))
            # get a new value
            sample = self.raw_touch()  # get a touch
            if sample is None:
                if not wait:
                    return None
                nsamples = 0  # Invalidate buff
            else:
                buff[buffptr] = sample  # put in buff
                buffptr = (buffptr + 1) % buf_length
                nsamples = min(nsamples + 1, buf_length)
            pyb.delay(self.delay)
            timeout -= self.delay
        return None

    # Asynchronous use: this thread maintains self.x and self.y
    async def _main_thread(self):
        """Background sampler: continuously debounce touches into
        self.x/self.y and maintain the self.touched/self.ready flags."""
        import uasyncio as asyncio
        buff = self.buff
        buf_length = self.buf_length
        buffptr = 0
        nsamples = 0
        await asyncio.sleep(0)
        while True:
            if nsamples == buf_length:
                meanx = sum([c[0] for c in buff]) // buf_length
                meany = sum([c[1] for c in buff]) // buf_length
                dev = sum([(c[0] - meanx)**2 + (c[1] - meany)**2 for c in buff]) / buf_length
                if dev <= self.margin:  # got one; compare against the square value
                    self.ready = True
                    self.x, self.y = self.do_normalize((meanx, meany))
            sample = self.raw_touch()  # get a touch
            if sample is None:
                self.touched = False
                self.ready = False
                nsamples = 0  # Invalidate buff
            else:
                self.touched = True
                buff[buffptr] = sample  # put in buff
                buffptr = (buffptr + 1) % buf_length
                nsamples = min(nsamples + 1, buf_length)
            # Yield to the scheduler between samples (no fixed delay here).
            await asyncio.sleep(0)

    # Asynchronous get_touch
    def get_touch_async(self):
        """Return the latest debounced (x, y) once per touch, else None."""
        if self.ready:
            self.ready = False
            return self.x, self.y
        return None

    #
    # do_normalize(touch)
    # calculate the screen coordinates from the touch values, using the calibration values
    # touch must be the tuple return by get_touch
    #
    def do_normalize(self, touch):
        """Map raw ADC coordinates to screen coordinates by interpolating
        the calibration offsets/scales across the opposite axis."""
        xmul = self.calibration[3] + (self.calibration[1] - self.calibration[3]) * (touch[1] / 4096)
        xadd = self.calibration[2] + (self.calibration[0] - self.calibration[2]) * (touch[1] / 4096)
        ymul = self.calibration[7] + (self.calibration[5] - self.calibration[7]) * (touch[0] / 4096)
        yadd = self.calibration[6] + (self.calibration[4] - self.calibration[6]) * (touch[0] / 4096)
        x = int((touch[0] + xadd) * xmul)
        y = int((touch[1] + yadd) * ymul)
        return (x, y)

    #
    # raw_touch(tuple)
    # raw read touch. Returns (x,y) or None
    #
    def raw_touch(self):
        """Sample X/Y once; return (x, y) only if within plausible bounds."""
        # NOTE(review): CONTROL_PORT is declared global here but never
        # defined or used anywhere in this class — leftover; confirm and drop.
        global CONTROL_PORT
        x = self.touch_talk(T_GETX, 12)
        y = self.touch_talk(T_GETY, 12)
        if x > X_LOW and y < Y_HIGH:  # touch pressed?
            return (x, y)
        else:
            return None

    #
    # Send a command to the touch controller and wait for the response
    # cmd: command byte
    # bits: expected data size. Reasonable values are 8 and 12
    #
    def touch_talk(self, cmd, bits):
        """Full-duplex SPI exchange: send *cmd*, return the *bits*-wide
        reply assembled from the two response bytes."""
        self.xmit[0] = cmd
        self.spi.write_readinto(self.xmit, self.recv)
        # Response is left-aligned in bytes 1-2; shift down to `bits` width.
        return (self.recv[1] * 256 + self.recv[2]) >> (15 - bits)
| {
"repo_name": "robert-hh/XPT2046-touch-pad-driver-for-PyBoard",
"path": "touch.py",
"copies": "1",
"size": "9126",
"license": "mit",
"hash": -1130777117690351000,
"line_mean": 41.25,
"line_max": 125,
"alpha_frac": 0.6077142231,
"autogenerated": false,
"ratio": 3.7993338884263115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49070481115263115,
"avg_score": null,
"num_lines": null
} |
"""Async iterable that wraps an EventEmitter."""
import asyncio
import collections
from collections.abc import AsyncIterator, AsyncIterable
class EventIterator(AsyncIterator):
    """An iterator who values are the payloads from emitted events."""

    def __init__(self, emitter, event):
        """Register with *emitter* and start buffering *event* payloads."""
        self._emitter = emitter
        self._event = event
        self._emitter.on(event, self._push)
        self._data = collections.deque()
        self._future = None

    async def _push(self, *args, **kwargs):
        """Queue one payload; wake a paused __anext__ if one is waiting."""
        self._data.append((args, kwargs))
        if self._future is None:
            return
        pending, self._future = self._future, None
        pending.set_result(True)

    async def __anext__(self):
        """Return the oldest buffered payload, waiting when the buffer is empty."""
        if not self._data:
            self._future = asyncio.Future()
            await self._future
        return self._data.popleft()
class EventIterable(AsyncIterable):
    """An iterable object the iterator for which loops on each fired event."""

    def __init__(self, emitter, event):
        """Initialize the iterable with an emitter and target event."""
        self._emitter = emitter
        self._event = event

    def __aiter__(self):
        """Get a new EventIterator object.

        Fix: __aiter__ must be a plain (non-async) method returning the
        async iterator directly. Declaring it ``async def`` made it return
        a coroutine, which was deprecated in Python 3.5.2 and is rejected
        by ``async for`` since Python 3.8.
        """
        return EventIterator(self._emitter, self._event)
| {
"repo_name": "asyncdef/eventemitter",
"path": "eventemitter/iterable.py",
"copies": "1",
"size": "1527",
"license": "apache-2.0",
"hash": -1984079445701771300,
"line_mean": 29.54,
"line_max": 78,
"alpha_frac": 0.6260641781,
"autogenerated": false,
"ratio": 4.362857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 50
} |
# async_move.py
# aoneill - 04/16/17
import threading
import time
from libs import psmove
class AsyncPSMove(threading.Thread):
    '''
    Asynchronous class designed to update the associated PSMove controller
    at a pre-defined frame rate.
    This avoids having the controller lose its light / rumble after not having
    received an update recently
    '''

    # Updates per second
    FPS = 40.0

    def __init__(self, num):
        """Bind controller *num* and initialize all cached I/O state."""
        # Initialize all internal state
        super(AsyncPSMove, self).__init__()
        self.num = num
        self.move = psmove.PSMove(num)
        self._running = True
        self._leds = (0, 0, 0)
        self._rumble = 0
        self._trigger = 0
        self._temp = 0
        self._batt = 0
        # Fix: initialize the sensor tuples so accel()/gyro()/magno() no
        # longer raise AttributeError when called before the first
        # successful poll() in run().
        self._accel = (0, 0, 0)
        self._gyro = (0, 0, 0)
        self._magno = (0, 0, 0)

    # Output on the controller

    def leds(self, r, g, b):
        """Set the LED color to be pushed on the next update frame."""
        self._leds = (r, g, b)

    def rumble(self, rumble):
        """Set the rumble intensity to be pushed on the next update frame."""
        self._rumble = rumble

    def off(self):
        """Turn off both the LEDs and the rumble."""
        self._leds = (0, 0, 0)
        self._rumble = 0

    # Data from the controller (values cached by the update loop in run())

    def trigger(self):
        """Return the last polled trigger value."""
        return self._trigger

    def temperature(self):
        """Return the last polled controller temperature."""
        return self._temp

    def battery(self):
        """Return the last polled battery level."""
        return self._batt

    def accel(self):
        """Return the last polled accelerometer (ax, ay, az) tuple."""
        return self._accel

    def gyro(self):
        """Return the last polled gyroscope (gx, gy, gz) tuple."""
        return self._gyro

    def magno(self):
        """Return the last polled magnetometer (mx, my, mz) tuple."""
        return self._magno

    # Threading methods

    def run(self):
        """Poll inputs and push outputs at AsyncPSMove.FPS until stop()."""
        while(self._running):
            # Get inputs
            if(self.move.poll()):
                self._trigger = self.move.get_trigger()
                self._temp = self.move.get_temperature()
                self._batt = self.move.get_battery()
                self._accel = (self.move.ax, self.move.ay, self.move.az)
                self._gyro = (self.move.gx, self.move.gy, self.move.gz)
                self._magno = (self.move.mx, self.move.my, self.move.mz)
            # Set outputs
            self.move.set_leds(*self._leds)
            self.move.set_rumble(self._rumble)
            self.move.update_leds()
            # 40 FPS
            time.sleep(1.0 / AsyncPSMove.FPS)

    def stop(self):
        """Ask the update loop to exit after its current frame."""
        self._running = False
| {
"repo_name": "alexoneill/15-love",
"path": "rackets/src/util/async_move.py",
"copies": "1",
"size": "1866",
"license": "mit",
"hash": 2253124871186045000,
"line_mean": 20.2045454545,
"line_max": 76,
"alpha_frac": 0.6216505895,
"autogenerated": false,
"ratio": 3.256544502617801,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4378195092117801,
"avg_score": null,
"num_lines": null
} |
# AsyncNotifier example from tutorial
#
# See: http://github.com/seb-m/pyinotify/wiki/Tutorial
#
import asyncore
import pyinotify
from .FeedsDownloader import FeedsDownloader
class EventHandler(pyinotify.ProcessEvent):
    """Re-process a feed list whenever its file is created or modified."""

    # Shared downloader instance used for every filesystem event.
    iDler = FeedsDownloader()

    def process_IN_CREATE(self, event):
        """Handle creation of a feed-list file under the watched tree."""
        path = event.pathname
        # Fix 2to3 artifact: print label and path as two arguments instead
        # of a single tuple (`print(("IN_CREATE:", path))`).
        print("IN_CREATE:", path)
        self.iDler.handle_single_feed_list(path)

    def process_IN_MODIFY(self, event):
        """Handle modification of a feed-list file under the watched tree."""
        path = event.pathname
        print("IN_MODIFY:", path)
        self.iDler.handle_single_feed_list(path)
# Instantiate a new WatchManager (will be used to store watches).
wm = pyinotify.WatchManager()
# Watch the feed-list directory for IN_CREATE and IN_MODIFY events.
mask = pyinotify.IN_CREATE | pyinotify.IN_MODIFY  # ignore errors, this works
# Directory holding the feed-list files, watched recursively.
feedRoot = '../../../static/1-FeedLists/'
wm.add_watch(feedRoot, mask, rec=True)
# Associate this WatchManager with a Notifier (will be used to report and
# process events).
notifier = pyinotify.AsyncNotifier(wm, EventHandler())
# Loop forever and handle events.
# NOTE(review): asyncore is deprecated since Python 3.6 and removed in 3.12;
# pyinotify's AsyncNotifier depends on it — confirm target Python version.
asyncore.loop()
| {
"repo_name": "PodSearch/PodSearch",
"path": "src/FeedsDownloader/FeedListsWatcher.py",
"copies": "1",
"size": "1078",
"license": "bsd-3-clause",
"hash": 6885202615631321000,
"line_mean": 28.9444444444,
"line_max": 76,
"alpha_frac": 0.7115027829,
"autogenerated": false,
"ratio": 3.3374613003095974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4548964083209598,
"avg_score": null,
"num_lines": null
} |
# AsyncNotifier example from tutorial
#
# See: http://github.com/seb-m/pyinotify/wiki/Tutorial
#
import asyncore
import pyinotify
from ImagesDownloader import ImagesDownloader
class EventHandler(pyinotify.ProcessEvent):
    """Download images for a feed whenever its file is created or modified."""

    # Shared downloader instance used for every filesystem event.
    iDler = ImagesDownloader()

    def process_IN_CREATE(self, event):
        """Handle creation of a feed file under the watched tree."""
        path = event.pathname
        # Fix 2to3 artifact: print label and path as two arguments instead
        # of a single tuple (`print(("IN_CREATE:", path))`).
        print("IN_CREATE:", path)
        self.iDler.handleFeed(path)

    def process_IN_MODIFY(self, event):
        """Handle modification of a feed file under the watched tree."""
        path = event.pathname
        print("IN_MODIFY:", path)
        self.iDler.handleFeed(path)
# Instantiate a new WatchManager (will be used to store watches).
wm = pyinotify.WatchManager()
# Watch the feeds directory for IN_CREATE and IN_MODIFY events.
mask = pyinotify.IN_CREATE | pyinotify.IN_MODIFY  # ignore errors, this works
# Directory holding the feed files, watched recursively.
feedRoot = '../../../static/2-Feeds/'
wm.add_watch(feedRoot, mask, rec=True)
# Associate this WatchManager with a Notifier (will be used to report and
# process events).
notifier = pyinotify.AsyncNotifier(wm, EventHandler())
# Loop forever and handle events.
# NOTE(review): asyncore is deprecated since Python 3.6 and removed in 3.12;
# pyinotify's AsyncNotifier depends on it — confirm target Python version.
asyncore.loop()
| {
"repo_name": "PodSearch/PodSearch",
"path": "src/Digester/FeedsWatcher.py",
"copies": "1",
"size": "1050",
"license": "bsd-3-clause",
"hash": -3858620304474216400,
"line_mean": 28.1666666667,
"line_max": 76,
"alpha_frac": 0.7104761905,
"autogenerated": false,
"ratio": 3.3980582524271843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4608534442927184,
"avg_score": null,
"num_lines": null
} |
## async.py
# Light wrapper around whatever async library pydle uses.
import functools
import itertools
import collections
import threading
import datetime
import types
import tornado.concurrent
import tornado.ioloop
# Default number of seconds before a pending Future handed to
# EventLoop.on_future is failed with a TimeoutError.
FUTURE_TIMEOUT = 30
class Future(tornado.concurrent.TracebackFuture):
    """
    A future. An object that represents a result that has yet to be created or returned.

    Thin alias over tornado's TracebackFuture so the rest of this module does
    not reference tornado names directly.
    """
def coroutine(func):
    """ Decorator for coroutine functions that need to block for asynchronous operations. """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Future resolved with the decorated generator's final return value.
        return_future = Future()

        def handle_future(future):
            # Chained futures! Feed the resolved value (or exception) back
            # into the generator, then wait on whatever it yields next.
            try:
                if future.exception() is not None:
                    result = gen.throw(future.exception())
                else:
                    result = gen.send(future.result())
                # A yielded tuple of futures means "wait for all of these".
                if isinstance(result, tuple):
                    result = parallel(*result)
                result.add_done_callback(handle_future)
            except StopIteration as e:
                # Generator finished; its return value resolves our future.
                return_future.set_result(getattr(e, 'value', None))

        # Handle initial value.
        gen = func(*args, **kwargs)
        # If this isn't a generator, then wrap the result with a future.
        if not isinstance(gen, types.GeneratorType):
            return_future.set_result(gen)
            return return_future
        # Prime the generator: run it to its first yield (or completion).
        try:
            result = next(gen)
            if isinstance(result, tuple):
                result = parallel(*result)
            result.add_done_callback(handle_future)
        except StopIteration as e:
            return_future.set_result(getattr(e, 'value', None))
        return return_future
    return wrapper
def parallel(*futures):
    """ Create a single future that will be completed when all the given futures are. """
    combined = Future()
    # OrderedDict keyed by future preserves input order regardless of the
    # order in which the futures actually complete.
    results = collections.OrderedDict(zip(futures, itertools.repeat(None)))
    pending = list(futures)

    if not pending:
        # No futures at all: resolve immediately with an empty tuple.
        combined.set_result(())
        return combined

    def on_done(future):
        pending.remove(future)
        results[future] = future.result()
        # Last one in: publish every result in the original order.
        if not pending:
            combined.set_result(tuple(results.values()))

    # NOTE(review): preserved from the original — if a future is already
    # complete, add_done_callback may fire on_done synchronously, mutating
    # `pending` while this loop iterates it; confirm whether that case can
    # occur with the futures passed in practice.
    for future in pending:
        future.add_done_callback(on_done)
    return combined
class EventLoop:
""" A light wrapper around what event loop mechanism pydle uses underneath. """
EVENT_MAPPING = {
'read': tornado.ioloop.IOLoop.READ,
'write': tornado.ioloop.IOLoop.WRITE,
'error': tornado.ioloop.IOLoop.ERROR
}
def __init__(self, io_loop=None):
self.io_loop = io_loop or tornado.ioloop.IOLoop.current()
self.running = False
self.run_thread = None
self.handlers = {}
self.future_timeout = FUTURE_TIMEOUT
self._registered_events = False
self._future_timeouts = {}
self._timeout_id = 0
self._timeout_handles = {}
def __del__(self):
self.io_loop.close()
def register(self, fd):
""" Register a file descriptor with this event loop. """
self.handlers[fd] = { key: [] for key in self.EVENT_MAPPING }
def unregister(self, fd):
""" Unregister a file descriptor with this event loop. """
del self.handlers[fd]
def on_read(self, fd, callback):
"""
Add a callback for when the given file descriptor is available for reading.
Callback will be called with file descriptor as sole argument.
"""
self.handlers[fd]['read'].append(callback)
self._update_events(fd)
def on_write(self, fd, callback):
"""
Add a callback for when the given file descriptor is available for writing.
Callback will be called with file descriptor as sole argument.
"""
self.handlers[fd]['write'].append(callback)
self._update_events(fd)
def on_error(self, fd, callback):
"""
Add a callback for when an error has occurred on the given file descriptor.
Callback will be called with file descriptor as sole argument.
"""
self.handlers[fd]['error'].append(callback)
self._update_events(fd)
def off_read(self, fd, callback):
""" Remove read callback for given file descriptor. """
self.handlers[fd]['read'].remove(callback)
self._update_events(fd)
def off_write(self, fd, callback):
""" Remove write callback for given file descriptor. """
self.handlers[fd]['write'].remove(callback)
self._update_events(fd)
def off_error(self, fd, callback):
""" Remove error callback for given file descriptor. """
self.handlers[fd]['error'].remove(callback)
self._update_events(fd)
def handles_read(self, fd, callback):
""" Return whether or the given read callback is active for the given file descriptor. """
return callback in self.handlers[fd]['read']
def handles_write(self, fd, callback):
""" Return whether or the given write callback is active for the given file descriptor. """
return callback in self.handlers[fd]['write']
def handles_error(self, fd, callback):
""" Return whether or the given error callback is active for the given file descriptor. """
return callback in self.handlers[fd]['error']
def _update_events(self, fd):
if self._registered_events:
self.io_loop.remove_handler(fd)
events = 0
for event, ident in self.EVENT_MAPPING.items():
if self.handlers[fd][event]:
events |= ident
self.io_loop.add_handler(fd, self._do_on_event, events)
self._registered_events = True
def _do_on_event(self, fd, events):
if fd not in self.handlers:
return
for event, ident in self.EVENT_MAPPING.items():
if events & ident:
for handler in self.handlers[fd][event]:
handler(fd)
def on_future(self, _future, _callback, *_args, **_kwargs):
""" Add a callback for when the given future has been resolved. """
callback = functools.partial(self._do_on_future, _callback, _args, _kwargs)
# Create timeout handler and regular handler.
self._future_timeouts[_future] = self.schedule_in(self.future_timeout, callback)
self.io_loop.add_future(_future, callback)
def _do_on_future(self, callback, args, kwargs, future):
# This was a time-out.
if not future.done():
future.set_exception(TimeoutError('Future timed out before yielding a result.'))
del self._future_timeouts[future]
# This was a time-out that already has been handled.
elif isinstance(future.exception(), TimeoutError):
return
# A regular result. Cancel the timeout.
else:
self.unschedule(self._future_timeouts.pop(future))
# Call callback.
callback(*args, **kwargs)
def _get_schedule_handle(self):
""" Get a unique handle for use in the schedule_* functions. """
# Just use a simple monotonically increasing number.
handle = self._timeout_id
self._timeout_id += 1
return handle
def schedule(self, _callback, *_args, **_kwargs):
""" Schedule a callback to be ran as soon as possible in this loop. """
self.io_loop.add_callback(_callback, *_args, **_kwargs)
def schedule_in(self, _when, _callback, *_args, **_kwargs):
"""
Schedule a callback to be ran as soon as possible after `when` seconds have passed.
Will return an opaque handle that can be passed to `unschedule` to unschedule the function.
"""
if not isinstance(_when, datetime.timedelta):
_when = datetime.timedelta(seconds=_when)
# Create ID for this timeout.
id = self._get_schedule_handle()
if self.run_thread != threading.current_thread().ident:
# Schedule scheduling in IOLoop thread because of thread-safety.
self.schedule(functools.partial(self._do_schedule_in, id, _when, _callback, _args, _kwargs))
else:
self._do_schedule_in(id, _when, _callback, _args, _kwargs)
return id
def schedule_periodically(self, _interval, _callback, *_args, **_kwargs):
"""
Schedule a callback to be ran every `interval` seconds.
Will return an opaque handle that can be passed to unschedule() to unschedule the interval function.
A function will also stop being scheduled if it returns False or raises an Exception.
"""
if not isinstance(_interval, datetime.timedelta):
_interval = datetime.timedelta(seconds=_interval)
# Create ID for this periodic.
id = self._get_schedule_handle()
if self.run_thread != threading.current_thread().ident:
# Schedule scheduling in IOLoop thread because of thread-safety.
self.schedule(functools.partial(self._do_schedule_periodically, id, _interval, _callback, _args, _kwargs))
else:
self._do_schedule_periodically(id, _interval, _callback, _args, _kwargs)
return id
def _do_schedule_in(self, id, when, callback, args, kwargs):
    """ Register the one-shot timeout and remember its IOLoop handle. """
    bound = functools.partial(callback, *args, **kwargs)
    self._timeout_handles[id] = self.io_loop.add_timeout(when, bound)
def _do_schedule_periodically(self, id, interval, callback, args, kwargs):
    """ Arm one tick of a periodic callback via the _periodic_handler wrapper. """
    handler = functools.partial(
        self._periodic_handler, id, interval, callback, args, kwargs)
    self._timeout_handles[id] = self.io_loop.add_timeout(interval, handler)
def _periodic_handler(self, id, interval, callback, args, kwargs):
    # Fires in the IOLoop for every tick of a periodic callback.
    # We could've been unscheduled for some reason.
    if not self.is_scheduled(id):
        return
    # Call callback, and schedule again if it doesn't return False.
    # Re-armed *before* the call so the next tick isn't delayed by the
    # callback's runtime; the `finally` below unschedules again when the
    # callback returns False — or raises, leaving `result` at False.
    self._do_schedule_periodically(id, interval, callback, args, kwargs)
    result = False
    try:
        result = callback(*args, **kwargs)
    finally:
        # NOTE(review): `== False` also matches falsy-equal returns such
        # as 0 or 0.0 — confirm that is intended (docstring says "False").
        if result == False:
            self.unschedule(id)
def is_scheduled(self, handle):
    """ Return whether or not the given handle is still scheduled. """
    known = self._timeout_handles
    return handle in known
def unschedule(self, handle):
    """ Unschedule a given timeout or periodical callback.

    Unknown or already-unscheduled handles are ignored.
    """
    if not self.is_scheduled(handle):
        return
    token = self._timeout_handles.pop(handle)
    self.io_loop.remove_timeout(token)
def run(self):
    """ Run the event loop.  No-op if the loop is already running. """
    if self.running:
        return
    self.running = True
    self.run_thread = threading.current_thread().ident
    # start() blocks until the loop is stopped again.
    self.io_loop.start()
    self.run_thread = None
    self.running = False
def run_with(self, func):
    """ Run loop, call function, stop loop. If function returns a future, run until the future has been resolved. """
    # NOTE(review): unlike run(), this does not guard on self.running —
    # confirm callers never invoke it on an already-running loop.
    self.running = True
    self.run_thread = threading.current_thread().ident
    # run_sync drives the loop until func (or the future it returns) is done.
    self.io_loop.run_sync(func)
    self.run_thread = None
    self.running = False
def run_until(self, future):
    """ Run until future is resolved. """
    def _supply():
        return future
    return self.run_with(_supply)
def stop(self):
    """ Stop the event loop.  No-op when it is not running. """
    if not self.running:
        return
    self.io_loop.stop()
| {
"repo_name": "suut/psychic-happiness",
"path": "pydle/async.py",
"copies": "1",
"size": "11596",
"license": "unlicense",
"hash": 2750609133380345000,
"line_mean": 35.0124223602,
"line_max": 151,
"alpha_frac": 0.6152121421,
"autogenerated": false,
"ratio": 4.3027829313543595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5417995073454359,
"avg_score": null,
"num_lines": null
} |
# AsyncPy shell
# tom coladonato
# with boilerplate from
# http://stackoverflow.com/a/9109699
import threading
import Queue
import sys
import time
def console(q, stdout_lock):
    """Console reader thread: wait for Enter, take the stdout lock, read one
    command line, and push it onto `q`.  Exits after 'quit' or 'exit'."""
    while True:
        # First raw_input() just waits for Enter (the "acquire lock" step).
        raw_input()
        with stdout_lock:
            command = raw_input('> ')
        q.put(command)
        if command in ('quit', 'exit'):
            break
class Agent():
    """A tiny wrapper around a zero-argument callable ("agent") that the
    main loop evaluates once per tick via ``agent.run()``.

    The default ``run`` is a no-op returning None.  It is wrapped in
    ``staticmethod`` because a plain class-level lambda would be bound when
    accessed through an instance, implicitly receiving ``self`` and raising
    TypeError (the original bug).
    """
    run = staticmethod(lambda: None)

    def __init__(self, func=None):
        # A user-supplied callable is stored on the instance, shadowing the
        # class-level default; it is invoked with no arguments.
        if func:
            self.run = func
if __name__ == '__main__':
console_buffer = Queue.Queue()
stdout_lock = threading.Lock()
s = \
'''
AsyncPy Shell
=====
Press Enter to aquire lock
Input single line python expression or statement to be evaluated or executed
Input exit to exit
'''
s = s.replace(' ', '')
with stdout_lock:
print s
dj = threading.Thread(target=console, args = (console_buffer, stdout_lock))
time.sleep(0.1)
dj.start()
i = 0
agents = {} #dict of agents to be evaluated once per tick
while True:
# if there is a command, handle it
if not console_buffer.empty():
cmd = console_buffer.get()
if cmd == 'quit' or cmd == 'exit':
break
#determine if statement or expression
isstatement = False
try:
code = compile(cmd, '<stdin>', 'eval')
except SyntaxError:
isstatement = True
code = compile(cmd, '<stdin>', 'exec')
result = None
with stdout_lock:
if isstatement:
try:
exec(cmd) # TODO: sandbox
except:
e = sys.exc_info()[0]
print("ERROR: %s\n"%e)
else:
try:
result = eval(cmd) # TODO: sandbox
except:
e = sys.exc_info()[0]
print("ERROR: %s\n"%e)
if result is not None:
print(" ".join(["< ",str(result)]))
# evaluate all agents and print status display if applicable
agent_out = ""
for agent in agents.items():
try:
agent_out += '\'' + agent[0] + '\' >: ' + str(agent[1].run()) + '\n'
except:
e = sys.exc_info()[0]
agent_out += str("ERROR: %s\n"%e)
if agent_out:
with stdout_lock:
print "Agent output\n====="
print agent_out,
print "=====\n"
i += 1
time.sleep(0.5)
| {
"repo_name": "tomcola512/AsyncPy",
"path": "Async.py",
"copies": "1",
"size": "2710",
"license": "mit",
"hash": -1601571571045683000,
"line_mean": 25.0576923077,
"line_max": 84,
"alpha_frac": 0.4494464945,
"autogenerated": false,
"ratio": 4.329073482428115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011815282052678585,
"num_lines": 104
} |
"""async-rasterio.py
Operate on a raster dataset window-by-window using asyncio's event loop
and thread executor.
Simulates a CPU-bound thread situation where multiple threads can improve
performance.
"""
import asyncio
import time
import numpy as np
import rasterio
from rasterio._example import compute
def main(infile, outfile, with_threads=False):
    """Copy `infile` to a tiled `outfile`, "processing" each 256x256 window.

    Each window is processed as an asyncio task.  When `with_threads` is
    True, the CPU-bound `compute` call is submitted to the default thread
    pool executor so windows can be processed concurrently (compute
    modifies no Python objects and releases the GIL).
    """
    with rasterio.Env():
        # Open the source dataset.
        with rasterio.open(infile) as src:
            # Create a destination dataset based on source params. The
            # destination will be tiled, and we'll "process" the tiles
            # concurrently.
            meta = src.meta
            del meta['transform']
            meta.update(affine=src.affine)
            meta.update(blockxsize=256, blockysize=256, tiled='yes')
            with rasterio.open(outfile, 'w', **meta) as dst:

                async def process_window(window):
                    """Read one window, compute, and write the result."""
                    data = src.read(window=window)
                    result = np.zeros(data.shape, dtype=data.dtype)
                    if with_threads:
                        # Pause here until the executor finishes, letting
                        # other window coroutines advance meanwhile.
                        loop = asyncio.get_running_loop()
                        await loop.run_in_executor(None, compute, data, result)
                    else:
                        compute(data, result)
                    dst.write(result, window=window)

                async def process_all():
                    # One task per block window; gather drives them all.
                    await asyncio.gather(
                        *(process_window(window)
                          for ij, window in dst.block_windows(1)))

                # asyncio.run() replaces the deprecated get_event_loop /
                # run_until_complete / close sequence, and async def/await
                # replaces @asyncio.coroutine (removed in Python 3.11).
                asyncio.run(process_all())
if __name__ == '__main__':
    import argparse

    # Minimal CLI: positional input/output paths plus a thread-pool switch.
    parser = argparse.ArgumentParser(
        description="Concurrent raster processing demo")
    parser.add_argument('input', metavar='INPUT', help="Input file name")
    parser.add_argument('output', metavar='OUTPUT', help="Output file name")
    parser.add_argument(
        '--with-workers',
        action='store_true',
        help="Run with a pool of worker threads")
    args = parser.parse_args()
    main(args.input, args.output, args.with_workers)
| {
"repo_name": "kapadia/rasterio",
"path": "examples/async-rasterio.py",
"copies": "1",
"size": "3336",
"license": "bsd-3-clause",
"hash": -3490711419676865000,
"line_mean": 33.0408163265,
"line_max": 73,
"alpha_frac": 0.5590527578,
"autogenerated": false,
"ratio": 4.731914893617021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010799118701854266,
"num_lines": 98
} |
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
import time, threading, requests
from datetime import datetime
app = Flask(__name__)
api = Api(app)
task_check = {}
class TimedWorker(threading._Timer):
    # NOTE(review): threading._Timer is a private Python-2 name; it was
    # removed in Python 3 (where Timer is a plain class).  This file is
    # Python-2 only (see has_key usage below).
    def __init__(self, *args, **kwargs):
        threading._Timer.__init__(self, *args, **kwargs)
        # Non-daemon: the worker keeps the process alive until cancelled.
        self.setDaemon(False)
    def run(self):
        # Repeatedly wait `interval` seconds then invoke `function`, until
        # the `finished` event is set (e.g. by cancel()), turning the
        # one-shot _Timer into a periodic one.
        while self.isAlive():
            print("Alive %s" % self.isAlive())
            self.finished.clear()
            self.finished.wait(self.interval)
            print("-----------------------------------------")
            print(self.args, self.kwargs)
            if not self.finished.isSet():
                self.function(*self.args, **self.kwargs)
            else:
                return
        self.finished.set()
    def __del__(self):
        # Best-effort cancel when the worker is garbage-collected.
        print("----------------delete------------------")
        self.cancel()
def post_completed_msg(nid):
    """Notify the manager that task `nid` has completed, then drop it from
    the `task_check` registry.

    On a connection error the entry is kept, so the periodic TimedWorker
    retries on its next tick.
    """
    # dict.has_key() is Python-2-only; `in` works on both 2 and 3.
    if nid in task_check:
        try:
            print("[%s]" % datetime.now())
            requests.post('http://127.0.0.1:7009/completed', params={'nid':nid})
            print("[%s]" % datetime.now())
            # Single pop replaces the old "set to None, then del" pair.
            task_check.pop(nid, None)
        except requests.ConnectionError as ex:
            print("Met connection error, retry: %s" % ex)
    print("task: [%d]" % len(task_check))
@app.route('/', methods=['POST'])
def index():
    """Accept a task id (`nid` query parameter) and schedule a completion
    callback to fire after 5 seconds."""
    print("Got request")
    # request.args.has_key() is Python-2-only; use `in`.
    if 'nid' in request.args:
        nid = str(request.args['nid'])
        print("=====[%s]sleeping=======" % nid)
        try:
            print(type(nid), nid)
            # Timer args must be a sequence: passing the bare string made
            # the timer unpack it character-by-character, so any nid longer
            # than one character called the callback with multiple args.
            # (The commented-out smoke test in __main__ passes a tuple.)
            task_check[nid] = TimedWorker(5, post_completed_msg, (nid,))
            task_check[nid].start()
        except Exception as ex:
            print("Exception on index: %s" % ex)
    return jsonify({'unittype':"Bluetooth control"}), 200
def start_server(port):
    # app.run() blocks until the server shuts down, so the print below
    # only executes after shutdown (its message is misleading).
    app.run(host="0.0.0.0", debug=False, port=port, threaded=True)
    print("server started ... ")
if __name__ == "__main__":
    start_server(8321)
    # Manual smoke-test snippet for TimedWorker, kept for reference —
    # note the callback args are passed as a tuple here.
    # a=TimedWorker(2, post_completed_msg, (str(u'100'),))
    # print(dir(a))
    # a.start()
    # raw_input("===========")
    # a.cancel()
    # raw_input("===========")
| {
"repo_name": "Zex/neural-node",
"path": "optm/rest_agent.py",
"copies": "1",
"size": "2320",
"license": "mit",
"hash": -5197683852238762000,
"line_mean": 28,
"line_max": 80,
"alpha_frac": 0.5280172414,
"autogenerated": false,
"ratio": 3.552833078101072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9496568579057809,
"avg_score": 0.016856348088652433,
"num_lines": 80
} |
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
import requests, time, threading
from datetime import datetime
app = Flask(__name__)
api = Api(app)
task_completed = {}
@app.route('/completed', methods=['POST'])
def completed():
    """Mark the task named by the `nid` query parameter as completed."""
    print("=====completed======")
    # .has_key() is Python-2-only; `in` is portable (works on 2 and 3).
    if 'nid' in request.args:
        nid = str(request.args['nid'])
        print('Got nid [%s]' % nid)
        if nid in task_completed:
            task_completed[nid] = True
        else:
            # "Unknow" typo fixed.
            print('Unknown nid [%s]'%nid)
        print(task_completed)
    else:
        print('Unknown request')
    return jsonify({'status':'done'}), 200
@app.route('/', methods=['POST'])
def index():
    # Debug endpoint: dump request info, stall 5 seconds (to exercise the
    # threaded server), then answer.
    print("=====sleeping=======")
    print('%s' % dir(request))
    print('%s' % request)
    print(request.remote_addr)
    time.sleep(5)
    return jsonify({'role':"Manager"}), 200
def post_reqs():
    """Fire ten tracked requests (nid 0..9) at the agent server,
    timestamping each round-trip."""
    for nm in range(10):
        node_id = str(nm)
        print("req for [%s]" % nm)
        print("[%s]" % datetime.now())
        requests.post('http://127.0.0.1:8321', params={'nid': node_id})
        print("[%s]" % datetime.now())
def start_server(port):
    print(dir(app))
    # Pre-register ten expected tasks, all initially incomplete.
    for nm in range(10):
        task_completed[str(nm)] = False
    # Kick off the client requests 5 s from now, once the server is up.
    threading.Timer(5, post_reqs).start()
    # Blocks until shutdown; the print below only runs afterwards.
    app.run(host="0.0.0.0", debug=False, port=port, threaded=True)
    print("server started ... ")
if __name__ == "__main__":
    # Manager listens on 7009; the agent (rest_agent.py) posts back here.
    start_server(7009)
| {
"repo_name": "Zex/neural-node",
"path": "optm/rest_mng.py",
"copies": "1",
"size": "1499",
"license": "mit",
"hash": -1264023628796056300,
"line_mean": 25.7678571429,
"line_max": 70,
"alpha_frac": 0.5790527018,
"autogenerated": false,
"ratio": 3.3609865470852016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4440039248885202,
"avg_score": null,
"num_lines": null
} |
"""AsyncResult objects for the client
Authors:
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import sys
import time
from datetime import datetime
from zmq import MessageTracker
from IPython.core.display import clear_output, display, display_pretty
from IPython.external.decorator import decorator
from IPython.parallel import error
from IPython.utils.py3compat import string_types
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def _raw_text(s):
    # Display `s` as raw (unformatted) text in the frontend.
    display_pretty(s, raw=True)
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
# global empty tracker that's always done:
finished_tracker = MessageTracker()
@decorator
def check_ready(f, self, *args, **kwargs):
    """Call spin() to sync state prior to calling the method."""
    # A zero-timeout wait refreshes `_ready` without blocking.
    self.wait(0)
    if not self._ready:
        raise error.TimeoutError("result not ready")
    return f(self, *args, **kwargs)
class AsyncResult(object):
    """Class for representing results of non-blocking calls.

    Provides the same interface as :py:class:`multiprocessing.pool.AsyncResult`.
    """

    # Class-level defaults; real values are assigned per-instance in __init__.
    msg_ids = None
    _targets = None
    _tracker = None
    _single_result = False

    def __init__(self, client, msg_ids, fname='unknown', targets=None, tracker=None):
        # A bare msg_id string means a single result; normalise to a list
        # and remember that callers expect a scalar back.
        if isinstance(msg_ids, string_types):
            # always a list
            msg_ids = [msg_ids]
            self._single_result = True
        else:
            self._single_result = False
        if tracker is None:
            # default to always done
            tracker = finished_tracker
        self._client = client
        self.msg_ids = msg_ids
        self._fname=fname
        self._targets = targets
        self._tracker = tracker
        self._ready = False
        self._outputs_ready = False
        self._success = None
        self._metadata = [self._client.metadata[id] for id in self.msg_ids]

    def __repr__(self):
        if self._ready:
            return "<%s: finished>"%(self.__class__.__name__)
        else:
            return "<%s: %s>"%(self.__class__.__name__,self._fname)

    def _reconstruct_result(self, res):
        """Reconstruct our result from actual result list (always a list)

        Override me in subclasses for turning a list of results
        into the expected form.
        """
        if self._single_result:
            return res[0]
        else:
            return res

    def get(self, timeout=-1):
        """Return the result when it arrives.

        If `timeout` is not ``None`` and the result does not arrive within
        `timeout` seconds then ``TimeoutError`` is raised. If the
        remote call raised an exception then that exception will be reraised
        by get() inside a `RemoteError`.
        """
        if not self.ready():
            self.wait(timeout)
        if self._ready:
            if self._success:
                return self._result
            else:
                raise self._exception
        else:
            raise error.TimeoutError("Result not ready.")

    def _check_ready(self):
        # Raise (rather than return a flag); used by the indexing helpers.
        if not self.ready():
            raise error.TimeoutError("Result not ready.")

    def ready(self):
        """Return whether the call has completed."""
        # Both branches are zero-timeout refreshes, never blocking.
        if not self._ready:
            self.wait(0)
        elif not self._outputs_ready:
            self._wait_for_outputs(0)
        return self._ready

    def wait(self, timeout=-1):
        """Wait until the result is available or until `timeout` seconds pass.

        This method always returns None.
        """
        if self._ready:
            self._wait_for_outputs(timeout)
            return
        self._ready = self._client.wait(self.msg_ids, timeout)
        if self._ready:
            try:
                results = list(map(self._client.results.get, self.msg_ids))
                self._result = results
                if self._single_result:
                    r = results[0]
                    # a remote exception is re-raised locally
                    if isinstance(r, Exception):
                        raise r
                else:
                    results = error.collect_exceptions(results, self._fname)
                    self._result = self._reconstruct_result(results)
            except Exception as e:
                self._exception = e
                self._success = False
            else:
                self._success = True
            finally:
                if timeout is None or timeout < 0:
                    # cutoff infinite wait at 10s
                    timeout = 10
                self._wait_for_outputs(timeout)

    def successful(self):
        """Return whether the call completed without raising an exception.

        Will raise ``AssertionError`` if the result is not ready.
        """
        assert self.ready()
        return self._success

    #----------------------------------------------------------------
    # Extra methods not in mp.pool.AsyncResult
    #----------------------------------------------------------------

    def get_dict(self, timeout=-1):
        """Get the results as a dict, keyed by engine_id.

        timeout behavior is described in `get()`.
        """
        results = self.get(timeout)
        if self._single_result:
            results = [results]
        engine_ids = [ md['engine_id'] for md in self._metadata ]
        rdict = {}
        for engine_id, result in zip(engine_ids, results):
            if engine_id in rdict:
                # one engine ran several of our jobs; keys would collide
                raise ValueError("Cannot build dict, %i jobs ran on engine #%i" % (
                    engine_ids.count(engine_id), engine_id)
                )
            else:
                rdict[engine_id] = result
        return rdict

    @property
    def result(self):
        """result property wrapper for `get(timeout=-1)`."""
        return self.get()

    # abbreviated alias:
    r = result

    @property
    def metadata(self):
        """property for accessing execution metadata."""
        if self._single_result:
            return self._metadata[0]
        else:
            return self._metadata

    @property
    def result_dict(self):
        """result property as a dict."""
        return self.get_dict()

    def __dict__(self):
        # NOTE(review): unusual — this defines __dict__ as a *method* on the
        # class, shadowing the normal instance-dict descriptor; kept as-is.
        return self.get_dict(0)

    def abort(self):
        """abort my tasks."""
        assert not self.ready(), "Can't abort, I am already done!"
        return self._client.abort(self.msg_ids, targets=self._targets, block=True)

    @property
    def sent(self):
        """check whether my messages have been sent."""
        return self._tracker.done

    def wait_for_send(self, timeout=-1):
        """wait for pyzmq send to complete.

        This is necessary when sending arrays that you intend to edit in-place.
        `timeout` is in seconds, and will raise TimeoutError if it is reached
        before the send completes.
        """
        return self._tracker.wait(timeout)

    #-------------------------------------
    # dict-access
    #-------------------------------------

    def __getitem__(self, key):
        """getitem returns result value(s) if keyed by int/slice, or metadata if key is str.
        """
        if isinstance(key, int):
            self._check_ready()
            return error.collect_exceptions([self._result[key]], self._fname)[0]
        elif isinstance(key, slice):
            self._check_ready()
            return error.collect_exceptions(self._result[key], self._fname)
        elif isinstance(key, string_types):
            # metadata proxy *does not* require that results are done
            self.wait(0)
            values = [ md[key] for md in self._metadata ]
            if self._single_result:
                return values[0]
            else:
                return values
        else:
            raise TypeError("Invalid key type %r, must be 'int','slice', or 'str'"%type(key))

    def __getattr__(self, key):
        """getattr maps to getitem for convenient attr access to metadata."""
        try:
            return self.__getitem__(key)
        except (error.TimeoutError, KeyError):
            raise AttributeError("%r object has no attribute %r"%(
                self.__class__.__name__, key))

    # asynchronous iterator:
    def __iter__(self):
        if self._single_result:
            raise TypeError("AsyncResults with a single result are not iterable.")
        try:
            rlist = self.get(0)
        except error.TimeoutError:
            # wait for each result individually
            for msg_id in self.msg_ids:
                ar = AsyncResult(self._client, msg_id, self._fname)
                yield ar.get()
        else:
            # already done
            for r in rlist:
                yield r

    def __len__(self):
        return len(self.msg_ids)

    #-------------------------------------
    # Sugar methods and attributes
    #-------------------------------------

    def timedelta(self, start, end, start_key=min, end_key=max):
        """compute the difference between two sets of timestamps

        The default behavior is to use the earliest of the first
        and the latest of the second list, but this can be changed
        by passing a different extraction function for either end.

        Parameters
        ----------
        start : one or more datetime objects (e.g. ar.submitted)
        end : one or more datetime objects (e.g. ar.received)
        start_key : callable
            Function to call on `start` to extract the relevant
            entry [default: min]
        end_key : callable
            Function to call on `end` to extract the relevant
            entry [default: max]

        Returns
        -------
        dt : float
            The time elapsed (in seconds) between the two selected timestamps.
        """
        if not isinstance(start, datetime):
            # handle single_result AsyncResults, where ar.stamp is single object,
            # not a list
            start = start_key(start)
        if not isinstance(end, datetime):
            # handle single_result AsyncResults, where ar.stamp is single object,
            # not a list
            end = end_key(end)
        return (end - start).total_seconds()

    @property
    def progress(self):
        """the number of tasks which have been completed at this point.

        Fractional progress would be given by 1.0 * ar.progress / len(ar)
        """
        self.wait(0)
        return len(self) - len(set(self.msg_ids).intersection(self._client.outstanding))

    @property
    def elapsed(self):
        """elapsed time since initial submission"""
        if self.ready():
            return self.wall_time
        # use the earliest known submission stamp among our messages
        now = submitted = datetime.now()
        for msg_id in self.msg_ids:
            if msg_id in self._client.metadata:
                stamp = self._client.metadata[msg_id]['submitted']
                if stamp and stamp < submitted:
                    submitted = stamp
        return (now-submitted).total_seconds()

    @property
    @check_ready
    def serial_time(self):
        """serial computation time of a parallel calculation

        Computed as the sum of (completed-started) of each task
        """
        t = 0
        for md in self._metadata:
            t += (md['completed'] - md['started']).total_seconds()
        return t

    @property
    @check_ready
    def wall_time(self):
        """actual computation time of a parallel calculation

        Computed as the time between the latest `received` stamp
        and the earliest `submitted`.

        Only reliable if Client was spinning/waiting when the task finished, because
        the `received` timestamp is created when a result is pulled off of the zmq queue,
        which happens as a result of `client.spin()`.

        For similar comparison of other timestamp pairs, check out AsyncResult.timedelta.
        """
        return self.timedelta(self.submitted, self.received)

    def wait_interactive(self, interval=1., timeout=-1):
        """interactive wait, printing progress at regular intervals"""
        if timeout is None:
            timeout = -1
        N = len(self)
        tic = time.time()
        while not self.ready() and (timeout < 0 or time.time() - tic <= timeout):
            self.wait(interval)
            # overwrite the previous progress line in place
            clear_output(wait=True)
            print("%4i/%i tasks finished after %4i s" % (self.progress, N, self.elapsed), end="")
            sys.stdout.flush()
        print()
        print("done")

    def _republish_displaypub(self, content, eid):
        """republish individual displaypub content dicts"""
        try:
            ip = get_ipython()
        except NameError:
            # displaypub is meaningless outside IPython
            return
        md = content['metadata'] or {}
        md['engine'] = eid
        ip.display_pub.publish(content['source'], content['data'], md)

    def _display_stream(self, text, prefix='', file=None):
        # Echo a captured remote stream locally, prefixed (e.g. '[stdout:3] ').
        if not text:
            # nothing to display
            return
        if file is None:
            file = sys.stdout
        end = '' if text.endswith('\n') else '\n'
        multiline = text.count('\n') > int(text.endswith('\n'))
        if prefix and multiline and not text.startswith('\n'):
            # prefix each line with a newline so multi-line text lines up
            prefix = prefix + '\n'
        print("%s%s" % (prefix, text), file=file, end=end)

    def _display_single_result(self):
        self._display_stream(self.stdout)
        self._display_stream(self.stderr, file=sys.stderr)
        try:
            get_ipython()
        except NameError:
            # displaypub is meaningless outside IPython
            return
        for output in self.outputs:
            self._republish_displaypub(output, self.engine_id)
        if self.pyout is not None:
            display(self.get())

    def _wait_for_outputs(self, timeout=-1):
        """wait for the 'status=idle' message that indicates we have all outputs
        """
        if self._outputs_ready or not self._success:
            # don't wait on errors
            return
        # cast None to -1 for infinite timeout
        if timeout is None:
            timeout = -1
        tic = time.time()
        # poll the iopub channel until every task reports its outputs done
        while True:
            self._client._flush_iopub(self._client._iopub_socket)
            self._outputs_ready = all(md['outputs_ready']
                                      for md in self._metadata)
            if self._outputs_ready or \
               (timeout >= 0 and time.time() > tic + timeout):
                break
            time.sleep(0.01)

    @check_ready
    def display_outputs(self, groupby="type"):
        """republish the outputs of the computation

        Parameters
        ----------
        groupby : str [default: type]
            if 'type':
                Group outputs by type (show all stdout, then all stderr, etc.):

                [stdout:1] foo
                [stdout:2] foo
                [stderr:1] bar
                [stderr:2] bar
            if 'engine':
                Display outputs for each engine before moving on to the next:

                [stdout:1] foo
                [stderr:1] bar
                [stdout:2] foo
                [stderr:2] bar
            if 'order':
                Like 'type', but further collate individual displaypub
                outputs. This is meant for cases of each command producing
                several plots, and you would like to see all of the first
                plots together, then all of the second plots, and so on.
        """
        if self._single_result:
            self._display_single_result()
            return
        stdouts = self.stdout
        stderrs = self.stderr
        pyouts = self.pyout
        output_lists = self.outputs
        results = self.get()
        targets = self.engine_id
        if groupby == "engine":
            for eid,stdout,stderr,outputs,r,pyout in zip(
                    targets, stdouts, stderrs, output_lists, results, pyouts
                ):
                self._display_stream(stdout, '[stdout:%i] ' % eid)
                self._display_stream(stderr, '[stderr:%i] ' % eid, file=sys.stderr)
                try:
                    get_ipython()
                except NameError:
                    # displaypub is meaningless outside IPython
                    return
                if outputs or pyout is not None:
                    _raw_text('[output:%i]' % eid)
                for output in outputs:
                    self._republish_displaypub(output, eid)
                if pyout is not None:
                    display(r)
        elif groupby in ('type', 'order'):
            # republish stdout:
            for eid,stdout in zip(targets, stdouts):
                self._display_stream(stdout, '[stdout:%i] ' % eid)
            # republish stderr:
            for eid,stderr in zip(targets, stderrs):
                self._display_stream(stderr, '[stderr:%i] ' % eid, file=sys.stderr)
            try:
                get_ipython()
            except NameError:
                # displaypub is meaningless outside IPython
                return
            if groupby == 'order':
                # interleave: first output of every engine, then second, ...
                output_dict = dict((eid, outputs) for eid,outputs in zip(targets, output_lists))
                N = max(len(outputs) for outputs in output_lists)
                for i in range(N):
                    for eid in targets:
                        outputs = output_dict[eid]
                        if len(outputs) >= N:
                            _raw_text('[output:%i]' % eid)
                            self._republish_displaypub(outputs[i], eid)
            else:
                # republish displaypub output
                for eid,outputs in zip(targets, output_lists):
                    if outputs:
                        _raw_text('[output:%i]' % eid)
                    for output in outputs:
                        self._republish_displaypub(output, eid)
            # finally, add pyout:
            for eid,r,pyout in zip(targets, results, pyouts):
                if pyout is not None:
                    display(r)
        else:
            raise ValueError("groupby must be one of 'type', 'engine', 'collate', not %r" % groupby)
class AsyncMapResult(AsyncResult):
    """Class for representing results of non-blocking gathers.

    This will properly reconstruct the gather.

    This class is iterable at any time, and will wait on results as they come.

    If ordered=False, then the first results to arrive will come first, otherwise
    results will be yielded in the order they were submitted.
    """

    def __init__(self, client, msg_ids, mapObject, fname='', ordered=True):
        AsyncResult.__init__(self, client, msg_ids, fname=fname)
        self._mapObject = mapObject
        # a map always yields a collection, never a bare scalar
        self._single_result = False
        self.ordered = ordered

    def _reconstruct_result(self, res):
        """Perform the gather on the actual results."""
        return self._mapObject.joinPartitions(res)

    # asynchronous iterator:
    def __iter__(self):
        it = self._ordered_iter if self.ordered else self._unordered_iter
        for r in it():
            yield r

    # asynchronous ordered iterator:
    def _ordered_iter(self):
        """iterator for results *as they arrive*, preserving submission order."""
        try:
            rlist = self.get(0)
        except error.TimeoutError:
            # wait for each result individually
            for msg_id in self.msg_ids:
                ar = AsyncResult(self._client, msg_id, self._fname)
                rlist = ar.get()
                try:
                    for r in rlist:
                        yield r
                except TypeError:
                    # flattened, not a list
                    # this could get broken by flattened data that returns iterables
                    # but most calls to map do not expose the `flatten` argument
                    yield rlist
        else:
            # already done
            for r in rlist:
                yield r

    # asynchronous unordered iterator:
    def _unordered_iter(self):
        """iterator for results *as they arrive*, on FCFS basis, ignoring submission order."""
        try:
            rlist = self.get(0)
        except error.TimeoutError:
            # poll, yielding each partition as soon as it completes
            pending = set(self.msg_ids)
            while pending:
                try:
                    self._client.wait(pending, 1e-3)
                except error.TimeoutError:
                    # ignore timeout error, because that only means
                    # *some* jobs are outstanding
                    pass
                # update ready set with those no longer outstanding:
                ready = pending.difference(self._client.outstanding)
                # update pending to exclude those that are finished
                pending = pending.difference(ready)
                while ready:
                    msg_id = ready.pop()
                    ar = AsyncResult(self._client, msg_id, self._fname)
                    rlist = ar.get()
                    try:
                        for r in rlist:
                            yield r
                    except TypeError:
                        # flattened, not a list
                        # this could get broken by flattened data that returns iterables
                        # but most calls to map do not expose the `flatten` argument
                        yield rlist
        else:
            # already done
            for r in rlist:
                yield r
class AsyncHubResult(AsyncResult):
    """Class to wrap pending results that must be requested from the Hub.

    Note that waiting/polling on these objects requires polling the Hub over the network,
    so use `AsyncHubResult.wait()` sparingly.
    """

    def _wait_for_outputs(self, timeout=-1):
        """no-op, because HubResults are never incomplete"""
        self._outputs_ready = True

    def wait(self, timeout=-1):
        """wait for result to complete."""
        start = time.time()
        if self._ready:
            return
        # first wait out any messages still outstanding locally ...
        local_ids = [m for m in self.msg_ids if m in self._client.outstanding]
        local_ready = self._client.wait(local_ids, timeout)
        if local_ready:
            # ... then poll the Hub (over the network) for the rest
            remote_ids = [m for m in self.msg_ids if m not in self._client.results]
            if not remote_ids:
                self._ready = True
            else:
                rdict = self._client.result_status(remote_ids, status_only=False)
                pending = rdict['pending']
                while pending and (timeout < 0 or time.time() < start+timeout):
                    rdict = self._client.result_status(remote_ids, status_only=False)
                    pending = rdict['pending']
                    if pending:
                        time.sleep(0.1)
                if not pending:
                    self._ready = True
        if self._ready:
            try:
                results = list(map(self._client.results.get, self.msg_ids))
                self._result = results
                if self._single_result:
                    r = results[0]
                    # a remote exception is re-raised locally
                    if isinstance(r, Exception):
                        raise r
                else:
                    results = error.collect_exceptions(results, self._fname)
                    self._result = self._reconstruct_result(results)
            except Exception as e:
                self._exception = e
                self._success = False
            else:
                self._success = True
            finally:
                self._metadata = [self._client.metadata[mid] for mid in self.msg_ids]
__all__ = ['AsyncResult', 'AsyncMapResult', 'AsyncHubResult']
| {
"repo_name": "WillisXChen/django-oscar",
"path": "oscar/lib/python2.7/site-packages/IPython/parallel/client/asyncresult.py",
"copies": "7",
"size": "24813",
"license": "bsd-3-clause",
"hash": 770480422107963600,
"line_mean": 34.0961810467,
"line_max": 100,
"alpha_frac": 0.5145286745,
"autogenerated": false,
"ratio": 4.784612418048592,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01325630707320421,
"num_lines": 707
} |
"""AsyncResult objects for the client"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import sys
import time
from concurrent.futures import Future
from datetime import datetime
from threading import Event
from decorator import decorator
from tornado.gen import multi_future
import zmq
from zmq import MessageTracker
from IPython.core.display import clear_output, display, display_pretty
from ipyparallel import error
from ipython_genutils.py3compat import string_types
from .futures import MessageFuture
def _raw_text(s):
    # Display `s` as raw (unformatted) text in the frontend.
    display_pretty(s, raw=True)
_default = object()
# global empty tracker that's always done:
finished_tracker = MessageTracker()
@decorator
def check_ready(f, self, *args, **kwargs):
    """Check ready state prior to calling the method."""
    # A zero-timeout wait refreshes `_ready` without blocking.
    self.wait(0)
    if not self._ready:
        raise error.TimeoutError("result not ready")
    return f(self, *args, **kwargs)
_metadata_keys = []
class AsyncResult(Future):
"""Class for representing results of non-blocking calls.
Provides the same interface as :py:class:`multiprocessing.pool.AsyncResult`.
"""
msg_ids = None
_targets = None
_tracker = None
_single_result = False
owner = False
def __init__(self, client, children, fname='unknown', targets=None,
             owner=False,
             ):
    # `children` may be a single future/msg_id or a list of either; a bare
    # (non-list) value marks this as a single-result AsyncResult.
    super(AsyncResult, self).__init__()
    if not isinstance(children, list):
        children = [children]
        self._single_result = True
    else:
        self._single_result = False
    if isinstance(children[0], string_types):
        # bare msg_id strings: futures get looked up in _init_futures
        self.msg_ids = children
        self._children = []
    else:
        # MessageFuture children carry their own msg_ids
        self._children = children
        self.msg_ids = [ f.msg_id for f in children ]
    self._client = client
    self._fname = fname
    self._targets = targets
    self.owner = owner
    # readiness flags plus Events so threads can block on state changes
    self._ready = False
    self._ready_event = Event()
    self._output_ready = False
    self._output_event = Event()
    self._sent_event = Event()
    self._success = None
    if self._children:
        self._metadata = [ f.output.metadata for f in self._children ]
    else:
        self._metadata = [self._client.metadata[id] for id in self.msg_ids]
    self._init_futures()
def _init_futures(self):
    """Build futures for results and output; hook up callbacks.

    When constructed from bare msg_ids, look up (or synthesize, from the
    local results cache) a MessageFuture per message.  Then build aggregate
    futures over the children so this AsyncResult resolves when all of its
    constituents do.
    """
    if not self._children:
        for msg_id in self.msg_ids:
            future = self._client._futures.get(msg_id, None)
            if not future:
                result = self._client.results.get(msg_id, _default)
                # result resides in local cache, construct already-resolved Future
                if result is not _default:
                    future = MessageFuture(msg_id)
                    future.output = Future()
                    # BUG FIX: was `self.client` — the attribute is `_client`;
                    # `self.client` is routed through __getattr__ and raises
                    # AttributeError instead of reaching the metadata cache.
                    future.output.metadata = self._client.metadata[msg_id]
                    future.set_result(result)
                    future.output.set_result(None)
            if not future:
                raise KeyError("No Future or result for msg_id: %s" % msg_id)
            self._children.append(future)
    self._result_future = multi_future(self._children)
    self._sent_future = multi_future([ f.tracker for f in self._children ])
    self._sent_future.add_done_callback(self._handle_sent)
    self._output_future = multi_future([self._result_future] + [
        f.output for f in self._children
    ])
    # on completion of my constituents, trigger my own resolution
    self._result_future.add_done_callback(self._resolve_result)
    self._output_future.add_done_callback(self._resolve_output)
def __repr__(self):
    """Show completion state, or the pending function name."""
    cls_name = self.__class__.__name__
    if self._ready:
        return "<%s: finished>" % (cls_name)
    return "<%s: %s>" % (cls_name, self._fname)
def __dir__(self):
keys = dir(self.__class__)
if not _metadata_keys:
from .client import Metadata
_metadata_keys.extend(Metadata().keys())
keys.extend(_metadata_keys)
return keys
def _reconstruct_result(self, res):
    """Reconstruct our result from actual result list (always a list)

    Override me in subclasses for turning a list of results
    into the expected form.
    """
    # single-result calls unwrap the one-element list; others pass through
    return res[0] if self._single_result else res
def get(self, timeout=-1):
    """Return the result when it arrives.

    If `timeout` is not ``None`` and the result does not arrive within
    `timeout` seconds then ``TimeoutError`` is raised. If the
    remote call raised an exception then that exception will be reraised
    by get() inside a `RemoteError`.
    """
    if not self.ready():
        self.wait(timeout)
    if self._ready:
        if self._success:
            return self.result()
        else:
            # re-raise the remote exception locally
            raise self.exception()
    else:
        raise error.TimeoutError("Result not ready.")
def _check_ready(self):
    # Internal guard: raise instead of returning a flag.
    if not self.ready():
        raise error.TimeoutError("Result not ready.")
def ready(self):
    """Return whether the call has completed."""
    if not self._ready:
        # poll once without blocking to pick up a just-arrived result
        self.wait(0)
    return self._ready
def wait_for_output(self, timeout=-1):
"""Wait for our output to be complete.
AsyncResult.wait only waits for the result,
which may arrive before output is complete.
"""
if self._output_ready:
return True
if timeout and timeout < 0:
timeout = None
return self._output_event.wait(timeout)
def _resolve_output(self, f=None):
"""Callback that fires when outputs are ready"""
if self.owner:
[ self._client.metadata.pop(mid, None) for mid in self.msg_ids ]
self._output_ready = True
self._output_event.set()
def wait(self, timeout=-1):
    """Wait until the result is available or until `timeout` seconds pass.

    Returns
    -------
    bool
        The ready state after waiting.  (The previous docstring claimed
        this always returns None, which was inaccurate.)
    """
    if self._ready:
        return True
    if timeout and timeout < 0:
        # negative timeout means wait forever; Event rejects values < 0
        timeout = None
    self._ready_event.wait(timeout)
    # the result may arrive before output does; poll output without blocking
    self.wait_for_output(0)
    return self._ready
def _resolve_result(self, f=None):
    """Callback fired when all child result futures have resolved.

    Transfers success/failure onto this Future, flips the ready flag,
    and — if we own the messages — purges them from the client cache.
    """
    try:
        if f:
            results = f.result()
        else:
            # no aggregate future given; read from the client results cache
            results = list(map(self._client.results.get, self.msg_ids))
        if self._single_result:
            r = results[0]
            if isinstance(r, Exception):
                raise r
        else:
            results = error.collect_exceptions(results, self._fname)
        self.set_result(self._reconstruct_result(results))
    except Exception as e:
        self.set_exception(e)
        self._success = False
    else:
        self._success = True
    finally:
        if self.owner:
            # owner=True means we evict cached results once consumed
            [ self._client.results.pop(mid, None) for mid in self.msg_ids ]
        self._ready = True
        self._ready_event.set()
def successful(self):
    """Return whether the call completed without raising an exception.

    Will raise ``AssertionError`` if the result is not ready.
    """
    # NOTE(review): assert is stripped under ``python -O``; kept as-is for
    # API compatibility with multiprocessing.pool.AsyncResult.
    assert self.ready()
    return self._success
#----------------------------------------------------------------
# Extra methods not in mp.pool.AsyncResult
#----------------------------------------------------------------
def get_dict(self, timeout=-1):
    """Get the results as a dict, keyed by engine_id.

    timeout behavior is described in `get()`.
    """
    values = self.get(timeout)
    if self._single_result:
        values = [values]
    engine_ids = [md['engine_id'] for md in self._metadata]
    by_engine = {}
    for eid, value in zip(engine_ids, values):
        if eid in by_engine:
            # two jobs on one engine cannot be represented as a dict
            raise ValueError("Cannot build dict, %i jobs ran on engine #%i" % (
                engine_ids.count(eid), eid)
            )
        by_engine[eid] = value
    return by_engine
@property
def r(self):
"""result property wrapper for `get(timeout=-1)`."""
return self.get()
@property
def metadata(self):
"""property for accessing execution metadata."""
if self._single_result:
return self._metadata[0]
else:
return self._metadata
@property
def result_dict(self):
"""result property as a dict."""
return self.get_dict()
def __dict__(self):
return self.get_dict(0)
def abort(self):
    """abort my tasks."""
    # aborting a finished job makes no sense; fail loudly
    assert not self.ready(), "Can't abort, I am already done!"
    return self._client.abort(self.msg_ids, targets=self._targets, block=True)
def _handle_sent(self, f):
    """Resolve sent Future, build MessageTracker"""
    trackers = f.result()
    # some sends have no tracker; drop the Nones before aggregating
    trackers = [t for t in trackers if t is not None]
    self._tracker = MessageTracker(*trackers)
    self._sent_event.set()
@property
def sent(self):
    """check whether my messages have been sent."""
    return self._sent_event.is_set() and self._tracker.done
def wait_for_send(self, timeout=-1):
    """wait for pyzmq send to complete.

    This is necessary when sending arrays that you intend to edit in-place.
    `timeout` is in seconds, and will raise TimeoutError if it is reached
    before the send completes.
    """
    if not self._sent_event.is_set():
        if timeout and timeout < 0:
            # Event doesn't like timeout < 0
            timeout = None
        # wait for Future to indicate send having been called,
        # which means MessageTracker is ready.
        tic = time.time()
        if not self._sent_event.wait(timeout):
            # FIX: a dead `return False` used to follow this raise;
            # removed as unreachable.
            raise error.TimeoutError("Still waiting to be sent")
        if timeout:
            # deduct the time already spent waiting for the event
            timeout = max(0, timeout - (time.time() - tic))
    try:
        if timeout is None:
            # MessageTracker doesn't like timeout=None
            timeout = -1
        return self._tracker.wait(timeout)
    except zmq.NotDone:
        raise error.TimeoutError("Still waiting to be sent")
#-------------------------------------
# dict-access
#-------------------------------------
def __getitem__(self, key):
"""getitem returns result value(s) if keyed by int/slice, or metadata if key is str.
"""
if isinstance(key, int):
self._check_ready()
return error.collect_exceptions([self.result()[key]], self._fname)[0]
elif isinstance(key, slice):
self._check_ready()
return error.collect_exceptions(self.result()[key], self._fname)
elif isinstance(key, string_types):
# metadata proxy *does not* require that results are done
self.wait(0)
self.wait_for_output(0)
values = [ md[key] for md in self._metadata ]
if self._single_result:
return values[0]
else:
return values
else:
raise TypeError("Invalid key type %r, must be 'int','slice', or 'str'"%type(key))
def __getattr__(self, key):
    """getattr maps to getitem for convenient attr access to metadata."""
    try:
        value = self.__getitem__(key)
    except (error.TimeoutError, KeyError):
        # present missing metadata as a normal attribute error
        raise AttributeError("%r object has no attribute %r"%(
            self.__class__.__name__, key))
    return value
# asynchronous iterator:
def __iter__(self):
if self._single_result:
raise TypeError("AsyncResults with a single result are not iterable.")
try:
rlist = self.get(0)
except error.TimeoutError:
# wait for each result individually
for child in self._children:
ar = AsyncResult(self._client, child, self._fname)
yield ar.get()
else:
# already done
for r in rlist:
yield r
def __len__(self):
return len(self.msg_ids)
#-------------------------------------
# Sugar methods and attributes
#-------------------------------------
def timedelta(self, start, end, start_key=min, end_key=max):
    """compute the difference between two sets of timestamps

    The default behavior is to use the earliest of the first
    and the latest of the second list, but this can be changed
    by passing a different start_key or end_key.

    Parameters
    ----------
    start : one or more datetime objects (e.g. ar.submitted)
    end : one or more datetime objects (e.g. ar.received)
    start_key : callable
        Function to call on `start` to extract the relevant
        entry [default: min]
    end_key : callable
        Function to call on `end` to extract the relevant
        entry [default: max]

    Returns
    -------
    dt : float
        The time elapsed (in seconds) between the two selected timestamps.
    """
    if not isinstance(start, datetime):
        # handle single_result AsyncResults, where ar.stamp is single object,
        # not a list
        start = start_key(start)
    if not isinstance(end, datetime):
        # handle single_result AsyncResults, where ar.stamp is single object,
        # not a list
        end = end_key(end)
    return (end - start).total_seconds()
@property
def progress(self):
"""the number of tasks which have been completed at this point.
Fractional progress would be given by 1.0 * ar.progress / len(ar)
"""
self.wait(0)
return len(self) - len(set(self.msg_ids).intersection(self._client.outstanding))
@property
def elapsed(self):
"""elapsed time since initial submission"""
if self.ready():
return self.wall_time
now = submitted = datetime.now()
for msg_id in self.msg_ids:
if msg_id in self._client.metadata:
stamp = self._client.metadata[msg_id]['submitted']
if stamp and stamp < submitted:
submitted = stamp
return (now-submitted).total_seconds()
@property
@check_ready
def serial_time(self):
"""serial computation time of a parallel calculation
Computed as the sum of (completed-started) of each task
"""
t = 0
for md in self._metadata:
t += (md['completed'] - md['started']).total_seconds()
return t
@property
@check_ready
def wall_time(self):
"""actual computation time of a parallel calculation
Computed as the time between the latest `received` stamp
and the earliest `submitted`.
For similar comparison of other timestamp pairs, check out AsyncResult.timedelta.
"""
return self.timedelta(self.submitted, self.received)
def wait_interactive(self, interval=1., timeout=-1):
"""interactive wait, printing progress at regular intervals"""
if timeout is None:
timeout = -1
N = len(self)
tic = time.time()
while not self.ready() and (timeout < 0 or time.time() - tic <= timeout):
self.wait(interval)
clear_output(wait=True)
print("%4i/%i tasks finished after %4i s" % (self.progress, N, self.elapsed), end="")
sys.stdout.flush()
print("\ndone")
def _republish_displaypub(self, content, eid):
"""republish individual displaypub content dicts"""
try:
ip = get_ipython()
except NameError:
# displaypub is meaningless outside IPython
return
md = content['metadata'] or {}
md['engine'] = eid
ip.display_pub.publish(data=content['data'], metadata=md)
def _display_stream(self, text, prefix='', file=None):
if not text:
# nothing to display
return
if file is None:
file = sys.stdout
end = '' if text.endswith('\n') else '\n'
multiline = text.count('\n') > int(text.endswith('\n'))
if prefix and multiline and not text.startswith('\n'):
prefix = prefix + '\n'
print("%s%s" % (prefix, text), file=file, end=end)
def _display_single_result(self):
self._display_stream(self.stdout)
self._display_stream(self.stderr, file=sys.stderr)
try:
get_ipython()
except NameError:
# displaypub is meaningless outside IPython
return
for output in self.outputs:
self._republish_displaypub(output, self.engine_id)
if self.execute_result is not None:
display(self.get())
@check_ready
def display_outputs(self, groupby="type"):
    """republish the outputs of the computation

    Parameters
    ----------
    groupby : str [default: type]
        if 'type':
            Group outputs by type (show all stdout, then all stderr, etc.):
            [stdout:1] foo
            [stdout:2] foo
            [stderr:1] bar
            [stderr:2] bar
        if 'engine':
            Display outputs for each engine before moving on to the next:
            [stdout:1] foo
            [stderr:1] bar
            [stdout:2] foo
            [stderr:2] bar
        if 'order':
            Like 'type', but further collate individual displaypub
            outputs. This is meant for cases of each command producing
            several plots, and you would like to see all of the first
            plots together, then all of the second plots, and so on.
    """
    self.wait_for_output()
    if self._single_result:
        self._display_single_result()
        return
    stdouts = self.stdout
    stderrs = self.stderr
    execute_results = self.execute_result
    output_lists = self.outputs
    results = self.get()
    targets = self.engine_id
    if groupby == "engine":
        for eid,stdout,stderr,outputs,r,execute_result in zip(
                targets, stdouts, stderrs, output_lists, results, execute_results
        ):
            self._display_stream(stdout, '[stdout:%i] ' % eid)
            self._display_stream(stderr, '[stderr:%i] ' % eid, file=sys.stderr)
            try:
                get_ipython()
            except NameError:
                # displaypub is meaningless outside IPython
                return
            if outputs or execute_result is not None:
                _raw_text('[output:%i]' % eid)
            for output in outputs:
                self._republish_displaypub(output, eid)
            if execute_result is not None:
                display(r)
    elif groupby in ('type', 'order'):
        # republish stdout:
        for eid,stdout in zip(targets, stdouts):
            self._display_stream(stdout, '[stdout:%i] ' % eid)
        # republish stderr:
        for eid,stderr in zip(targets, stderrs):
            self._display_stream(stderr, '[stderr:%i] ' % eid, file=sys.stderr)
        try:
            get_ipython()
        except NameError:
            # displaypub is meaningless outside IPython
            return
        if groupby == 'order':
            output_dict = dict((eid, outputs) for eid,outputs in zip(targets, output_lists))
            N = max(len(outputs) for outputs in output_lists)
            for i in range(N):
                for eid in targets:
                    outputs = output_dict[eid]
                    if len(outputs) >= N:
                        _raw_text('[output:%i]' % eid)
                        self._republish_displaypub(outputs[i], eid)
        else:
            # republish displaypub output
            for eid,outputs in zip(targets, output_lists):
                if outputs:
                    _raw_text('[output:%i]' % eid)
                for output in outputs:
                    self._republish_displaypub(output, eid)
        # finally, add execute_result:
        for eid,r,execute_result in zip(targets, results, execute_results):
            if execute_result is not None:
                display(r)
    else:
        # BUG FIX: the message previously listed 'collate', which is not an
        # accepted value; the valid options are 'type', 'engine', 'order'.
        raise ValueError("groupby must be one of 'type', 'engine', 'order', not %r" % groupby)
class AsyncMapResult(AsyncResult):
"""Class for representing results of non-blocking gathers.
This will properly reconstruct the gather.
This class is iterable at any time, and will wait on results as they come.
If ordered=False, then the first results to arrive will come first, otherwise
results will be yielded in the order they were submitted.
"""
def __init__(self, client, children, mapObject, fname='', ordered=True):
    """Store the mapObject used to re-join partitioned results.

    ordered=True yields results in submission order; False yields FCFS.
    """
    self._mapObject = mapObject
    self.ordered = ordered
    AsyncResult.__init__(self, client, children, fname=fname)
    # map results are always a collection, even when built from one message
    self._single_result = False
def _reconstruct_result(self, res):
    """Perform the gather on the actual results."""
    return self._mapObject.joinPartitions(res)
# asynchronous iterator:
def __iter__(self):
it = self._ordered_iter if self.ordered else self._unordered_iter
for r in it():
yield r
# asynchronous ordered iterator:
def _ordered_iter(self):
"""iterator for results *as they arrive*, preserving submission order."""
try:
rlist = self.get(0)
except error.TimeoutError:
# wait for each result individually
for child in self._children:
ar = AsyncResult(self._client, child, self._fname)
rlist = ar.get()
try:
for r in rlist:
yield r
except TypeError:
# flattened, not a list
# this could get broken by flattened data that returns iterables
# but most calls to map do not expose the `flatten` argument
yield rlist
else:
# already done
for r in rlist:
yield r
# asynchronous unordered iterator:
def _unordered_iter(self):
"""iterator for results *as they arrive*, on FCFS basis, ignoring submission order."""
try:
rlist = self.get(0)
except error.TimeoutError:
pending = set(self.msg_ids)
while pending:
try:
self._client.wait(pending, 1e-3)
except error.TimeoutError:
# ignore timeout error, because that only means
# *some* jobs are outstanding
pass
# update ready set with those no longer outstanding:
ready = pending.difference(self._client.outstanding)
# update pending to exclude those that are finished
pending = pending.difference(ready)
while ready:
msg_id = ready.pop()
child = self._children[self.msg_ids.index(msg_id)]
ar = AsyncResult(self._client, child, self._fname)
rlist = ar.get()
try:
for r in rlist:
yield r
except TypeError:
# flattened, not a list
# this could get broken by flattened data that returns iterables
# but most calls to map do not expose the `flatten` argument
yield rlist
else:
# already done
for r in rlist:
yield r
class AsyncHubResult(AsyncResult):
"""Class to wrap pending results that must be requested from the Hub.
Note that waiting/polling on these objects requires polling the Hub over the network,
so use `AsyncHubResult.wait()` sparingly.
"""
def _init_futures(self):
    """disable Future-based resolution of Hub results"""
    # Hub results are polled over the network in wait(); there are no local
    # MessageFutures to aggregate, so this override is deliberately a no-op.
    pass
def wait(self, timeout=-1):
    """wait for result to complete.

    Waits through the client for locally outstanding messages first; any
    msg_ids not in the local results cache are then polled from the Hub
    until no longer pending or `timeout` expires.
    """
    start = time.time()
    if self._ready:
        return
    local_ids = [m for m in self.msg_ids if m in self._client.outstanding]
    local_ready = self._client.wait(local_ids, timeout)
    if local_ready:
        remote_ids = [m for m in self.msg_ids if m not in self._client.results]
        if not remote_ids:
            self._ready = True
        else:
            rdict = self._client.result_status(remote_ids, status_only=False)
            pending = rdict['pending']
            # poll the Hub until nothing is pending or the deadline passes
            while pending and (timeout < 0 or time.time() < start+timeout):
                rdict = self._client.result_status(remote_ids, status_only=False)
                pending = rdict['pending']
                if pending:
                    time.sleep(0.1)
            if not pending:
                self._ready = True
    if self._ready:
        self._output_ready = True
        try:
            results = list(map(self._client.results.get, self.msg_ids))
            if self._single_result:
                r = results[0]
                if isinstance(r, Exception):
                    raise r
                self.set_result(r)
            else:
                results = error.collect_exceptions(results, self._fname)
                self.set_result(self._reconstruct_result(results))
        except Exception as e:
            self.set_exception(e)
            self._success = False
        else:
            self._success = True
        finally:
            if self.owner:
                # FIX: use pop(mid, None) like AsyncResult._resolve_result /
                # _resolve_output; a bare pop(mid) raised KeyError when an
                # entry had already been purged from the cache.
                [self._client.metadata.pop(mid, None) for mid in self.msg_ids]
                [self._client.results.pop(mid, None) for mid in self.msg_ids]
__all__ = ['AsyncResult', 'AsyncMapResult', 'AsyncHubResult']
| {
"repo_name": "fzheng/codejam",
"path": "lib/python2.7/site-packages/ipyparallel/client/asyncresult.py",
"copies": "1",
"size": "27369",
"license": "mit",
"hash": -5953697036539599000,
"line_mean": 34.3148387097,
"line_max": 100,
"alpha_frac": 0.5314772188,
"autogenerated": false,
"ratio": 4.656967840735069,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.568844505953507,
"avg_score": null,
"num_lines": null
} |
"""Asyncronous tasks for MDN scraping."""
from json import dumps
from traceback import format_exc
from celery import shared_task
import requests
from .models import FeaturePage, TranslatedContent
from .scrape import scrape_feature_page
@shared_task(ignore_result=True)
def start_crawl(featurepage_id):
    """Start the calling process for an MDN page."""
    fp = FeaturePage.objects.get(id=featurepage_id)
    assert fp.status == fp.STATUS_STARTING, fp.status
    meta = fp.meta()
    # Determine next state / task
    next_task = None
    if meta.status == meta.STATUS_STARTING:
        # metadata has not been requested yet; fetch it next
        fp.status = fp.STATUS_META
        next_task = (fetch_meta, fp.id)
    elif meta.status == meta.STATUS_FETCHING:
        # a metadata fetch is already in flight; only record the state
        fp.status = fp.STATUS_META
    elif meta.status == meta.STATUS_FETCHED:
        # metadata is done; move on to fetching translations
        fp.status = fp.STATUS_PAGES
        next_task = (fetch_all_translations, fp.id)
    else:
        # only the error state remains
        assert meta.status == meta.STATUS_ERROR, meta.get_status_display()
        fp.status = fp.STATUS_ERROR
    fp.save()
    if next_task is not None:
        next_func, next_id = next_task
        next_func.delay(next_id)
@shared_task(ignore_result=True)
def fetch_meta(featurepage_id):
"""Fetch metadata for an MDN page."""
fp = FeaturePage.objects.get(id=featurepage_id)
assert fp.status == fp.STATUS_META, fp.get_status_display()
meta = fp.meta()
assert meta.status == meta.STATUS_STARTING, meta.get_status_display()
# Avoid double fetching
meta.status = meta.STATUS_FETCHING
meta.save(update_fields=['status'])
# Request and validate the metadata
url = meta.url()
r = requests.get(url, headers={'Cache-Control': 'no-cache'})
next_task = None
next_task_args = []
if r.url != url and not r.url.endswith('$json'):
# There was a redirect to the regular page
meta.delete()
fp.url = r.url
fp.status = fp.STATUS_META
fp.save()
elif r.status_code != requests.codes.ok:
issue = (
'failed_download', 0, 0,
{'url': url, 'status': r.status_code, 'content': r.text})
meta.raw = 'Status %d, Content:\n%s' % (r.status_code, r.text)
meta.status = meta.STATUS_ERROR
next_task = r.raise_for_status
else:
if r.url != url and r.url.endswith('$json'):
# There was a redirect to another meta (zone change)
fp.url = r.url[:-len('$json')]
try:
meta.raw = dumps(r.json())
except ValueError:
meta.raw = 'Response is not JSON:\n' + r.text
issue = ('bad_json', 0, 0, {'url': url, 'content': r.text})
meta.status = meta.STATUS_ERROR
next_task = r.json
else:
meta.status = meta.STATUS_FETCHED
meta.save()
# Determine next state / task
if meta.status == meta.STATUS_ERROR:
fp.status = fp.STATUS_ERROR
fp.add_issue(issue)
elif meta.status == fp.STATUS_META:
next_task = fetch_meta.delay
next_task_args = (fp.id, )
else:
assert meta.status == meta.STATUS_FETCHED, meta.get_status_display()
fp.status = fp.STATUS_PAGES
next_task = fetch_all_translations.delay
next_task_args = (fp.id, )
fp.save()
assert next_task
next_task(*next_task_args)
@shared_task(ignore_result=True)
def fetch_all_translations(featurepage_id):
"""Fetch all translations for an MDN page."""
fp = FeaturePage.objects.get(id=featurepage_id)
if fp.status != fp.STATUS_PAGES:
# Exit early if not called after fetch_meta
return
translations = fp.translations()
assert translations, translations
# Gather / count by status
to_fetch = []
fetching = 0
errored = 0
for trans in translations:
obj = trans.obj
if obj is None:
continue
if obj.status == obj.STATUS_STARTING:
to_fetch.append(trans.locale)
elif obj.status == obj.STATUS_FETCHING:
fetching += 1
elif obj.status == obj.STATUS_ERROR:
errored += 1
else:
assert obj.status == obj.STATUS_FETCHED, obj.get_status_display()
# Determine next status / task
if errored:
fp.status = fp.STATUS_ERROR
fp.save()
elif (not fetching) and (not to_fetch):
fp.status = fp.STATUS_PARSING
fp.save()
parse_page.delay(fp.id)
elif to_fetch:
fetch_translation.delay(fp.id, to_fetch[0])
@shared_task(ignore_result=True)
def fetch_translation(featurepage_id, locale):
    """Fetch a translation for an MDN page.

    Downloads the raw page content for one locale, records success or a
    'failed_download' issue, then re-triggers fetch_all_translations to
    advance the overall crawl.
    """
    fp = FeaturePage.objects.get(id=featurepage_id)
    if fp.status in (fp.STATUS_PARSING, fp.STATUS_PARSED, fp.STATUS_NO_DATA):
        # Already fetched
        t = TranslatedContent.objects.get(page=fp, locale=locale)
        assert t.status == t.STATUS_FETCHED, t.get_status_display()
        return
    assert fp.status == fp.STATUS_PAGES, fp.get_status_display()
    t = TranslatedContent.objects.get(page=fp, locale=locale)
    assert t.status == t.STATUS_STARTING, t.get_status_display()
    # Avoid double fetching
    t.status = t.STATUS_FETCHING
    t.save(update_fields=['status'])
    # Request the translation.
    # FIX: reuse the computed `url` instead of calling t.url() a second
    # time — the variable was previously assigned and then ignored.
    url = t.url() + '?raw'
    r = requests.get(url, headers={'Cache-Control': 'no-cache'})
    t.raw = r.text
    if r.status_code == requests.codes.ok:
        t.status = t.STATUS_FETCHED
    else:
        t.status = t.STATUS_ERROR
        issue = (
            'failed_download', 0, 0,
            {'url': url, 'status': r.status_code, 'content': r.text[:100]})
        fp.add_issue(issue)
        fp.save()
    t.save()
    fetch_all_translations.delay(fp.id)
@shared_task(ignore_result=True)
def parse_page(featurepage_id):
    """Parse a fully-fetched MDN page into compatibility data."""
    fp = FeaturePage.objects.get(id=featurepage_id)
    assert fp.status == fp.STATUS_PARSING, fp.get_status_display()
    try:
        scrape_feature_page(fp)
    except:
        # Unexpected exceptions are added and re-raised
        # Expected exceptions are handled by scrape_feature_page
        # NOTE(review): bare except is deliberate record-then-reraise, but it
        # also records KeyboardInterrupt/SystemExit — confirm that is wanted.
        fp.status = FeaturePage.STATUS_ERROR
        fp.add_issue(('exception', 0, 0, {'traceback': format_exc()}))
        fp.save()
        raise
| {
"repo_name": "jwhitlock/web-platform-compat",
"path": "mdn/tasks.py",
"copies": "2",
"size": "6212",
"license": "mpl-2.0",
"hash": 7358624710026280000,
"line_mean": 32.2192513369,
"line_max": 77,
"alpha_frac": 0.6134900193,
"autogenerated": false,
"ratio": 3.5016910935738443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000445632798573975,
"num_lines": 187
} |
"""Async tasks."""
from django.utils import timezone
from modoboa.lib import cryptutils
from .lib import carddav
from . import models
def get_cdav_client(request, addressbook, write_support=False):
    """Instantiate a new CardDAV client.

    Credentials come from the authenticated request: the username, plus the
    session-stored password decrypted on the fly.
    """
    return carddav.PyCardDAV(
        addressbook.url, user=request.user.username,
        passwd=cryptutils.decrypt(request.session["password"]),
        write_support=write_support
    )
def create_cdav_addressbook(addressbook, password):
    """Create CardDAV address book."""
    # Build a write-enabled client bound to the book owner's credentials.
    client = carddav.PyCardDAV(
        addressbook.url,
        user=addressbook.user.username,
        passwd=password,
        write_support=True,
    )
    client.create_abook()
def push_addressbook_to_carddav(request, addressbook):
"""Push every addressbook item to carddav collection.
Use only once.
"""
clt = get_cdav_client(request, addressbook, True)
for contact in addressbook.contact_set.all():
href, etag = clt.upload_new_card(contact.uid, contact.to_vcard())
contact.etag = etag
contact.save(update_fields=["etag"])
addressbook.last_sync = timezone.now()
addressbook.sync_token = clt.get_sync_token()
addressbook.save(update_fields=["last_sync", "sync_token"])
def sync_addressbook_from_cdav(request, addressbook):
    """Fetch changes from CardDAV server.

    Pulls the change set since ``addressbook.sync_token``; creates/updates
    contacts reported with a 200 status and deletes those reported 404,
    then stores the new sync token and timestamp.
    """
    clt = get_cdav_client(request, addressbook)
    changes = clt.sync_vcards(addressbook.sync_token)
    # FIX: truthiness check instead of `not len(...)` (same behavior,
    # idiomatic form).  Nothing changed remotely → keep token untouched.
    if not changes["cards"]:
        return
    for card in changes["cards"]:
        # UID sometimes embeds the .vcf extension, sometimes not...
        long_uid = card["href"].split("/")[-1]
        short_uid = long_uid.split(".")[0]
        if "200" in card["status"]:
            content = clt.get_vcard(card["href"]).decode()
            contact = models.Contact.objects.filter(
                uid__in=[long_uid, short_uid]).first()
            if not contact:
                contact = models.Contact(addressbook=addressbook)
            if contact.etag != card["etag"]:
                # only re-parse the vcard when the server reports a change
                contact.etag = card["etag"]
                contact.update_from_vcard(content)
        elif "404" in card["status"]:
            # card removed on the server; mirror the deletion locally
            models.Contact.objects.filter(
                uid__in=[long_uid, short_uid]).delete()
    addressbook.last_sync = timezone.now()
    addressbook.sync_token = changes["token"]
    addressbook.save(update_fields=["last_sync", "sync_token"])
def push_contact_to_cdav(request, contact):
    """Upload new contact to cdav collection."""
    dav = get_cdav_client(request, contact.addressbook, True)
    # only the etag matters here; the returned path is unused
    _, etag = dav.upload_new_card(contact.uid, contact.to_vcard())
    contact.etag = etag
    contact.save(update_fields=["etag"])
def update_contact_cdav(request, contact):
    """Update existing contact."""
    dav = get_cdav_client(request, contact.addressbook, True)
    # the server addresses cards by their .vcf filename
    card_uid = contact.uid
    if not card_uid.endswith(".vcf"):
        card_uid += ".vcf"
    response = dav.update_vcard(contact.to_vcard(), card_uid, contact.etag)
    contact.etag = response["cards"][0]["etag"]
    contact.save(update_fields=["etag"])
def delete_contact_cdav(request, contact):
    """Delete a contact."""
    dav = get_cdav_client(request, contact.addressbook, True)
    # the server addresses cards by their .vcf filename
    card_uid = contact.uid
    if not card_uid.endswith(".vcf"):
        card_uid += ".vcf"
    dav.delete_vcard(card_uid, contact.etag)
| {
"repo_name": "modoboa/modoboa-contacts",
"path": "modoboa_contacts/tasks.py",
"copies": "1",
"size": "3333",
"license": "mit",
"hash": -8058892499155863000,
"line_mean": 33.3608247423,
"line_max": 73,
"alpha_frac": 0.6420642064,
"autogenerated": false,
"ratio": 3.5761802575107295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9718244463910729,
"avg_score": 0,
"num_lines": 97
} |
"""Async TCP Server tests."""
import asyncio
import logging
import unittest
from kytos.core.atcp_server import KytosServer, KytosServerProtocol
# from unittest.mock import Mock
logging.basicConfig(level=logging.CRITICAL)
# Using "nettest" TCP port as a way to avoid conflict with a running
# Kytos server on 6633.
TEST_ADDRESS = ('127.0.0.1', 4138)
class TestKytosServer(unittest.TestCase):
    """Test if a Kytos Server will go up and receive connections."""
    # (FIX: the original docstring opened with four quotes — `""""Test` —
    # leaving a stray leading quote in the docstring text.)

    def setUp(self):
        """Start new asyncio loop and a test TCP server."""
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
        self.server = KytosServer(TEST_ADDRESS, KytosServerProtocol,
                                  None, 'openflow', loop=self.loop)
        self.server.serve_forever()

    def tearDown(self):
        """Close the per-test event loop.

        FIX: previously each test leaked its event loop (and the server's
        listening socket held by it) because nothing ever closed them.
        """
        # NOTE(review): assumes closing the loop releases the server's
        # resources; confirm whether KytosServer offers an explicit shutdown.
        self.loop.close()

    def test_connection_to_server(self):
        """Test if we really can connect to TEST_ADDRESS."""
        @asyncio.coroutine
        def wait_and_go():
            """Wait a little for the server to go up, then connect."""
            yield from asyncio.sleep(0.01, loop=self.loop)
            # reader, writer = ...
            _ = yield from asyncio.open_connection(
                *TEST_ADDRESS, loop=self.loop)
        self.loop.run_until_complete(wait_and_go())
| {
"repo_name": "kytos/kyco",
"path": "tests/test_core/test_atcp_server.py",
"copies": "1",
"size": "1279",
"license": "mit",
"hash": -267697117079491550,
"line_mean": 31.7948717949,
"line_max": 70,
"alpha_frac": 0.6395621579,
"autogenerated": false,
"ratio": 3.7728613569321534,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4912423514832153,
"avg_score": null,
"num_lines": null
} |
# Copyright (c) 2020 Peter Hinch
# Released under the MIT License (MIT) - see LICENSE file
import uasyncio as asyncio
from sched.sched import schedule
from time import localtime
def foo(txt):  # Demonstrate callback
    """Print a timestamped message showing when the callback fired."""
    year, month, mday, hrs, mins, secs, wday = localtime()[:7]
    template = 'Callback {} {:02d}:{:02d}:{:02d} on {:02d}/{:02d}/{:02d}'
    print(template.format(txt, hrs, mins, secs, mday, month, year))
async def bar(txt):  # Demonstrate coro launch
    """Coroutine flavour of foo: print a timestamped message, then yield once."""
    year, month, mday, hrs, mins, secs, wday = localtime()[:7]
    template = 'Coroutine {} {:02d}:{:02d}:{:02d} on {:02d}/{:02d}/{:02d}'
    print(template.format(txt, hrs, mins, secs, mday, month, year))
    await asyncio.sleep(0)
async def main():
    """Schedule the demo callbacks/coroutines, then quit after 15 minutes."""
    print('Asynchronous test running...')
    asyncio.create_task(schedule(foo, 'every 4 mins', hrs=None, mins=range(0, 60, 4)))
    asyncio.create_task(schedule(foo, 'every 5 mins', hrs=None, mins=range(0, 60, 5)))
    # Launch a coroutine
    asyncio.create_task(schedule(bar, 'every 3 mins', hrs=None, mins=range(0, 60, 3)))
    # times=1 -> fire only once
    asyncio.create_task(schedule(foo, 'one shot', hrs=None, mins=range(0, 60, 2), times=1))
    await asyncio.sleep(900)  # Quit after 15 minutes
try:
    asyncio.run(main())
finally:
    # restore a fresh event loop so the demo can be re-run (uasyncio idiom)
    _ = asyncio.new_event_loop()
| {
"repo_name": "peterhinch/micropython-async",
"path": "v3/as_drivers/sched/asynctest.py",
"copies": "1",
"size": "1257",
"license": "mit",
"hash": 2114479265500601000,
"line_mean": 33.9166666667,
"line_max": 91,
"alpha_frac": 0.6420047733,
"autogenerated": false,
"ratio": 2.8374717832957113,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39794765565957113,
"avg_score": null,
"num_lines": null
} |
"""Async versions of the itertools features."""
import collections
import functools
import itertools as sync_itertools
import operator
import types
class AsyncIterWrapper:
    """Adapter presenting a synchronous iterable via the async-iterator protocol."""

    def __init__(self, iterable):
        """Capture an iterator over *iterable*."""
        self._iterable = iter(iterable)

    async def __aiter__(self):
        """Return self (old-style protocol where __aiter__ is a coroutine)."""
        return self

    async def __anext__(self):
        """Produce the next item, mapping StopIteration to StopAsyncIteration."""
        try:
            item = next(self._iterable)
        except StopIteration as exc:
            raise StopAsyncIteration() from exc
        return item

    def __repr__(self):
        """Human-readable representation naming the wrapped iterator."""
        return f'<AsyncIterWrapper {self._iterable}>'
async def aiter(*args):
    """Return an iterator object.

    Args:
        obj: An object that implements the __iter__ or __aiter__ method.
        sentinel: An optional sentinel value to look for while iterator.

    Return:
        iterable: Some iterable that provides a __anext__ method.

    Raises:
        TypeError: If only the object is given and it is not iterable.
        TypeError: If two arguments are given and the first is not an async
            callable.

    This function behaves very differently based on the number of arguments
    given. If only the first argument is present the method will return
    an async iterable that implements the __anext__ method by called the
    given object's __aiter__. If the object does not define __aiter__ but does
    define __iter__ then the result will be an AsyncIterWrapper that contains
    the original iterable. This form of the function can be used to coerce all
    iterables, async or not, into async iterables for interoperablilty.

    If the second argument is given then the first argument _must_ be an async
    callable. The returned value will still be an iterable implementing the
    __aiter__ method, but each call to that method will call the underlying
    async callable. If the value returned from the async callable matches the
    sentinel value then StopAsyncIteration is raised. Otherwise the value is
    returned.
    """
    if not args:
        raise TypeError('aiter() expected at least 1 arguments, got 0')
    if len(args) > 2:
        raise TypeError(
            'aiter() expected at most 2 arguments, got {}'.format(len(args))
        )
    if len(args) == 2:
        func, sentinel = args
        # NOTE(review): this check looks wrong — a coroutine *function* is not
        # an instance of types.CoroutineType (only the object it returns is),
        # so well-formed async callables would be rejected here.  Harmless for
        # now because the two-argument form is unimplemented below; confirm
        # before implementing.
        if not isinstance(func, types.CoroutineType):
            raise TypeError('aiter(v, w): v must be async callable')
        # TODO: repeating call thing
        raise NotImplementedError()
    obj = args[0]
    # Already an async iterator: hand it back unchanged.
    if hasattr(obj, '__anext__'):
        return obj
    # Old-style async iterable: __aiter__ is itself a coroutine here.
    if hasattr(obj, '__aiter__'):
        return (await obj.__aiter__())
    # Plain synchronous iterable/iterator: adapt it.
    if hasattr(obj, '__iter__') or hasattr(obj, '__next__'):
        return AsyncIterWrapper(iter(obj))
    raise TypeError("'{}' object is not iterable".format(type(args[0])))
async def anext(*args):
    """Return the next item from an async iterator.

    Args:
        iterable: An async iterable.
        default: An optional default value to return if the iterable is empty.

    Return:
        The next value of the iterable.

    Raises:
        TypeError: The wrong number of arguments is given.

    This function will return the next value from an async iterable. If the
    iterable is empty the StopAsyncIteration will be propagated. However, if
    a default value is given as a second argument the exception is silenced and
    the default value is returned instead.
    """
    argc = len(args)
    if argc == 0:
        raise TypeError('anext() expected at least 1 arguments, got 0')
    if argc > 2:
        raise TypeError(
            'anext() expected at most 2 arguments, got {}'.format(argc)
        )
    try:
        return await args[0].__anext__()
    except StopAsyncIteration as exc:
        if argc == 2:
            # A default was supplied; swallow the exhaustion signal.
            return args[1]
        raise StopAsyncIteration() from exc
async def alist(iterable):
    """Async standin for the list built-in.

    This function consumes an async iterable and returns a list of values
    resolved from the iterable.
    """
    result = []
    push = result.append
    async for item in iterable:
        push(item)
    return result
async def atuple(iterable):
    """Async standin for the tuple built-in.

    This function consumes an async iterable and returns a tuple of values
    resolved from the iterable.
    """
    values = await alist(iterable)
    return tuple(values)
def count(start=0, step=1):
    """Make an iterator that returns evenly spaced values."""
    # Delegate to itertools.count and adapt the result to the async protocol.
    return AsyncIterWrapper(sync_itertools.count(start, step))
class AsyncCycle:
    """Async version of the cycle iterable."""

    def __init__(self, iterable):
        """Initialize the cycle with some iterable."""
        self._values = []          # cache of values seen so far
        self._iterable = iterable
        self._initialized = False  # True once _iterable is an async iterator
        self._depleted = False     # True once the source is exhausted
        self._offset = 0           # replay position within _values

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get the next value of the iterable or one from cache."""
        if not self._initialized:
            self._iterable = await aiter(self._iterable)
            self._initialized = True
        if self._depleted:
            # Replay from the cache, wrapping the offset at the end.
            offset, self._offset = self._offset, self._offset + 1
            if self._offset >= len(self._values):
                self._offset = 0
            return self._values[offset]
        try:
            value = await anext(self._iterable)
            self._values.append(value)
            return value
        except StopAsyncIteration as exc:
            self._depleted = True
            if not self._values:
                # Cycling an empty iterable produces nothing.
                raise StopAsyncIteration() from exc
            # The first cached value is re-emitted; replay resumes at 1.
            self._offset = 1
            return self._values[0]

    def __repr__(self):
        """Get a human representation of the cycle."""
        return '<AsyncCycle {}>'.format(self._iterable)
def cycle(iterable):
    """Repeat all elements of the iterable forever.

    Make an iterator returning elements from the iterable and saving a copy of
    each. When the iterable is exhausted, return elements from the saved copy.
    Repeats indefinitely.
    """
    return AsyncCycle(iterable)
def repeat(obj, times=None):
    """Make an iterator that returns object over and over again.

    If times is None the object is repeated forever; otherwise it is
    repeated exactly times times.
    """
    if times is None:
        source = sync_itertools.repeat(obj)
    else:
        source = sync_itertools.repeat(obj, times)
    return AsyncIterWrapper(source)
def _async_callable(func):
    """Ensure the callable is usable as an async def.

    The result is always a coroutine function. Synchronous callables are
    wrapped so their return value can be awaited; if func is itself an
    async def, the coroutine it produces is awaited and its result returned.

    NOTE: the previous implementation tested
    ``isinstance(func, types.CoroutineType)``, which matches coroutine
    *objects*, never coroutine functions, so async callables were wrapped
    and their coroutines returned unawaited.
    """
    @functools.wraps(func)
    async def _async_def_wrapper(*args, **kwargs):
        """Call func, awaiting the result when it is a coroutine."""
        result = func(*args, **kwargs)
        if isinstance(result, types.CoroutineType):
            result = await result
        return result

    return _async_def_wrapper
class AsyncAccumulate:
    """Async version of the accumulate iterable."""

    def __init__(self, iterable, func=operator.add):
        """Initialize the wrapper with an iterable and binary function."""
        self._iterable = iterable
        self._func = _async_callable(func)
        self._initialized = False
        self._started = False
        self._total = None
        self._depleted = False

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get the next accumulated value."""
        if not self._initialized:
            self._initialized = True
            self._iterable = await aiter(self._iterable)
        if self._depleted:
            raise StopAsyncIteration()
        try:
            value = await anext(self._iterable)
        except StopAsyncIteration as exc:
            self._depleted = True
            raise StopAsyncIteration() from exc
        if not self._started:
            # The first value seeds the running total unmodified.
            self._started = True
            self._total = value
        else:
            self._total = await self._func(self._total, value)
        return self._total
def accumulate(iterable, func=operator.add):
    """Make an iterable that returns accumulated sums.

    An optional second argument can be given to run a custom binary function.
    If func is supplied, it should be a function of two arguments. Elements of
    the input iterable may be any type that can be accepted as arguments to
    func. (For example, with the default operation of addition, elements may be
    any addable type including Decimal or Fraction.) If the input iterable is
    empty, the output iterable will also be empty.
    """
    # func may be sync or async; AsyncAccumulate normalises it internally.
    return AsyncAccumulate(iterable, func)
class AsyncChain:
    """Async version of the chain iterable."""

    def __init__(self, *iterables):
        """Initialize the wrapper with some number of iterables."""
        self._iterables = iterables
        self._current = None       # iterator currently being drained
        self._initialized = False  # True once the sources are queued

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get the next value in the chain."""
        if not self._initialized:
            # Coerce every source into an async iterator up front and queue
            # them so exhausted sources can be popped off cheaply.
            current_iterables, self._iterables = self._iterables, []
            for iterable in current_iterables:
                self._iterables.append((await aiter(iterable)))
            self._iterables = collections.deque(self._iterables)
            self._initialized = True
            # Fix: chaining zero iterables previously raised IndexError from
            # popleft() on an empty deque instead of ending iteration.
            if self._iterables:
                self._current = self._iterables.popleft()
        while True:
            if not self._iterables and not self._current:
                raise StopAsyncIteration()
            try:
                return await anext(self._current)
            except StopAsyncIteration:
                # Current source is exhausted; move on to the next one.
                self._current = None
                if self._iterables:
                    self._current = self._iterables.popleft()
def chain(*iterables):
    """Combine iterators into one stream of values.

    Make an iterator that returns elements from the first iterable until it is
    exhausted, then proceeds to the next iterable, until all of the iterables
    are exhausted. Used for treating consecutive sequences as a single
    sequence.
    """
    return AsyncChain(*iterables)
class _AsyncChainFromIterable:
    """Lazily flatten an (async or sync) iterable of iterables."""

    def __init__(self, iterable):
        """Store the outer iterable of iterables."""
        self._outer = iterable
        self._current = None       # async iterator over the active inner iterable
        self._initialized = False  # True once the outer source is coerced

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get the next value from the flattened stream."""
        if not self._initialized:
            self._outer = await aiter(self._outer)
            self._initialized = True
        while True:
            if self._current is None:
                # Advancing the outer iterable raises StopAsyncIteration
                # once every inner iterable has been consumed.
                self._current = await aiter((await anext(self._outer)))
            try:
                return await anext(self._current)
            except StopAsyncIteration:
                self._current = None


def chain_from_iterable(iterable):
    """Chain iterables contained within a lazily evaluated iterable.

    Unlike chain(*iterables), the outer iterable is consumed lazily, so it
    may itself be an async iterable that produces inner iterables on demand.
    (Previously this was an unimplemented stub raising NotImplementedError.)
    """
    return _AsyncChainFromIterable(iterable)


chain.from_iterable = chain_from_iterable
class AsyncCompress:
    """Async version of the compress iterable."""

    def __init__(self, data, selectors):
        """Initialize the iterable with data and selectors."""
        self._data = data
        self._selectors = selectors
        self._initialized = False

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Fetch the next value whose selector is truthy."""
        if not self._initialized:
            self._data = await aiter(self._data)
            self._selectors = await aiter(self._selectors)
            self._initialized = True
        while True:
            # Exhaustion of either source ends the iteration naturally via
            # the StopAsyncIteration raised by anext.
            candidate = await anext(self._data)
            chosen = await anext(self._selectors)
            if chosen:
                return candidate
def compress(data, selectors):
    """Only return elements from data with a corresponding True selector.

    Make an iterator that filters elements from data returning only those that
    have a corresponding element in selectors that evaluates to True. Stops
    when either the data or selectors iterables has been exhausted.
    """
    return AsyncCompress(data, selectors)
class AsyncDropWhile:
    """Async version of the dropwhile iterable."""

    def __init__(self, predicate, iterable):
        """Initialize the iterable with a predicate and data iterable."""
        self._predicate = _async_callable(predicate)
        self._iterable = iterable
        self._initialized = False
        self._found = False  # True once the predicate has returned False

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get a value after the test returns False."""
        if not self._initialized:
            self._iterable = await aiter(self._iterable)
            self._initialized = True
        if self._found:
            # Dropping phase is over; pass values straight through.
            return await anext(self._iterable)
        while True:
            value = await anext(self._iterable)
            if not (await self._predicate(value)):
                self._found = True
                return value
def dropwhile(predicate, iterable):
    """Skip values from iterable until predicate returns False.

    Make an iterator that drops elements from the iterable as long as the
    predicate is true; afterwards, returns every element. Note, the iterator
    does not produce any output until the predicate first becomes false, so it
    may have a lengthy start-up time.
    """
    return AsyncDropWhile(predicate, iterable)
class AsyncFilterFalse:
    """Async version of the filterfalse iterable."""

    def __init__(self, predicate, iterable):
        """Initialize the iterable with a predicate and data iterable."""
        if predicate is None:
            # A missing predicate defaults to truthiness testing.
            predicate = bool
        self._predicate = _async_callable(predicate)
        self._iterable = iterable
        self._initialized = False

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get the next value for which the predicate returns False."""
        if not self._initialized:
            self._iterable = await aiter(self._iterable)
            self._initialized = True
        while True:
            value = await anext(self._iterable)
            if not (await self._predicate(value)):
                return value
def filterfalse(predicate, iterable):
    """Only emit values for which the predicate returns False.

    Make an iterator that filters elements from iterable returning only those
    for which the predicate is False. If predicate is None, return the items
    that are false.
    """
    return AsyncFilterFalse(predicate, iterable)
class _AsyncGroupByGroupIterable:
    """Async version of the group from groupby."""

    def __init__(self, group_by):
        # Handle on the parent AsyncGroupBy; the group shares the parent's
        # underlying iterator and its current key/value lookahead state.
        self._group_by = group_by
        self._initialized = False

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get the next value in the group."""
        if not self._initialized:
            # The first value of the group was already consumed by the
            # parent while detecting the key change; emit it now.
            self._initialized = True
            return self._group_by._current_value
        try:
            value = await anext(self._group_by._iterable)
        except StopAsyncIteration as exc:
            # Source exhausted: tell the parent to stop producing groups.
            self._group_by._stop = True
            raise StopAsyncIteration() from exc
        key = await self._group_by._key(value)
        if key == self._group_by._current_key:
            return value
        # Key changed: stash the lookahead value for the next group and
        # terminate this one.
        self._group_by._current_value = value
        self._group_by._current_key = key
        raise StopAsyncIteration()
class AsyncGroupBy:
    """Async version of the groupby iterable."""

    def __init__(self, iterable, key=None):
        """Initialize the iterable with a data and optional key function."""
        self._iterable = iterable
        self._key = key
        if self._key is None:
            # Default key is the identity function.
            self._key = lambda x: x
        self._key = _async_callable(self._key)
        self._initialized = False
        # Unique sentinel marking "no value consumed yet".
        self._singleton = object()
        self._current_key = self._singleton
        self._current_value = self._singleton
        self._stop = False
        self._group_iter = None

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get the next group in the iterable."""
        if not self._initialized:
            self._iterable = await aiter(self._iterable)
            self._initialized = True
        if self._stop:
            raise StopAsyncIteration()
        if self._current_value is self._singleton:
            # First group: prime the lookahead value and its key.
            self._current_value = await anext(self._iterable)
            self._current_key = await self._key(self._current_value)
        if self._group_iter and not self._group_iter._initialized:
            # The previous group was handed out but never consumed; skip the
            # remainder of that group by advancing until the key changes.
            value = await anext(self._iterable)
            key = await self._key(value)
            while key == self._current_key:
                value = await anext(self._iterable)
                key = await self._key(value)
            self._current_value = value
            self._current_key = key
        self._group_iter = _AsyncGroupByGroupIterable(self)
        return (self._current_key, self._group_iter)
def groupby(iterable, key=None):
    """Make an iterator that returns consecutive keys and groups.

    The key is a function computing a key value for each element. If not
    specified or is None, key defaults to an identity function and returns the
    element unchanged. Generally, the iterable needs to already be sorted on
    the same key function.

    The operation of groupby() is similar to the uniq filter in Unix. It
    generates a break or new group every time the value of the key function
    changes (which is why it is usually necessary to have sorted the data using
    the same key function). That behavior differs from SQL's GROUP BY which
    aggregates common elements regardless of their input order.

    The returned group is itself an iterator that shares the underlying
    iterable with groupby(). Because the source is shared, when the groupby()
    object is advanced, the previous group is no longer visible. So, if that
    data is needed later, it should be stored as a list.
    """
    return AsyncGroupBy(iterable, key)
class AsyncISlice:
    """Async version of the islice iterable."""

    def __init__(self, iterable, *args):
        """Initialize the iterable with start, stop, and step values.

        Raises:
            TypeError: If the number of positional arguments is wrong.
            ValueError: If start/stop/step are not non-negative integers
                (step must be at least one).
        """
        if not args:
            raise TypeError('islice expected at least 2 arguments, got 1')
        if len(args) == 1:
            start, stop, step = 0, args[0], 1
        if len(args) == 2:
            start, stop = args
            step = 1
        if len(args) == 3:
            start, stop, step = args
        if len(args) > 3:
            raise TypeError(
                'islice expected at most 4 arguments, got {}'.format(len(args))
            )
        # None means "use the default" for start and step.
        start = start if start is not None else 0
        step = step if step is not None else 1
        if not isinstance(start, int):
            raise ValueError('The start value must be an integer.')
        if stop is not None and not isinstance(stop, int):
            raise ValueError('The stop value must be an integer.')
        if not isinstance(step, int):
            raise ValueError('The step value must be an integer.')
        if start < 0:
            raise ValueError('The start value cannot be negative.')
        if stop is not None and stop < 0:
            raise ValueError('The stop value cannot be negative.')
        if step < 1:
            raise ValueError('The step value cannot be negative or zero.')
        self._start, self._step, self._stop = start, step, stop
        self._iterable = iterable
        self._initialized = False
        self._offset = 0  # number of items consumed from the source
        self._depleted = False

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get the next value in the slice."""
        if not self._initialized:
            self._iterable = await aiter(self._iterable)
            self._initialized = True
            # The first emitted value sits at index start; if the slice is
            # empty (start >= stop) end before consuming anything. Fix: the
            # previous check ran only after fetching and counting the first
            # value, which wrongly suppressed the single element of a slice
            # such as islice(x, 0, 1).
            if self._stop is not None and self._start >= self._stop:
                raise StopAsyncIteration()
            for _ in range(self._start):
                await anext(self._iterable)
                self._offset = self._offset + 1
            value = await anext(self._iterable)
            self._offset = self._offset + 1
            return value
        # Skip the step-1 values between emitted values.
        for _ in range(self._step - 1):
            await anext(self._iterable)
            self._offset = self._offset + 1
        # _offset now equals the index of the next candidate value.
        if self._stop is not None and self._offset >= self._stop:
            raise StopAsyncIteration()
        value = await anext(self._iterable)
        self._offset = self._offset + 1
        return value
def islice(iterable, *args):
    """Make an iterator that returns selected elements from the iterable.

    If start is non-zero, then elements from the iterable are skipped until
    start is reached. Afterward, elements are returned consecutively unless
    step is set higher than one which results in items being skipped. If stop
    is None, then iteration continues until the iterator is exhausted, if at
    all; otherwise, it stops at the specified position. Unlike regular slicing,
    islice() does not support negative values for start, stop, or step. Can be
    used to extract related fields from data where the internal structure has
    been flattened (for example, a multi-line report may list a name field on
    every third line).
    """
    return AsyncISlice(iterable, *args)
class AsyncStarMap:
    """Async version of the starmap iterable."""

    def __init__(self, func, iterable):
        """Initialize the iterable with a func and data."""
        self._func = _async_callable(func)
        self._iterable = iterable
        self._initialized = False

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get the next mapped value."""
        if not self._initialized:
            self._initialized = True
            self._iterable = await aiter(self._iterable)
        # Each source item is an argument tuple, splatted into func.
        arguments = await anext(self._iterable)
        return (await self._func(*arguments))
def starmap(func, iterable):
    """Make an iterator that calls func using arguments from the iterable.

    Used instead of map() when argument parameters are already grouped in
    tuples from a single iterable (the data has been "pre-zipped"). The
    difference between map() and starmap() parallels the distinction between
    function(a,b) and function(*c).
    """
    return AsyncStarMap(func, iterable)
class AsyncTakeWhile:
    """Async version of the takewhile iterable."""

    def __init__(self, predicate, iterable):
        """Initialize the iterable with a predicate and data iterable."""
        self._predicate = _async_callable(predicate)
        self._iterable = iterable
        self._initialized = False
        self._stop = False  # latched once the predicate first fails

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get a value while the test returns True."""
        if not self._initialized:
            self._iterable = await aiter(self._iterable)
            self._initialized = True
        if self._stop:
            raise StopAsyncIteration()
        value = await anext(self._iterable)
        if await self._predicate(value):
            return value
        self._stop = True
        raise StopAsyncIteration()
def takewhile(predicate, iterable):
    """Return values while the predicate returns true.

    Make an iterator that returns elements from the iterable as long as the
    predicate is true.
    """
    return AsyncTakeWhile(predicate, iterable)
class AsyncTeeIterable:
    """Async version of the tee iterable."""

    def __init__(self, iterable):
        """Initialize the iterable with data and a number of tees."""
        self._iterable = iterable
        self._siblings = ()  # all tees sharing this source; set by tee()
        self._initialized = False
        self._cache = collections.deque()  # values fetched by siblings

    def _append(self, value):
        """Add a value to the internal cache."""
        self._cache.append(value)

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get the next value in the tee."""
        if not self._initialized:
            self._iterable = await aiter(self._iterable)
            self._initialized = True
            # Share the coerced async iterator with every sibling so the
            # source is advanced through a single iterator.
            for sibling in self._siblings:
                sibling._iterable = self._iterable
                sibling._initialized = True
        # Serve previously cached values before touching the source.
        if self._cache:
            return self._cache.popleft()
        value = await anext(self._iterable)
        # Every other sibling caches the fetched value for later.
        for sibling in self._siblings:
            if sibling is self:
                continue
            sibling._append(value)
        return value
def tee(iterable, n=2):
    """Return n independent iterators from a single iterable.

    Once tee() has made a split, the original iterable should not be used
    anywhere else; otherwise, the iterable could get advanced without the tee
    objects being informed.

    This itertool may require significant auxiliary storage (depending on how
    much temporary data needs to be stored). In general, if one iterator uses
    most or all of the data before another iterator starts, it is faster to use
    list() instead of tee().
    """
    branches = tuple(AsyncTeeIterable(iterable) for _ in range(n))
    for branch in branches:
        branch._siblings = branches
    return branches
class _ZipExhausted(Exception):
    """Internal exception for signaling zip complete.

    Raised while advancing a zip_longest column once every source iterable
    has been exhausted.
    """
class AsyncZipLongest:
    """Async version of the zip_longest iterable."""

    def __init__(self, *iterables, fillvalue=None):
        """Initialize with content to zip and a fill value."""
        self._iterables = iterables
        self._fillvalue = fillvalue
        self._initialized = False
        # Count of source iterables not yet exhausted.
        self._remaining = len(self._iterables)

    async def __aiter__(self):
        """Return self."""
        return self

    def _iterable_exhausted(self):
        """Mark one source as finished, then yield a fill value.

        This synchronous generator is chained after each source; its body
        runs only when the source is exhausted and the chain advances into
        it. When the last live source hits this marker it raises
        _ZipExhausted to terminate the whole zip.
        """
        self._remaining = self._remaining - 1
        if not self._remaining:
            raise _ZipExhausted()
        yield self._fillvalue

    async def __anext__(self):
        """Get the next zip of values."""
        if not self._initialized:
            # Wrap each source as: source + exhaustion marker + infinite
            # fill values.
            fillers = repeat(self._fillvalue)
            chained_iters = []
            for iterable in self._iterables:
                chained_iters.append(
                    (await aiter(
                        chain(
                            iterable,
                            self._iterable_exhausted(),
                            fillers
                        )
                    ))
                )
            self._iterables = chained_iters
            # Fix: this flag was never set, so every call re-wrapped the
            # columns in another layer of chains (and added another
            # exhaustion generator), growing without bound per pull.
            self._initialized = True
        if not self._remaining:
            raise StopAsyncIteration()
        values = []
        try:
            for iterable in self._iterables:
                values.append((await anext(iterable)))
        except _ZipExhausted:
            raise StopAsyncIteration()
        return tuple(values)
def zip_longest(*iterables, fillvalue=None):
    """Make an iterator that aggregates elements from each of the iterables.

    If the iterables are of uneven length, missing values are filled-in with
    fillvalue. Iteration continues until the longest iterable is exhausted.
    """
    return AsyncZipLongest(*iterables, fillvalue=fillvalue)
class AsyncProduct:
    """Async version of the product iterable."""

    def __init__(self, *iterables, repeat=1):
        """Initialize with data and a repeat value."""
        self._iterables = iterables
        self._offsets = []  # odometer: current index into each pool
        self._repeat = repeat
        self._initialized = False
        self._stop = False

    async def __aiter__(self):
        """Return self."""
        return self

    async def __anext__(self):
        """Get the next product in the iterable."""
        if not self._initialized:
            if not self._iterables:
                # product() with no arguments yields a single empty tuple.
                self._initialized = True
                self._stop = True
                return ()
            # Materialize every source into a list; products are generated
            # from fully realized pools.
            iterables = []
            for iterable in self._iterables:
                iterable = await aiter(iterable)
                iterable = await alist(iterable)
                if not iterable:
                    # Any empty pool makes the whole product empty.
                    self._initialized = True
                    self._stop = True
                    raise StopAsyncIteration()
                iterables.append(iterable)
            self._iterables = iterables * self._repeat
            self._offsets = [0 for _ in self._iterables]
            self._initialized = True
            # First combination: the head of every pool.
            return tuple(iterable[0] for iterable in self._iterables)
        if self._stop:
            raise StopAsyncIteration()
        # Advance the odometer: bump the rightmost offset, carrying into
        # the column to the left each time a column wraps around.
        for offset, iterable in enumerate(reversed(self._iterables), start=1):
            self._offsets[-offset] = self._offsets[-offset] + 1
            if self._offsets[-offset] >= len(iterable):
                self._offsets[-offset] = 0
                if offset >= len(self._iterables):
                    # The leftmost column wrapped: all combinations emitted.
                    self._stop = True
                    raise StopAsyncIteration()
                continue
            return tuple(
                iterable[offset]
                for iterable, offset in zip(self._iterables, self._offsets)
            )
def product(*iterables, repeat=1):
    """Cartesian product of input iterables.

    Equivalent to nested for-loops in a generator expression. For example,
    product(A, B) returns the same as ((x,y) for x in A for y in B).

    The nested loops cycle like an odometer with the rightmost element
    advancing on every iteration. This pattern creates a lexicographic
    ordering so that if the input's iterables are sorted, the product tuples
    are emitted in sorted order.

    To compute the product of an iterable with itself, specify the number of
    repetitions with the optional repeat keyword argument. For example,
    product(A, repeat=4) means the same as product(A, A, A, A).
    """
    return AsyncProduct(*iterables, repeat=repeat)
| {
"repo_name": "asyncdef/aitertools",
"path": "aitertools/__init__.py",
"copies": "1",
"size": "30221",
"license": "apache-2.0",
"hash": -2864358634082374700,
"line_mean": 27.3158388004,
"line_max": 79,
"alpha_frac": 0.6097375302,
"autogenerated": false,
"ratio": 4.656033287101248,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 1067
} |
"""Async web request example with tornado.
Requests to localhost:8888 will be relayed via 0MQ to a slow responder,
who will take 1-5 seconds to respond. The tornado app will remain responsive
during this time, and when the worker replies, the web request will finish.
A '.' is printed every 100ms to demonstrate that the zmq request is not blocking
the event loop.
"""
import random
import sys
import threading
import time
import zmq
from zmq.eventloop import ioloop, zmqstream
"""
ioloop.install() must be called prior to instantiating *any* tornado objects,
and ideally before importing anything from tornado, just to be safe.
install() sets the singleton instance of tornado.ioloop.IOLoop with zmq's
IOLoop. If this is not done properly, multiple IOLoop instances may be
created, which will have the effect of some subset of handlers never being
called, because only one loop will be running.
"""
ioloop.install()
from tornado import web
from tornado import websocket
class EchoWebSocket(websocket.WebSocketHandler):
    """WebSocket handler that echoes every message back to the client."""

    def open(self):
        # Parenthesized single-argument print behaves identically under
        # Python 2 and 3 (the file otherwise used py2 print statements).
        print("WebSocket opened")

    def on_message(self, message):
        # Echo the payload back, prefixed, as a text frame.
        self.write_message(u"You said: " + message)

    def on_close(self):
        print("WebSocket closed")
def slow_responder():
    """Thread target for slowly responding to replies.

    Binds a REP socket and, for each request, sleeps 1-5 seconds before
    echoing the message back with a counter appended.
    """
    ctx = zmq.Context.instance()
    socket = ctx.socket(zmq.REP)
    socket.linger = 0
    socket.bind('tcp://127.0.0.1:5555')
    i = 0
    while True:
        msg = socket.recv()
        # Parenthesized print works identically under Python 2 and 3 for a
        # single argument.
        print("\nworker received %r\n" % msg)
        time.sleep(random.randint(1, 5))
        socket.send(msg + " to you too, #%i" % i)
        i += 1
def dot():
    """Callback showing that the IOLoop is still responsive while we wait."""
    stream = sys.stdout
    stream.write('.')
    stream.flush()
def printer(msg):
    """Print the given message to stdout."""
    print(msg)
class TestHandler(web.RequestHandler):
    # Asynchronous handler: the HTTP request stays open until the 0MQ
    # worker replies via the registered stream callback.

    @web.asynchronous
    def get(self):
        """Forward the request to the worker and register a reply callback."""
        ctx = zmq.Context.instance()
        s = ctx.socket(zmq.REQ)
        s.connect('tcp://127.0.0.1:5555')
        # send request to worker
        s.send('hello')
        # NOTE(review): 'loop' is assigned but never used in this method.
        loop = ioloop.IOLoop.instance()
        self.stream = zmqstream.ZMQStream(s)
        self.stream.on_recv(self.handle_reply)

    def handle_reply(self, msg):
        """Complete the pending web request with the worker's reply."""
        # finish web request with worker's reply
        reply = msg[0]
        # Python 2 print statement; the trailing comma suppresses the newline.
        print "\nfinishing with %r\n" % reply,
        self.stream.close()
        self.write(reply)
        self.finish()
def main():
    """Start the worker thread and serve the tornado application."""
    worker = threading.Thread(target=slow_responder)
    worker.daemon = True
    worker.start()
    # application = web.Application([(r"/", TestHandler)])
    application = web.Application([(r"/websocket", EchoWebSocket)])
    # Heartbeat: print a dot every 100ms to show the loop is not blocked.
    beat = ioloop.PeriodicCallback(dot, 100)
    beat.start()
    application.listen(8888)
    try:
        ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        # Parenthesized print works identically under Python 2 and 3.
        print(' Interrupted')
if __name__ == "__main__":
main()
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/OpenMDAO-Framework-dev/openmdao.main/src/openmdao/main/tornadosrv.py",
"copies": "1",
"size": "2905",
"license": "mit",
"hash": 5048747921250137000,
"line_mean": 25.4090909091,
"line_max": 80,
"alpha_frac": 0.6643717728,
"autogenerated": false,
"ratio": 3.7924281984334205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4956799971233421,
"avg_score": null,
"num_lines": null
} |
"""Async wrappers for spooled temp files and temp directory objects"""
# Imports
import asyncio
from types import coroutine
from ..base import AsyncBase
from ..threadpool.utils import (delegate_to_executor, proxy_property_directly,
cond_delegate_to_executor)
from functools import partial
@delegate_to_executor('fileno', 'rollover')
@cond_delegate_to_executor('close', 'flush', 'isatty', 'newlines', 'read',
                           'readline', 'readlines', 'seek', 'tell',
                           'truncate')
@proxy_property_directly('closed', 'encoding', 'mode', 'name', 'softspace')
class AsyncSpooledTemporaryFile(AsyncBase):
    """Async wrapper for SpooledTemporaryFile class"""

    async def _check(self):
        # Roll the spooled file over to disk once the in-memory buffer has
        # grown past its configured maximum size.
        if self._file._rolled: return
        max_size = self._file._max_size
        if max_size and self._file.tell() > max_size:
            await self.rollover()

    async def write(self, s):
        """Implementation to anticipate rollover"""
        # Once rolled to disk, writes are real file I/O and go through the
        # executor; before that, the in-memory write happens inline and is
        # followed by a rollover check.
        if self._file._rolled:
            cb = partial(self._file.write, s)
            return (await self._loop.run_in_executor(self._executor, cb))
        else:
            file = self._file._file  # reference underlying base IO object
            rv = file.write(s)
            await self._check()
            return rv

    async def writelines(self, iterable):
        """Implementation to anticipate rollover"""
        # Same strategy as write(): executor after rollover, inline before.
        if self._file._rolled:
            cb = partial(self._file.writelines, iterable)
            return (await self._loop.run_in_executor(self._executor, cb))
        else:
            file = self._file._file  # reference underlying base IO object
            rv = file.writelines(iterable)
            await self._check()
            return rv
@delegate_to_executor('cleanup')
@proxy_property_directly('name')
class AsyncTemporaryDirectory:
    """Async wrapper for TemporaryDirectory class"""

    def __init__(self, file, loop, executor):
        # *file* is the wrapped TemporaryDirectory instance; blocking calls
        # are dispatched to *executor* on *loop* by the class decorators.
        self._file = file
        self._loop = loop
        self._executor = executor

    async def close(self):
        # 'cleanup' is provided by the delegate_to_executor decorator.
        await self.cleanup()
| {
"repo_name": "Tinche/aiofiles",
"path": "src/aiofiles/tempfile/temptypes.py",
"copies": "1",
"size": "2172",
"license": "apache-2.0",
"hash": -9124166035143820000,
"line_mean": 33.606557377,
"line_max": 78,
"alpha_frac": 0.5934622468,
"autogenerated": false,
"ratio": 4.2671905697445975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5360652816544598,
"avg_score": null,
"num_lines": null
} |
"""Async xkcd library."""
from .json import loads as jloads
import random
import async_timeout
import aiohttp
XKCD_URL = 'http://www.xkcd.com/'
IMAGE_URL = 'http://imgs.xkcd.com/comics/'
EXPLAIN_URL = 'http://explainxkcd.com/'
LINK_OFFSET = len(IMAGE_URL)
class Comic(object):
    """xkcd comic."""

    def __init__(self, number):
        """Build the comic's URLs; network access is deferred to async_init."""
        self.number = number
        if number <= 0:
            raise InvalidComic('%s is not a valid comic' % str(number))
        self.link = XKCD_URL + str(number)
        self.jlink = self.link + '/info.0.json'
        self.explain_url = EXPLAIN_URL + str(number)

    async def async_init(self):
        """Fetch the comic metadata (title, alt text, image link).

        Must be awaited after construction; __init__ cannot perform the
        network request itself.
        """
        async with aiohttp.ClientSession() as session:
            # NOTE(review): synchronous `with` on async_timeout is the old
            # (pre-3.0) API — confirm the pinned async_timeout version.
            with async_timeout.timeout(6.5):
                async with session.get(self.jlink) as r:
                    xkcd = await r.text()
        data = jloads(xkcd)
        self.title = data['safe_title']
        self.alt_text = data['alt']
        self.image_link = data['img']
        # Strip the image host prefix to recover the bare file name.
        index = self.image_link.find(IMAGE_URL)
        self.image_name = self.image_link[index + LINK_OFFSET:]

    async def fetch(self):
        """Download and return the raw image bytes for the comic."""
        async with aiohttp.ClientSession() as session:
            with async_timeout.timeout(6.5):
                async with session.get(self.image_link) as r:
                    return await r.read()
class InvalidComic(Exception):
    """Raised when a requested comic number is out of range."""
    pass
async def latest_comic_num():
    """Return the number of the most recently published comic."""
    async with aiohttp.ClientSession() as session:
        with async_timeout.timeout(6.5):
            async with session.get('http://xkcd.com/info.0.json') as r:
                xkcd = await r.text()
    return jloads(xkcd)['num']
async def random_comic():
    """Fetch a uniformly random comic between 1 and the latest.

    The module-level random generator is used as-is. The previous version
    called random.seed() on every invocation, which is unnecessary (the
    generator is seeded at interpreter start) and can degrade randomness
    for calls made in quick succession.
    """
    num_comics = await latest_comic_num()
    comic = Comic(random.randint(1, num_comics))
    await comic.async_init()
    return comic
async def get_comic(number):
    """Fetch a specific comic by number.

    Raises:
        InvalidComic: If number is above the latest comic or not positive.
    """
    num_comics = await latest_comic_num()
    if number > num_comics or number <= 0:
        raise InvalidComic('%s is not a valid comic' % str(number))
    comic = Comic(number)
    await comic.async_init()
    return comic
async def latest_comic():
    """Fetch the most recently published comic."""
    comic = Comic(await latest_comic_num())
    await comic.async_init()
    return comic
| {
"repo_name": "Armored-Dragon/goldmine",
"path": "util/xkcd.py",
"copies": "1",
"size": "2200",
"license": "mit",
"hash": 6092186129407239000,
"line_mean": 30.4285714286,
"line_max": 71,
"alpha_frac": 0.6127272727,
"autogenerated": false,
"ratio": 3.3846153846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44973426573153846,
"avg_score": null,
"num_lines": null
} |
# asyn.py 'micro' synchronisation primitives for uasyncio
# Test/demo programs asyntest.py, barrier_test.py
# Provides Lock, Event, Barrier, Semaphore, BoundedSemaphore, Condition,
# NamedTask and Cancellable classes, also sleep coro.
# Updated 31 Dec 2017 for uasyncio.core V1.6 and to provide task cancellation.
# The MIT License (MIT)
#
# Copyright (c) 2017 Peter Hinch
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# CPython 3.5 compatibility
# (ignore RuntimeWarning: coroutine '_g' was never awaited)
# Prefer MicroPython's uasyncio; fall back to CPython's asyncio so this
# module can also be imported (and partially exercised) under CPython.
try:
    import uasyncio as asyncio
except ImportError:
    import asyncio


async def _g():
    pass

# Canonical coroutine type: used by launch() to distinguish a coroutine
# (returned by calling an async function) from a plain callback result.
type_coro = type(_g())
# If a callback is passed, run it and return.
# If a coro is passed initiate it and return.
# coros are passed by name i.e. not using function call syntax.
def launch(func, tup_args):
    """Invoke func(*tup_args); if the call yields a coroutine, schedule it.

    A plain callback simply runs to completion; an async function passed by
    name produces a coroutine which is handed to the event loop.
    """
    outcome = func(*tup_args)
    if isinstance(outcome, type_coro):
        asyncio.get_event_loop().create_task(outcome)
# To access a lockable resource a coro should issue
# async with lock_instance:
# access the locked resource
# Alternatively:
# await lock.acquire()
# try:
# do stuff with locked resource
# finally:
# lock.release
# Uses normal scheduling on assumption that locks are held briefly.
class Lock():
    """Async mutual-exclusion primitive.

    Use ``async with lock_instance:`` or explicit acquire()/release().
    Contending coros poll at ``delay_ms`` intervals; normal scheduling is
    assumed on the basis that locks are held briefly.
    """

    def __init__(self, delay_ms=0):
        self.delay_ms = delay_ms  # Polling interval for waiting coros
        self._locked = False

    def locked(self):
        """Return True if the lock is currently held."""
        return self._locked

    async def __aenter__(self):
        await self.acquire()
        return self

    async def __aexit__(self, *args):
        self.release()
        await asyncio.sleep(0)  # Yield so a waiting coro can take the lock

    async def acquire(self):
        # Poll politely until the lock is free, then claim it.
        while self._locked:
            await asyncio.sleep_ms(self.delay_ms)
        self._locked = True

    def release(self):
        if not self._locked:
            raise RuntimeError('Attempt to release a lock which has not been set')
        self._locked = False
# A coro waiting on an event issues await event
# A coro raising the event issues event.set()
# When all waiting coros have run
# event.clear() should be issued
class Event():
    """One-shot flag for synchronising coros, optionally carrying data.

    A waiting coro issues ``await event``; the raising coro issues
    ``event.set()`` (optionally passing a data value retrievable via
    ``value()``).  When all waiters have run, ``event.clear()`` must be
    issued before reuse.  Waiters poll at ``delay_ms`` intervals.
    """

    def __init__(self, delay_ms=0):
        self.delay_ms = delay_ms  # Polling interval for waiting coros
        self.clear()

    def clear(self):
        # Reset the flag and discard any data passed to set()
        self._flag = False
        self._data = None

    async def wait(self):  # CPython compatibility
        while not self._flag:
            await asyncio.sleep_ms(self.delay_ms)

    def __await__(self):
        # NOTE(review): 'await' inside a plain def relies on the MicroPython
        # compiler (which treats it like 'yield from' in a generator); under
        # CPython this line is a SyntaxError — confirm the deployment target.
        while not self._flag:
            await asyncio.sleep_ms(self.delay_ms)

    __iter__ = __await__  # uasyncio awaits objects via iteration

    def is_set(self):
        return self._flag

    def set(self, data=None):
        # Release all waiters; the optional payload is kept for value()
        self._flag = True
        self._data = data

    def value(self):
        return self._data
# A Barrier synchronises N coros. Each issues await barrier.
# Execution pauses until all other participant coros are waiting on it.
# At that point the callback is executed. Then the barrier is 'opened' and
# execution of all participants resumes.
# The nowait arg is to support task cancellation. It enables usage where one or
# more coros can register that they have reached the barrier without waiting
# for it. Any coros waiting normally on the barrier will pause until all
# non-waiting coros have passed the barrier and all waiting ones have reached
# it. The use of nowait promotes efficiency by enabling tasks which have been
# cancelled to leave the task queue as soon as possible.
class Barrier():
    """Synchronise a fixed number of participant coros.

    The internal count alternately runs from ``participants`` down to 0 and
    back up; ``_down`` records the current direction.  The direction flips
    each time the barrier opens, which is what releases the waiters.
    """

    def __init__(self, participants, func=None, args=()):
        self._participants = participants
        self._func = func  # Optional callback/coro launched when the barrier opens
        self._args = args
        self._reset(True)

    def __await__(self):
        # NOTE(review): 'await' in a plain-def __await__ relies on the
        # MicroPython compiler; under CPython this is a SyntaxError.
        self._update()
        if self._at_limit():  # All other threads are also at limit
            if self._func is not None:
                launch(self._func, self._args)
            self._reset(not self._down)  # Toggle direction to release others
            return
        direction = self._down
        while True:  # Wait until last waiting thread changes the direction
            if direction != self._down:
                return
            await asyncio.sleep_ms(0)

    __iter__ = __await__

    def trigger(self):
        # Register arrival without waiting (used by cancelled tasks).
        self._update()
        if self._at_limit():  # All other threads are also at limit
            if self._func is not None:
                launch(self._func, self._args)
            self._reset(not self._down)  # Toggle direction to release others

    def _reset(self, down):
        self._down = down
        self._count = self._participants if down else 0

    def busy(self):
        # True while any participant has arrived but the barrier has not opened.
        if self._down:
            done = self._count == self._participants
        else:
            done = self._count == 0
        return not done

    def _at_limit(self):  # Has count reached up or down limit?
        limit = 0 if self._down else self._participants
        return self._count == limit

    def _update(self):
        self._count += -1 if self._down else 1
        if self._count < 0 or self._count > self._participants:
            raise ValueError('Too many tasks accessing Barrier')
# A Semaphore limits the number of coros concurrently executing a piece of
# code. The permitted count is set in the constructor.
class Semaphore():
    """Counting semaphore: at most ``value`` coros hold it at once."""

    def __init__(self, value=1):
        self._count = value  # Remaining number of permits

    async def __aenter__(self):
        await self.acquire()
        return self

    async def __aexit__(self, *args):
        self.release()
        await asyncio.sleep(0)

    async def acquire(self):
        # Poll until a permit is available, then take one.
        while not self._count:
            await asyncio.sleep_ms(0)
        self._count -= 1

    def release(self):
        self._count += 1
class BoundedSemaphore(Semaphore):
    """Semaphore which forbids more releases than prior acquisitions."""

    def __init__(self, value=1):
        super().__init__(value)
        self._initial_value = value  # Ceiling for the permit count

    def release(self):
        # Guard clause: refuse to exceed the initial permit count.
        if self._count >= self._initial_value:
            raise ValueError('Semaphore released more than acquired')
        self._count += 1
# Task Cancellation
try:
    StopTask = asyncio.CancelledError  # More descriptive name
except AttributeError:
    raise OSError('asyn.py requires uasyncio V1.7.1 or above.')


class TaskId():
    """Callable wrapper holding the integer ID of a cancellable task."""

    def __init__(self, taskid):
        self.taskid = taskid

    def __call__(self):
        # Calling the instance yields the wrapped ID.
        return self.taskid
# Sleep coro breaks up a sleep into shorter intervals to ensure a rapid
# response to StopTask exceptions. Only relevant to official uasyncio V2.0.
async def sleep(t, granularity=100):  # 100ms default
    """Sleep for t seconds, waking every ``granularity`` ms so that a
    pending cancellation is noticed promptly."""
    if granularity <= 0:
        raise ValueError('sleep granularity must be > 0')
    ms = int(t * 1000)  # Total delay in ms
    if ms <= granularity:
        await asyncio.sleep_ms(ms)
        return
    full, residue = divmod(ms, granularity)
    for _ in range(full):
        await asyncio.sleep_ms(granularity)
    await asyncio.sleep_ms(residue)
# Anonymous cancellable tasks. These are members of a group which is identified
# by a user supplied name/number (default 0). Class method cancel_all() cancels
# all tasks in a group and awaits confirmation. Confirmation of ending (whether
# normally or by cancellation) is signalled by a task calling the _stopped()
# class method. Handled by the @cancellable decorator.
class Cancellable():
    """Wrap a @cancellable coro so it can be cancelled as part of a group."""

    task_no = 0  # Generated task ID, index of tasks dict
    tasks = {}  # Value is [coro, group, barrier] indexed by integer task_no

    @classmethod
    def _cancel(cls, task_no):
        task = cls.tasks[task_no][0]
        asyncio.cancel(task)  # uasyncio extension; not in CPython asyncio

    @classmethod
    async def cancel_all(cls, group=0, nowait=False):
        """Cancel every task in ``group``; await confirmation unless nowait."""
        tokill = cls._get_task_nos(group)
        barrier = Barrier(len(tokill) + 1)  # Include this task
        for task_no in tokill:
            cls.tasks[task_no][2] = barrier  # Each task triggers the barrier on exit
            cls._cancel(task_no)
        if nowait:
            barrier.trigger()
        else:
            await barrier

    @classmethod
    def _is_running(cls, group=0):
        # True if any task in the group has not yet confirmed termination.
        tasks = cls._get_task_nos(group)
        if tasks == []:
            return False
        for task_no in tasks:
            barrier = cls.tasks[task_no][2]
            if barrier is None:  # Running, not yet cancelled
                return True
            if barrier.busy():
                return True
        return False

    @classmethod
    def _get_task_nos(cls, group):  # Return task nos in a group
        return [task_no for task_no in cls.tasks if cls.tasks[task_no][1] == group]

    @classmethod
    def _get_group(cls, task_no):  # Return group given a task_no
        return cls.tasks[task_no][1]

    @classmethod
    def _stopped(cls, task_no):
        # Invoked (via the @cancellable decorator) when a task terminates.
        if task_no in cls.tasks:
            barrier = cls.tasks[task_no][2]
            if barrier is not None:  # Cancellation in progress
                barrier.trigger()
            del cls.tasks[task_no]

    def __init__(self, gf, *args, group=0, **kwargs):
        # gf is a @cancellable coro function; its TaskId is prepended to args.
        task = gf(TaskId(Cancellable.task_no), *args, **kwargs)
        # NOTE(review): tasks is keyed by integer task_no, so this membership
        # test on the coro object can never be True — confirm intent.
        if task in self.tasks:
            raise ValueError('Task already exists.')
        self.tasks[Cancellable.task_no] = [task, group, None]
        self.task_no = Cancellable.task_no  # For subclass
        Cancellable.task_no += 1
        self.task = task

    def __call__(self):
        return self.task

    def __await__(self):  # Return any value returned by task.
        return (yield from self.task)

    __iter__ = __await__
# @cancellable decorator
def cancellable(f):
    """Decorator enabling coro ``f`` to be tracked by Cancellable/NamedTask.

    The wrapper strips the TaskId argument (args[0], or args[1] when ``f``
    is a bound method) before delegating, and signals termination — normal
    or by cancellation — by calling NamedTask._stopped() on exit.
    """
    def new_gen(*args, **kwargs):
        # NOTE(review): the 'await' below inside a plain def relies on the
        # MicroPython compiler; CPython would require 'async def' here —
        # confirm the deployment target.
        if isinstance(args[0], TaskId):  # Not a bound method
            task_id = args[0]
            g = f(*args[1:], **kwargs)
        else:  # Task ID is args[1] if a bound method
            task_id = args[1]
            args = (args[0],) + args[2:]
            g = f(*args, **kwargs)
        try:
            res = await g
            return res
        finally:
            # Always confirm termination so cancel_all() can complete.
            NamedTask._stopped(task_id)
    return new_gen
# The NamedTask class enables a coro to be identified by a user defined name.
# It constrains Cancellable to allow groups of one coro only.
# It maintains a dict of barriers indexed by name.
class NamedTask(Cancellable):
    """Cancellable task identified by a unique user-supplied name.

    The name doubles as the (single-member) Cancellable group.  An optional
    barrier, if supplied, is triggered when the task ends.
    """

    instances = {}  # Maps name -> NamedTask instance

    @classmethod
    async def cancel(cls, name, nowait=True):
        """Cancel the named task; return True if it existed."""
        if name in cls.instances:
            await cls.cancel_all(group=name, nowait=nowait)
            return True
        return False

    @classmethod
    def is_running(cls, name):
        return cls._is_running(group=name)

    @classmethod
    def _stopped(cls, task_id):  # On completion remove it
        name = cls._get_group(task_id())  # Convert task_id to task_no
        if name in cls.instances:
            instance = cls.instances[name]
            barrier = instance.barrier
            if barrier is not None:
                barrier.trigger()
            del cls.instances[name]
        Cancellable._stopped(task_id())

    def __init__(self, name, gf, *args, barrier=None, **kwargs):
        if name in self.instances:
            raise ValueError('Task name "{}" already exists.'.format(name))
        super().__init__(gf, *args, group=name, **kwargs)
        self.barrier = barrier
        self.instances[name] = self


# @namedtask
namedtask = cancellable  # compatibility with old code
# Condition class
class Condition():
    """Condition variable: coros wait() for a notification, lock held."""

    def __init__(self, lock=None):
        self.lock = Lock() if lock is None else lock
        self.events = []  # One Event per currently waiting coro

    async def acquire(self):
        await self.lock.acquire()

    # enable this syntax:
    # with await condition [as cond]:
    def __await__(self):
        yield from self.lock.acquire()
        return self

    __iter__ = __await__

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.lock.release()

    def locked(self):
        return self.lock.locked()

    def release(self):
        self.lock.release()  # Will raise RuntimeError if not locked

    def notify(self, n=1):  # Caller controls lock
        if not self.lock.locked():
            raise RuntimeError('Condition notify with lock not acquired.')
        # Events are popped from the end: most recent waiters wake first.
        for _ in range(min(n, len(self.events))):
            ev = self.events.pop()
            ev.set()

    def notify_all(self):
        self.notify(len(self.events))

    async def wait(self):
        """Release the lock, wait for a notification, then re-acquire it."""
        if not self.lock.locked():
            raise RuntimeError('Condition wait with lock not acquired.')
        ev = Event()
        self.events.append(ev)
        self.lock.release()
        await ev
        await self.lock.acquire()
        # notify() must have removed our event before setting it.
        assert ev not in self.events, 'condition wait assertion fail'
        return True  # CPython compatibility

    async def wait_for(self, predicate):
        # Loop until predicate() holds, waiting for a notification each pass.
        result = predicate()
        while not result:
            await self.wait()
            result = predicate()
        return result
# Provide functionality similar to asyncio.gather()
class Gather():
    """Await a collection of Gatherables; results keep construction order."""

    def __init__(self, gatherables):
        ncoros = len(gatherables)
        self.barrier = Barrier(ncoros + 1)  # +1 for the coro awaiting the Gather
        self.results = [None] * ncoros
        loop = asyncio.get_event_loop()
        for n, gatherable in enumerate(gatherables):
            loop.create_task(self.wrap(gatherable, n)())

    def __iter__(self):
        # Await completion of every wrapped task, then return the results.
        yield from self.barrier.__await__()
        return self.results

    def wrap(self, gatherable, idx):
        # Build a coro function which runs the gatherable, stores its result
        # at results[idx] and registers completion on the barrier.
        async def wrapped():
            coro, args, kwargs = gatherable()
            try:
                tim = kwargs.pop('timeout')  # 'timeout' kwarg is consumed here
            except KeyError:
                self.results[idx] = await coro(*args, **kwargs)
            else:
                self.results[idx] = await asyncio.wait_for(coro(*args, **kwargs), tim)
            self.barrier.trigger()
        return wrapped
class Gatherable():
    """Bundle a coro with its call arguments for use with Gather.

    Calling the instance returns the (coro, args, kwargs) triple.
    """

    def __init__(self, coro, *args, **kwargs):
        self.arguments = (coro, args, kwargs)

    def __call__(self):
        return self.arguments
| {
"repo_name": "peterhinch/micropython-async",
"path": "v2/asyn.py",
"copies": "3",
"size": "14778",
"license": "mit",
"hash": -4981122547430343000,
"line_mean": 30.4425531915,
"line_max": 86,
"alpha_frac": 0.6165922317,
"autogenerated": false,
"ratio": 3.9629927594529364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6079584991152935,
"avg_score": null,
"num_lines": null
} |
# asyn.py 'micro' synchronisation primitives for uasyncio
# Test/demo programs asyntest.py, barrier_test.py
# Provides Lock, Event, Barrier, Semaphore, BoundedSemaphore, NamedTask
# and Cancellable classes, also sleep coro.
# Uses low_priority where available and appropriate.
# Updated 31 Dec 2017 for uasyncio.core V1.6 and to provide task cancellation.
# The MIT License (MIT)
#
# Copyright (c) 2017 Peter Hinch
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# CPython 3.5 compatibility
# (ignore RuntimeWarning: coroutine '_g' was never awaited)
# Check availability of 'priority' version
try:
    import asyncio_priority as asyncio
    p_version = True
except ImportError:
    p_version = False
    try:
        import uasyncio as asyncio
    except ImportError:
        import asyncio

# after(t) pauses at low priority where the priority scheduler is available.
after = asyncio.after if p_version else asyncio.sleep


async def _g():
    pass

# Canonical coroutine type, used by launch() to detect coros.
type_coro = type(_g())
# If a callback is passed, run it and return.
# If a coro is passed initiate it and return.
# coros are passed by name i.e. not using function call syntax.
def launch(func, tup_args):
    """Invoke func(*tup_args); schedule the result if it is a coroutine."""
    outcome = func(*tup_args)
    if isinstance(outcome, type_coro):
        asyncio.get_event_loop().create_task(outcome)
# To access a lockable resource a coro should issue
# async with lock_instance:
# access the locked resource
# Alternatively:
# await lock.acquire()
# try:
# do stuff with locked resource
# finally:
# lock.release
# Uses normal scheduling on assumption that locks are held briefly.
class Lock():
    """Async mutual-exclusion lock.

    Use ``async with lock_instance:`` or explicit acquire()/release().
    Contending coros poll at ``delay_ms`` intervals; normal scheduling is
    used on the assumption that locks are held briefly.
    """

    def __init__(self, delay_ms=0):
        self._locked = False
        self.delay_ms = delay_ms  # Polling interval for contending coros

    def locked(self):
        """Return True if the lock is currently held."""
        return self._locked

    async def __aenter__(self):
        await self.acquire()
        # Return self so 'async with lock as l:' binds the lock — consistent
        # with the other Lock implementation in this file, which returns self.
        return self

    async def __aexit__(self, *args):
        self.release()
        await asyncio.sleep(0)  # Yield so a waiting coro can acquire

    async def acquire(self):
        # Poll until the lock is free, then claim it.
        while True:
            if self._locked:
                await asyncio.sleep_ms(self.delay_ms)
            else:
                self._locked = True
                break

    def release(self):
        if not self._locked:
            raise RuntimeError('Attempt to release a lock which has not been set')
        self._locked = False
# A coro waiting on an event issues await event
# A coro raising the event issues event.set()
# When all waiting coros have run
# event.clear() should be issued
# Use of low_priority may be specified in the constructor
# when it will be used if available.
class Event():
    """Flag for synchronising coros, optionally carrying data.

    If ``lp`` is True and the priority scheduler is loaded, waiters poll
    at low priority via after(); otherwise via asyncio.sleep.
    """

    def __init__(self, lp=False):
        # Pick the pause function once, at construction time.
        self.after = after if (p_version and lp) else asyncio.sleep
        self.clear()

    def clear(self):
        # Reset the flag and discard any data passed to set()
        self._flag = False
        self._data = None

    def __await__(self):
        while not self._flag:
            yield from self.after(0)

    __iter__ = __await__  # uasyncio awaits objects via iteration

    def is_set(self):
        return self._flag

    def set(self, data=None):
        # Release all waiters; the optional payload is kept for value()
        self._flag = True
        self._data = data

    def value(self):
        return self._data
# A Barrier synchronises N coros. Each issues await barrier.
# Execution pauses until all other participant coros are waiting on it.
# At that point the callback is executed. Then the barrier is 'opened' and
# execution of all participants resumes.
# The nowait arg is to support task cancellation. It enables usage where one or
# more coros can register that they have reached the barrier without waiting
# for it. Any coros waiting normally on the barrier will pause until all
# non-waiting coros have passed the barrier and all waiting ones have reached
# it. The use of nowait promotes efficiency by enabling tasks which have been
# cancelled to leave the task queue as soon as possible.
# Uses low_priority if available
class Barrier():
    """Synchronise a fixed number of participant coros.

    The internal count alternately runs from ``participants`` down to 0 and
    back up; ``_down`` records the current direction.  The direction flips
    each time the barrier opens, which is what releases the waiters.
    """

    def __init__(self, participants, func=None, args=()):
        self._participants = participants
        self._func = func  # Optional callback/coro launched when the barrier opens
        self._args = args
        self._reset(True)

    def __await__(self):
        self._update()
        if self._at_limit():  # All other threads are also at limit
            if self._func is not None:
                launch(self._func, self._args)
            self._reset(not self._down)  # Toggle direction to release others
            return
        direction = self._down
        while True:  # Wait until last waiting thread changes the direction
            if direction != self._down:
                return
            yield from after(0)  # Low priority pause if available

    __iter__ = __await__

    def trigger(self):
        # Register arrival without waiting (used by cancelled tasks).
        self._update()
        if self._at_limit():  # All other threads are also at limit
            if self._func is not None:
                launch(self._func, self._args)
            self._reset(not self._down)  # Toggle direction to release others

    def _reset(self, down):
        self._down = down
        self._count = self._participants if down else 0

    def busy(self):
        # True while any participant has arrived but the barrier has not opened.
        if self._down:
            done = self._count == self._participants
        else:
            done = self._count == 0
        return not done

    def _at_limit(self):  # Has count reached up or down limit?
        limit = 0 if self._down else self._participants
        return self._count == limit

    def _update(self):
        self._count += -1 if self._down else 1
        if self._count < 0 or self._count > self._participants:
            raise ValueError('Too many tasks accessing Barrier')
# A Semaphore is typically used to limit the number of coros running a
# particular piece of code at once. The number is defined in the constructor.
class Semaphore():
    """Counting semaphore: at most ``value`` coros hold it at once."""

    def __init__(self, value=1):
        self._count = value  # Remaining number of permits

    async def __aenter__(self):
        await self.acquire()
        # Return self for 'async with sema as s:' — consistent with the
        # other Semaphore implementation in this file, which returns self.
        return self

    async def __aexit__(self, *args):
        self.release()
        await asyncio.sleep(0)

    async def acquire(self):
        # Poll (at low priority if available) until a permit is free.
        while self._count == 0:
            await after(0)
        self._count -= 1

    def release(self):
        self._count += 1
class BoundedSemaphore(Semaphore):
    """Semaphore which forbids more releases than prior acquisitions."""

    def __init__(self, value=1):
        super().__init__(value)
        self._initial_value = value  # Ceiling for the permit count

    def release(self):
        # Guard clause: refuse to exceed the initial permit count.
        if self._count >= self._initial_value:
            raise ValueError('Semaphore released more than acquired')
        self._count += 1
# Task Cancellation
StopTask = asyncio.CancelledError  # More descriptive name


class TaskId():
    """Callable wrapper holding the integer ID of a cancellable task."""

    def __init__(self, taskid):
        self.taskid = taskid

    def __call__(self):
        # Calling the instance yields the wrapped ID.
        return self.taskid
# Sleep coro breaks up a sleep into shorter intervals to ensure a rapid
# response to StopTask exceptions
async def sleep(t, granularity=100):  # 100ms default
    """Sleep for t seconds, waking every ``granularity`` ms so that a
    pending cancellation is noticed promptly."""
    if granularity <= 0:
        raise ValueError('sleep granularity must be > 0')
    ms = int(t * 1000)  # Total delay in ms
    if ms <= granularity:
        await asyncio.sleep_ms(ms)
        return
    full, residue = divmod(ms, granularity)
    for _ in range(full):
        await asyncio.sleep_ms(granularity)
    await asyncio.sleep_ms(residue)
# Anonymous cancellable tasks. These are members of a group which is identified
# by a user supplied name/number (default 0). Class method cancel_all() cancels
# all tasks in a group and awaits confirmation. Confirmation of ending (whether
# normally or by cancellation) is signalled by a task calling the _stopped()
# class method. Handled by the @cancellable decorator.
class Cancellable():
    """Wrap a @cancellable coro so it can be cancelled as part of a group."""

    task_no = 0  # Generated task ID, index of tasks dict
    tasks = {}  # Value is [coro, group, barrier] indexed by integer task_no

    @classmethod
    def _cancel(cls, task_no):
        task = cls.tasks[task_no][0]
        asyncio.cancel(task)  # uasyncio extension; not in CPython asyncio

    @classmethod
    async def cancel_all(cls, group=0, nowait=False):
        """Cancel every task in ``group``; await confirmation unless nowait."""
        tokill = cls._get_task_nos(group)
        barrier = Barrier(len(tokill) + 1)  # Include this task
        for task_no in tokill:
            cls.tasks[task_no][2] = barrier  # Each task triggers the barrier on exit
            cls._cancel(task_no)
        if nowait:
            barrier.trigger()
        else:
            await barrier

    @classmethod
    def _is_running(cls, group=0):
        # True if any task in the group has not yet confirmed termination.
        tasks = cls._get_task_nos(group)
        if tasks == []:
            return False
        for task_no in tasks:
            barrier = cls.tasks[task_no][2]
            if barrier is None:  # Running, not yet cancelled
                return True
            if barrier.busy():
                return True
        return False

    @classmethod
    def _get_task_nos(cls, group):  # Return task nos in a group
        return [task_no for task_no in cls.tasks if cls.tasks[task_no][1] == group]

    @classmethod
    def _get_group(cls, task_no):  # Return group given a task_no
        return cls.tasks[task_no][1]

    @classmethod
    def _stopped(cls, task_no):
        # Invoked (via the @cancellable decorator) when a task terminates.
        if task_no in cls.tasks:
            barrier = cls.tasks[task_no][2]
            if barrier is not None:  # Cancellation in progress
                barrier.trigger()
            del cls.tasks[task_no]

    def __init__(self, gf, *args, group=0, **kwargs):
        # gf is a @cancellable coro function; its TaskId is prepended to args.
        task = gf(TaskId(Cancellable.task_no), *args, **kwargs)
        # NOTE(review): tasks is keyed by integer task_no, so this membership
        # test on the coro object can never be True — confirm intent.
        if task in self.tasks:
            raise ValueError('Task already exists.')
        self.tasks[Cancellable.task_no] = [task, group, None]
        self.task_no = Cancellable.task_no  # For subclass
        Cancellable.task_no += 1
        self.task = task

    def __call__(self):
        return self.task

    def __await__(self):
        # Return any value returned by the wrapped task.
        return (yield from self.task)

    __iter__ = __await__
# @cancellable decorator
def cancellable(f):
    """Decorator enabling coro ``f`` to be tracked by Cancellable/NamedTask.

    The wrapper strips the TaskId argument (args[0], or args[1] when ``f``
    is a bound method) before delegating, and signals termination — normal
    or by cancellation — by calling NamedTask._stopped() on exit.
    """
    def new_gen(*args, **kwargs):
        # NOTE(review): the 'await' below inside a plain def relies on the
        # MicroPython compiler; CPython would require 'async def' here —
        # confirm the deployment target.
        if isinstance(args[0], TaskId):  # Not a bound method
            task_id = args[0]
            g = f(*args[1:], **kwargs)
        else:  # Task ID is args[1] if a bound method
            task_id = args[1]
            args = (args[0],) + args[2:]
            g = f(*args, **kwargs)
        try:
            res = await g
            return res
        finally:
            # Always confirm termination so cancel_all() can complete.
            NamedTask._stopped(task_id)
    return new_gen
# The NamedTask class enables a coro to be identified by a user defined name.
# It constrains Cancellable to allow groups of one coro only.
# It maintains a dict of barriers indexed by name.
class NamedTask(Cancellable):
    """Cancellable task identified by a unique user-supplied name.

    The name doubles as the (single-member) Cancellable group.  An optional
    barrier, if supplied, is triggered when the task ends.
    """

    instances = {}  # Maps name -> NamedTask instance

    @classmethod
    async def cancel(cls, name, nowait=True):
        """Cancel the named task; return True if it existed."""
        if name in cls.instances:
            await cls.cancel_all(group=name, nowait=nowait)
            return True
        return False

    @classmethod
    def is_running(cls, name):
        return cls._is_running(group=name)

    @classmethod
    def _stopped(cls, task_id):  # On completion remove it
        name = cls._get_group(task_id())  # Convert task_id to task_no
        if name in cls.instances:
            instance = cls.instances[name]
            barrier = instance.barrier
            if barrier is not None:
                barrier.trigger()
            del cls.instances[name]
        Cancellable._stopped(task_id())

    def __init__(self, name, gf, *args, barrier=None, **kwargs):
        if name in self.instances:
            raise ValueError('Task name "{}" already exists.'.format(name))
        super().__init__(gf, *args, group=name, **kwargs)
        self.barrier = barrier
        self.instances[name] = self


# @namedtask
namedtask = cancellable  # compatibility with old code
| {
"repo_name": "peterhinch/micropython-mqtt",
"path": "pb_link/asyn.py",
"copies": "1",
"size": "12226",
"license": "mit",
"hash": -4055824793102636000,
"line_mean": 31.0052356021,
"line_max": 83,
"alpha_frac": 0.6286602323,
"autogenerated": false,
"ratio": 3.948966408268734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5077626640568733,
"avg_score": null,
"num_lines": null
} |
# The MIT License (MIT)
#
# Copyright (c) 2017-2018 Peter Hinch
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# CPython 3.5 compatibility
# (ignore RuntimeWarning: coroutine '_g' was never awaited)
try:
import uasyncio as asyncio
except ImportError:
import asyncio
import asyn
def print_tests():
    """Print (in green) the menu of test functions offered by this module."""
    st = '''Available functions:
print_tests() Print this list.
ack_test() Test event acknowledge.
event_test() Test Event and Lock objects.
barrier_test() Test the Barrier class.
semaphore_test(bounded=False) Test Semaphore or BoundedSemaphore.
condition_test(new=False) Test the Condition class. Set arg True for new uasyncio.
gather_test() Test the Gather class
Recommended to issue ctrl-D after running each test.
'''
    print('\x1b[32m')  # Green
    print(st)
    print('\x1b[39m')  # Default colour

print_tests()  # Display the menu at import time
def printexp(exp, runtime=0):
    """Display the expected output (in green) followed by a 'Running' banner.

    ``runtime`` of 0 means the test completes in under a second.
    """
    green, default = '\x1b[32m', '\x1b[39m'
    for line in ('Expected output:', green, exp, default):
        print(line)
    banner = 'Running (runtime = {}s):'.format(runtime) if runtime else 'Running (runtime < 1s):'
    print(banner)
# ************ Test Event class ************
# Demo use of acknowledge event

async def event_wait(event, ack_event, n):
    # Wait on the event, report its payload, then acknowledge.
    await event
    print('Eventwait {} got event with value {}'.format(n, event.value()))
    ack_event.set()


async def run_ack():
    # Repeatedly set an event and wait for both waiters to acknowledge it.
    loop = asyncio.get_event_loop()
    event = asyn.Event()
    ack1 = asyn.Event()
    ack2 = asyn.Event()
    count = 0
    while True:
        loop.create_task(event_wait(event, ack1, 1))
        loop.create_task(event_wait(event, ack2, 2))
        event.set(count)  # Payload is the iteration count
        count += 1
        print('event was set')
        await ack1
        ack1.clear()
        print('Cleared ack1')
        await ack2
        ack2.clear()
        print('Cleared ack2')
        event.clear()
        print('Cleared event')
        await asyncio.sleep(1)


async def ack_coro(delay):
    # Terminates the demo after `delay` seconds.
    await asyncio.sleep(delay)
    print("I've seen attack ships burn on the shoulder of Orion...")
    print("Time to die...")


def ack_test():
    """Run the acknowledge-event demo for 10 seconds."""
    printexp('''event was set
Eventwait 1 got event with value 0
Eventwait 2 got event with value 0
Cleared ack1
Cleared ack2
Cleared event
event was set
Eventwait 1 got event with value 1
Eventwait 2 got event with value 1
... text omitted ...
Eventwait 1 got event with value 9
Eventwait 2 got event with value 9
Cleared ack1
Cleared ack2
Cleared event
I've seen attack ships burn on the shoulder of Orion...
Time to die...
''', 10)
    loop = asyncio.get_event_loop()
    loop.create_task(run_ack())
    loop.run_until_complete(ack_coro(10))
# ************ Test Lock and Event classes ************

async def run_lock(n, lock):
    # Acquire the shared lock, hold it for 1s, then release.
    print('run_lock {} waiting for lock'.format(n))
    await lock.acquire()
    print('run_lock {} acquired lock'.format(n))
    await asyncio.sleep(1)  # Delay to demo other coros waiting for lock
    lock.release()
    print('run_lock {} released lock'.format(n))


async def eventset(event):
    # Set the event after a 5 second delay.
    print('Waiting 5 secs before setting event')
    await asyncio.sleep(5)
    event.set()
    print('event was set')


async def eventwait(event):
    # Pause until the event is set, then clear it for reuse.
    print('waiting for event')
    await event
    print('got event')
    event.clear()


async def run_event_test():
    """Run three coros contending for a Lock, then an Event set/wait cycle."""
    print('Test Lock class')
    loop = asyncio.get_event_loop()
    lock = asyn.Lock()
    loop.create_task(run_lock(1, lock))
    loop.create_task(run_lock(2, lock))
    loop.create_task(run_lock(3, lock))
    print('Test Event class')
    event = asyn.Event()
    loop.create_task(eventset(event))
    await eventwait(event)  # run_event_test runs fast until this point
    print('Event status {}'.format('Incorrect' if event.is_set() else 'OK'))
    print('Tasks complete')


def event_test():
    """Run the Lock/Event demo (runtime about 5s)."""
    printexp('''Test Lock class
Test Event class
waiting for event
run_lock 1 waiting for lock
run_lock 1 acquired lock
run_lock 2 waiting for lock
run_lock 3 waiting for lock
Waiting 5 secs before setting event
run_lock 1 released lock
run_lock 2 acquired lock
run_lock 2 released lock
run_lock 3 acquired lock
run_lock 3 released lock
event was set
got event
Event status OK
Tasks complete
''', 5)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(run_event_test())
# ************ Barrier test ************

async def killer(duration):
    # Keeps the scheduler alive for the duration of the test.
    await asyncio.sleep(duration)


def callback(text):
    # Runs each time all three report() coros reach the barrier.
    print(text)


async def report(barrier):
    # Print a count then synchronise with the other participants.
    for i in range(5):
        print('{} '.format(i), end='')
        await barrier


def barrier_test():
    """Three coros print in lockstep, synchronised by a Barrier."""
    printexp('''0 0 0 Synch
1 1 1 Synch
2 2 2 Synch
3 3 3 Synch
4 4 4 Synch
''')
    barrier = asyn.Barrier(3, callback, ('Synch',))
    loop = asyncio.get_event_loop()
    for _ in range(3):
        loop.create_task(report(barrier))
    loop.run_until_complete(killer(2))
    loop.close()
# ************ Semaphore test ************

async def run_sema(n, sema, barrier):
    # Acquire the semaphore, hold it briefly, then register on the barrier.
    print('run_sema {} trying to access semaphore'.format(n))
    async with sema:
        print('run_sema {} acquired semaphore'.format(n))
        # Delay demonstrates other coros waiting for semaphore
        await asyncio.sleep(1 + n/10)  # n/10 ensures deterministic printout
    print('run_sema {} has released semaphore'.format(n))
    barrier.trigger()


async def run_sema_test(bounded):
    """Launch 5 coros contending for a 3-permit semaphore; await completion."""
    num_coros = 5
    loop = asyncio.get_event_loop()
    barrier = asyn.Barrier(num_coros + 1)
    if bounded:
        semaphore = asyn.BoundedSemaphore(3)
    else:
        semaphore = asyn.Semaphore(3)
    for n in range(num_coros):
        loop.create_task(run_sema(n, semaphore, barrier))
    await barrier  # Quit when all coros complete
    try:
        # Extra release: only BoundedSemaphore raises ValueError here.
        semaphore.release()
    except ValueError:
        print('Bounded semaphore exception test OK')


def semaphore_test(bounded=False):
    """Run the semaphore demo; bounded=True exercises BoundedSemaphore."""
    if bounded:
        exp = '''run_sema 0 trying to access semaphore
run_sema 0 acquired semaphore
run_sema 1 trying to access semaphore
run_sema 1 acquired semaphore
run_sema 2 trying to access semaphore
run_sema 2 acquired semaphore
run_sema 3 trying to access semaphore
run_sema 4 trying to access semaphore
run_sema 0 has released semaphore
run_sema 4 acquired semaphore
run_sema 1 has released semaphore
run_sema 3 acquired semaphore
run_sema 2 has released semaphore
run_sema 4 has released semaphore
run_sema 3 has released semaphore
Bounded semaphore exception test OK
Exact sequence of acquisition may vary when 3 and 4 compete for semaphore.'''
    else:
        exp = '''run_sema 0 trying to access semaphore
run_sema 0 acquired semaphore
run_sema 1 trying to access semaphore
run_sema 1 acquired semaphore
run_sema 2 trying to access semaphore
run_sema 2 acquired semaphore
run_sema 3 trying to access semaphore
run_sema 4 trying to access semaphore
run_sema 0 has released semaphore
run_sema 3 acquired semaphore
run_sema 1 has released semaphore
run_sema 4 acquired semaphore
run_sema 2 has released semaphore
run_sema 3 has released semaphore
run_sema 4 has released semaphore
Exact sequence of acquisition may vary when 3 and 4 compete for semaphore.'''
    printexp(exp, 3)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(run_sema_test(bounded))
# ************ Condition test ************

cond = asyn.Condition()  # Shared condition variable
tim = 0  # Global seconds counter maintained by cond03/cond03_new


@asyn.cancellable
async def cond01():
    # Notify two waiting tasks every 2 seconds.
    while True:
        await asyncio.sleep(2)
        with await cond:
            cond.notify(2)  # Notify 2 tasks


@asyn.cancellable
async def cond03():  # Maintain a count of seconds
    global tim
    await asyncio.sleep(0.5)
    while True:
        await asyncio.sleep(1)
        tim += 1


async def cond01_new():
    # As cond01, but a plain coro for new-style uasyncio task cancellation.
    while True:
        await asyncio.sleep(2)
        with await cond:
            cond.notify(2)  # Notify 2 tasks


async def cond03_new():  # Maintain a count of seconds
    global tim
    await asyncio.sleep(0.5)
    while True:
        await asyncio.sleep(1)
        tim += 1


async def cond02(n, barrier):
    # Wait for a notification, then register completion on the barrier.
    with await cond:
        print('cond02', n, 'Awaiting notification.')
        await cond.wait()
        print('cond02', n, 'triggered. tim =', tim)
        barrier.trigger()


def predicate():
    # Success condition for the wait_for test.
    return tim >= 8  # 12


async def cond04(n, barrier):
    # As cond02, but additionally requires predicate() to hold.
    with await cond:
        print('cond04', n, 'Awaiting notification and predicate.')
        await cond.wait_for(predicate)
        print('cond04', n, 'triggered. tim =', tim)
        barrier.trigger()


async def cond_go(loop, new):
    """Orchestrate the Condition test; ``new`` selects new-style uasyncio."""
    ntasks = 7
    barrier = asyn.Barrier(ntasks + 1)
    if new:
        t1 = asyncio.create_task(cond01_new())
        t3 = asyncio.create_task(cond03_new())
    else:
        loop.create_task(asyn.Cancellable(cond01)())
        loop.create_task(asyn.Cancellable(cond03)())
    for n in range(ntasks):
        loop.create_task(cond02(n, barrier))
    await barrier  # All instances of cond02 have completed
    # Test wait_for
    barrier = asyn.Barrier(2)
    loop.create_task(cond04(99, barrier))
    await barrier
    # cancel continuously running coros.
    if new:
        t1.cancel()
        t3.cancel()
        await asyncio.sleep_ms(0)
    else:
        await asyn.Cancellable.cancel_all()
    print('Done.')


def condition_test(new=False):
    """Run the Condition demo (runtime about 13s)."""
    printexp('''cond02 0 Awaiting notification.
cond02 1 Awaiting notification.
cond02 2 Awaiting notification.
cond02 3 Awaiting notification.
cond02 4 Awaiting notification.
cond02 5 Awaiting notification.
cond02 6 Awaiting notification.
cond02 5 triggered. tim = 1
cond02 6 triggered. tim = 1
cond02 3 triggered. tim = 3
cond02 4 triggered. tim = 3
cond02 1 triggered. tim = 5
cond02 2 triggered. tim = 5
cond02 0 triggered. tim = 7
cond04 99 Awaiting notification and predicate.
cond04 99 triggered. tim = 9
Done.
''', 13)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(cond_go(loop, new))
# ************ Gather test ************
# Task with one positional arg. Demonstrate that result order depends on
# original list order not termination order.
async def gath01(n):
    """Sleep for (3 - n/10) seconds then return ``n``.

    Higher ``n`` finishes sooner, demonstrating that Gather results come
    back in list order rather than completion order.
    """
    delay = 3 - n / 10
    print('gath01', n, 'started')
    await asyncio.sleep(delay)
    print('gath01', n, 'done')
    return n
# Takes kwarg. This is last to terminate.
async def gath02(x, y, rats):
    """Sleep 7 s then return ``(x * y, rats)``.

    Deliberately the slowest coroutine in the Gather demo, so it is the
    last to terminate; ``rats`` is supplied as a keyword argument.
    """
    print('gath02 started')
    await asyncio.sleep(7)
    print('gath02 done')
    return x * y, rats
# Only quits on timeout
async def gath03(n):
    """Count up once per second until cancelled by Gather's timeout.

    The asyn.Gatherable timeout raises asyncio.TimeoutError inside this
    coroutine; the handler returns the count accumulated so far.
    """
    print('gath03 started')
    try:
        while True:
            await asyncio.sleep(1)
            n += 1
    except asyncio.TimeoutError:
        print('gath03 timeout')
        return n
async def gath_go():
    """Gather three kinds of coroutine and print results in list order.

    Results: [0, 1, 2, 3] from gath01, (56, 77) from gath02, and the
    seconds counted by gath03 before its 5 s timeout fired.
    """
    gatherables = [asyn.Gatherable(gath01, n) for n in range(4)]
    gatherables.append(asyn.Gatherable(gath02, 7, 8, rats=77))
    gatherables.append(asyn.Gatherable(gath03, 0, timeout=5))
    res = await asyn.Gather(gatherables)
    print(res)
def gather_test():
    """Print the expected output, then run the Gather demo to completion."""
    printexp('''gath01 0 started
gath01 1 started
gath01 2 started
gath01 3 started
gath02 started
gath03 started
gath01 3 done
gath01 2 done
gath01 1 done
gath01 0 done
gath03 timeout
gath02 done
[0, 1, 2, 3, (56, 77), 4]
''', 7)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(gath_go())
| {
"repo_name": "peterhinch/micropython-async",
"path": "v2/asyntest.py",
"copies": "1",
"size": "12174",
"license": "mit",
"hash": -2176107452497519600,
"line_mean": 27.5774647887,
"line_max": 83,
"alpha_frac": 0.6760308855,
"autogenerated": false,
"ratio": 3.4177428411005053,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4593773726600505,
"avg_score": null,
"num_lines": null
} |
# as you can see - the import line now requires even less typing!
from Rpyc import *
c = SocketConnection("localhost")
#------------------------------------------------------------------------------
# this demo shows the new `execute` and `namespace` features of rpyc
#------------------------------------------------------------------------------
# the code below will run AT THE OTHER SIDE OF THE CONNECTION... so you'll see
# 'hello world' on the server's console
c.execute("print 'hello world'")
import sys
c.modules.sys.stdout = sys.stdout
# and this time, on our console
c.execute("print 'brave new world'")
# restore that
c.modules.sys.stdout = c.modules.sys.__stdout__
# anyway, the `execute` method runs the given code at the other side of the connection
# and works in the `namespace` dict. what?
c.execute("x = [1,2,3]")
print c.namespace.x
# now it makes sense, doesn't it? the 'namespace' attribute is something i called
# AttrFrontend -- it wraps a dict with the attribute protocol, so you can access
# it with the dot notation, instead of the braces notation (more intuitive).
# this namespace works both ways -- executing code affects the namespace, while
# altering the namespace directly also affects it:
c.namespace.x.append(4)
c.execute("x.append(5)")
print c.namespace.x
# but you should not assign complex objects (not int/float/str, etc) to this namespace
# directly, or NetProxies will be created. there's nothing wrong with that, but keep
# in mind it might cause blocking (and even deadlocks), as i'll explain later.
# another cool thing i want to show is the second, optional parameter to execute: mode.
# the mode controls how the code is compiled. the default mode is "exec", which means
# it executes the code as a module. the other option is "eval" which returns a value.
# so if you want to _do_ something, like printing or assigning a variable, you do it
# with "exec", and if you want to evaluate something, you do it with "eval"
# for example:
# this will print None
print c.execute("1+2")
# while this will print 3
print c.execute("1+2", "eval")
# but there's a time in a man's life when he asks himself, why the heck? you can, as i
# showed in other places, just do this:
# c.modules.__builtin__.eval("1+2")
# so what's the point?
#
# well, i've been waiting for this question. the rationale behind this seemingly useless
# feature is for times you NEED to have the code executing remotely, but writing a
# dedicated module for it is overdoing it:
# * more files to update ==> more chance that you'll forget to update
# * distributing the module to all of the machines
# * making a mess on the file system
# * it's really not a module... it's just some code that logically belongs to one single
# module, but technical difficulties prevent it
#
# and to show you what i mean -- i want to start a thread on the server, like it did in
# several places over the demos. this thread will send me an event every second. what i
# used to do was, creating another module, like testmodule.py to define the thread
# function, so it will exist on the server, and i could call it.
# if i defined thread_func at the client side, then the thread will block when trying
# to execute the code, because the client holds it. so this new mechanism lets you
# distribute code in a volatile fashion:
# * when the connection is closed, everything you defined is gone
# * no file-system mess
# * no need to distribute files across the network
# * only one place to maintain
c.execute("""
my_thread_active = True
def my_thread_func(callback):
import time
from Rpyc import Async
callback = Async(callback)
while my_thread_active:
callback(time.time())
time.sleep(1)
print "the thread says goodbye"
""")
def callback(timestamp):
    """Invoked (via Rpyc's Async wrapper) from the server-side thread,
    roughly once per second, with the server's time.time() value."""
    print "the timestamp is", timestamp
c.modules.thread.start_new_thread(c.namespace.my_thread_func, (callback,))
c.modules.time.sleep(5)
c.namespace.my_thread_active = False
c.close()
# it's not only for threads of course. there are many times when you NEED the code/objects
# on the remote side. for example:
# * situations that would block (like having the thread func on the client)
# * code that check the type of the object (type or isinstance), and a NetProxy would make
# it cry. DONT CHECK THE TYPE OF OBJECTS, PEOPLE, JUST USE THEM! that's why they invented
# duck-typing. argh.
# * other places i didnt think of as of yet. i want to sleep. leave me alone ;) zzzZZZ
#
# so enjoy!
| {
"repo_name": "tempbottle/restcommander",
"path": "play-1.2.4/python/Lib/site-packages/Rpyc/Demo/demo-6.py",
"copies": "4",
"size": "4546",
"license": "apache-2.0",
"hash": 6812129151861162000,
"line_mean": 33.9692307692,
"line_max": 93,
"alpha_frac": 0.6977562692,
"autogenerated": false,
"ratio": 3.6839546191247976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012672310340725359,
"num_lines": 130
} |
# as you can see - the import line now requires even less typing!
from Rpyc import *
c = SocketConnection("localhost")
#------------------------------------------------------------------------------
# this demo shows the new `execute` and `namespace` features of rpyc
#------------------------------------------------------------------------------
# the code below will run AT THE OTHER SIDE OF THE CONNECTION... so you'll see
# 'hello world' on the server's console
c.execute("print 'hello world'")
import sys
c.modules.sys.stdout = sys.stdout
# and this time, on our console
c.execute("print 'brave new world'")
# restore that
c.modules.sys.stdout = c.modules.sys.__stdout__
# anyway, the `execute` method runs the given code at the other side of the connection
# and works in the `namespace` dict. what?
c.execute("x = [1,2,3]")
print c.namespace.x
# now it makes sense, doesn't it? the 'namespace' attribute is something i called
# AttrFrontend -- it wraps a dict with the attribute protocol, so you can access
# it with the dot notation, instead of the braces notation (more intuitive).
# this namespace works both ways -- executing code affects the namespace, while
# altering the namespace directly also affects it:
c.namespace.x.append(4)
c.execute("x.append(5)")
print c.namespace.x
# but you should not assign complex objects (not int/float/str, etc) to this namespace
# directly, or NetProxies will be created. there's nothing wrong with that, but keep
# in mind it might cause blocking (and even deadlocks), as i'll explain later.
# another cool thing i want to show is the second, optional parameter to execute: mode.
# the mode controls how the code is compiled. the default mode is "exec", which means
# it executes the code as a module. the other option is "eval" which returns a value.
# so if you want to _do_ something, like printing or assigning a variable, you do it
# with "exec", and if you want to evaluate something, you do it with "eval"
# for example:
# this will print None
print c.execute("1+2")
# while this will print 3
print c.execute("1+2", "eval")
# but there's a time in a man's life when he asks himself, why the heck? you can, as i
# showed in other places, just do this:
# c.modules.__builtin__.eval("1+2")
# so what's the point?
#
# well, i've been waiting for this question. the rationale behind this seemingly useless
# feature is for times you NEED to have the code executing remotely, but writing a
# dedicated module for it is overdoing it:
# * more files to update ==> more chance that you'll forget to update
# * distributing the module to all of the machines
# * making a mess on the file system
# * it's really not a module... it's just some code that logically belongs to one single
# module, but technical difficulties prevent it
#
# and to show you what i mean -- i want to start a thread on the server, like it did in
# several places over the demos. this thread will send me an event every second. what i
# used to do was, creating another module, like testmodule.py to define the thread
# function, so it will exist on the server, and i could call it.
# if i defined thread_func at the client side, then the thread will block when trying
# to execute the code, because the client holds it. so this new mechanism lets you
# distribute code in a volatile fashion:
# * when the connection is closed, everything you defined is gone
# * no file-system mess
# * no need to distribute files across the network
# * only one place to maintain
c.execute("""
my_thread_active = True
def my_thread_func(callback):
import time
from Rpyc import Async
callback = Async(callback)
while my_thread_active:
callback(time.time())
time.sleep(1)
print "the thread says goodbye"
""")
def callback(timestamp):
    """Invoked (via Rpyc's Async wrapper) from the server-side thread,
    roughly once per second, with the server's time.time() value."""
    print "the timestamp is", timestamp
c.modules.thread.start_new_thread(c.namespace.my_thread_func, (callback,))
c.modules.time.sleep(5)
c.namespace.my_thread_active = False
c.close()
# it's not only for threads of course. there are many times when you NEED the code/objects
# on the remote side. for example:
# * situations that would block (like having the thread func on the client)
# * code that check the type of the object (type or isinstance), and a NetProxy would make
# it cry. DONT CHECK THE TYPE OF OBJECTS, PEOPLE, JUST USE THEM! that's why they invented
# duck-typing. argh.
# * other places i didnt think of as of yet. i want to sleep. leave me alone ;) zzzZZZ
#
# so enjoy!
| {
"repo_name": "chvrga/outdoor-explorer",
"path": "java/play-1.4.4/python/Lib/site-packages/Rpyc/Demo/demo-6.py",
"copies": "3",
"size": "4676",
"license": "mit",
"hash": 738466087462804,
"line_mean": 33.9692307692,
"line_max": 93,
"alpha_frac": 0.6783575706,
"autogenerated": false,
"ratio": 3.621998450813323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5800356021413323,
"avg_score": null,
"num_lines": null
} |
'''As you might see now the pseudo
tutorial will be all written in Python
This way you could just copy the code
and test yourself'''
#The most basic thing we learn to print in Python is
#The one and only, the world famous Hello world
print ("Hello world")
'''In python 3.something print works
as a function. It takes one argument which
can be a string like Hello world
or a variable as it could be'''
string_x = "Hello world"
print (string_x) #Prints Hello World
#[Note for Myself add a link to Basic types and their use]
'''As you can see im jumping from single
line comments to multi when its needed,
"...because i can"-Ken Bauer'''
#You can print to the screen more types like integers or floats
int_x = 10 #Prints 10
float_x = 20.0 #Prints 20.0
#NOTE we are not using the same x or something each something_x
#Its used only for names [Note for self, add tips on calling things]
print (int_x) #Prints the int_x value
print (float_x) #Prints the float_x value
'''Sometimes you want to print something
more than just the value of something
thats when concatenating comes useful'''
hello = "Hola"
world = "Mundo"
print (hello + world) #This is a bit problematic since it Prints
#HolaMundo together so we need
print (hello + " " + world )
'''If we needed to print a string
with lots of variables in it, it would get
tedious over time to do variable + " " + variable +
" " + variable... and so on; in these cases
we could use string formatting'''
#Watch dis string boi
#Copy this code if you want and test string formatting yourself
#(I could use input() but well this is a output tutorial)
name = "Juan"
lastName = "Quirino"
ageInYears = 18
fingersInLeftHand = 5.1
gender = "Male"
'''So now we have 5 variables, i would
like to write a line of code that is reusable
and describes the user this is when string
formatting comes useful'''
#So we can have this
# Bug fix: the % operator must apply to the format string INSIDE print().
# print() returns None, so "print(...) % (...)" raises a TypeError in
# Python 3 instead of printing the formatted message.
print ("Hello my name is %s %s, i am %s years old im a %s and i \
have %s fingers in my left hand" % \
(name,lastName,ageInYears,gender,fingersInLeftHand))
#Instead of this
#Plus we need to use str() to convert to string
print ("Hello my name is " + name + ", i am " + \
str(ageInYears) + " old im a " + gender + " and i have " + \
str(fingersInLeftHand) + " in my left hand")
#As you can see im using a \ in my print statement
# \ Serves to continue a statement on next line without
#Breaking the statement
| {
"repo_name": "QuirinoC/Python",
"path": "print.py",
"copies": "1",
"size": "2371",
"license": "apache-2.0",
"hash": 3810817049688870400,
"line_mean": 37.868852459,
"line_max": 68,
"alpha_frac": 0.7279628849,
"autogenerated": false,
"ratio": 3.4017216642754664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4629684549175467,
"avg_score": null,
"num_lines": null
} |
'''A system that combines global bindable hotkeys with a widget focus behavior
Manages kivy Window key_up, key_down events and calls methods, callbacks
where necessary'''
from . import focus as focus_behavior
from kivy.core.window import Window
from kivy.core.window import Keyboard
from kivy.logger import Logger
from kivy.clock import Clock
from time import time
from . import keys
import traceback
keybinds = {}
'''Storage for all global hotkeys and callbacks'''
# Current modifier hold state, maintained by _update_modifier().
held_ctrl = False
held_alt = False
held_shift = False
log_keys = False
'''Key events will be logged when True, default is False'''
active = True
'''Key events will be managed when True, default is True'''
waiting_press = False
waiting_release = False
'''All hotkeys can have a waiting time which will temporarily freeze all input
events'''
disabled_categories = set()
'''Storage set for disabled global hotkey categories'''
ignore_warnings = False
# Debounce / alt-auto-repeat bookkeeping used by on_key_down().
last_key = ''
last_modifier = []
last_time = time()
time_alt_press = {}
def start():
    '''Start managing key events'''
    # Flips the module-level switch checked at the top of on_key_event().
    global active
    active = True
def stop():
    '''Stop managing key events'''
    # on_key_event() returns immediately while this is False.
    global active
    active = False
def start_categories(categories):
    '''Start managing key events for global hotkey categories.

    :param categories: a category name or an iterable of category names.
    '''
    global disabled_categories
    # Accept a bare string as a one-element list for caller convenience.
    if isinstance(categories, str):
        categories = [categories]
    for category in categories:
        # discard() is a no-op when absent, replacing the previous
        # check-then-remove double lookup; behavior is unchanged.
        disabled_categories.discard(category)
def stop_categories(categories):
    '''Stop managing key events for global hotkey categories.

    :param categories: a category name or an iterable of category names.
    '''
    global disabled_categories
    if isinstance(categories, str):
        categories = [categories]
    # set.update adds all names in one call (was a per-item add loop).
    disabled_categories.update(categories)
def add(name, key, state, callback, modifier=None, wait=0, category='n/a'):
    '''Add a global hotkey.

    :param name: unique identifier; re-adding replaces the old binding.
    :param key: keycode (coerced to int by _make_kb_dict).
    :param state: 'down', 'up', 'any' or 'all'.
    :param callback: zero-argument callable run when the hotkey fires.
    :param modifier: list of modifier names ('ctrl', 'alt', 'shift'),
        ['none'] or None for no modifier requirement.
    :param wait: seconds to freeze further key handling after firing.
    :param category: grouping key used by start/stop_categories.
    '''
    # Build the record once (previously built twice with identical args).
    new_binding = _make_kb_dict(
        name, key, state, callback, modifier=modifier,
        wait=wait, category=category)
    if name in keybinds:
        # Bug fix: the two adjacent string literals used to concatenate as
        # "...keybinds before,replacing..." -- missing separator space.
        log_warning('key_binder: key {} in {} was added to keybinds before, '
                    'replacing with {}'.format(
                        name, keybinds[name], new_binding))
    keybinds[name] = new_binding
def _make_kb_dict(name, key, state, callback, modifier=None, wait=0,
category=''):
return {
'callback': callback,
'key': int(key),
'state': state,
'modifier': modifier,
'wait': wait,
'category': category,
}
def remove(name):
    """Remove a global hotkey; logs and re-raises KeyError if unknown."""
    try:
        keybinds.pop(name)
    except KeyError:
        Logger.error('key_binder: key "%s" is not in keybinds' % (name))
        raise
def on_key_down(window, key, scan, text, modifier):
    '''Detects pressed keys, modifiers and calls on_key_event'''
    global last_key, last_modifier, last_time, time_alt_press, waiting_press
    global log_keys
    # Kivy's own modifier argument is discarded; modifier state is tracked
    # module-side so it stays consistent between down and up events.
    modifier = _update_modifier(key, True)
    if waiting_press:
        if log_keys:
            Logger.info('kb_dispatcher: on_key_down: waiting')
    else:
        time_now = time()
        # Debounce: drop an identical key+modifier repeat within 20 ms.
        if last_time + 0.02 > time_now:
            if key == last_key and modifier == last_modifier:
                return
        if 'alt' in modifier:
            # Alt+key auto-repeats aggressively; allow at most one event
            # per key every 0.3 s while alt is held.
            keytime = time_alt_press.get(key, 0)
            if keytime:
                if keytime > time_now - 0.3:
                    return
            time_alt_press[key] = time_now
        last_key = key
        last_modifier = modifier
        last_time = time_now
        on_key_event(key, modifier, True, text=text)
def on_key_up(window, key, *args):
    '''Detects released keys, updates module modifier state and forwards
    the event to on_key_event unless a post-hotkey wait is active.'''
    global waiting_release, log_keys
    modifier = _update_modifier(key, False)
    if waiting_release:
        if log_keys:
            # Bug fix: this path previously logged "on_key_down: waiting",
            # a copy/paste slip -- this is the key-up handler.
            Logger.info('kb_dispatcher: on_key_up: waiting')
    else:
        on_key_event(key, modifier, False)
def on_key_event(key, modifier, is_down, text=None):
    '''Logs keys(if log_keys is True),
    updates global modifiers,
    does global hotkey callbacks, calls focused widget
    on_key_down or on_key_up method when there is a focused widget
    and key is not used by global callback already or key is grabbed by widget
    '''
    global held_ctrl, held_alt, held_shift, waiting_press, waiting_release
    # NOTE(review): ``ignored_keys`` is declared global here but never
    # defined or used anywhere in this module -- looks like dead code.
    global log_keys, ignored_keys
    if not active:
        return
    if is_down:
        kstate = 'down'
    else:
        kstate = 'up'
    if log_keys:
        Logger.info('kb_dispatcher: on_key_{}: {} - {}'.format(
            kstate, key, modifier))
    disp_global = True
    cur_focus = focus_behavior.current_focus
    # A widget accepting raw text input consumes the event outright.
    if text and cur_focus and cur_focus.receive_textinput:
        return
    # Keys explicitly grabbed by the focused widget go to it first; the
    # widget's return value decides whether globals still run.
    if cur_focus and key in cur_focus.grab_keys:
        disp_global = dispatch_to_focused(key, modifier, is_down)
    if disp_global:
        found = False
        for k, v in keybinds.items():
            if v['category'] in disabled_categories:
                continue
            if v['key'] == key:
                if v['state'] in (kstate, 'any', 'all'):
                    if modifier and v['modifier']:
                        # All of the binding's modifiers must be held.
                        found = True
                        for mod in v['modifier']:
                            if mod not in modifier:
                                found = False
                        if found:
                            v['callback']()
                    else:
                        # Binding with no modifier requirement fires only
                        # when it declares none.
                        if v['modifier'] == ['none'] or not v['modifier']:
                            v['callback']()
                            found = True
                    if v['wait']:
                        # Freeze further events of this direction for
                        # v['wait'] seconds after the hotkey fires.
                        if is_down and not waiting_press:
                            waiting_press = True
                            Clock.schedule_once(remove_wait_press, v['wait'])
                        elif not is_down and not waiting_release:
                            waiting_release = True
                            Clock.schedule_once(remove_wait_release, v['wait'])
        if not found:
            # No global hotkey matched: offer the event to the focused widget.
            dispatch_to_focused(key, modifier, is_down)
def remove_wait_press(*args):
    """Clock callback: end the post-hotkey freeze of key-press events."""
    global waiting_press
    waiting_press = False
def remove_wait_release(*args):
    """Clock callback: end the post-hotkey freeze of key-release events."""
    global waiting_release
    waiting_release = False
def dispatch_to_focused(key, modifier, is_down):
    """Forward a key event to the focused widget's handler.

    Returns the handler's return value (truthy means the event was
    consumed), or None when there is no focused widget or the handler
    raised (the error is logged, best-effort).
    """
    cf = focus_behavior.current_focus
    retval = None
    if cf:
        try:
            if is_down:
                retval = cf.on_key_down(key, modifier)
            else:
                retval = cf.on_key_up(key, modifier)
            return retval
        except Exception:
            # Bug fix: was a bare ``except:`` which also swallowed
            # SystemExit and KeyboardInterrupt; logging is kept as-is.
            e = traceback.format_exc()
            Logger.error('kb_system: dispatch_to_focused: %s' % (e))
def on_textinput(window, text):
    """Forward a Kivy on_textinput event to the focused widget, logging
    (and swallowing) any error raised by its handler."""
    global log_keys
    if log_keys:
        Logger.info('kb_dispatcher: on_textinput: %s' % (text))
    cf = focus_behavior.current_focus
    if cf:
        try:
            # The dispatch return value was never used; dropped the local.
            cf.dispatch('on_focus_textinput', text)
        except Exception:
            # Bug fix: narrowed from a bare ``except:`` which also caught
            # SystemExit and KeyboardInterrupt.
            e = traceback.format_exc()
            Logger.error('kb_system: on_textinput: %s' % (e))
def _update_modifier(key, is_down):
    """Record modifier hold state in the module globals (held_alt,
    held_ctrl, held_shift) and return the currently-held modifier names
    as a list for on_key_event.

    :param key: keycode that changed state.
    :param is_down: True on press, False on release.
    """
    global held_alt, held_ctrl, held_shift
    # Update whichever modifier (if any) this keycode represents.
    if key in (keys.ALT_L, keys.ALT_R):
        held_alt = is_down
    elif key in (keys.CTRL_L, keys.CTRL_R):
        held_ctrl = is_down
    elif key in (keys.SHIFT_L, keys.SHIFT_R):
        held_shift = is_down
    # Same ordering as before: alt, ctrl, shift.
    return [name for name, held in (('alt', held_alt),
                                    ('ctrl', held_ctrl),
                                    ('shift', held_shift)) if held]
def log_warning(text):
    """Emit a warning unless the module-level ignore_warnings flag is set."""
    if not ignore_warnings:
        Logger.warning(text)
# Hook the module-level handlers into Kivy's window events at import time.
Window.bind(on_key_down=on_key_down)
Window.bind(on_key_up=on_key_up)
Window.bind(on_textinput=on_textinput)
| {
"repo_name": "Bakterija/mmplayer",
"path": "mmplayer/kivy_soil/kb_system/__init__.py",
"copies": "1",
"size": "8000",
"license": "mit",
"hash": -4445377944644979700,
"line_mean": 30.8725099602,
"line_max": 78,
"alpha_frac": 0.582875,
"autogenerated": false,
"ratio": 3.937007874015748,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002826183640575371,
"num_lines": 251
} |
"""A system to create a directory tree from a template.
The format is very simple. A line beginning with @ should be a path to a file. Subsequent lines after that line will be appended to the specified file.
The template is passed through moya templates, creating a very flexible system for dynamically creating file trees.
In order to allow these fs templates to generate moya templates, the syntax is slightly different.
The filesystem template syntax should be {{% %}} for logic, and ${{ }} for substitution.
"""
from __future__ import unicode_literals
from __future__ import print_function
from ...context import Context
from ...template.moyatemplates import Template
from fs.path import dirname, join, relpath
import re
def compile_fs_template(fs, template_text, data=None, path=None):
    """Compile a fs template structure in to a filesystem object.

    :param fs: writable filesystem object (must provide ``open`` and
        ``makedirs``).
    :param template_text: template source. After rendering, a line of the
        form ``@TYPE path`` starts a new file (TYPE is ``text``,
        ``wraptext`` or ``bin``) and subsequent lines become its content;
        a path ending in '/' creates a directory instead.
    :param data: optional mapping rendered into the template.
    :param path: optional base path that all generated paths are joined
        under.
    """
    if data is None:
        data = {}
    template = Template(template_text)
    # fs templates use {{% %}} / ${{ }} so that their OUTPUT can itself
    # contain ordinary {% %} / ${ } moya template syntax.
    template.re_special = re.compile(
        r"\{\{\%((?:\".*?\"|\'.*?\'|.|\s)*?)\%\}\}|(\{\{\#)|(\#\}\})"
    )
    context = Context(re_sub=r"\$\{\{(.*?)\}\}")
    # with context.frame("data"):
    fs_template = template.render(data, context=context)

    out_type = None
    out_filename = None
    file_lines = []

    def write_file(filename, file_type):
        # Flush the accumulated lines to `filename` according to its type.
        if filename:
            if file_type.lower() == "text":
                with fs.open(filename, "wt") as f:
                    f.write("\n".join(file_lines) + "\n")
            elif file_type.lower() == "wraptext":
                import textwrap

                with fs.open(filename, "wt") as f:
                    for line in file_lines:
                        f.write("\n".join(textwrap.wrap(line, 79)) + "\n")
            elif file_type.lower() == "bin":
                with fs.open(filename, "wb") as f:
                    for line in file_lines:
                        # Bug fix: the old code joined chr() results with
                        # b"".join, which raises TypeError on Python 3
                        # (chr returns str, not bytes). Decode each hex
                        # pair to a byte directly; like the original
                        # zip-based pairing, a trailing odd character is
                        # silently dropped.
                        chunk = bytes(
                            int(line[i:i + 2], 16)
                            for i in range(0, len(line) - 1, 2)
                        )
                        f.write(chunk)
        del file_lines[:]

    for line in fs_template.splitlines():
        line = line.rstrip()
        if line.startswith("@"):
            # New file/directory directive: flush the previous file first.
            write_file(out_filename, out_type)
            out_filename = None
            out_type, path_spec = line[1:].split(" ", 1)
            if path:
                path_spec = join(path, relpath(path_spec))
            if path_spec.endswith("/"):
                fs.makedirs(path_spec, recreate=True)
                out_filename = None
            else:
                fs.makedirs(dirname(path_spec), recreate=True)
                out_filename = path_spec
            continue
        if out_filename:
            file_lines.append(line)
    # Flush the final file, if any.
    if out_filename:
        write_file(out_filename, out_type)
if __name__ == "__main__":
    # Demo: build the fs template below into an in-memory filesystem and
    # print the resulting directory tree.
    template = """
@test.txt
This
is a test file
{{%- if readme %}}
@readme.txt
Readme file
-----------
${{message}}
{{%- endif %}}
@templates/base.html
<h1>${title}</h1>
<ul>
{% for fruit in fruits %}
<li>${fruit}</li>
{% endfor %}
</ul>
@settings/production.ini
@foo/bar/baz/
@author
Bob
"""
    from fs.osfs import OSFS
    from fs.memoryfs import MemoryFS

    fs = OSFS("__test__", create=True)
    # NOTE(review): the OSFS above is immediately shadowed by the MemoryFS
    # below, yet still creates a __test__ directory on disk as a side
    # effect -- presumably a leftover debugging toggle.
    fs = MemoryFS()
    td = dict(message="Hello, World!", readme=True)
    compile_fs_template(fs, template, td)
    fs.tree()
| {
"repo_name": "moyaproject/moya",
"path": "moya/command/sub/templatebuilder.py",
"copies": "1",
"size": "3396",
"license": "mit",
"hash": -3269101359801168400,
"line_mean": 28.5304347826,
"line_max": 151,
"alpha_frac": 0.5535924617,
"autogenerated": false,
"ratio": 3.752486187845304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4806078649545304,
"avg_score": null,
"num_lines": null
} |
# A tab completion entrybox - originally found on github,
# modified to remove dependency on Gtk EntryCompletion
# See https://gist.github.com/ssokolow/135673/ for original
from gi.repository import Gtk, Gio, Gdk
MOD_MASK = ( Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.MOD1_MASK |
Gdk.ModifierType.MOD4_MASK | Gdk.ModifierType.SHIFT_MASK )
class TabCompletionEntry(Gtk.Entry):
    """A Gtk.Entry that Tab-completes the last space-separated word.

    ``completion_getter(prefix)`` must return a liststore-like sequence of
    rows whose column 0 holds candidate strings for ``prefix``. Repeated
    Tab presses cycle through successive candidates.
    """

    def __init__(self, completion_getter):
        Gtk.Entry.__init__(self)
        self.completion_getter = completion_getter
        self.completed = False  # Used by "allow Tab-cycling after completion"
        # False-y when not mid-cycle; set True while cycling candidates.
        self.completing = ''
        # NOTE(review): completion_prev is assigned here but never read.
        self.completion_prev = ""
        self.connect('changed', self.content_changed_cb)
        self.connect('key-press-event', self.entry_keypress_cb)
        self.connect('activate', self.activate_cb)

    def activate_cb(self, widget):
        # NOTE(review): dead code -- the ``if False`` guard means activate
        # is never suppressed; presumably a leftover debugging toggle.
        if False:
            self.stop_emission('activate')

    def entry_keypress_cb(self, widget, event):
        # Handle Tab (without modifiers) for completion; any other key
        # falls through to the default Entry handling.
        text = self.get_text()
        prefix = text.split(" ")[-1]
        if event.keyval == Gdk.KEY_Tab and not event.state & MOD_MASK and (
                not self.completed):
            if self.completing:
                # Already cycling: fetch candidates again and substitute
                # the next one in place of the previous completion.
                liststore = self.completion_getter(self.completing_prefix)
                if len(liststore) < self.liststore_length:
                    old_text = self.completed_text
                    # NOTE(review): indexing row 0 at column
                    # liststore_length looks suspicious -- cycling through
                    # candidates would normally read
                    # liststore[self.liststore_length][0]; confirm against
                    # the shape completion_getter returns.
                    self.completed_text = liststore[0][self.liststore_length]
                    self.liststore_length = len(liststore)
                    self.set_text(text.replace(old_text, self.completed_text))
                    self.set_position(-1)
                    self.liststore_length += 1
                return True
            liststore = self.completion_getter(prefix)
            if len(liststore) == 0:
                # Users can press Tab twice in this circumstance to confirm.
                self.completed = True
            if len(liststore) == 1:
                # Single match: complete it outright.
                self.set_text(text.replace(prefix, liststore[0][0]))
                self.set_position(-1)
                self.completed = True
            else:
                # Multiple matches: enter cycling mode.
                self.completing = True
                self.completing_prefix = prefix
                self.liststore_length = len(liststore)
                self.completed_text = liststore[0][self.liststore_length]
                self.set_text(text.replace(prefix, self.completed_text))
                self.set_position(-1)
                self.liststore_length += 1
            return True
        else:
            # we're no longer completing
            self.completing = False
            return False

    def content_changed_cb(self, widget):
        # Any edit re-arms completion.
        self.completed = False
| {
"repo_name": "rymate1234/Gnome-IRC",
"path": "gnomeirc/TabCompletionEntry.py",
"copies": "1",
"size": "2780",
"license": "mit",
"hash": -1981154282910749200,
"line_mean": 37.1549295775,
"line_max": 78,
"alpha_frac": 0.5665467626,
"autogenerated": false,
"ratio": 4.212121212121212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5278667974721212,
"avg_score": null,
"num_lines": null
} |
"""A table of all known GitHub repos of vim plugins that we want to scrape."""
import time
import rethinkdb as r
import db.util
r_conn = db.util.r_conn
# TODO(david): Using a proper ODM to do inheritance, enforce constraints, and
# ensure a schema would be really nice.
class GithubRepos(object):
    """Abstract base class of class methods to handle a table of GitHub
    repositories.
    """

    # Subclasses must override this.
    _TABLE_NAME = None

    # Fields we want to track for every GitHub repo. This can be extended with
    # extra fields for subclasses.
    _ROW_SCHEMA = {
        # GitHub's repo ID. Only unique with (owner, repo_name)
        'repo_id': '',
        # Last time this repo was scraped (Unix timestamp in seconds)
        'last_scraped_at': 0,
        # Number of times scraped
        'times_scraped': 0,
        # Whether this repo should not be used for fetching plugin data
        'is_blacklisted': False,
        # Raw repo data from GitHub API
        'repo_data': {},
        # Number of Vundle, Pathogen, NeoBundle, etc. users
        'plugin_manager_users': 0,
        # If this repo has been renamed, the "owner/repo_name" of the new
        # location of this repo
        'redirects_to': '',
        # If another repo redirects here, the "owner/repo_name" of that repo
        # TODO(david): This should be a list.
        'redirects_from': '',
    }

    # Override this with URLs that should not be scraped.
    _BLACKLISTED_GITHUB_REPOS = []

    @classmethod
    def ensure_table(cls):
        """Create the table and its secondary indexes if missing, and seed
        blacklist rows."""
        db.util.ensure_table(cls._TABLE_NAME)
        db.util.ensure_index(cls._TABLE_NAME, 'owner')
        db.util.ensure_index(cls._TABLE_NAME, 'repo_id')
        db.util.ensure_index(cls._TABLE_NAME, 'redirects_to')
        db.util.ensure_index(cls._TABLE_NAME, 'last_scraped_at')
        # Compound index so (owner, repo_name) lookups are one get_all call.
        db.util.ensure_index(cls._TABLE_NAME, 'owner_repo',
                lambda repo: [repo['owner'], repo['repo_name']])
        cls.ensure_blacklisted_repos()

    @classmethod
    def ensure_blacklisted_repos(cls):
        """Make sure all blacklisted GitHub repos have an entry in the DB
        marking them as such.
        """
        for owner_repo in cls._BLACKLISTED_GITHUB_REPOS:
            owner, repo_name = owner_repo.split('/')
            cls.upsert_with_owner_repo({
                'owner': owner,
                'repo_name': repo_name,
                'is_blacklisted': True,
            })

    @classmethod
    def get_with_owner_repo(cls, owner, repo_name):
        """Returns the repository with the given owner and repo_name."""
        assert owner
        assert repo_name
        # Normalize index values
        owner = owner.lower()
        repo_name = repo_name.lower()
        query = r.table(cls._TABLE_NAME).get_all([owner, repo_name],
                index='owner_repo')
        return db.util.get_first(query)

    @classmethod
    def upsert_with_owner_repo(cls, repo):
        """Insert or update a row using (owner, repo_name) as the key.

        Returns True if a new row was inserted.
        """
        assert repo['owner']
        assert repo['repo_name']
        # Normalize index values
        repo['owner'] = repo['owner'].lower()
        repo['repo_name'] = repo['repo_name'].lower()
        # Prefer the primary key when the caller already knows it; fall
        # back to the compound (owner, repo_name) index otherwise.
        if repo.get('id'):
            db_repo = r.table(cls._TABLE_NAME).get(repo['id']).run(r_conn())
        else:
            db_repo = cls.get_with_owner_repo(repo['owner'], repo['repo_name'])
        if db_repo is None:
            # New row: lay caller data over the schema's defaults.
            repo_to_insert = dict(cls._ROW_SCHEMA, **repo)
            r.table(cls._TABLE_NAME).insert(repo_to_insert).run(r_conn())
            return True
        else:
            # Existing row: merge in the caller's fields and replace.
            db_repo.update(repo)
            # TODO(david): Figure out if there's any difference between doing
            # table().replace(db_repo), and if so, which is preferred.
            r.table(cls._TABLE_NAME).insert(db_repo,
                    conflict='replace').run(r_conn())
            return False

    @classmethod
    def log_scrape(cls, repo):
        """Update a repo's fields to note that it has just been scraped."""
        repo['last_scraped_at'] = int(time.time())
        repo['times_scraped'] = repo.get('times_scraped', 0) + 1
class PluginGithubRepos(GithubRepos):
    """GitHub repositories of Vim plugins."""

    _TABLE_NAME = 'plugin_github_repos'

    _ROW_SCHEMA = dict(GithubRepos._ROW_SCHEMA, **{
        # We don't generally care about scraping from forks.
        'is_fork': False,
        # IDs of vim.org scripts where this repo was mentioned
        'from_vim_scripts': [],
        # If this was discovered from a user submission, the submitted form
        # data as an object.
        'from_submission': None,
    })

    # GitHub repos that are not Vim plugins that we've manually found.
    # TODO(david): We should probably have some heuristic to test if a repo is
    #     actually a vim plugin... there's a bunch of repos referenced from
    #     vim.org descriptions that are not vim plugins.
    # TODO(david): Make it easy to post-blacklist a plugin that we discover on
    #     the live site.
    _BLACKLISTED_GITHUB_REPOS = set([
        'behat/behat',
        'github/gitignore',
        'kablamo/dotfiles',
        'aemoncannon/ensime',
        'experiment/vim',
        'ggreer/the_silver_searcher',
        'pry/pry',
        'sitaramc/gitolite',
        'sstephenson/bats',
        'git.wincent.com/command-t',
        'contrib/mpvim',
        'svn/trunk',
        # TODO(david): This repo actually contains a Vim plugin nested in
        #     https://github.com/mozilla/rust/tree/master/src/etc/vim, but
        #     showing the top-level repo and description ("a safe, concurrent,
        #     practical language") appears out of place, especially since it
        #     has about 3K stars. Figure out what to do with it. If we default
        #     sort by # of users instead of GitHub stars, we can probably
        #     un-blacklist this.
        'mozilla/rust',
    ])
class DotfilesGithubRepos(GithubRepos):
    """GitHub repositories of dotfiles (*nix config) repos."""

    _TABLE_NAME = 'dotfiles_github_repos'

    _ROW_SCHEMA = dict(GithubRepos._ROW_SCHEMA, **{
        # Last time this repo was pushed (Unix timestamp in seconds).
        'pushed_at': 0,
        # The keyword that was used to find this repo.
        'search_keyword': '',
        # References to GitHub plugin repos as strings. eg. "kien/ctrlp.vim"
        'vundle_repos': [],
        'neobundle_repos': [],
        'pathogen_repos': [],
        'vimplug_repos': [],
    })

    @classmethod
    def ensure_table(cls):
        """Create table/indexes, adding the dotfiles-specific indexes."""
        super(DotfilesGithubRepos, cls).ensure_table()
        db.util.ensure_index(cls._TABLE_NAME, 'search_keyword')
        db.util.ensure_index(cls._TABLE_NAME, 'pushed_at')

    @classmethod
    def get_latest_with_keyword(cls, search_keyword):
        """Return the most recently pushed repo found via search_keyword."""
        # Looks like we can't chain a get_all with an order_by, so we can't use
        # the search_keyword index.
        query = (r.table(cls._TABLE_NAME)
                 .order_by(index=r.desc('pushed_at'))
                 .filter({'search_keyword': search_keyword}))
        return db.util.get_first(query)
| {
"repo_name": "jonafato/vim-awesome",
"path": "db/github_repos.py",
"copies": "3",
"size": "7226",
"license": "mit",
"hash": -5487856805060038000,
"line_mean": 32.1467889908,
"line_max": 79,
"alpha_frac": 0.5956269029,
"autogenerated": false,
"ratio": 3.7891976927110647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00044468935244112106,
"num_lines": 218
} |
# A table of all known mana symbols and their canonical form.
# This ensures that all mana symbols in the database are in a
# specific known form for easier searching.
# NOTE(review): not referenced elsewhere in this module chunk; presumably
# consumed by the card importer/parser -- confirm against callers.
CANONICAL_SYMBOLS = {
# basic color symbols
'w':'w', 'u':'u', 'b':'b', 'r':'r', 'g':'g', 'c':'c',
# numbers
'0':'0', '1':'1', '2':'2', '3':'3', '4':'4', '5':'5', '6':'6', '7':'7',
'8':'8', '9':'9', '10':'10', '11':'11', '12':'12', '13':'13', '14':'14',
'15':'15', '16':'16', '20':'20', '100':'100', '1000000':'1000000',
'inf':'infinity', 'infinity': 'infinity',
# allied hybrid -- either letter order maps to the canonical pair
'wu':'wu', 'uw':'wu', 'ub':'ub', 'bu':'ub', 'br':'br', 'rb':'br',
'rg':'rg', 'gr':'rg', 'gw':'gw', 'wg':'gw',
# enemy hybrid
'wb':'wb', 'bw':'wb', 'ur':'ur', 'ru':'ur', 'bg':'bg', 'gb':'bg',
'rw':'rw', 'wr':'rw', 'gu':'gu', 'ug':'gu',
# 2brid (two-hybrid symbols such as {2/W})
'2w':'2w', '2u':'2u', '2b':'2b', '2r':'2r', '2g':'2g',
# phyrexian
'pw':'pw', 'pu':'pu', 'pb':'pb', 'pr':'pr', 'pg':'pg',
# snow
's': 's',
# X, Y, and Z
'x':'x', 'y':'y', 'z':'z',
# tap/untap -- the long spellings collapse to the one-letter symbols
't':'t', 'tap':'t', 'q':'q', 'untap':'q'
}
# Table for easily converting mana symbols to their converted mana cost.
# Symbols absent from this table (e.g. 't'/'q') contribute 0 -- see
# computeCMC, which skips unknown tokens.
MANACOST_TO_CMC = {
'w':1, 'u':1, 'b':1, 'r':1, 'g':1, 'c':1,
'0':0, '1':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9,
'10':10, '11':11, '12':12, '13':13, '14':14, '15':15, '16':16, '20':20,
# 'infinity' gets a large sentinel value rather than a true infinity.
'100':100, '1000000':1000000, 'infinity':999999999,
'wu':1, 'ub':1, 'br':1, 'rg':1, 'gw':1,
'wb':1, 'ur':1, 'bg':1, 'rw':1, 'gu':1,
# a 2-brid counts as its generic half for CMC purposes
'2w':2, '2u':2, '2b':2, '2r':2, '2g':2,
'pw':1, 'pu':1, 'pb':1, 'pr':1, 'pg':1,
's':1,
'x':0, 'y':0, 'z':0,
}
# Translation table deleting every character in the 8-bit range that is
# neither alphanumeric nor a space; used to scrub decoration ('/', '{', '}',
# punctuation) out of mana tokens.
IGNORE_CHARS = str.maketrans('', '', ''.join(c for c in map(chr, range(256)) if not c.isalnum() and c != ' '))


def tokenizeManaString(mana):
    """Take a string of mana symbols and tokenize it.

    Tokens are strings enclosed in {}s; the braces are optional for
    one-character tokens.  Ignores all non-alphanumeric-or-space
    characters.  An unmatched '{' is consumed one character at a time.

    Example:
        tokenizeManaString("{uw}b{2r}t")
        -> ["uw", "b", "2r", "t"]
    """
    tokens = []
    pos = 0
    while pos < len(mana):
        if mana[pos] == '{':
            rbrace = mana.find('}', pos)
            if rbrace == -1:
                # Unmatched brace: fall back to one-character consumption.
                token = '{'
                pos += 1
            else:
                token = mana[pos + 1:rbrace]
                pos = rbrace + 1
        else:
            token = mana[pos]
            pos += 1
        # Normalize case and strip decoration characters.
        token = token.lower().translate(IGNORE_CHARS)
        # Bug fix: tokens that become empty after stripping (e.g. a lone
        # '-' or an unmatched '{') used to be appended as '' -- now they
        # are dropped, matching the documented "ignores all
        # non-alphanumeric-or-space characters" contract.
        if token:
            tokens.append(token)
    return tokens
def computeCMC(mana):
    """Return the converted mana cost of the given mana-cost string.

    Any symbol without a known CMC is treated as contributing 0.
    """
    return sum(MANACOST_TO_CMC.get(symbol, 0)
               for symbol in tokenizeManaString(mana))
"""
NORMALIZING MANA COSTS
I am a little obsessive about details like putting mana symbols
in the correct order. Since many people aren't, the following set of
functions helps get them just right. It also helps to have a standard.
By 'normalized', we mean that tokens come in the following order:
1. X, Y, Z
2. Numeric
3. snow
4. obligate colorless
5. 2-brid, in standard order
6. allied hybrid, in standard order by 'upper' color
7. enemy hybrid, in standard order by 'upper' color
8. phyrexian, in standard order
9. normal colored symbols, in standard order.
As Wizards has inconsiderately not printed any card with more than three
of these (Marisi's Twinclaws), most of this is speculative.
As for 'standard order', this refers to the usual ordering of n WUBRG symbols:
Mono-colored: trivial
Two colors: shortest clockwise distance along the WUBRG circle
Three colors:
Shards: WUBRG-first order (WUB, UBR, BRG, RGW, GWU)
Wedges: skip-2 order (WBG, URW, BGU, RWB, GUR)
Four colors: start after missing color (UBRG, BRWG, etc.)
Five colors: trivial
"""
def getOrderedTokens(tokens, order):
    """Select all tokens from `tokens` that are in `order` and order them
    according to the sequence given by `order` (duplicates preserved).

    Example:
        getOrderedTokens(["r", "g", "r", "br"], ["w", "u", "b", "r", "g"])
        -> ["r", "r", "g"]
    """
    result = []
    for token in order:
        result.extend([token] * tokens.count(token))
    return result


def getThreeColorOrder(tokens, unique, wubrg):
    """Order three-color tokens canonically (shard or wedge order).

    tokens: list of all tokens
    unique: list of unique tokens in `tokens` from `wubrg`
    wubrg:  list of tokens filling the roles of WUBRG.  This exists so the
            same function can deal with normal mana symbols, phyrexian
            symbols, etc.
    """
    # Shards have three consecutive symbols. Find them!
    for i in range(5):
        if (wubrg[i] in unique and wubrg[(i + 1) % 5] in unique
                and wubrg[(i + 2) % 5] in unique):
            colors = [wubrg[i], wubrg[(i + 1) % 5], wubrg[(i + 2) % 5]]
            return getOrderedTokens(tokens, colors)
    # OK, it's a wedge. Wedges have the canonical symbol order that goes
    # two steps at a time (so WBG rather than BGW or GWB or anything else).
    # Find that initial two-step.
    for i in range(5):
        if (wubrg[i] in unique and wubrg[(i + 2) % 5] in unique
                and wubrg[(i + 4) % 5] in unique):
            colors = [wubrg[i], wubrg[(i + 2) % 5], wubrg[(i + 4) % 5]]
            return getOrderedTokens(tokens, colors)
    # We should never get here
    raise ValueError("Three-color cost that's neither a shard nor a wedge?!")


def getWUBRGTokens(tokens, wubrg):
    """Pull out all tokens from the `wubrg` array and return a list of
    those tokens in canonical order.

    `wubrg` lets the same function handle normal, phyrexian, etc. mana.
    """
    wubrg_tokens = [token for token in tokens if token in wubrg]
    unique_tokens = list(set(wubrg_tokens))
    num_colors = len(unique_tokens)
    if num_colors == 0:
        # for zero colors, the answer is easy!
        return []
    elif num_colors == 1:
        # only one color: every symbol is identical, order is trivial
        return wubrg_tokens
    elif num_colors == 2:
        # Two colors: canonical order is the shortest clockwise walk along
        # the WUBRG circle (gw, rw, gu -- not wg, wr, ug).
        # Bug fix: this previously used plain WUBRG order, which reversed
        # the wrap-around pairs (gw, rw, gu) relative to the canonical
        # pair lists used by normalize().
        for step in (1, 2):
            for i in range(5):
                first, second = wubrg[i], wubrg[(i + step) % 5]
                if first in unique_tokens and second in unique_tokens:
                    return getOrderedTokens(wubrg_tokens, [first, second])
        # Unreachable: any two distinct positions on a 5-cycle are 1 or 2
        # steps apart in one direction or the other.
        raise ValueError("Two-color cost with no circle ordering?!")
    elif num_colors == 3:
        # three colors -- shard/wedge logic lives in getThreeColorOrder
        return getThreeColorOrder(wubrg_tokens, unique_tokens, wubrg)
    elif num_colors == 4:
        # four colors. Find the missing one; the order starts right after
        # it on the circle (the missing color itself matches no tokens).
        missing = (set(wubrg) - set(unique_tokens)).pop()
        missing_idx = wubrg.index(missing)
        order = [wubrg[(i + missing_idx) % 5] for i in range(5)]
        return getOrderedTokens(wubrg_tokens, order)
    elif num_colors == 5:
        return getOrderedTokens(wubrg_tokens, wubrg)
    else:
        # We should *never* get here
        raise ValueError("Why are there six colors in this mana cost.")
def normalize(mana):
    """Return `mana` rewritten in canonical token order, each token braced.

    Tokens are emitted in the order described in the notes above: X/Y/Z,
    numerics, snow, obligate colorless, 2-brids, allied hybrids, enemy
    hybrids, phyrexian, then normal colored symbols.  Unknown tokens are
    silently dropped.
    """
    tokens = tokenizeManaString(mana)
    normalized = []
    # X, Y, Z come first.
    normalized.extend(getOrderedTokens(tokens, ('x', 'y', 'z')))
    # Now numeric symbols, largest first.
    normalized.extend(getOrderedTokens(tokens, ('1000000', '100', '20', '16', '15', '14', '13', '12', '11', '10', '9', '8', '7', '6', '5', '4', '3', '2', '1', '0')))
    # snow -- bug fix: ('s') is just the string 's', not a tuple; it only
    # worked by accident because iterating 's' yields 's'.  Same for 'c'.
    normalized.extend(getOrderedTokens(tokens, ('s',)))
    # obligate colorless
    normalized.extend(getOrderedTokens(tokens, ('c',)))
    # And two-brids
    normalized.extend(getWUBRGTokens(tokens, ('2w', '2u', '2b', '2r', '2g')))
    # Allied hybrid
    normalized.extend(getWUBRGTokens(tokens, ('wu', 'ub', 'br', 'rg', 'gw')))
    # Enemy hybrid
    normalized.extend(getWUBRGTokens(tokens, ('wb', 'ur', 'bg', 'rw', 'gu')))
    # phyrexian
    normalized.extend(getWUBRGTokens(tokens, ('pw', 'pu', 'pb', 'pr', 'pg')))
    # normal!
    normalized.extend(getWUBRGTokens(tokens, ('w', 'u', 'b', 'r', 'g')))
    # We have sorted all the tokens. Now wrap them in braces and return.
    return ''.join('{' + token + '}' for token in normalized)


# Ad-hoc sample cost for manually exercising normalize(); not referenced
# elsewhere in this chunk.
test = 'zwbux{pw}{pu}{pg}{pr}sssywcwbx{2/u}{2u}{ur}{rg}{gu}3'
| {
"repo_name": "Ehnonymoose/delver",
"path": "server/mana.py",
"copies": "1",
"size": "8018",
"license": "mit",
"hash": 7283301391535508000,
"line_mean": 28.5867158672,
"line_max": 164,
"alpha_frac": 0.6273384884,
"autogenerated": false,
"ratio": 2.659369817578773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37867083059787726,
"avg_score": null,
"num_lines": null
} |
""" A table with WASM opcodes. """
import enum
# Kinds of immediate operand an instruction can carry, grouped by value:
# generic encodings (1-3), literal constants (10-13), index spaces (20-26).
class ArgType(enum.Enum):
TYPE = 1
BYTE = 2
U32 = 3
I32 = 10
I64 = 11
F32 = 12
F64 = 13
TYPEIDX = 20
TABLEIDX = 21
LOCALIDX = 22
BLOCKIDX = 23
FUNCIDX = 24
LABELIDX = 25
GLOBALIDX = 26
# Wasm index spaces (types, tables, locals, ...).
# NOTE(review): not referenced elsewhere in this chunk -- confirm usage.
class Space(enum.Enum):
TYPE = 0
TABLE = 1
LOCAL = 2
BLOCK = 3
FUNC = 4
LABEL = 5
# Note: left out 32bit opcodes at first. Added them later, but I might have
# missed some.
# Gargantuan table with all instructions in it.
# From this table different dictionaries are created. One for encoding
# and another for decoding wasm.
# Columns: mnemonic, opcode, operands, stack inputs, stack outputs,
# action function
# Trailing columns may be omitted; the derived dicts below supply defaults.
instruction_table = [
# Control instructions.
("unreachable", 0x00, (), (), ()),
("nop", 0x01),
("block", 0x02, (ArgType.TYPE,)),
("loop", 0x03, (ArgType.TYPE,)),
("if", 0x04, (ArgType.TYPE,)),
("else", 0x05),
("end", 0x0B, (), (), (), lambda i, v: ()),
("br", 0x0C, (ArgType.LABELIDX,)),
("br_if", 0x0D, (ArgType.LABELIDX,)),
("br_table", 0x0E, ("br_table",)),
("return", 0x0F),
("call", 0x10, (ArgType.FUNCIDX,)), # funcidx
(
"call_indirect",
0x11,
(ArgType.TYPEIDX, ArgType.TABLEIDX),
), # typeidx, tableidx
# Parametric instructions.
("drop", 0x1A),
("select", 0x1B),
# Variable access.
("local.get", 0x20, (ArgType.LOCALIDX,)),
("local.set", 0x21, (ArgType.LOCALIDX,)),
("local.tee", 0x22, (ArgType.LOCALIDX,)),
("global.get", 0x23, (ArgType.GLOBALIDX,)),
("global.set", 0x24, (ArgType.GLOBALIDX,)),
# Memory loads/stores: two U32 immediates each (presumably the memarg
# align + offset pair -- confirm against the wasm spec).
("i32.load", 0x28, (ArgType.U32, ArgType.U32)),
("i64.load", 0x29, (ArgType.U32, ArgType.U32)),
("f32.load", 0x2A, (ArgType.U32, ArgType.U32)),
("f64.load", 0x2B, (ArgType.U32, ArgType.U32)),
("i32.load8_s", 0x2C, (ArgType.U32, ArgType.U32)),
("i32.load8_u", 0x2D, (ArgType.U32, ArgType.U32)),
("i32.load16_s", 0x2E, (ArgType.U32, ArgType.U32)),
("i32.load16_u", 0x2F, (ArgType.U32, ArgType.U32)),
("i64.load8_s", 0x30, (ArgType.U32, ArgType.U32)),
("i64.load8_u", 0x31, (ArgType.U32, ArgType.U32)),
("i64.load16_s", 0x32, (ArgType.U32, ArgType.U32)),
("i64.load16_u", 0x33, (ArgType.U32, ArgType.U32)),
("i64.load32_s", 0x34, (ArgType.U32, ArgType.U32)),
("i64.load32_u", 0x35, (ArgType.U32, ArgType.U32)),
("i32.store", 0x36, (ArgType.U32, ArgType.U32)),
("i64.store", 0x37, (ArgType.U32, ArgType.U32)),
("f32.store", 0x38, (ArgType.U32, ArgType.U32)),
("f64.store", 0x39, (ArgType.U32, ArgType.U32)),
("i32.store8", 0x3A, (ArgType.U32, ArgType.U32)),
("i32.store16", 0x3B, (ArgType.U32, ArgType.U32)),
("i64.store8", 0x3C, (ArgType.U32, ArgType.U32)),
("i64.store16", 0x3D, (ArgType.U32, ArgType.U32)),
("i64.store32", 0x3E, (ArgType.U32, ArgType.U32)),
("memory.size", 0x3F, ("byte",), (), ("i32",)),
("memory.grow", 0x40, ("byte",), ("i32",), ("i32",)),
# Constants: the only rows (besides "end") with an evaluation action,
# so constant expressions can be folded by eval_expr.
(
"i32.const",
0x41,
(ArgType.I32,),
(),
("i32",),
lambda i, v: (i.args[0],),
),
(
"i64.const",
0x42,
(ArgType.I64,),
(),
("i64",),
lambda i, v: (i.args[0],),
),
(
"f32.const",
0x43,
(ArgType.F32,),
(),
("f32",),
lambda i, v: (i.args[0],),
),
(
"f64.const",
0x44,
(ArgType.F64,),
(),
("f64",),
lambda i, v: (i.args[0],),
),
# Comparison operators.
("i32.eqz", 0x45),
("i32.eq", 0x46),
("i32.ne", 0x47),
("i32.lt_s", 0x48),
("i32.lt_u", 0x49),
("i32.gt_s", 0x4A),
("i32.gt_u", 0x4B),
("i32.le_s", 0x4C),
("i32.le_u", 0x4D),
("i32.ge_s", 0x4E),
("i32.ge_u", 0x4F),
("i64.eqz", 0x50),
("i64.eq", 0x51),
("i64.ne", 0x52),
("i64.lt_s", 0x53),
("i64.lt_u", 0x54),
("i64.gt_s", 0x55),
("i64.gt_u", 0x56),
("i64.le_s", 0x57),
("i64.le_u", 0x58),
("i64.ge_s", 0x59),
("i64.ge_u", 0x5A),
("f32.eq", 0x5B),
("f32.ne", 0x5C),
("f32.lt", 0x5D),
("f32.gt", 0x5E),
("f32.le", 0x5F),
("f32.ge", 0x60),
("f64.eq", 0x61),
("f64.ne", 0x62),
("f64.lt", 0x63),
("f64.gt", 0x64),
("f64.le", 0x65),
("f64.ge", 0x66),
# Numeric operators (these rows also declare their stack effects).
("i32.clz", 0x67, (), ("i32",), ("i32",)),
("i32.ctz", 0x68, (), ("i32",), ("i32",)),
("i32.popcnt", 0x69, (), ("i32",), ("i32",)),
("i32.add", 0x6A, (), ("i32", "i32"), ("i32",)),
("i32.sub", 0x6B, (), ("i32", "i32"), ("i32",)),
("i32.mul", 0x6C, (), ("i32", "i32"), ("i32",)),
("i32.div_s", 0x6D, (), ("i32", "i32"), ("i32",)),
("i32.div_u", 0x6E, (), ("i32", "i32"), ("i32",)),
("i32.rem_s", 0x6F, (), ("i32", "i32"), ("i32",)),
("i32.rem_u", 0x70, (), ("i32", "i32"), ("i32",)),
("i32.and", 0x71, (), ("i32", "i32"), ("i32",)),
("i32.or", 0x72, (), ("i32", "i32"), ("i32",)),
("i32.xor", 0x73, (), ("i32", "i32"), ("i32",)),
("i32.shl", 0x74, (), ("i32", "i32"), ("i32",)),
("i32.shr_s", 0x75, (), ("i32", "i32"), ("i32",)),
("i32.shr_u", 0x76, (), ("i32", "i32"), ("i32",)),
("i32.rotl", 0x77, (), ("i32", "i32"), ("i32",)),
("i32.rotr", 0x78, (), ("i32", "i32"), ("i32",)),
("i64.clz", 0x79, (), ("i64",), ("i64",)),
("i64.ctz", 0x7A, (), ("i64",), ("i64",)),
("i64.popcnt", 0x7B, (), ("i64",), ("i64",)),
("i64.add", 0x7C, (), ("i64", "i64"), ("i64",)),
("i64.sub", 0x7D, (), ("i64", "i64"), ("i64",)),
("i64.mul", 0x7E, (), ("i64", "i64"), ("i64",)),
("i64.div_s", 0x7F, (), ("i64", "i64"), ("i64",)),
("i64.div_u", 0x80, (), ("i64", "i64"), ("i64",)),
("i64.rem_s", 0x81, (), ("i64", "i64"), ("i64",)),
("i64.rem_u", 0x82, (), ("i64", "i64"), ("i64",)),
("i64.and", 0x83, (), ("i64", "i64"), ("i64",)),
("i64.or", 0x84, (), ("i64", "i64"), ("i64",)),
("i64.xor", 0x85, (), ("i64", "i64"), ("i64",)),
("i64.shl", 0x86, (), ("i64", "i64"), ("i64",)),
("i64.shr_s", 0x87, (), ("i64", "i64"), ("i64",)),
("i64.shr_u", 0x88, (), ("i64", "i64"), ("i64",)),
("i64.rotl", 0x89, (), ("i64", "i64"), ("i64",)),
("i64.rotr", 0x8A, (), ("i64", "i64"), ("i64",)),
("f32.abs", 0x8B, (), ("f32",), ("f32",)),
("f32.neg", 0x8C, (), ("f32",), ("f32",)),
("f32.ceil", 0x8D, (), ("f32",), ("f32",)),
("f32.floor", 0x8E, (), ("f32",), ("f32",)),
("f32.trunc", 0x8F, (), ("f32",), ("f32",)),
("f32.nearest", 0x90, (), ("f32",), ("f32",)),
("f32.sqrt", 0x91, (), ("f32",), ("f32",)),
("f32.add", 0x92, (), ("f32", "f32"), ("f32",)),
("f32.sub", 0x93, (), ("f32", "f32"), ("f32",)),
("f32.mul", 0x94, (), ("f32", "f32"), ("f32",)),
("f32.div", 0x95, (), ("f32", "f32"), ("f32",)),
("f32.min", 0x96, (), ("f32", "f32"), ("f32",)),
("f32.max", 0x97, (), ("f32", "f32"), ("f32",)),
("f32.copysign", 0x98, (), ("f32", "f32"), ("f32",)),
("f64.abs", 0x99, (), ("f64",), ("f64",)),
("f64.neg", 0x9A, (), ("f64",), ("f64",)),
("f64.ceil", 0x9B, (), ("f64",), ("f64",)),
("f64.floor", 0x9C, (), ("f64",), ("f64",)),
("f64.trunc", 0x9D, (), ("f64",), ("f64",)),
("f64.nearest", 0x9E, (), ("f64",), ("f64",)),
("f64.sqrt", 0x9F, (), ("f64",), ("f64",)),
("f64.add", 0xA0, (), ("f64", "f64"), ("f64",)),
("f64.sub", 0xA1, (), ("f64", "f64"), ("f64",)),
("f64.mul", 0xA2, (), ("f64", "f64"), ("f64",)),
("f64.div", 0xA3, (), ("f64", "f64"), ("f64",)),
("f64.min", 0xA4, (), ("f64", "f64"), ("f64",)),
("f64.max", 0xA5, (), ("f64", "f64"), ("f64",)),
("f64.copysign", 0xA6, (), ("f64", "f64"), ("f64",)),
# Conversions.
("i32.wrap_i64", 0xA7),
("i32.trunc_f32_s", 0xA8, (), ("f32",), ("i32",)),
("i32.trunc_f32_u", 0xA9, (), ("f32",), ("i32",)),
("i32.trunc_f64_s", 0xAA, (), ("f64",), ("i32",)),
("i32.trunc_f64_u", 0xAB, (), ("f64",), ("i32",)),
("i64.extend_i32_s", 0xAC, (), ("i32",), ("i64",)),
("i64.extend_i32_u", 0xAD, (), ("i32",), ("i64",)),
("i64.trunc_f32_s", 0xAE, (), ("f32",), ("i64",)),
("i64.trunc_f32_u", 0xAF, (), ("f32",), ("i64",)),
("i64.trunc_f64_s", 0xB0, (), ("f64",), ("i64",)),
("i64.trunc_f64_u", 0xB1, (), ("f64",), ("i64",)),
("f32.convert_i32_s", 0xB2, (), ("i32",), ("f32",)),
("f32.convert_i32_u", 0xB3, (), ("i32",), ("f32",)),
("f32.convert_i64_s", 0xB4, (), ("i64",), ("f32",)),
("f32.convert_i64_u", 0xB5, (), ("i64",), ("f32",)),
("f32.demote_f64", 0xB6, (), ("f64",), ("f32",)),
("f64.convert_i32_s", 0xB7, (), ("i32",), ("f64",)),
("f64.convert_i32_u", 0xB8, (), ("i32",), ("f64",)),
("f64.convert_i64_s", 0xB9, (), ("i64",), ("f64",)),
("f64.convert_i64_u", 0xBA, (), ("i64",), ("f64",)),
("f64.promote_f32", 0xBB, (), ("f32",), ("f64",)),
("i32.reinterpret_f32", 0xBC, (), ("f32",), ("i32",)),
("i64.reinterpret_f64", 0xBD, (), ("f64",), ("i64",)),
("f32.reinterpret_i32", 0xBE, (), ("i32",), ("f32",)),
("f64.reinterpret_i64", 0xBF, (), ("i64",), ("f64",)),
("i32.extend8_s", 0xC0, (), ("i32",), ("i32",)),
("i32.extend16_s", 0xC1, (), ("i32",), ("i32",)),
("i64.extend8_s", 0xC2, (), ("i64",), ("i64",)),
("i64.extend16_s", 0xC3, (), ("i64",), ("i64",)),
("i64.extend32_s", 0xC4, (), ("i64",), ("i64",)),
# Saturating truncations: two-byte opcodes encoded as a (0xFC, n) pair.
("i32.trunc_sat_f32_s", (0xFC, 0x0), (), ("f32",), ("i32",)),
("i32.trunc_sat_f32_u", (0xFC, 0x1), (), ("f32",), ("i32",)),
("i32.trunc_sat_f64_s", (0xFC, 0x2), (), ("f64",), ("i32",)),
("i32.trunc_sat_f64_u", (0xFC, 0x3), (), ("f64",), ("i32",)),
("i64.trunc_sat_f32_s", (0xFC, 0x4), (), ("f32",), ("i64",)),
("i64.trunc_sat_f32_u", (0xFC, 0x5), (), ("f32",), ("i64",)),
("i64.trunc_sat_f64_s", (0xFC, 0x6), (), ("f64",), ("i64",)),
("i64.trunc_sat_f64_u", (0xFC, 0x7), (), ("f64",), ("i64",)),
]
# Lookup tables derived from instruction_table, keyed by mnemonic.
# OPCODES: mnemonic -> opcode (an int, or a (prefix, sub) tuple).
OPCODES = {r[0]: r[1] for r in instruction_table}
# REVERZ: opcode -> mnemonic (assumes opcodes are unique).
REVERZ = {r[1]: r[0] for r in instruction_table}
# OPERANDS: mnemonic -> tuple of immediate-operand kinds; defaults to ().
OPERANDS = {r[0]: r[2] if len(r) > 2 else () for r in instruction_table}
# EVAL: mnemonic -> (stack inputs, stack outputs, action) for constant
# evaluation; rows without an action column get ((), (), None).
EVAL = {
r[0]: (r[3], r[4], r[5]) if len(r) >= 6 else ((), (), None)
for r in instruction_table
}
# STACK_IO: mnemonic -> (stack inputs, stack outputs), or None when the
# row does not declare them.
STACK_IO = {
r[0]: (r[3], r[4]) if len(r) >= 5 else None for r in instruction_table
}
# Mnemonic sets grouping instructions by category, for quick membership
# tests elsewhere.
# All memory-store mnemonics.
STORE_OPS = {
"f64.store",
"f32.store",
"i64.store",
"i64.store8",
"i64.store16",
"i64.store32",
"i32.store",
"i32.store8",
"i32.store16",
}
# All memory-load mnemonics.
LOAD_OPS = {
"f64.load",
"f32.load",
"i64.load",
"i64.load8_u",
"i64.load8_s",
"i64.load16_u",
"i64.load16_s",
"i64.load32_u",
"i64.load32_s",
"i32.load",
"i32.load8_u",
"i32.load8_s",
"i32.load16_u",
"i32.load16_s",
}
# Two-operand arithmetic/bitwise mnemonics.
# NOTE(review): rotl/rotr, f32/f64 min/max/copysign are absent --
# presumably intentional; confirm against the consumers of this set.
BINOPS = {
"f64.add",
"f64.sub",
"f64.mul",
"f64.div",
"f32.add",
"f32.sub",
"f32.mul",
"f32.div",
"i64.add",
"i64.sub",
"i64.mul",
"i32.add",
"i32.sub",
"i32.mul",
"i64.div_s",
"i64.div_u",
"i32.div_s",
"i32.div_u",
"i32.rem_u",
"i32.rem_s",
"i64.rem_u",
"i64.rem_s",
"i64.and",
"i64.or",
"i64.xor",
"i64.shl",
"i64.shr_s",
"i64.shr_u",
"i32.and",
"i32.or",
"i32.xor",
"i32.shl",
"i32.shr_s",
"i32.shr_u",
}
# Integer width changes and int->float conversions.
CASTOPS = {
"i32.wrap_i64",
"i64.extend_i32_s",
"i64.extend_i32_u",
"f64.convert_i32_s",
"f64.convert_i32_u",
"f64.convert_i64_s",
"f64.convert_i64_u",
"f32.convert_i32_s",
"f32.convert_i32_u",
"f32.convert_i64_s",
"f32.convert_i64_u",
}
# Comparison mnemonics (including the eqz unary tests).
CMPOPS = {
"f64.eq",
"f64.ne",
"f64.ge",
"f64.gt",
"f64.le",
"f64.lt",
"f32.eq",
"f32.ne",
"f32.ge",
"f32.gt",
"f32.le",
"f32.lt",
"i32.eqz",
"i32.eq",
"i32.ne",
"i32.lt_s",
"i32.lt_u",
"i32.gt_s",
"i32.gt_u",
"i32.le_s",
"i32.le_u",
"i32.ge_s",
"i32.ge_u",
"i64.eqz",
"i64.eq",
"i64.ne",
"i64.lt_s",
"i64.lt_u",
"i64.gt_s",
"i64.gt_u",
"i64.le_s",
"i64.le_u",
"i64.ge_s",
"i64.ge_u",
}
# Generate an instructionset object that supports autocompletion
# Plain namespace node; attributes are attached to it dynamically.
class Instructionset:
pass
def _make_instructionset():
    """Build a nested Instructionset namespace from all known mnemonics.

    Each dotted mnemonic becomes a chain of attributes whose leaf holds
    the mnemonic string, e.g. I.i32.add == "i32.add".
    """
    root = Instructionset()
    for mnemonic in OPCODES:
        # '/' is not attribute-safe; map it to '_' (also in the stored value).
        mnemonic = mnemonic.replace("/", "_")
        *prefixes, leaf = mnemonic.split(".")
        node = root
        for prefix in prefixes:
            if not hasattr(node, prefix):
                setattr(node, prefix, Instructionset())
            node = getattr(node, prefix)
        setattr(node, leaf, mnemonic)
    return root


I = _make_instructionset()
# Constant-expression evaluator: runs an instruction sequence and returns
# the single (type, value) pair it must leave on the stack.
def eval_expr(expr):
""" Evaluate a sequence of instructions """
stack = []
for i in expr:
consumed_types, produced_types, action = EVAL[i.opcode]
if action is None:
# Only the const instructions and "end" register an action in
# instruction_table; everything else is not evaluatable here.
raise RuntimeError("Cannot evaluate {}".format(i.opcode))
# Gather stack values:
values = []
for t in consumed_types:
vt, v = stack.pop(-1)
assert vt == t
values.append(v)
# NOTE(review): values are gathered in pop order (last-pushed first).
# Currently unexercised -- no registered action consumes operands --
# but confirm the intended order before adding one that does.
# Perform magic action of instruction:
values = action(i, values)
# Push results on the stack:
assert len(values) == len(produced_types)
for vt, v in zip(produced_types, values):
stack.append((vt, v))
if len(stack) == 1:
return stack[0]
else:
raise ValueError("Expression does not leave value on stack!")
| {
"repo_name": "windelbouwman/ppci-mirror",
"path": "ppci/wasm/opcodes.py",
"copies": "1",
"size": "13301",
"license": "bsd-2-clause",
"hash": 3346913034764796400,
"line_mean": 29.2295454545,
"line_max": 75,
"alpha_frac": 0.4549282009,
"autogenerated": false,
"ratio": 2.3108061153578876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3265734316257888,
"avg_score": null,
"num_lines": null
} |
"""A tab that displays a >>> prompt.
This plugin is disabled by default because sending a Ctrl+C ,to a
subprocess is basically impossible on Windows and interrupts. If you
don't use Windows, get rid of the _ in this file's name and have fun
with it :)
"""
# FIXME: >>> while True: print("lel")
# TODO: test this on windows, this may turn out to be pretty broken :(
import io
import platform
import queue
import signal
import subprocess
import sys
import threading
import tkinter as tk
from porcupine import tabs, textwidget, utils
_WINDOWS = (platform.system() == 'Windows')


def _tupleindex(index):
    """Convert a Tk text index 'line.column' to an (int, int) tuple."""
    line, _, column = index.partition('.')
    return (int(line), int(column))
# Wraps a text widget around an interactive "python -i" subprocess:
# key bindings feed the child's stdin, and a daemon thread streams the
# child's stdout into the widget through a queue.
class PythonPrompt:
def __init__(self, textwidget, close_callback):
self.widget = textwidget
# called when the subprocess exits cleanly
self.close_callback = close_callback
self.widget.bind('<Return>', self._on_return)
self.widget.bind('<Control-c>', self._keyboard_interrupt)
self.widget.bind('<Control-C>', self._copy)
self.widget.bind('<Control-l>', self._clear)
self.widget.bind('<Control-L>', self._clear)
self.widget.bind('<Control-d>', self._send_eof)
self.widget.bind('<Control-D>', self._send_eof)
# without -u python buffers stdout and everything is one enter
# press late :( see python --help
self.process = subprocess.Popen(
[sys.executable, '-i', '-u'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0)
# the queuer thread is a daemon thread because it makes exiting
# porcupine easier and interrupting it isn't a problem
self._queue = queue.Queue()
threading.Thread(target=self._queuer, daemon=True).start()
self.widget.after_idle(self._queue_clearer)
def _keyboard_interrupt(self, event):
# Forward Ctrl+C to the child as SIGINT.
try:
self.process.send_signal(signal.SIGINT)
except ProcessLookupError:
# the subprocess has terminated, _queue_clearer should have
# taken care of it already
assert self.widget['state'] == 'disabled'
def _copy(self, event):
# Copy the current selection (if any) to the clipboard.
# i didn't find a way to do this like tkinter does it by default
try:
start, end = self.widget.tag_ranges('sel')
except ValueError:
# no selection
return
text = self.widget.get(start, end)
if text:
self.widget.clipboard_clear()
self.widget.clipboard_append(text)
def _clear(self, event):
# Wipe everything except the last (current) line.
self.widget.delete('1.0', 'end-1l')
def _send_eof(self, event):
# Closing the child's stdin delivers EOF, ending the session.
self.process.stdin.close()
def _on_return(self, event):
# Send the current input to the subprocess, but only if the cursor
# sits at or after the end of the child's last output.
end_of_output = _tupleindex(str(self.widget.tag_ranges('output')[-1]))
cursor = _tupleindex(self.widget.index('insert'))
end = _tupleindex(self.widget.index('end - 1 char'))
# (line, column) tuples compare nicely
if not (end_of_output <= cursor <= end):
return 'break'
# this happens when inputting multiple lines at once
if end_of_output[0] < cursor[0]:
end_of_output = (cursor[0], 0)
# this needs to return 'break' to allow pressing enter with the
# cursor anywhere on the line
text = self.widget.get('%d.%d' % end_of_output, 'end') # ends with \n
self.widget.insert('end', '\n')
self.widget.mark_set('insert', 'end')
self.process.stdin.write(text.encode('utf-8'))
self.process.stdin.flush()
return 'break'
def _queuer(self):
# Runs in the daemon thread: pump child stdout into self._queue.
while True:
output = self.process.stdout.read(io.DEFAULT_BUFFER_SIZE)
if not output:
# the process terminated, wait() will return the exit
# code immediately instead of actually waiting
self._queue.put(('exit', self.process.wait()))
break
self._queue.put(('output', output))
def _queue_clearer(self):
# Runs on the Tk event loop: drain one queued item, then reschedule.
try:
state, value = self._queue.get(block=False)
except queue.Empty:
# nothing there right now, let's come back later
self.widget.after(50, self._queue_clearer)
return
if state == 'exit':
if value == 0:
# success
self.close_callback()
else:
self.widget.insert(
'end', "\n\n***********************\n" +
"the subprocess exited with code %d" % value)
self.widget['state'] = 'disabled'
return
assert state == 'output'
if _WINDOWS:
# normalize CRLF line endings from the Windows child
value = value.replace(b'\r\n', b'\n')
self.widget.insert(
'end-1c', value.decode('utf-8', errors='replace'), 'output')
self.widget.see('end-1c')
# we got something, let's try again as soon as possible
self.widget.after_idle(self._queue_clearer)
class PromptTab(tabs.Tab):
    """A tab containing an interactive Python prompt with a scrollbar."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.title = "Interactive Prompt"
        self.textwidget = textwidget.ThemedText(
            self.content, width=1, height=1)
        self.textwidget.pack(side='left', fill='both', expand=True)
        self.prompt = PythonPrompt(self.textwidget, self.close)
        self.scrollbar = tk.Scrollbar(self.content)
        self.scrollbar.pack(side='left', fill='y')
        self.textwidget['yscrollcommand'] = self.scrollbar.set
        self.scrollbar['command'] = self.textwidget.yview
        # Bug fix: this previously bound self.on_destroy, which is not
        # defined (the handler below is named _on_destroy), so the
        # subprocess was never terminated on tab destruction.
        self.bind('<Destroy>', self._on_destroy, add=True)

    def on_focus(self):
        """Give keyboard focus to the prompt's text widget."""
        self.textwidget.focus()

    def _on_destroy(self, event):
        """Terminate the prompt subprocess when the tab is destroyed."""
        # TODO: what if terminating blocks? maybe a timeout and fall
        # back to killing?
        try:
            self.prompt.process.terminate()
        except ProcessLookupError:
            # it has been terminated already
            pass
def setup(editor):
    """Register the 'Run/Interactive Prompt' action with the editor."""
    def open_prompt_tab():
        # create the tab, wire up the editor-wide key bindings, show it
        tab = PromptTab(editor.tabmanager)
        utils.copy_bindings(editor, tab.textwidget)
        editor.tabmanager.add_tab(tab)
        editor.tabmanager.current_tab = tab

    editor.add_action(open_prompt_tab, "Run/Interactive Prompt",
                      "Ctrl+I", "<Control-i>")
| {
"repo_name": "PurpleMyst/porcupine",
"path": "more_plugins/pythonprompt.py",
"copies": "1",
"size": "6303",
"license": "mit",
"hash": 9134854285427765000,
"line_mean": 33.0702702703,
"line_max": 79,
"alpha_frac": 0.5917816913,
"autogenerated": false,
"ratio": 3.919776119402985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 185
} |
"""ATA over Ethernet Protocol."""
import struct
import dpkt
# NOTE(review): this module is Python 2 code (see the
# "except struct.error, e" syntax below); it will not import on Python 3.
class AOE(dpkt.Packet):
# AoE header fields: version/flags byte, error, major/minor address,
# command number, tag.
__hdr__ = (
('ver_fl', 'B', 0x10),
('err', 'B', 0),
('maj', 'H', 0),
('min', 'B', 0),
('cmd', 'B', 0),
('tag', 'I', 0),
)
# Registry mapping AoE command numbers to payload classes.
_cmdsw = {}
# 'ver' is the high nibble of ver_fl; 'fl' is the low nibble.
def _get_ver(self): return self.ver_fl >> 4
def _set_ver(self, ver): self.ver_fl = (ver << 4) | (self.ver_fl & 0xf)
ver = property(_get_ver, _set_ver)
def _get_fl(self): return self.ver_fl & 0xf
def _set_fl(self, fl): self.ver_fl = (self.ver_fl & 0xf0) | fl
fl = property(_get_fl, _set_fl)
def set_cmd(cls, cmd, pktclass):
# Register a payload class for a command number.
cls._cmdsw[cmd] = pktclass
set_cmd = classmethod(set_cmd)
def get_cmd(cls, cmd):
# Look up the payload class for a command number (KeyError if unknown).
return cls._cmdsw[cmd]
get_cmd = classmethod(get_cmd)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
try:
# Decode the payload with the registered command class and expose
# it as an attribute named after that class (e.g. self.aoeata).
self.data = self._cmdsw[self.cmd](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, struct.error, dpkt.UnpackError):
# Unknown command or short buffer: leave self.data as raw bytes.
pass
def pack_hdr(self):
try:
return dpkt.Packet.pack_hdr(self)
except struct.error, e:
# Re-raise struct errors as dpkt's error type (Python 2 syntax).
raise dpkt.PackError(str(e))
# AoE command numbers.
AOE_CMD_ATA = 0
AOE_CMD_CFG = 1
# Flag bit (1 << 3) in the 'fl' nibble; name suggests it marks responses
# -- confirm against the AoE specification.
AOE_FLAG_RSP = 1 << 3
# Auto-import an aoe<cmd> module (e.g. aoeata, aoecfg) for every AOE_CMD_*
# constant and register its payload class; missing modules are skipped.
# Python 2 code (dict.iteritems).
def __load_cmds():
prefix = 'AOE_CMD_'
g = globals()
for k, v in g.iteritems():
if k.startswith(prefix):
name = 'aoe' + k[len(prefix):].lower()
try:
mod = __import__(name, g)
except ImportError:
continue
AOE.set_cmd(v, getattr(mod, name.upper()))
# Populate the registry once, on first import.
if not AOE._cmdsw:
__load_cmds()
| {
"repo_name": "dproc/trex_odp_porting_integration",
"path": "scripts/external_libs/dpkt-1.8.6/dpkt/aoe.py",
"copies": "2",
"size": "1697",
"license": "apache-2.0",
"hash": 7738211281327974000,
"line_mean": 23.2428571429,
"line_max": 75,
"alpha_frac": 0.5085444903,
"autogenerated": false,
"ratio": 3.074275362318841,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9568534138333127,
"avg_score": 0.002857142857142857,
"num_lines": 70
} |
"""A Tape Head to the DNC.
This 'Tape Head' contains a 'Write Head' and a 'Read Head' as defined in the
DNC architecutre in DeepMind's Nature paper:
http://www.nature.com/nature/journal/vaop/ncurrent/full/nature20101.html
Author: Austin Derrow-Pinion
"""
import collections
import numpy as np
import sonnet as snt
import tensorflow as tf
from .. dnc.external_memory import ExternalMemory
from .. dnc.temporal_linkage import TemporalLinkage
from .. dnc.usage import Usage
# Recurrent state carried between timesteps of the TapeHead core: the read
# and write weightings, the allocation/free gates, and the nested states of
# the external-memory, temporal-linkage and usage sub-modules.
TapeHeadState = collections.namedtuple('TapeHeadState', (
'read_weights', 'write_weights', 'alloc_gate', 'free_gate', 'memory',
'linkage', 'usage'))
class TapeHead(snt.RNNCore):
"""A Tape Head that utilizes any number of read heads and one write head.
A normal computer simply writes to memory the exact value at an exact
address in memory. This is differentiable and therefore trainable to
write at useful locations in memory for a given task.
Temporal Linkage is used to keep track of the order things are written in.
This allows the write head to iterate forward and backwards through
temporally related elements written in memory.
Usage keeps track of what memory locations have and have not been used
recently. If a memory location has not been used for a long time, the
controller learns to free up this space to make it available for future
writes to memory.
There exist three different read modes:
B: Backward reads for reading the location in memory that was
written to before the previously read location.
C: Content based addressing is useful for storing references to
data.
F: Forward reads for reading the location in memory that was
written to after the previously read location.
The Temporal Linkage matrix, L_t in the paper, is used for both
forward and backward reads:
- Backwards read:
b_t = Transpose(L_t) * w_(t-1)
- Forwards read:
f_t = L_t * w_(t-1)
Content based addressing uses a similarity calculation. In the paper,
they used cosine similarity:
D(u, v) = (u * v) / (|u| * |v|)
A read strength, beta, and a read key, k, is emitted from the
controller and used in the equation for content based lookup on
memory M with N slots of memory each W words long:
c_t = C(M, k, beta)[i] =
exp{D(k, M[i,:]) * beta} / [sum_j exp{D(k, M[j,:]) * beta}
The vectors b_t, c_t, and f_t, are computed through the different
read mode operations. These together are used to calculate the final
weighting through a sum with the read mode vector pi_t. If pi_t[1]
dominates, then the read head will prioritize the backwards read mode.
If pi_t[2] dominates, then the content based addressing will be used.
If pi_t[3] dominates, then the forward read mode will be used.
w_t = pi_t[1] * b_t + pi_t[2] * c_t + pi_t[3] * f_t
The read head calculates the weights through these different read
modes, but the emitted read vector, r_t, is a weighted sum of the
contents in memory:
r_t = Transpose(M_t) * w_t
"""
def __init__(self,
memory_size=16,
word_size=16,
num_read_heads=1,
name='tape_head'):
"""Initialize a Tape Head used in a DNC.
Args:
memory_size: The number of memory slots (N in the DNC paper).
Default value is 16.
word_size: The width of each memory slot (W in the DNC paper).
Default value is 16.
num_read_heads: The number of read heads is unbounded in the DNC,
but the number of write heads was changed from unbounded in the
Neural Turing Machine to only 1 in the DNC. Default value is 1.
name: The name of the module (default 'tape_head').
"""
super(TapeHead, self).__init__(name=name)
self._memory_size = memory_size
self._word_size = word_size
self._num_read_heads = num_read_heads
with self._enter_variable_scope():
self._external_memory = ExternalMemory(
memory_size=self._memory_size, word_size=self._word_size)
self._linkage = TemporalLinkage(memory_size=self._memory_size)
self._usage = Usage(memory_size=self._memory_size)
self._state_size = TapeHeadState(
read_weights=tf.TensorShape([self._num_read_heads,
self._memory_size]),
write_weights=tf.TensorShape([self._memory_size]),
alloc_gate=tf.TensorShape([1]),
free_gate=tf.TensorShape([self._num_read_heads]),
memory=self._external_memory.state_size,
linkage=self._linkage.state_size,
usage=self._usage.state_size)
def _build(self, inputs, prev_state):
    """Compute one timestep of computation for the Tape Head.

    Args:
        inputs: A Tensor of shape `[batch_size, num_read_heads *
            word_size + 3 * word_size + 5 * num_read_heads + 3]` emitted
            from the DNC controller. This holds data that controls what
            this read head does.
        prev_state: An instance of `TapeHeadState` containing the
            previous state of this Tape Head.

    Returns:
        A tuple `(output, next_state)`. Where `output` is a Tensor of
        shape `[batch_size, num_read_heads, word_size]` representing the
        read vector result, `r_t`. The `next_state` is an instance of
        `TapeHeadState` representing the next state of this Tape Head after
        computation finishes.
    """
    # Unpack the controller's flat interface vector into named parts.
    (
        read_keys, read_strengths, write_key, write_strengths,
        erase_vector, write_vector, free_gates, allocation_gate,
        write_gate, read_modes
    ) = self.interface_parameters(inputs)

    # Allocation weighting a_t: proposes free memory slots to write to.
    allocation_weighting, usage_next_state = self._usage(
        prev_state.write_weights,
        prev_state.read_weights,
        free_gates,
        prev_state.usage)

    # Content-based addressing c_t^w for the single write head.
    write_content_weighting = self._external_memory.content_weights(
        write_key, write_strengths, prev_state.memory.memory)
    # reshape because num_write_heads is always 1 in a DNC
    write_content_weighting = tf.reshape(write_content_weighting,
                                         [-1, self._memory_size])

    # Blend allocation- and content-based addressing into w_t^w.
    write_weights = self.write_weighting(
        write_gate,
        allocation_gate,
        allocation_weighting,
        write_content_weighting)

    # Apply the write to memory and obtain the read-side content
    # weighting against the updated memory.
    content_weighting, memory_next_state = self._external_memory(
        read_keys,
        read_strengths,
        write_weights,
        erase_vector,
        write_vector,
        prev_state.memory)

    # Update temporal links with the new write, then derive the
    # forward/backward read weightings from the link matrix.
    linkage_next_state = self._linkage(write_weights, prev_state.linkage)
    (forward_weighting,
     backward_weighting) = self._linkage.directional_weights(
        linkage_next_state.linkage_matrix, prev_state.read_weights)

    # Mix backward/content/forward modes per read head: w_t^{r,i}.
    read_weights = self.read_weights(
        read_modes,
        backward_weighting,
        content_weighting,
        forward_weighting)

    # r_t: weighted sum over the updated memory rows (see class docs).
    read_vectors = tf.matmul(read_weights, memory_next_state.memory)

    return (
        read_vectors,
        TapeHeadState(
            read_weights=read_weights,
            write_weights=write_weights,
            alloc_gate=allocation_gate,
            free_gate=free_gates,
            memory=memory_next_state,
            linkage=linkage_next_state,
            usage=usage_next_state,
        )
    )
def write_weighting(self,
                    write_gate,
                    allocation_gate,
                    allocation_weighting,
                    write_content_weighting):
    """Compute the write weighting vector.

    The write weighting vector, `w_t^w`, is defined as:

        w_t^w = g_t^w * [g_t^a * a_t + (1 - g_t^a) * c_t^w]

    where `g_t^w` is the write gate, `g_t^a` is the allocation gate,
    `a_t` is the allocation weighting, and `c_t^w` is the write content
    weighting.

    Args:
        write_gate: A Tensor of shape `[batch_size, 1]` containing the
            write gate values from the interface parameters (`g_t^w`).
        allocation_gate: A Tensor of shape `[batch_size, 1]` containing
            the allocation gate values (`g_t^a`).
        allocation_weighting: A Tensor of shape
            `[batch_size, memory_size]` from the Usage class (`a_t`).
        write_content_weighting: A Tensor of shape
            `[batch_size, memory_size]` with the write content
            weighting values (`c_t^w`).

    Returns:
        A Tensor of shape `[batch_size, memory_size]` containing the
        write weighting values (`w_t^w`).
    """
    # The [batch_size, 1] gates broadcast across the memory dimension.
    gated_mix = (allocation_gate * allocation_weighting
                 + (1 - allocation_gate) * write_content_weighting)
    # [batch_size, memory_size]
    return write_gate * gated_mix
def read_weights(self,
                 read_modes,
                 backward_weighting,
                 content_weighting,
                 forward_weighting):
    """Compute the read weighting vector.

    The read weighting vector is written in the DNC paper as `w_t^{r,i}`
    for time `t` and read head `i`:

        w_t^{r,i} = pi_t^i[0] * b_t^i
                  + pi_t^i[1] * c_t^{r,i}
                  + pi_t^i[2] * f_t^{r,i}

    Args:
        read_modes: A Tensor of shape `[batch_size, num_read_heads, 3]`
            containing the read modes emitted by the DNC controller.
        backward_weighting: A Tensor of shape
            `[batch_size, num_read_heads, memory_size]` with the
            backward weighting values.
        content_weighting: A Tensor of shape
            `[batch_size, num_read_heads, memory_size]` with the
            content weighting values.
        forward_weighting: A Tensor of shape
            `[batch_size, num_read_heads, memory_size]` with the
            forward weighting values.

    Returns:
        A Tensor of shape `[batch_size, num_read_heads, memory_size]`.
    """
    weightings = (backward_weighting, content_weighting, forward_weighting)
    combined = None
    for mode_index, weighting in enumerate(weightings):
        # [batch_size, num_read_heads, 1]: the scalar mode for this head.
        mode = tf.slice(read_modes, [0, 0, mode_index], [-1, -1, 1])
        # Broadcast-multiply against [batch_size, num_read_heads,
        # memory_size], accumulating in the same order as the formula.
        term = tf.multiply(mode, weighting)
        combined = term if combined is None else combined + term
    return combined
def interface_parameters(self, interface_vector):
    """Extract the interface parameters from the interface vector.

    The interface vector is written as `xi_t` in the DNC paper for time
    `t`. Below is the equation of what the interface vector contains:
        xi_t = [
            k_t^{r,1}; ...; k_t^{r,R};          (R read keys)
            beta_t^{r,1}; ...; beta_t^{r,R};    (R read strengths)
            k_t^w;                              (the write key)
            beta_t^w;                           (the write strength)
            e_t;                                (the erase vector)
            v_t;                                (the write vector)
            f_t^1; ...; f_t^R;                  (R free gates)
            g_t^a;                              (the allocation gate)
            g_t^w;                              (the write gate)
            pi_t^1; ...; pi_t^R                 (R read modes)
        ]
    The read and write strengths are processed with the `oneplus` function
    to restrict the values in the domain `[1, infinity)`:
        oneplus(x) = 1 + log(1 + exp(x))
    The erase vector, free gates, allocation gate, and write gate, are
    processed with the logistic sigmoid function to constrain the values to
    the domain `[0, 1]`.
    The read modes are processed with the softmax function so that for any
    read mode, `pi_t^i`, the values are bound to the domain `[0, 1]` and
    the sum of the elements in the vector is equal to 1.

    Args:
        interface_vector: A Tensor of shape `[batch_size, num_read_heads *
            word_size + 3 * word_size + 5 * num_read_heads + 3]` containing
            the individual components emitted by the DNC controller. This
            is written in the DNC paper as `xi_t` for time `t`.

    Returns:
        A tuple `(read_keys, read_strengths, write_key, write_strengths,
        erase_vector, write_vector, free_gates, allocation_gate,
        write_gate, read_modes)` as explained in the description of this
        method.
    """
    def _get(shape, offset):
        # Slice prod(shape) scalars starting at `offset`, reshape to
        # [batch_size] + shape, and return the advanced offset too.
        size = np.prod(shape)
        output = interface_vector[:, offset:offset + size]
        return tf.reshape(output, shape=[-1] + shape), offset + size

    def _oneplus(x):
        # oneplus(x) = 1 + log(1 + exp(x)), mapping into [1, infinity).
        return 1 + tf.log(1 + tf.exp(x))

    # Walk the flat vector in exactly the order laid out in the
    # docstring; each _get call consumes one component.
    offset = 0
    read_keys, offset = _get([self._num_read_heads, self._word_size],
                             offset)
    _read_strengths, offset = _get([self._num_read_heads], offset)
    write_key, offset = _get([1, self._word_size], offset)
    _write_strengths, offset = _get([1], offset)
    _erase_vector, offset = _get([self._word_size], offset)
    write_vector, offset = _get([self._word_size], offset)
    _free_gates, offset = _get([self._num_read_heads], offset)
    _allocation_gate, offset = _get([1], offset)
    _write_gate, offset = _get([1], offset)
    _read_modes, offset = _get([self._num_read_heads, 3], offset)

    # Squash the raw slices into their documented value ranges.
    read_strengths = _oneplus(_read_strengths)
    write_strengths = _oneplus(_write_strengths)
    erase_vector = tf.sigmoid(_erase_vector)
    free_gates = tf.sigmoid(_free_gates)
    allocation_gate = tf.sigmoid(_allocation_gate)
    write_gate = tf.sigmoid(_write_gate)
    read_modes = tf.nn.softmax(_read_modes)
    return (
        read_keys, read_strengths, write_key, write_strengths,
        erase_vector, write_vector, free_gates, allocation_gate,
        write_gate, read_modes
    )
@property
def state_size(self):
    """Return a description of the state size.

    A `TapeHeadState` of `tf.TensorShape`s / sub-module state sizes,
    as constructed in `__init__`.
    """
    return self._state_size
@property
def output_size(self):
    """Return the output shape: `[num_read_heads, word_size]` per batch item."""
    return tf.TensorShape([self._num_read_heads, self._word_size])
| {
"repo_name": "derrowap/DNC-TensorFlow",
"path": "src/dnc/tape_head.py",
"copies": "1",
"size": "15602",
"license": "mit",
"hash": 4561000431350604000,
"line_mean": 41.7452054795,
"line_max": 79,
"alpha_frac": 0.5746699141,
"autogenerated": false,
"ratio": 3.9369164774160987,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5011586391516099,
"avg_score": null,
"num_lines": null
} |
# The 7 Atari games evaluated in Mnih et al. (2013).
_atari7 = ['BeamRider', 'Breakout', 'Enduro', 'Pong', 'Qbert', 'Seaquest', 'SpaceInvaders']
# 7 games emphasizing exploration (see the AtariExploration40M benchmark).
_atariexpl7 = ['Freeway', 'Gravitar', 'MontezumaRevenge', 'Pitfall', 'PrivateEye', 'Solaris', 'Venture']
# Global registry of benchmark dicts; mutated by register_benchmark().
_BENCHMARKS = []
def register_benchmark(benchmark):
    """Add *benchmark* to the global registry, rejecting duplicate names."""
    name = benchmark['name']
    if any(existing['name'] == name for existing in _BENCHMARKS):
        raise ValueError('Benchmark with name %s already registered!'%name)
    _BENCHMARKS.append(benchmark)
def list_benchmarks():
    """Return the names of every registered benchmark, in registration order."""
    return [entry['name'] for entry in _BENCHMARKS]
def get_benchmark(benchmark_name):
    """Return the registered benchmark called *benchmark_name*.

    Raises ValueError (listing the known names) when it is not registered.
    """
    match = next(
        (entry for entry in _BENCHMARKS if entry['name'] == benchmark_name),
        None)
    if match is not None:
        return match
    raise ValueError('%s not found! Known benchmarks: %s' % (benchmark_name, list_benchmarks()))
def get_task(benchmark, env_id):
    """Get a task by env_id. Return None if the benchmark doesn't have the env"""
    for task in benchmark['tasks']:
        if task['env_id'] == env_id:
            return task
    return None
# Gym environment id suffix: no frameskip, v4 ROM versions.
_ATARI_SUFFIX = 'NoFrameskip-v4'

register_benchmark({
    'name' : 'Atari200M',
    'description' :'7 Atari games from Mnih et al. (2013), with pixel observations, 200M frames',
    'tasks' : [{'env_id' : _game + _ATARI_SUFFIX, 'trials' : 2, 'num_timesteps' : int(200e6)} for _game in _atari7]
})

register_benchmark({
    'name' : 'Atari40M',
    'description' :'7 Atari games from Mnih et al. (2013), with pixel observations, 40M frames',
    'tasks' : [{'env_id' : _game + _ATARI_SUFFIX, 'trials' : 2, 'num_timesteps' : int(40e6)} for _game in _atari7]
})

register_benchmark({
    'name' : 'Atari1Hr',
    'description' :'7 Atari games from Mnih et al. (2013), with pixel observations, 1 hour of walltime',
    'tasks' : [{'env_id' : _game + _ATARI_SUFFIX, 'trials' : 2, 'num_seconds' : 60*60} for _game in _atari7]
})

register_benchmark({
    'name' : 'AtariExploration40M',
    'description' :'7 Atari games emphasizing exploration, with pixel observations, 40M frames',
    'tasks' : [{'env_id' : _game + _ATARI_SUFFIX, 'trials' : 2, 'num_timesteps' : int(40e6)} for _game in _atariexpl7]
})

# MuJoCo continuous-control tasks.
_mujocosmall = [
    'InvertedDoublePendulum-v1', 'InvertedPendulum-v1',
    'HalfCheetah-v1', 'Hopper-v1', 'Walker2d-v1',
    'Reacher-v1', 'Swimmer-v1']
register_benchmark({
    'name' : 'Mujoco1M',
    'description' : 'Some small 2D MuJoCo tasks, run for 1M timesteps',
    'tasks' : [{'env_id' : _envid, 'trials' : 3, 'num_timesteps' : int(1e6)} for _envid in _mujocosmall]
})

# Roboschool (open-source) counterparts of the MuJoCo tasks.
_roboschool_mujoco = [
    'RoboschoolInvertedDoublePendulum-v0', 'RoboschoolInvertedPendulum-v0', # cartpole
    'RoboschoolHalfCheetah-v0', 'RoboschoolHopper-v0', 'RoboschoolWalker2d-v0', # forward walkers
    'RoboschoolReacher-v0'
]
register_benchmark({
    'name' : 'RoboschoolMujoco2M',
    'description' : 'Same small 2D tasks, still improving up to 2M',
    'tasks' : [{'env_id' : _envid, 'trials' : 3, 'num_timesteps' : int(2e6)} for _envid in _roboschool_mujoco]
})

_atari50 = [ # actually 49
    'Alien', 'Amidar', 'Assault', 'Asterix', 'Asteroids',
    'Atlantis', 'BankHeist', 'BattleZone', 'BeamRider', 'Bowling',
    'Boxing', 'Breakout', 'Centipede', 'ChopperCommand', 'CrazyClimber',
    'DemonAttack', 'DoubleDunk', 'Enduro', 'FishingDerby', 'Freeway',
    'Frostbite', 'Gopher', 'Gravitar', 'IceHockey', 'Jamesbond',
    'Kangaroo', 'Krull', 'KungFuMaster', 'MontezumaRevenge', 'MsPacman',
    'NameThisGame', 'Pitfall', 'Pong', 'PrivateEye', 'Qbert',
    'Riverraid', 'RoadRunner', 'Robotank', 'Seaquest', 'SpaceInvaders',
    'StarGunner', 'Tennis', 'TimePilot', 'Tutankham', 'UpNDown',
    'Venture', 'VideoPinball', 'WizardOfWor', 'Zaxxon',
]
# Fix: the description was copy-pasted from the 7-game Atari40M benchmark,
# but this benchmark registers the 49-game suite in _atari50.
register_benchmark({
    'name' : 'Atari50_40M',
    'description' :'49 Atari games, with pixel observations, 40M frames',
    'tasks' : [{'env_id' : _game + _ATARI_SUFFIX, 'trials' : 3, 'num_timesteps' : int(40e6)} for _game in _atari50]
})
| {
"repo_name": "machine-intelligence/rl-teacher-atari",
"path": "agents/pposgd-mpi/pposgd_mpi/bench/benchmarks.py",
"copies": "3",
"size": "3963",
"license": "mit",
"hash": 4242218768454199300,
"line_mean": 41.6129032258,
"line_max": 119,
"alpha_frac": 0.6278072168,
"autogenerated": false,
"ratio": 2.7237113402061857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9785476800036619,
"avg_score": 0.01320835139391339,
"num_lines": 93
} |
## AtariTrainer.py Dana Hughes 21-Sept-2017
##
## Class to perform training and evaluation on Atari agents.
import numpy as np
class AtariTrainer:
    """
    Runs training and evaluation episodes for an agent playing an Atari game.
    """
def __init__(self, environment, agent, counter, **kwargs):
"""
Load the game and create a display using pygame
"""
self.environment = environment
# Hang on to the provided controller and replay memory
self.agent = agent
self.eval_agent = kwargs.get('eval_agent', agent)
self.evaluate = False
# Maximum number of no-op that can be performed at the start of an episode
self.noop_max = kwargs.get('noop_max', 30)
self.action_repeat = kwargs.get('action_repeat', 4)
self.counter = counter
# Listeners for storing parameters, tensorboard, etc.
self.listeners = []
def add_listener(self, listener):
"""
"""
self.listeners.append(listener)
def learn_episode(self):
    """
    Allow for controller to learn while playing the game

    Returns the episode's total (uncapped) game score.
    """
    # Reset the game to start a new episode, and let the agent know
    self.environment.reset_game()
    self.agent.start_episode()

    num_lives = self.environment.lives()
    score = 0

    for listener in self.listeners:
        listener.start_episode({})

    # Wait a random number of frames before starting
    # NOTE(review): np.random.randint raises ValueError when
    # self.noop_max == 0 — confirm noop_max is always positive.
    for i in range(np.random.randint(self.noop_max)):
        self.environment.act(0)

    while not self.environment.terminal():
        state = self.environment.get_state()

        # Have the agent observe the environment, then act
        self.agent.observe(state)
        action, Q = self.agent.act()

        # Run the action 4 times
        reward = 0.0
        for i in range(self.action_repeat):
            reward += self.environment.act(action)
        score += reward  # score accumulates the uncapped reward
        # NOTE(review): counter placement assumed to be one step per agent
        # action (not per repeated frame) — confirm against the original.
        self.counter.step()

        # Cap reward to be between -1 and 1
        reward = min(max(reward, -1.0), 1.0)

        for listener in self.listeners:
            listener.record({'Q': np.max(Q), 'reward': reward, 'action': action})

        # Losing a life is treated as a terminal transition for learning.
        is_terminal = self.environment.terminal() or self.environment.lives() != num_lives
        num_lives = self.environment.lives()

        self.agent.learn(action, reward, is_terminal)

    for listener in self.listeners:
        listener.end_episode({'score': score})

    return score
def play_episode(self, num_noop = 0):
    """
    Allow the eval agent to play

    Args:
        num_noop: number of no-op actions performed before play begins.

    Returns the total (uncapped) game score for the episode.
    """
    total_score = 0

    # Reset the game to start a new episode
    self.environment.reset_game()
    # self.environment.display()

    # Perform a certain number of noops
    for i in range(num_noop):
        _ = self.environment.act(0)

    while not self.environment.terminal():
        state = self.environment.get_state()
        # self.environment.display()

        # The evaluation agent observes and acts; no learning happens here.
        self.eval_agent.observe(state)
        action, Q = self.eval_agent.act()

        # Repeat the chosen action, accumulating the raw (uncapped) reward.
        for i in range(self.action_repeat):
            reward = self.environment.act(action)
            total_score += reward
return total_score | {
"repo_name": "danathughes/AtariRL",
"path": "AtariTrainer.py",
"copies": "1",
"size": "2771",
"license": "mit",
"hash": -9189570839213579000,
"line_mean": 21.9090909091,
"line_max": 85,
"alpha_frac": 0.6820642367,
"autogenerated": false,
"ratio": 3.263839811542992,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4445904048242992,
"avg_score": null,
"num_lines": null
} |
""" a task for waiting on a Batch Apex job to complete """
from datetime import datetime
from typing import Sequence, Optional
from cumulusci.utils import parse_api_datetime
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.core.exceptions import SalesforceException
# AsyncApexJob Status values that mean the job has finished running.
COMPLETED_STATUSES = ["Completed", "Aborted", "Failed"]
# Finished because a user aborted the job.
STOPPED_STATUSES = ["Aborted"]
# Finished with a system failure.
FAILED_STATUSES = ["Failed"]
class BatchApexWait(BaseSalesforceApiTask):
    """BatchApexWait polls an org until the latest batch job or queueable job
    for an apex class completes or fails."""

    name = "BatchApexWait"
    # CreatedDate of the first job found; later polls only consider jobs
    # created at or after this timestamp.
    original_created_date = None

    task_options = {
        "class_name": {
            "description": "Name of the Apex class to wait for.",
            "required": True,
        },
        "poll_interval": {
            "description": "Seconds to wait before polling for batch or queueable job completion. "
            "Defaults to 10 seconds."
        },
    }

    def _run_task(self):
        """Poll until the job completes; raise on abort, failure or batch errors."""
        self.poll_interval_s = int(self.options.get("poll_interval", 10))

        self._poll()  # will block until poll_complete

        self.logger.info("Job is complete.")

        summary = self.summarize_subjobs(self.subjobs)
        failed_batches = self.failed_batches(self.subjobs)
        job_aborted = summary["AnyAborted"]
        job_failed = summary[
            "AnyFailed"
        ]  # note that a failed sub-job is different than a failed batch
        # per https://help.salesforce.com/articleView?id=code_apex_job.htm&type=5

        if job_aborted:
            raise SalesforceException("Job was aborted by a user.")
        elif job_failed:
            raise SalesforceException("Job experienced a system failure.")
        elif failed_batches:
            self.logger.info("There have been some batch failures.")
            raise SalesforceException(
                f"There were batch errors: {repr(failed_batches)}"
            )
        elif not summary["CountsAddUp"]:
            # Mismatched counts are logged but do not fail the task.
            self.logger.info("The final record counts do not add up.")
            self.logger.info("This is probably related to W-1132237")
            self.logger.info(repr(summary))

        if len(self.subjobs) > 1:
            subjob_summary = f" in {len(self.subjobs)} sub-jobs"
        else:
            subjob_summary = ""
        self.logger.info(
            f"{self.options['class_name']} took {summary['ElapsedTime']} seconds to process {summary['TotalJobItems']} batches{subjob_summary}."
        )

    def failed_batches(self, subjobs: Sequence[dict]):
        """Return a trimmed dict for every sub-job that reported batch errors."""
        failed_batches = []
        for subjob in subjobs:
            if subjob["NumberOfErrors"]:
                failed_batches.append(
                    {
                        key: value
                        for key, value in subjob.items()
                        if key
                        in {
                            "Id",
                            "Status",
                            "ExtendedStatus",
                            "NumberOfErrors",
                            "JobItemsProcessed",
                            "TotalJobItems",
                        }
                    }
                )
        return failed_batches

    def _poll_action(self):
        # get batch status
        if not self.original_created_date:
            # First poll: find the most recent job for the class and anchor
            # all later polls to its CreatedDate.
            query_results = self.tooling.query(self._batch_query(date_limit=None))
            if not query_results["records"]:
                raise SalesforceException(f"No {self.options['class_name']} job found.")
            self.original_created_date = parse_api_datetime(
                query_results["records"][0]["CreatedDate"]
            )
        else:
            # Later polls: pick up the anchored job plus any chained sub-jobs
            # created after it.
            query_results = self.tooling.query(
                self._batch_query(date_limit=self.original_created_date)
            )
        self.subjobs = query_results["records"]
        current_subjob = self.subjobs[0]

        summary = self.summarize_subjobs(self.subjobs)

        if len(self.subjobs) > 1:
            subjob_info = f" in {len(self.subjobs)} sub-jobs."
        else:
            subjob_info = ""

        self.logger.info(
            f"{self.options['class_name']}: "
            f"Job: {current_subjob['Id']} "
            f"{summary['JobItemsProcessed']} of {summary['TotalJobItems']} "
            f"({summary['NumberOfErrors']} failures)" + subjob_info
        )

        self.poll_complete = summary["Completed"]

    def summarize_subjobs(self, subjobs: Sequence[dict]):
        """Aggregate sub-job records into totals and completion/failure flags."""
        def reduce_key(valname: str, summary_func):
            return summary_func(subjob[valname] for subjob in subjobs)

        rc = {
            "JobItemsProcessed": reduce_key("JobItemsProcessed", sum),
            "TotalJobItems": reduce_key("TotalJobItems", sum),
            "NumberOfErrors": reduce_key("NumberOfErrors", sum),
            "Completed": all(
                subjob["Status"] in COMPLETED_STATUSES for subjob in subjobs
            ),
            "AnyAborted": any(
                subjob["Status"] in STOPPED_STATUSES for subjob in subjobs
            ),
            "AnyFailed": any(subjob["Status"] in FAILED_STATUSES for subjob in subjobs),
        }
        rc["Success"] = rc["NumberOfErrors"] == 0
        rc["ElapsedTime"] = self.elapsed_time(subjobs)
        rc["CountsAddUp"] = rc["JobItemsProcessed"] == rc["TotalJobItems"]
        return rc

    def elapsed_time(self, subjobs: Sequence[dict]):
        """ returns the time (in seconds) that the subjobs took, if complete """
        completed_dates = [
            subjob["CompletedDate"] for subjob in subjobs if subjob.get("CompletedDate")
        ]
        if completed_dates:
            most_recently_completed = max(completed_dates)
            completed_date = parse_api_datetime(most_recently_completed)
        else:
            # NOTE(review): datetime.now() is naive local time while the API
            # timestamps are presumably UTC — verify this mixed arithmetic.
            completed_date = datetime.now()
        created_date = parse_api_datetime(
            min(subjob["CreatedDate"] for subjob in subjobs)
        )
        td = completed_date - created_date
        return td.total_seconds()

    def _batch_query(self, date_limit: Optional[datetime] = None):
        """Build the Tooling API SOQL query for the class's async jobs.

        Without a date_limit: latest job only (LIMIT 1). With one: all jobs
        created at or after that timestamp.
        """
        if not date_limit:
            limit = " LIMIT 1 "
            date_clause = " "
        else:
            limit = " "
            date_clause = f" AND CreatedDate >= {date_limit.isoformat()}Z "
        query = (
            "SELECT Id, ApexClass.Name, Status, ExtendedStatus, TotalJobItems, "
            "JobItemsProcessed, NumberOfErrors, CreatedDate, CompletedDate "
            "FROM AsyncApexJob "
            "WHERE JobType IN ('BatchApex','Queueable') "
            + f"AND ApexClass.Name='{self.options['class_name']}' "
            + date_clause
            + " ORDER BY CreatedDate DESC "
            + limit
        )
        return query
| {
"repo_name": "SalesforceFoundation/CumulusCI",
"path": "cumulusci/tasks/apex/batch.py",
"copies": "1",
"size": "6781",
"license": "bsd-3-clause",
"hash": -7715836974647470000,
"line_mean": 36.6722222222,
"line_max": 144,
"alpha_frac": 0.5645185076,
"autogenerated": false,
"ratio": 4.172923076923077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5237441584523077,
"avg_score": null,
"num_lines": null
} |
""" a task for waiting on a Batch Apex job to complete """
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.core.exceptions import SalesforceException
import arrow
COMPLETED_STATUSES = ['Completed']
class BatchApexWait(BaseSalesforceApiTask):
    """ BatchApexWait polls an org until the latest batch job
    for an apex class completes or fails """
    name = 'BatchApexWait'
    # Sentinel; replaced by the queried AsyncApexJob record on the first poll.
    batch = object()

    task_options = {
        'class_name': {
            'description': 'Name of the Apex class to wait for.',
            'required': True
        },
        'poll_interval': {
            'description': 'Seconds to wait before polling for batch job completion. ' \
                           'Defaults to 10 seconds.'
        }
    }

    def _run_task(self):
        """Poll until the batch job completes; raise if any batches failed."""
        self.poll_interval_s = int(self.options.get('poll_interval', 10))

        self._poll()  # will block until poll_complete

        self.logger.info('Job is complete.')

        if not self.success:
            self.logger.info('There were some batch failures.')
            raise SalesforceException(self.batch['ExtendedStatus'])

        self.logger.info('%s took %d seconds to process %d batches.',
                         self.batch['ApexClass']['Name'],
                         self.delta,
                         self.batch['TotalJobItems'])

        return self.success

    def _poll_action(self):
        # get batch status
        query_results = self.tooling.query(self._batch_query)

        self.batch = query_results['records'][0]
        self.logger.info('%s: %d of %d (%d failures)',
                         self.batch['ApexClass']['Name'],
                         self.batch['JobItemsProcessed'],
                         self.batch['TotalJobItems'],
                         self.batch['NumberOfErrors'])

        self.poll_complete = not self._poll_again()

    def _poll_again(self):
        return self.batch['Status'] not in COMPLETED_STATUSES

    @property
    def success(self):
        """ returns True if all batches succeeded """
        # Bug fix: use ``==`` instead of ``is``. Identity comparison of ints
        # only works by accident for CPython's cached small integers and
        # breaks once TotalJobItems exceeds 256 (or counts come from a
        # parser that returns fresh int objects).
        return (self.batch['JobItemsProcessed'] == self.batch['TotalJobItems']) and \
               (self.batch['NumberOfErrors'] == 0)

    @property
    def delta(self):
        """ returns the time (in seconds) that the batch took, if complete """
        td = arrow.get(self.batch['CompletedDate']) - \
             arrow.get(self.batch['CreatedDate'])
        return td.total_seconds()

    @property
    def _batch_query(self):
        return 'SELECT Id, ApexClass.Name, Status, ExtendedStatus, TotalJobItems, ' \
               'JobItemsProcessed, NumberOfErrors, CreatedDate, CompletedDate ' \
               'FROM AsyncApexJob ' \
               'WHERE JobType=\'BatchApex\' '\
               'AND ApexClass.Name=\'{}\' ' \
               'ORDER BY CreatedDate DESC ' \
               'LIMIT 1'.format(self.options['class_name'])
| {
"repo_name": "e02d96ec16/CumulusCI",
"path": "cumulusci/tasks/apex/batch.py",
"copies": "1",
"size": "2908",
"license": "bsd-3-clause",
"hash": -2306581599191575000,
"line_mean": 34.0361445783,
"line_max": 88,
"alpha_frac": 0.57565337,
"autogenerated": false,
"ratio": 4.239067055393586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011896171228191821,
"num_lines": 83
} |
""" a task for waiting on a specific custom settings value """
from simple_salesforce.exceptions import SalesforceError
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.utils import process_bool_arg
class CustomSettingValueWait(BaseSalesforceApiTask):
    """ CustomSettingValueWait polls an org until the specific value exists in a custom settings field """

    name = "CustomSettingValueWait"

    task_options = {
        "object": {
            "description": "Name of the Hierarchical Custom Settings object to query. Can include the %%%NAMESPACE%%% token. ",
            "required": True,
        },
        "field": {
            "description": "Name of the field on the Custom Settings to query. Can include the %%%NAMESPACE%%% token. ",
            "required": True,
        },
        "value": {
            "description": "Value of the field to wait for (String, Integer or Boolean). ",
            "required": True,
        },
        "managed": {
            "description": (
                "If True, will insert the project's namespace prefix. "
                "Defaults to False or no namespace."
            ),
            "required": False,
        },
        "namespaced": {
            "description": (
                "If True, the %%%NAMESPACE%%% token "
                "will get replaced with the namespace prefix for the object and field."
                "Defaults to False."
            ),
            "required": False,
        },
        "poll_interval": {
            "description": (
                "Seconds to wait before polling for batch job completion. "
                "Defaults to 10 seconds."
            )
        },
    }

    def _run_task(self):
        """Block until the configured field holds the configured value."""
        self.poll_interval_s = int(self.options.get("poll_interval", 10))

        # Retrieve polling object/field/value options
        self.object_name = self.options["object"]
        self.field_name = self.options["field"]
        self.check_value = self.options["value"]

        # Process namespace tokens
        self._apply_namespace()

        # will block until poll_complete
        self._poll()

        self.logger.info("Value Matched.")

        return True

    def _poll_action(self):
        """One poll: query the settings object and test the org-default record."""
        try:
            query_results = self.sf.query(self._object_query)
        except SalesforceError as e:
            # Translate the cryptic missing-column error into a hint about
            # the actual cause (non-hierarchical settings objects).
            message = e.content[0]["message"]
            if "No such column 'SetupOwnerId'" in message:
                message = "Only Hierarchical Custom Settings objects are supported."
            raise TaskOptionsError(f"Query Error: {message}")

        self.record = None
        for row in query_results["records"]:
            # Keep the row whose SetupOwnerId starts with "00D" — presumably
            # the org-wide default record; verify against the settings model.
            if row["SetupOwnerId"].startswith("00D"):
                self.record = row

        if self.record:
            self.poll_complete = not self._poll_again()
        else:
            self.logger.info(
                f"{self.field_name}: Looking for {self.check_value} and found no custom settings record"
            )

    def _poll_again(self):
        return not self.success

    def _apply_namespace(self):
        # Process namespace tokens
        namespace = self.project_config.project__package__namespace
        # Explicit options win; otherwise infer from the org's installed
        # packages / configured namespace.
        if "managed" in self.options:
            managed = process_bool_arg(self.options["managed"])
        else:
            managed = (
                bool(namespace) and namespace in self.org_config.installed_packages
            )
        if "namespaced" in self.options:
            namespaced = process_bool_arg(self.options["namespaced"])
        else:
            namespaced = bool(namespace) and self.org_config.namespace == namespace

        namespace_prefix = ""
        if namespace and (managed or namespaced):
            namespace_prefix = namespace + "__"
        self.object_name = self.object_name.replace("%%%NAMESPACE%%%", namespace_prefix)
        self.field_name = self.field_name.replace("%%%NAMESPACE%%%", namespace_prefix)

    @property
    def success(self):
        """True when the record's field value equals the configured value."""
        # API field keys may differ in case from the configured name.
        lower_case_record = {k.lower(): v for k, v in self.record.items()}
        self.field_value = lower_case_record[self.field_name.lower()]

        # Coerce both sides to a common type before comparing.
        if isinstance(self.field_value, bool):
            self.check_value = process_bool_arg(self.check_value)
            self.field_value = process_bool_arg(self.field_value)

        elif isinstance(self.field_value, (int, float)):
            self.check_value = float(self.check_value)
            self.field_value = float(self.field_value)

        elif isinstance(self.field_value, str):
            self.check_value = str(self.check_value).lower()
            self.field_value = str(self.field_value).lower()

        self.logger.info(
            f"{self.field_name}: Looking for {self.check_value} and found {self.field_value}"
        )

        return self.field_value == self.check_value

    @property
    def _object_query(self):
        return f"SELECT SetupOwnerId, {self.field_name} FROM {self.object_name}"
| {
"repo_name": "SalesforceFoundation/CumulusCI",
"path": "cumulusci/tasks/salesforce/custom_settings_wait.py",
"copies": "1",
"size": "5010",
"license": "bsd-3-clause",
"hash": -8274162256345123000,
"line_mean": 36.1111111111,
"line_max": 127,
"alpha_frac": 0.5882235529,
"autogenerated": false,
"ratio": 4.29305912596401,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.538128267886401,
"avg_score": null,
"num_lines": null
} |
"""A tasklet decorator.
Tasklets are a way to write concurrently running functions without
threads; tasklets are executed by an event loop and can suspend
themselves blocking for I/O or some other operation using a yield
statement. The notion of a blocking operation is abstracted into the
Future class, but a tasklet may also yield an RPC in order to wait for
that RPC to complete.
The @tasklet decorator wraps a generator function so that when it is
called, a Future is returned while the generator is executed by the
event loop. For example:
@tasklet
def foo():
a = yield <some Future>
b = yield <another Future>
raise Return(a + b)
def main():
f = foo()
x = f.get_result()
print x
Note that blocking until the Future's result is available using
get_result() is somewhat inefficient (though not vastly -- it is not
busy-waiting). In most cases such code should be rewritten as a tasklet
instead:
@tasklet
def main_tasklet():
f = foo()
x = yield f
print x
Calling a tasklet automatically schedules it with the event loop:
def main():
f = main_tasklet()
eventloop.run() # Run until no tasklets left to do
f.done() # Returns True
As a special feature, if the wrapped function is not a generator
function, its return value is returned via the Future. This makes the
following two equivalent:
@tasklet
def foo():
return 42
@tasklet
def foo():
if False: yield # The presence of 'yield' makes foo a generator
raise Return(42) # Or, after PEP 380, return 42
This feature (inspired by Monocle) is handy in case you are
implementing an interface that expects tasklets but you have no need to
suspend -- there's no need to insert a dummy yield in order to make
the tasklet into a generator.
"""
import collections
import logging
import os
import sys
import types
from .google_imports import apiproxy_stub_map
from .google_imports import apiproxy_rpc
from .google_imports import datastore_errors
from .google_imports import datastore_rpc
from .google_imports import namespace_manager
from . import eventloop
from . import utils
__all__ = ['Return', 'tasklet', 'synctasklet', 'toplevel', 'sleep',
'add_flow_exception', 'get_return_value',
'get_context', 'set_context',
'make_default_context', 'make_context',
'Future', 'MultiFuture', 'QueueFuture', 'SerialQueueFuture',
'ReducingFuture',
]
_logging_debug = utils.logging_debug
def _is_generator(obj):
"""Helper to test for a generator object.
NOTE: This tests for the (iterable) object returned by calling a
generator function, not for a generator function.
"""
return isinstance(obj, types.GeneratorType)
class _State(utils.threading_local):
    """Hold thread-local state."""

    current_context = None

    def __init__(self):
        super(_State, self).__init__()
        self.all_pending = set()

    def add_pending(self, fut):
        """Begin tracking *fut* as a pending Future."""
        _logging_debug('all_pending: add %s', fut)
        self.all_pending.add(fut)

    def remove_pending(self, fut, status='success'):
        """Stop tracking *fut*, logging whether it was actually tracked."""
        if fut not in self.all_pending:
            _logging_debug('all_pending: %s: not found %s', status, fut)
            return
        _logging_debug('all_pending: %s: remove %s', status, fut)
        self.all_pending.remove(fut)

    def clear_all_pending(self):
        """Forget every tracked pending Future."""
        if not self.all_pending:
            _logging_debug('all_pending: clear no-op')
            return
        logging.info('all_pending: clear %s', self.all_pending)
        self.all_pending.clear()

    def dump_all_pending(self, verbose=False):
        """Render each pending Future as a line (or full dump when verbose)."""
        entries = [
            fut.dump() + ('\n' + '-' * 40) if verbose else fut.dump_stack()
            for fut in self.all_pending
        ]
        return '\n'.join(entries)
# Per-thread bookkeeping instance for pending Futures.
_state = _State()

# Tuple of exceptions that should not be logged (except in debug mode).
_flow_exceptions = ()
def add_flow_exception(exc):
    """Add an exception that should not be logged.

    The argument must be a subclass of Exception.
    """
    global _flow_exceptions
    if not (isinstance(exc, type) and issubclass(exc, Exception)):
        raise TypeError('Expected an Exception subclass, got %r' % (exc,))
    updated = set(_flow_exceptions)
    updated.add(exc)
    _flow_exceptions = tuple(updated)
def _init_flow_exceptions():
  """Internal helper to (re)initialize _flow_exceptions.

  Always registers datastore_errors.Rollback; additionally registers
  webob.exc.HTTPException when webob is importable.
  """
  global _flow_exceptions
  _flow_exceptions = ()
  add_flow_exception(datastore_errors.Rollback)
  try:
    from webob import exc
  except ImportError:
    return
  add_flow_exception(exc.HTTPException)
_init_flow_exceptions()
class Future(object):
  """A Future has 0 or more callbacks.

  The callbacks will be called when the result is ready.

  NOTE: This is somewhat inspired but not conformant to the Future interface
  defined by PEP 3148.  It is also inspired (and tries to be somewhat
  compatible with) the App Engine specific UserRPC and MultiRpc classes.
  """
  # TODO: Trim the API; there are too many ways to do the same thing.
  # TODO: Compare to Monocle's much simpler Callback class.

  # Constants for state property.
  IDLE = apiproxy_rpc.RPC.IDLE  # Not yet running (unused)
  RUNNING = apiproxy_rpc.RPC.RUNNING  # Not yet completed.
  FINISHING = apiproxy_rpc.RPC.FINISHING  # Completed.

  _geninfo = None  # Extra info about suspended generator.

  def __init__(self, info=None):
    """Constructor.  *info* is an optional string describing this Future's purpose."""
    # TODO: Make done a method, to match PEP 3148?
    __ndb_debug__ = 'SKIP'  # Hide this frame from self._where
    self._info = info  # Info from the caller about this Future's purpose.
    self._where = utils.get_stack()
    self._context = None
    self._reset()

  def _reset(self):
    # (Re)initialize completion state and the two callback queues.
    self._done = False
    self._result = None
    self._exception = None
    self._traceback = None
    self._callbacks = []
    self._immediate_callbacks = []
    _state.add_pending(self)
    self._next = None  # Links suspended Futures together in a stack.

  # TODO: Add a __del__ that complains if neither get_exception() nor
  # check_success() was ever called?  What if it's not even done?

  def __repr__(self):
    # Shows current state plus the first non-tasklets.py creation stack line.
    if self._done:
      if self._exception is not None:
        state = 'exception %s: %s' % (self._exception.__class__.__name__,
                                      self._exception)
      else:
        state = 'result %r' % (self._result,)
    else:
      state = 'pending'
    line = '?'
    for line in self._where:
      if 'tasklets.py' not in line:
        break
    if self._info:
      line += ' for %s' % self._info
    if self._geninfo:
      line += ' %s' % self._geninfo
    return '<%s %x created by %s; %s>' % (
        self.__class__.__name__, id(self), line, state)

  def dump(self):
    """Return the waiting chain plus the full creation stack."""
    return '%s\nCreated by %s' % (self.dump_stack(),
                                  '\n called by '.join(self._where))

  def dump_stack(self):
    """Return the chain of Futures this one is (transitively) waiting for."""
    lines = []
    fut = self
    while fut is not None:
      lines.append(str(fut))
      fut = fut._next
    return '\n waiting for '.join(lines)

  def add_callback(self, callback, *args, **kwds):
    """Schedule *callback* on the event loop once this Future is done."""
    if self._done:
      eventloop.queue_call(None, callback, *args, **kwds)
    else:
      self._callbacks.append((callback, args, kwds))

  def add_immediate_callback(self, callback, *args, **kwds):
    """Like add_callback(), but invoked synchronously upon completion."""
    if self._done:
      callback(*args, **kwds)
    else:
      self._immediate_callbacks.append((callback, args, kwds))

  def set_result(self, result):
    """Mark this Future done with *result* and fire all callbacks."""
    if self._done:
      raise RuntimeError('Result cannot be set twice.')
    self._result = result
    self._done = True
    _state.remove_pending(self)
    for callback, args, kwds in self._immediate_callbacks:
      callback(*args, **kwds)
    for callback, args, kwds in self._callbacks:
      eventloop.queue_call(None, callback, *args, **kwds)

  def set_exception(self, exc, tb=None):
    """Mark this Future done with exception *exc* and fire all callbacks."""
    if not isinstance(exc, BaseException):
      raise TypeError('exc must be an Exception; received %r' % exc)
    if self._done:
      raise RuntimeError('Exception cannot be set twice.')
    self._exception = exc
    self._traceback = tb
    self._done = True
    _state.remove_pending(self, status='fail')
    for callback, args, kwds in self._immediate_callbacks:
      callback(*args, **kwds)
    for callback, args, kwds in self._callbacks:
      eventloop.queue_call(None, callback, *args, **kwds)

  def done(self):
    """Return True once a result or exception has been set."""
    return self._done

  @property
  def state(self):
    # This is just for compatibility with UserRPC and MultiRpc.
    # A Future is considered running as soon as it is created.
    if self._done:
      return self.FINISHING
    else:
      return self.RUNNING

  def wait(self):
    """Run the event loop until this Future is done.

    If the event loop runs dry while we are still pending, a deadlock
    is reported and a RuntimeError is set as this Future's exception.
    """
    if self._done:
      return
    ev = eventloop.get_event_loop()
    while not self._done:
      if not ev.run1():
        logging.info('Deadlock in %s', self)
        logging.info('All pending Futures:\n%s', _state.dump_all_pending())
        _logging_debug('All pending Futures (verbose):\n%s',
                       _state.dump_all_pending(verbose=True))
        self.set_exception(RuntimeError('Deadlock waiting for %s' % self))

  def get_exception(self):
    """Wait, then return the exception set (or None)."""
    self.wait()
    return self._exception

  def get_traceback(self):
    """Wait, then return the traceback set (or None)."""
    self.wait()
    return self._traceback

  def check_success(self):
    """Wait, then re-raise the exception if one was set."""
    self.wait()
    if self._exception is not None:
      raise self._exception.__class__, self._exception, self._traceback

  def get_result(self):
    """Wait, then return the result (re-raising the exception if one was set)."""
    self.check_success()
    return self._result

  # TODO: Have a tasklet that does this
  @classmethod
  def wait_any(cls, futures):
    """Run the event loop until any of *futures* is done; return that one."""
    # TODO: Flatten MultiRpcs.
    waiting_on = set(futures)
    ev = eventloop.get_event_loop()
    while waiting_on:
      for f in waiting_on:
        if f.state == cls.FINISHING:
          return f
      ev.run1()
    return None

  # TODO: Have a tasklet that does this
  @classmethod
  def wait_all(cls, futures):
    """Run the event loop until all of *futures* are done."""
    # TODO: Flatten MultiRpcs.
    waiting_on = set(futures)
    ev = eventloop.get_event_loop()
    while waiting_on:
      waiting_on = set(f for f in waiting_on if f.state == cls.RUNNING)
      ev.run1()

  def _help_tasklet_along(self, ns, gen, val=None, exc=None, tb=None):
    """Advance the generator *gen* one step and dispatch on what it yields.

    The step runs under namespace *ns* and this Future's saved context.
    A yielded RPC or Future suspends the tasklet until it completes; a
    yielded tuple/list is wrapped in a MultiFuture; StopIteration sets
    our result; any other exception sets our exception.
    """
    info = utils.gen_info(gen)
    __ndb_debug__ = info
    try:
      save_context = get_context()
      save_namespace = namespace_manager.get_namespace()
      try:
        set_context(self._context)
        namespace_manager.set_namespace(ns)
        if exc is not None:
          _logging_debug('Throwing %s(%s) into %s',
                         exc.__class__.__name__, exc, info)
          value = gen.throw(exc.__class__, exc, tb)
        else:
          _logging_debug('Sending %r to %s', val, info)
          value = gen.send(val)
          self._context = get_context()
      finally:
        ns = namespace_manager.get_namespace()
        set_context(save_context)
        namespace_manager.set_namespace(save_namespace)
    except StopIteration, err:
      result = get_return_value(err)
      _logging_debug('%s returned %r', info, result)
      self.set_result(result)
      return
    except GeneratorExit:
      # In Python 2.5, this derives from Exception, but we don't want
      # to handle it like other Exception instances.  So we catch and
      # re-raise it immediately.  See issue 127.  http://goo.gl/2p5Pn
      # TODO: Remove when Python 2.5 is no longer supported.
      raise
    except Exception, err:
      _, _, tb = sys.exc_info()
      if isinstance(err, _flow_exceptions):
        # Flow exceptions aren't logged except in "heavy debug" mode,
        # and then only at DEBUG level, without a traceback.
        _logging_debug('%s raised %s(%s)',
                       info, err.__class__.__name__, err)
      elif utils.DEBUG and logging.getLogger().level < logging.DEBUG:
        # In "heavy debug" mode, log a warning with traceback.
        # (This is the same condition as used in utils.logging_debug().)
        logging.warning('%s raised %s(%s)',
                        info, err.__class__.__name__, err, exc_info=True)
      else:
        # Otherwise, log a warning without a traceback.
        logging.warning('%s raised %s(%s)', info, err.__class__.__name__, err)
      self.set_exception(err, tb)
      return
    else:
      _logging_debug('%s yielded %r', info, value)
      if isinstance(value, (apiproxy_stub_map.UserRPC,
                            datastore_rpc.MultiRpc)):
        # TODO: Tail recursion if the RPC is already complete.
        eventloop.queue_rpc(value, self._on_rpc_completion, value, ns, gen)
        return
      if isinstance(value, Future):
        # TODO: Tail recursion if the Future is already done.
        if self._next:
          raise RuntimeError('Future has already completed yet next is %r' %
                             self._next)
        self._next = value
        self._geninfo = utils.gen_info(gen)
        _logging_debug('%s is now blocked waiting for %s', self, value)
        value.add_callback(self._on_future_completion, value, ns, gen)
        return
      if isinstance(value, (tuple, list)):
        # Arrange for yield to return a list of results (not Futures).
        info = 'multi-yield from %s' % utils.gen_info(gen)
        mfut = MultiFuture(info)
        try:
          for subfuture in value:
            mfut.add_dependent(subfuture)
          mfut.complete()
        except GeneratorExit:
          raise
        except Exception, err:
          _, _, tb = sys.exc_info()
          mfut.set_exception(err, tb)
        mfut.add_callback(self._on_future_completion, mfut, ns, gen)
        return
      if _is_generator(value):
        # TODO: emulate PEP 380 here?
        raise NotImplementedError('Cannot defer to another generator.')
      raise RuntimeError('A tasklet should not yield a plain value: '
                         '%.200s yielded %.200r' % (info, value))

  def _on_rpc_completion(self, rpc, ns, gen):
    # Callback fired when a yielded RPC completes: resume the tasklet
    # with the RPC's result or exception.
    try:
      result = rpc.get_result()
    except GeneratorExit:
      raise
    except Exception, err:
      _, _, tb = sys.exc_info()
      self._help_tasklet_along(ns, gen, exc=err, tb=tb)
    else:
      self._help_tasklet_along(ns, gen, result)

  def _on_future_completion(self, future, ns, gen):
    # Callback fired when a yielded Future completes: unblock and resume.
    if self._next is future:
      self._next = None
      self._geninfo = None
      _logging_debug('%s is no longer blocked waiting for %s', self, future)
    exc = future.get_exception()
    if exc is not None:
      self._help_tasklet_along(ns, gen, exc=exc, tb=future.get_traceback())
    else:
      val = future.get_result()  # This won't raise an exception.
      self._help_tasklet_along(ns, gen, val)
def sleep(dt):
  """Public function to sleep some time.

  Returns a Future whose result (None) becomes available after *dt*
  seconds, so a tasklet can do:

    yield tasklets.sleep(0.5)  # Sleep for half a sec.
  """
  future = Future('sleep(%.3f)' % dt)
  eventloop.queue_call(dt, future.set_result, None)
  return future
class MultiFuture(Future):
  """A Future that depends on multiple other Futures.

  This is used internally by 'v1, v2, ... = yield f1, f2, ...'; the
  semantics (e.g. error handling) are constrained by that use case.

  The protocol from the caller's POV is:

    mf = MultiFuture()
    mf.add_dependent(<some other Future>)  -OR-  mf.putq(<some value>)
    mf.add_dependent(<some other Future>)  -OR-  mf.putq(<some value>)
      .
      . (More mf.add_dependent() and/or mf.putq() calls)
      .
    mf.complete()  # No more dependents will be added.
      .
      . (Time passes)
      .
    results = mf.get_result()

  Now, results is a list of results from all dependent Futures in
  the order in which they were added.

  It is legal to add the same dependent multiple times.

  Callbacks can be added at any point.

  From a dependent Future POV, there's nothing to be done: a callback
  is automatically added to each dependent Future which will signal
  its completion to the MultiFuture.

  Error handling: if any dependent future raises an error, it is
  propagated to mf.  To force an early error, you can call
  mf.set_exception() instead of mf.complete().  After this you can't
  call mf.add_dependent() or mf.putq() any more.
  """

  def __init__(self, info=None):
    __ndb_debug__ = 'SKIP'  # Hide this frame from self._where
    self._full = False  # True once complete()/set_exception() was called.
    self._dependents = set()  # Dependents not yet done.
    self._results = []  # All dependents, in insertion order.
    super(MultiFuture, self).__init__(info=info)

  def __repr__(self):
    # TODO: This may be invoked before __init__() returns,
    # from Future.__init__().  Beware.
    line = super(MultiFuture, self).__repr__()
    lines = [line]
    for fut in self._results:
      lines.append(fut.dump_stack().replace('\n', '\n '))
    return '\n waiting for '.join(lines)

  # TODO: Maybe rename this method, since completion of a Future/RPC
  # already means something else.  But to what?
  def complete(self):
    """Signal that no more dependents will be added."""
    if self._full:
      raise RuntimeError('MultiFuture cannot complete twice.')
    self._full = True
    if not self._dependents:
      self._finish()

  # TODO: Maybe don't overload set_exception() with this?
  def set_exception(self, exc, tb=None):
    """Force an error; also implicitly stops accepting dependents."""
    self._full = True
    super(MultiFuture, self).set_exception(exc, tb)

  def _finish(self):
    # Collect the dependents' results (in insertion order) into a list
    # and set it as our own result; any failure becomes our exception.
    if not self._full:
      raise RuntimeError('MultiFuture cannot finish until completed.')
    if self._dependents:
      raise RuntimeError('MultiFuture cannot finish whilst waiting for '
                         'dependents %r' % self._dependents)
    if self._done:
      raise RuntimeError('MultiFuture done before finishing.')
    try:
      result = [r.get_result() for r in self._results]
    except GeneratorExit:
      raise
    except Exception, err:
      _, _, tb = sys.exc_info()
      self.set_exception(err, tb)
    else:
      self.set_result(result)

  def putq(self, value):
    """Add a plain value or a Future as a dependent."""
    if isinstance(value, Future):
      fut = value
    else:
      fut = Future()
      fut.set_result(value)
    self.add_dependent(fut)

  def add_dependent(self, fut):
    """Add a dependent Future (a list is wrapped in a nested MultiFuture)."""
    if isinstance(fut, list):
      mfut = MultiFuture()
      map(mfut.add_dependent, fut)
      mfut.complete()
      fut = mfut
    elif not isinstance(fut, Future):
      raise TypeError('Expected Future, received %s: %r' % (type(fut), fut))
    if self._full:
      raise RuntimeError('MultiFuture cannot add a dependent once complete.')
    self._results.append(fut)
    if fut not in self._dependents:
      self._dependents.add(fut)
      fut.add_callback(self._signal_dependent_done, fut)

  def _signal_dependent_done(self, fut):
    # Callback fired when a dependent completes; finish once all are done.
    self._dependents.remove(fut)
    if self._full and not self._dependents and not self._done:
      self._finish()
class QueueFuture(Future):
  """A Queue following the same protocol as MultiFuture.

  However, instead of returning results as a list, it lets you
  retrieve results as soon as they are ready, one at a time, using
  getq().  The Future itself finishes with a result of None when the
  last result is ready (regardless of whether it was retrieved).

  The getq() method returns a Future which blocks until the next
  result is ready, and then returns that result.  Each getq() call
  retrieves one unique result.  Extra getq() calls after the last
  result is already returned return EOFError as their Future's
  exception.  (I.e., q.getq() returns a Future as always, but yielding
  that Future raises EOFError.)

  NOTE: Values can also be pushed directly via .putq(value).  However
  there is no flow control -- if the producer is faster than the
  consumer, the queue will grow unbounded.
  """
  # TODO: Refactor to share code with MultiFuture.

  def __init__(self, info=None):
    self._full = False  # True once complete()/set_exception() was called.
    self._dependents = set()  # Dependent Futures not yet completed.
    self._completed = collections.deque()  # Ready (exc, tb, val) triples.
    self._waiting = collections.deque()  # getq() Futures awaiting a result.
    # Invariant: at least one of _completed and _waiting is empty.
    # Also: _full and not _dependents <==> _done.
    super(QueueFuture, self).__init__(info=info)

  # TODO: __repr__

  def complete(self):
    """Signal that no more dependents or values will be added."""
    if self._full:
      # BUG FIX: this message used to say 'MultiFuture cannot complete
      # twice.', an apparent copy/paste slip from the class above.
      raise RuntimeError('QueueFuture cannot complete twice.')
    self._full = True
    if not self._dependents:
      self.set_result(None)
      self._mark_finished()

  def set_exception(self, exc, tb=None):
    """Fail the queue; outstanding/extra getq() calls receive *exc*."""
    self._full = True
    super(QueueFuture, self).set_exception(exc, tb)
    if not self._dependents:
      self._mark_finished()

  def putq(self, value):
    """Add a plain value or a Future to the queue."""
    if isinstance(value, Future):
      fut = value
    else:
      fut = Future()
      fut.set_result(value)
    self.add_dependent(fut)

  def add_dependent(self, fut):
    """Add a dependent Future whose outcome is queued once it is done."""
    if not isinstance(fut, Future):
      raise TypeError('fut must be a Future instance; received %r' % fut)
    if self._full:
      # BUG FIX: message previously read 'QueueFuture add dependent once
      # complete.' -- the 'cannot' was missing.
      raise RuntimeError('QueueFuture cannot add dependent once complete.')
    if fut not in self._dependents:
      self._dependents.add(fut)
      fut.add_callback(self._signal_dependent_done, fut)

  def _signal_dependent_done(self, fut):
    # Move a completed dependent's outcome to a waiter if one is queued,
    # otherwise buffer it; finish once full and no dependents remain.
    if not fut.done():
      raise RuntimeError('Future not done before signalling dependant done.')
    self._dependents.remove(fut)
    exc = fut.get_exception()
    tb = fut.get_traceback()
    val = None
    if exc is None:
      val = fut.get_result()
    if self._waiting:
      waiter = self._waiting.popleft()
      self._pass_result(waiter, exc, tb, val)
    else:
      self._completed.append((exc, tb, val))
    if self._full and not self._dependents and not self._done:
      self.set_result(None)
      self._mark_finished()

  def _mark_finished(self):
    # Release all outstanding getq() waiters with EOF (or our exception).
    if not self.done():
      raise RuntimeError('Future not done before marking as finished.')
    while self._waiting:
      waiter = self._waiting.popleft()
      self._pass_eof(waiter)

  def getq(self):
    """Return a Future for the next available result.

    After the last result has been handed out, the returned Future's
    exception is EOFError (or the exception set via set_exception()).
    """
    fut = Future()
    if self._completed:
      exc, tb, val = self._completed.popleft()
      self._pass_result(fut, exc, tb, val)
    elif self._full and not self._dependents:
      self._pass_eof(fut)
    else:
      self._waiting.append(fut)
    return fut

  def _pass_eof(self, fut):
    # Deliver end-of-queue to *fut*: our own exception if set, else EOFError.
    if not self._done:
      raise RuntimeError('QueueFuture cannot pass EOF until done.')
    exc = self.get_exception()
    if exc is not None:
      tb = self.get_traceback()
    else:
      exc = EOFError('Queue is empty')
      tb = None
    self._pass_result(fut, exc, tb, None)

  def _pass_result(self, fut, exc, tb, val):
    # Deliver either an exception or a value to *fut*.
    if exc is not None:
      fut.set_exception(exc, tb)
    else:
      fut.set_result(val)
class SerialQueueFuture(Future):
  """Like QueueFuture but maintains the order of insertion.

  This class is used by Query operations.

  Invariants:

  - At least one of _queue and _waiting is empty.
  - The Futures in _waiting are always pending.

  (The Futures in _queue may be pending or completed.)

  In the discussion below, add_dependent() is treated the same way as
  putq().

  If putq() is ahead of getq(), the situation is like this:

                         putq()
                         v
    _queue: [f1, f2, ...]; _waiting: []
    ^
    getq()

  Here, putq() appends a Future to the right of _queue, and getq()
  removes one from the left.

  If getq() is ahead of putq(), it's like this:

              putq()
              v
    _queue: []; _waiting: [f1, f2, ...]
                                       ^
                                       getq()

  Here, putq() removes a Future from the left of _waiting, and getq()
  appends one to the right.

  When both are empty, putq() appends a Future to the right of _queue,
  while getq() appends one to the right of _waiting.

  The _full flag means that no more calls to putq() will be made; it
  is set by calling either complete() or set_exception().

  Calling complete() signals that no more putq() calls will be made.
  If getq() is behind, subsequent getq() calls will eat up _queue
  until it is empty, and after that will return a Future that passes
  EOFError (note that getq() itself never raises EOFError).  If getq()
  is ahead when complete() is called, the Futures in _waiting are all
  passed an EOFError exception (thereby eating up _waiting).

  If, instead of complete(), set_exception() is called, the exception
  and traceback set there will be used instead of EOFError.
  """

  def __init__(self, info=None):
    self._full = False  # True once complete()/set_exception() was called.
    self._queue = collections.deque()  # Futures produced but not yet consumed.
    self._waiting = collections.deque()  # getq() Futures awaiting a value.
    super(SerialQueueFuture, self).__init__(info=info)

  # TODO: __repr__

  def complete(self):
    """Signal that no more values will be put; waiters get EOFError."""
    if self._full:
      raise RuntimeError('SerialQueueFuture cannot complete twice.')
    self._full = True
    while self._waiting:
      waiter = self._waiting.popleft()
      waiter.set_exception(EOFError('Queue is empty'))
    if not self._queue:
      self.set_result(None)

  def set_exception(self, exc, tb=None):
    """Fail the queue; release all waiters with the same exception."""
    self._full = True
    super(SerialQueueFuture, self).set_exception(exc, tb)
    while self._waiting:
      waiter = self._waiting.popleft()
      waiter.set_exception(exc, tb)

  def putq(self, value):
    """Add a value (or Future); a plain value satisfies a waiter directly."""
    if isinstance(value, Future):
      fut = value
    else:
      if self._waiting:
        waiter = self._waiting.popleft()
        waiter.set_result(value)
        return
      fut = Future()
      fut.set_result(value)
    self.add_dependent(fut)

  def add_dependent(self, fut):
    """Add a dependent Future, wiring its outcome to a waiter if one exists."""
    if not isinstance(fut, Future):
      raise TypeError('fut must be a Future instance; received %r' % fut)
    if self._full:
      raise RuntimeError('SerialQueueFuture cannot add dependent '
                         'once complete.')
    if self._waiting:
      waiter = self._waiting.popleft()
      fut.add_callback(_transfer_result, fut, waiter)
    else:
      self._queue.append(fut)

  def getq(self):
    """Return a Future for the next value, preserving insertion order."""
    if self._queue:
      fut = self._queue.popleft()
      # TODO: Isn't it better to call self.set_result(None) in complete()?
      if not self._queue and self._full and not self._done:
        self.set_result(None)
    else:
      fut = Future()
      if self._full:
        if not self._done:
          raise RuntimeError('self._queue should be non-empty.')
        err = self.get_exception()
        if err is not None:
          tb = self.get_traceback()
        else:
          err = EOFError('Queue is empty')
          tb = None
        fut.set_exception(err, tb)
      else:
        self._waiting.append(fut)
    return fut
def _transfer_result(fut1, fut2):
"""Helper to transfer result or errors from one Future to another."""
exc = fut1.get_exception()
if exc is not None:
tb = fut1.get_traceback()
fut2.set_exception(exc, tb)
else:
val = fut1.get_result()
fut2.set_result(val)
class ReducingFuture(Future):
  """A Queue following the same protocol as MultiFuture.

  However the result, instead of being a list of results of dependent
  Futures, is computed by calling a 'reducer' tasklet.  The reducer tasklet
  takes a list of values and returns a single value.  It may be called
  multiple times on sublists of values and should behave like
  e.g. sum().

  NOTE: The reducer input values may be reordered compared to the
  order in which they were added to the queue.
  """
  # TODO: Refactor to reuse some code with MultiFuture.

  def __init__(self, reducer, info=None, batch_size=20):
    self._reducer = reducer  # Callable reducing a list of values to one value.
    self._batch_size = batch_size  # Reduce eagerly once this many values queue up.
    self._full = False
    self._dependents = set()
    self._completed = collections.deque()
    self._queue = collections.deque()
    super(ReducingFuture, self).__init__(info=info)

  # TODO: __repr__

  def complete(self):
    """Signal that no more dependents will be added."""
    if self._full:
      raise RuntimeError('ReducingFuture cannot complete twice.')
    self._full = True
    if not self._dependents:
      self._mark_finished()

  def set_exception(self, exc, tb=None):
    """Fail the future; discard any values still queued for reduction."""
    self._full = True
    self._queue.clear()
    super(ReducingFuture, self).set_exception(exc, tb)

  def putq(self, value):
    """Add a plain value or a Future to be reduced."""
    if isinstance(value, Future):
      fut = value
    else:
      fut = Future()
      fut.set_result(value)
    self.add_dependent(fut)

  def add_dependent(self, fut):
    """Add a dependent Future (refused once complete() was called)."""
    if self._full:
      raise RuntimeError('ReducingFuture cannot add dependent once complete.')
    self._internal_add_dependent(fut)

  def _internal_add_dependent(self, fut):
    # Also used for intermediate reducer results, even after _full is set.
    if not isinstance(fut, Future):
      raise TypeError('fut must be a Future; received %r' % fut)
    if fut not in self._dependents:
      self._dependents.add(fut)
      fut.add_callback(self._signal_dependent_done, fut)

  def _signal_dependent_done(self, fut):
    # Queue a completed dependent's value; reduce eagerly in batches.
    if not fut.done():
      raise RuntimeError('Future not done before signalling dependant done.')
    self._dependents.remove(fut)
    if self._done:
      return  # Already done.
    try:
      val = fut.get_result()
    except GeneratorExit:
      raise
    except Exception, err:
      _, _, tb = sys.exc_info()
      self.set_exception(err, tb)
      return
    self._queue.append(val)
    if len(self._queue) >= self._batch_size:
      todo = list(self._queue)
      self._queue.clear()
      try:
        nval = self._reducer(todo)
      except GeneratorExit:
        raise
      except Exception, err:
        _, _, tb = sys.exc_info()
        self.set_exception(err, tb)
        return
      if isinstance(nval, Future):
        self._internal_add_dependent(nval)
      else:
        self._queue.append(nval)
    if self._full and not self._dependents:
      self._mark_finished()

  def _mark_finished(self):
    # Perform the final reduction (if needed) and set the result.
    if not self._queue:
      self.set_result(None)
    elif len(self._queue) == 1:
      self.set_result(self._queue.pop())
    else:
      todo = list(self._queue)
      self._queue.clear()
      try:
        nval = self._reducer(todo)
      except GeneratorExit:
        raise
      except Exception, err:
        _, _, tb = sys.exc_info()
        self.set_exception(err, tb)
        return
      if isinstance(nval, Future):
        self._internal_add_dependent(nval)
      else:
        self.set_result(nval)
# Alias for StopIteration used to mark return values.  To use this,
# raise Return(<your return value>).  The semantics are exactly the same
# as raise StopIteration(<your return value>) but using Return clarifies
# that you are intending this to be the return value of a tasklet.
# TODO: According to Monocle authors Steve and Greg Hazel, Twisted
# used an exception to signal a return value from a generator early
# on, and they found out it was error-prone.  Should I worry?
Return = StopIteration
def get_return_value(err):
  """Extract the return value carried by a StopIteration/Return instance.

  No args -> None; one arg -> that value; several args -> the args tuple.
  """
  args = err.args
  if not args:
    return None
  if len(args) == 1:
    return args[0]
  return args
def tasklet(func):
  """Decorator turning a generator function into a tasklet.

  Calling the decorated function schedules the generator on the event
  loop and immediately returns a Future for its eventual result.  A
  non-generator function is run to completion synchronously and its
  (already-done) Future is returned.
  """
  @utils.wrapping(func)
  def tasklet_wrapper(*args, **kwds):
    # TODO: make most of this a public function so you can take a bare
    # generator and turn it into a tasklet dynamically.  (Monocle has
    # this I believe.)
    # __ndb_debug__ = utils.func_info(func)
    fut = Future('tasklet %s' % utils.func_info(func))
    fut._context = get_context()
    try:
      result = func(*args, **kwds)
    except StopIteration, err:
      # Just in case the function is not a generator but still uses
      # the "raise Return(...)" idiom, we'll extract the return value.
      result = get_return_value(err)
    if _is_generator(result):
      ns = namespace_manager.get_namespace()
      eventloop.queue_call(None, fut._help_tasklet_along, ns, result)
    else:
      fut.set_result(result)
    return fut
  return tasklet_wrapper
def synctasklet(func):
  """Decorator to run a function as a tasklet when called.

  Use this to wrap a request handler function that will be called by
  some web application framework (e.g. a Django view function or a
  webapp.RequestHandler.get method).
  """
  @utils.wrapping(func)
  def synctasklet_wrapper(*args, **kwds):
    __ndb_debug__ = utils.func_info(func)
    # Kick off the tasklet and block until its Future has a result.
    return tasklet(func)(*args, **kwds).get_result()
  return synctasklet_wrapper
def toplevel(func):
  """A sync tasklet that sets a fresh default Context.

  Use this for toplevel view functions such as
  webapp.RequestHandler.get() or Django view functions.
  """
  @utils.wrapping(func)
  def add_context_wrapper(*args, **kwds):
    __ndb_debug__ = utils.func_info(func)
    _state.clear_all_pending()
    # Create and install a new context for the duration of the call.
    context = make_default_context()
    try:
      set_context(context)
      return synctasklet(func)(*args, **kwds)
    finally:
      # Always uninstall the context, then flush its pending work and
      # drain the event loop so writes are not lost.
      set_context(None)
      context.flush().check_success()
      eventloop.run()  # Ensure writes are flushed, etc.
  return add_context_wrapper
# os.environ key whose presence marks that a context has been installed.
# NOTE(review): presumably os.environ is request-scoped in this runtime,
# making the flag request-local -- confirm against the App Engine docs.
_CONTEXT_KEY = '__CONTEXT__'


def get_context():
  """Return the current Context, creating and installing a default if needed."""
  ctx = None
  if os.getenv(_CONTEXT_KEY):
    ctx = _state.current_context
  if ctx is None:
    ctx = make_default_context()
    set_context(ctx)
  return ctx
def make_default_context():
  """Create a fresh Context with default connection and configuration."""
  return make_context()
@utils.positional(0)
def make_context(conn=None, config=None):
  """Create a new Context; *conn* and *config* must be passed by keyword."""
  from . import context  # Late import to deal with circular imports.
  return context.Context(conn=conn, config=config)
def set_context(new_context):
  """Install *new_context* as the current Context for this thread."""
  os.environ[_CONTEXT_KEY] = '1'  # Flag checked by get_context().
  _state.current_context = new_context
# TODO: Rework the following into documentation.
# A tasklet/coroutine/generator can yield the following things:
# - Another tasklet/coroutine/generator; this is entirely equivalent to
# "for x in g: yield x"; this is handled entirely by the @tasklet wrapper.
# (Actually, not. @tasklet returns a function that when called returns
# a Future. You can use the pep380 module's @gwrap decorator to support
# yielding bare generators though.)
# - An RPC (or MultiRpc); the tasklet will be resumed when this completes.
# This does not use the RPC's callback mechanism.
# - A Future; the tasklet will be resumed when the Future is done.
# This uses the Future's callback mechanism.
# A Future can be used in several ways:
# - Yield it from a tasklet; see above.
# - Check (poll) its status via f.done.
# - Call its wait() method, perhaps indirectly via check_success()
# or get_result(). This invokes the event loop.
# - Call the Future.wait_any() or Future.wait_all() method.
# This waits for any or all Futures and RPCs in the argument list.
# XXX HIRO XXX
# - A tasklet is a (generator) function decorated with @tasklet.
# - Calling a tasklet schedules the function for execution and returns a Future.
# - A function implementing a tasklet may:
# = yield a Future; this waits for the Future which returns f.get_result();
# = yield an RPC; this waits for the RPC and then returns rpc.get_result();
# = raise Return(result); this sets the outer Future's result;
# = raise StopIteration or return; this sets the outer Future's result;
# = raise another exception: this sets the outer Future's exception.
# - If a function implementing a tasklet is not a generator it will be
# immediately executed to completion and the tasklet wrapper will
# return a Future that is already done. (XXX Alternative behavior:
# it schedules the call to be run by the event loop.)
# - Code not running in a tasklet can call f.get_result() or f.wait() on
# a future. This is implemented by a simple loop like the following:
# while not self._done:
# eventloop.run1()
# - Here eventloop.run1() runs one "atomic" part of the event loop:
# = either it calls one immediately ready callback;
# = or it waits for the first RPC to complete;
# = or it sleeps until the first callback should be ready;
# = or it raises an exception indicating all queues are empty.
# - It is possible but suboptimal to call rpc.get_result() or
# rpc.wait() directly on an RPC object since this will not allow
# other callbacks to run as they become ready. Wrapping an RPC in a
# Future will take care of this issue.
# - The important insight is that when a generator function
# implementing a tasklet yields, raises or returns, there is always a
# wrapper that catches this event and either turns it into a
# callback sent to the event loop, or sets the result or exception
# for the tasklet's Future.
| {
"repo_name": "undoware/neutron-drive",
"path": "google_appengine/google/appengine/ext/ndb/tasklets.py",
"copies": "1",
"size": "36074",
"license": "bsd-3-clause",
"hash": -3757450836407581700,
"line_mean": 30.811287478,
"line_max": 80,
"alpha_frac": 0.647668681,
"autogenerated": false,
"ratio": 3.6949708081532315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9788037600627755,
"avg_score": 0.010920377705095371,
"num_lines": 1134
} |
"""A Task logger that presents our DB interface,
but exists entirely in memory and implemented with dicts.
Authors:
* Min RK
TaskRecords are dicts of the form::
{
'msg_id' : str(uuid),
'client_uuid' : str(uuid),
'engine_uuid' : str(uuid) or None,
'header' : dict(header),
'content': dict(content),
'buffers': list(buffers),
'submitted': datetime or None,
'started': datetime or None,
'completed': datetime or None,
'received': datetime or None,
'resubmitted': str(uuid) or None,
'result_header' : dict(header) or None,
'result_content' : dict(content) or None,
'result_buffers' : list(buffers) or None,
}
With this info, many of the special categories of tasks can be defined by query,
e.g.:
* pending: completed is None
* client's outstanding: client_uuid = uuid && completed is None
* MIA: received is None (and completed is None)
DictDB supports a subset of mongodb operators::
$lt,$gt,$lte,$gte,$ne,$in,$nin,$all,$mod,$exists
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from copy import deepcopy as copy
from datetime import datetime
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.py3compat import iteritems, itervalues
from IPython.utils.traitlets import Dict, Unicode, Integer, Float
# MongoDB-style comparison operators, mapped to two-argument predicates.
# Each is called as filters[op](record_value, operand).
filters = {
    '$lt' : lambda a,b: a < b,
    # BUG FIX: this previously read "lambda a,b: b > a", which is
    # equivalent to $lt; $gt must test record_value > operand.
    '$gt' : lambda a,b: a > b,
    '$eq' : lambda a,b: a == b,
    '$ne' : lambda a,b: a != b,
    '$lte': lambda a,b: a <= b,
    '$gte': lambda a,b: a >= b,
    '$in' : lambda a,b: a in b,
    '$nin': lambda a,b: a not in b,
    '$all': lambda a,b: all([ a in bb for bb in b ]),
    '$mod': lambda a,b: a%b[0] == b[1],
    '$exists' : lambda a,b: (b and a is not None) or (a is None and not b)
}
class CompositeFilter(object):
    """Composite filter for matching multiple properties.

    Built from a dict mapping operator names (keys of ``filters``) to
    operands; calling the instance ANDs all operator tests over a value.
    """
    def __init__(self, dikt):
        self.tests = []
        self.values = []
        for op, operand in iteritems(dikt):
            self.tests.append(filters[op])
            self.values.append(operand)
    def __call__(self, value):
        # Short-circuits on the first failing test, like the original loop.
        return all(test(value, operand)
                   for test, operand in zip(self.tests, self.values))
class BaseDB(LoggingConfigurable):
    """Empty Parent class so traitlets work on DB."""
    # base configurable traits:
    session = Unicode("")  # Identifier of the owning session (set externally).
class DictDB(BaseDB):
    """Basic in-memory dict-based object for saving Task Records.
    This is the first object to present the DB interface
    for logging tasks out of memory.
    The interface is based on MongoDB, so adding a MongoDB
    backend should be straightforward.
    """

    _records = Dict()  # msg_id -> record dict
    _culled_ids = set() # set of ids which have been culled
    _buffer_bytes = Integer(0) # running total of the bytes in the DB

    size_limit = Integer(1024**3, config=True,
        help="""The maximum total size (in bytes) of the buffers stored in the db
        When the db exceeds this size, the oldest records will be culled until
        the total size is under size_limit * (1-cull_fraction).
        default: 1 GB
        """
    )
    record_limit = Integer(1024, config=True,
        help="""The maximum number of records in the db
        When the history exceeds this size, the first record_limit * cull_fraction
        records will be culled.
        """
    )
    cull_fraction = Float(0.1, config=True,
        help="""The fraction by which the db should culled when one of the limits is exceeded
        In general, the db size will spend most of its time with a size in the range:
        [limit * (1-cull_fraction), limit]
        for each of size_limit and record_limit.
        """
    )

    def _match_one(self, rec, tests):
        """Check if a specific record matches tests."""
        for key, test in iteritems(tests):
            if not test(rec.get(key, None)):
                return False
        return True

    def _match(self, check):
        """Find all the matches for a check dict."""
        matches = []
        tests = {}
        for k, v in iteritems(check):
            if isinstance(v, dict):
                tests[k] = CompositeFilter(v)
            else:
                # BUGFIX: bind ``v`` as a default argument.  A plain
                # ``lambda o: o == v`` late-binds ``v``, so after the loop
                # finished every scalar equality test compared against the
                # *last* value seen, silently matching the wrong records
                # whenever ``check`` held more than one scalar condition.
                tests[k] = lambda o, v=v: o == v
        for rec in itervalues(self._records):
            if self._match_one(rec, tests):
                matches.append(copy(rec))
        return matches

    def _extract_subdict(self, rec, keys):
        """extract subdict of keys (msg_id is always included)"""
        d = {}
        d['msg_id'] = rec['msg_id']
        for key in keys:
            d[key] = rec[key]
        return copy(d)

    # methods for monitoring size / culling history

    def _add_bytes(self, rec):
        """Add a record's buffer bytes to the running total, culling if needed."""
        for key in ('buffers', 'result_buffers'):
            for buf in rec.get(key) or []:
                self._buffer_bytes += len(buf)
        self._maybe_cull()

    def _drop_bytes(self, rec):
        """Subtract a record's buffer bytes from the running total."""
        for key in ('buffers', 'result_buffers'):
            for buf in rec.get(key) or []:
                self._buffer_bytes -= len(buf)

    def _cull_oldest(self, n=1):
        """cull the oldest N records"""
        for msg_id in self.get_history()[:n]:
            self.log.debug("Culling record: %r", msg_id)
            self._culled_ids.add(msg_id)
            self.drop_record(msg_id)

    def _maybe_cull(self):
        """Cull records if either the record-count or byte-size limit is exceeded."""
        # cull by count:
        if len(self._records) > self.record_limit:
            to_cull = int(self.cull_fraction * self.record_limit)
            self.log.info("%i records exceeds limit of %i, culling oldest %i",
                len(self._records), self.record_limit, to_cull
            )
            self._cull_oldest(to_cull)
        # cull by size:
        if self._buffer_bytes > self.size_limit:
            limit = self.size_limit * (1 - self.cull_fraction)
            before = self._buffer_bytes
            before_count = len(self._records)
            culled = 0
            while self._buffer_bytes > limit:
                self._cull_oldest(1)
                culled += 1
            self.log.info("%i records with total buffer size %i exceeds limit: %i. Culled oldest %i records.",
                before_count, before, self.size_limit, culled
            )

    def _check_dates(self, rec):
        """Raise ValueError if any timestamp field is neither None nor datetime."""
        for key in ('submitted', 'started', 'completed'):
            value = rec.get(key, None)
            if value is not None and not isinstance(value, datetime):
                raise ValueError("%s must be None or datetime, not %r" % (key, value))

    # public API methods:

    def add_record(self, msg_id, rec):
        """Add a new Task Record, by msg_id."""
        if msg_id in self._records:
            raise KeyError("Already have msg_id %r"%(msg_id))
        self._check_dates(rec)
        self._records[msg_id] = rec
        # _add_bytes triggers _maybe_cull itself, so no extra explicit
        # cull call is needed here (the original called it twice).
        self._add_bytes(rec)

    def get_record(self, msg_id):
        """Get a specific Task Record, by msg_id."""
        if msg_id in self._culled_ids:
            raise KeyError("Record %r has been culled for size" % msg_id)
        if not msg_id in self._records:
            raise KeyError("No such msg_id %r"%(msg_id))
        # return a copy so callers cannot mutate the stored record
        return copy(self._records[msg_id])

    def update_record(self, msg_id, rec):
        """Update the data in an existing record."""
        if msg_id in self._culled_ids:
            raise KeyError("Record %r has been culled for size" % msg_id)
        self._check_dates(rec)
        _rec = self._records[msg_id]
        # re-account buffer bytes around the in-place update
        self._drop_bytes(_rec)
        _rec.update(rec)
        self._add_bytes(_rec)

    def drop_matching_records(self, check):
        """Remove all records matching a check dict from the DB."""
        matches = self._match(check)
        for rec in matches:
            self._drop_bytes(rec)
            del self._records[rec['msg_id']]

    def drop_record(self, msg_id):
        """Remove a record from the DB."""
        rec = self._records[msg_id]
        self._drop_bytes(rec)
        del self._records[msg_id]

    def find_records(self, check, keys=None):
        """Find records matching a query dict, optionally extracting subset of keys.
        Returns dict keyed by msg_id of matching records.
        Parameters
        ----------
        check: dict
            mongodb-style query argument
        keys: list of strs [optional]
            if specified, the subset of keys to extract. msg_id will *always* be
            included.
        """
        matches = self._match(check)
        if keys:
            return [ self._extract_subdict(rec, keys) for rec in matches ]
        else:
            return matches

    def get_history(self):
        """get all msg_ids, ordered by time submitted."""
        msg_ids = self._records.keys()
        # Remove any that do not have a submitted timestamp.
        # This is extremely unlikely to happen,
        # but it seems to come up in some tests on VMs.
        msg_ids = [ m for m in msg_ids if self._records[m]['submitted'] is not None ]
        return sorted(msg_ids, key=lambda m: self._records[m]['submitted'])
# Shared singleton exception raised by every NoDB data-access method
# (get_record, find_records, get_history) below.
NODATA = KeyError("NoDB backend doesn't store any data. "
"Start the Controller with a DB backend to enable resubmission / result persistence."
)
class NoDB(BaseDB):
    """A blackhole db backend that actually stores no information.
    Provides the full DB interface, but raises KeyErrors on any
    method that tries to access the records. This can be used to
    minimize the memory footprint of the Hub when its record-keeping
    functionality is not required.
    """

    def add_record(self, msg_id, record):
        # silently discard the record
        pass

    def get_record(self, msg_id):
        # nothing is ever stored, so every lookup fails
        raise NODATA

    def update_record(self, msg_id, record):
        # nothing to update; silently ignore
        pass

    def drop_matching_records(self, check):
        # nothing to drop; silently ignore
        pass

    def drop_record(self, msg_id):
        # nothing to drop; silently ignore
        pass

    def find_records(self, check, keys=None):
        # no records exist to search
        raise NODATA

    def get_history(self):
        # no history is recorded
        raise NODATA
| {
"repo_name": "poojavade/Genomics_Docker",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/ipython-2.2.0-py2.7.egg/IPython/parallel/controller/dictdb.py",
"copies": "8",
"size": "10346",
"license": "apache-2.0",
"hash": -1563589498940623400,
"line_mean": 31.6372239748,
"line_max": 110,
"alpha_frac": 0.5720085057,
"autogenerated": false,
"ratio": 3.893865261573203,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023246707133617782,
"num_lines": 317
} |
"""A task queue."""
import select
import psycopg2.extras
import handler
import schema
import task
class PgTq(object):
    """Represents a single task queue backed by PostgreSQL."""

    def __init__(self, name, connection_string):
        """Create a task queue with the given name in the given DB."""
        self.name = name
        self.connection_string = connection_string
        self.conn = psycopg2.connect(connection_string)
        self.create_tables()
        self.handlers = {}

    def create_tables(self):
        """Ensure that the structures needed to store tasks exist."""
        sql = schema.SQL_TEMPLATE.format(self.name)
        with self.conn:
            with self.conn.cursor() as cursor:
                cursor.execute(sql)

    # pylint: disable=unused-argument
    def handler(self, name=None, max_retries=None):
        """Return a decorator for creating new handlers.

        Raises ValueError if max_retries is negative, and RuntimeError
        (from the decorator) if a handler with the same name exists.
        """
        if max_retries is not None and max_retries < 0:
            # BUGFIX: the check allows zero, so the message must say
            # non-negative, not positive.
            raise ValueError("max_retries must be non-negative")
        def decorator(procedure):
            """Create a new handler from the decorated function."""
            nonlocal name
            if not name:
                name = procedure.__name__
            new_handler = handler.Handler(self, procedure, name, max_retries)
            if new_handler.name in self.handlers:
                err = "Conflict: handler for task '{}' already exists."
                raise RuntimeError(err.format(new_handler.name))
            self.handlers[new_handler.name] = new_handler
            return new_handler
        return decorator

    def push(self, handler_name, max_retries, args, kwargs):
        """Insert a task into the end of the queue."""
        sql_template = "SELECT pgtq_{0}_push(%s, %s);"
        sql = sql_template.format(self.name)
        serialised_task = psycopg2.extras.Json({'name': handler_name,
                                                'args': args,
                                                'kwargs': kwargs})
        with self.conn:
            with self.conn.cursor() as cursor:
                cursor.execute(sql, [serialised_task, max_retries])

    def pop(self):
        """Remove a task from the start of the queue, returning it.

        Returns None if no task is available.
        """
        sql = "EXECUTE pgtq_{0}_lock_task;".format(self.name)
        with self.conn:
            with self.conn.cursor() as cursor:
                cursor.execute(sql)
                json_repr = cursor.fetchone()
        if json_repr:
            return task.Task(self, json_repr)

    def run_scheduled(self):
        """Move scheduled tasks into task queue.

        Any and all schedule items, including failed task retries, that
        are at or past their scheduled time, are pushed onto the end of
        the task queue to be picked up by free workers.

        Return the time of the next scheduled task.
        """
        sql = "SELECT pgtq_{0}_run_scheduled();".format(self.name)
        with self.conn:
            with self.conn.cursor() as cursor:
                cursor.execute(sql)
                return cursor.fetchone()[0]

    def wait_for_a_task(self):
        """Block the thread until the DB notifies a task exists.

        In the presence of multiple worker processes, there is no
        guarantee that a task will exist when this method returns.
        """
        connection = psycopg2.connect(self.connection_string)
        connection.autocommit = True
        cursor = connection.cursor()
        channel = "pgtq_{0}_runnable_channel".format(self.name)
        cursor.execute("LISTEN {};".format(channel))
        while True:
            select.select([connection], [], [])
            connection.poll()
            if connection.notifies:
                cursor.execute("UNLISTEN {};".format(channel))
                cursor.close()
                connection.close()
                return

    def wait_for_a_schedule(self, timeout):
        """Wait for a new scheduled item.

        Block the thread until the DB notifies that a new task has been
        scheduled, up to timeout seconds.
        """
        connection = psycopg2.connect(self.connection_string)
        connection.autocommit = True
        cursor = connection.cursor()
        channel = "pgtq_{0}_scheduled_channel".format(self.name)
        cursor.execute("LISTEN {};".format(channel))
        try:
            while True:
                # BUGFIX: honour the timeout.  The original looped forever on
                # timeout expiry, and was missing a ``return`` after handling
                # a notification, so the next iteration selected on a closed
                # connection and crashed.
                readable, _, _ = select.select([connection], [], [], timeout)
                if not readable:
                    # timed out without a notification
                    return
                connection.poll()
                if connection.notifies:
                    return
        finally:
            cursor.execute("UNLISTEN {};".format(channel))
            cursor.close()
            connection.close()

    def mark_completed(self, task_key):
        """Move the given task from the running set to the completed set."""
        sql = "EXECUTE pgtq_{0}_mark_completed (%s);".format(self.name)
        with self.conn:
            with self.conn.cursor() as cursor:
                cursor.execute(sql, [task_key])

    def mark_interupted(self, task_key):
        """Move the given task from the running set to the interupted set."""
        sql = "SELECT pgtq_{0}_interupt(%s);".format(self.name)
        with self.conn:
            with self.conn.cursor() as cursor:
                cursor.execute(sql, [task_key])
| {
"repo_name": "DanielCollins/pgtq",
"path": "src/pgtq.py",
"copies": "1",
"size": "5214",
"license": "isc",
"hash": -6665344497449318000,
"line_mean": 38.2030075188,
"line_max": 77,
"alpha_frac": 0.5784426544,
"autogenerated": false,
"ratio": 4.381512605042017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5459955259442018,
"avg_score": null,
"num_lines": null
} |
"""A TaskRecord backend using mongodb
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from pymongo import Connection
from bson import Binary
from IPython.utils.traitlets import Dict, List, Unicode, Instance
from .dictdb import BaseDB
#-----------------------------------------------------------------------------
# MongoDB class
#-----------------------------------------------------------------------------
class MongoDB(BaseDB):
    """MongoDB TaskRecord backend."""

    connection_args = List(config=True,
        help="""Positional arguments to be passed to pymongo.Connection. Only
        necessary if the default mongodb configuration does not point to your
        mongod instance.""")
    connection_kwargs = Dict(config=True,
        help="""Keyword arguments to be passed to pymongo.Connection. Only
        necessary if the default mongodb configuration does not point to your
        mongod instance."""
    )
    database = Unicode(config=True,
        help="""The MongoDB database name to use for storing tasks for this session. If unspecified,
        a new database will be created with the Hub's IDENT. Specifying the database will result
        in tasks from previous sessions being available via Clients' db_query and
        get_result methods.""")

    _connection = Instance(Connection) # pymongo connection

    def __init__(self, **kwargs):
        super(MongoDB, self).__init__(**kwargs)
        if self._connection is None:
            self._connection = Connection(*self.connection_args, **self.connection_kwargs)
        if not self.database:
            # default to a database named after the Hub's session ident
            self.database = self.session
        self._db = self._connection[self.database]
        self._records = self._db['task_records']
        # msg_id is the primary lookup key; 'submitted' supports history sorting
        self._records.ensure_index('msg_id', unique=True)
        self._records.ensure_index('submitted') # for sorting history
        # for rec in self._records.find

    def _binary_buffers(self, rec):
        """Wrap raw buffer fields in bson.Binary so mongo can store them."""
        for key in ('buffers', 'result_buffers'):
            if rec.get(key, None):
                # NOTE(review): py2 map returns a list here; a py3 port
                # would need list(map(...))
                rec[key] = map(Binary, rec[key])
        return rec

    def add_record(self, msg_id, rec):
        """Add a new Task Record, by msg_id."""
        rec = self._binary_buffers(rec)
        self._records.insert(rec)

    def get_record(self, msg_id):
        """Get a specific Task Record, by msg_id."""
        r = self._records.find_one({'msg_id': msg_id})
        if not r:
            # r will be '' if nothing is found
            raise KeyError(msg_id)
        return r

    def update_record(self, msg_id, rec):
        """Update the data in an existing record."""
        rec = self._binary_buffers(rec)
        self._records.update({'msg_id':msg_id}, {'$set': rec})

    def drop_matching_records(self, check):
        """Remove all records matching a query dict from the DB."""
        self._records.remove(check)

    def drop_record(self, msg_id):
        """Remove a record from the DB."""
        self._records.remove({'msg_id':msg_id})

    def find_records(self, check, keys=None):
        """Find records matching a query dict, optionally extracting subset of keys.
        Returns list of matching records.
        Parameters
        ----------
        check: dict
            mongodb-style query argument
        keys: list of strs [optional]
            if specified, the subset of keys to extract. msg_id will *always* be
            included.
        """
        if keys and 'msg_id' not in keys:
            # BUGFIX: build a new list instead of appending in place, so the
            # caller's ``keys`` argument is not mutated as a side effect.
            keys = list(keys) + ['msg_id']
        matches = list(self._records.find(check, keys))
        for rec in matches:
            # strip mongo's internal _id field from results
            rec.pop('_id')
        return matches

    def get_history(self):
        """get all msg_ids, ordered by time submitted."""
        cursor = self._records.find({}, {'msg_id':1}).sort('submitted')
        return [ rec['msg_id'] for rec in cursor ]
| {
"repo_name": "sodafree/backend",
"path": "build/ipython/IPython/parallel/controller/mongodb.py",
"copies": "3",
"size": "4214",
"license": "bsd-3-clause",
"hash": 4783443282388012000,
"line_mean": 35.0170940171,
"line_max": 100,
"alpha_frac": 0.561461794,
"autogenerated": false,
"ratio": 4.526315789473684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6587777583473684,
"avg_score": null,
"num_lines": null
} |
"""A TaskRecord backend using sqlite3
Authors:
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import json
import os
import cPickle as pickle
from datetime import datetime
try:
import sqlite3
except ImportError:
sqlite3 = None
from zmq.eventloop import ioloop
from IPython.utils.traitlets import Unicode, Instance, List, Dict
from .dictdb import BaseDB
from IPython.utils.jsonutil import date_default, extract_dates, squash_dates
#-----------------------------------------------------------------------------
# SQLite operators, adapters, and converters
#-----------------------------------------------------------------------------
# ``buffer`` only exists on Python 2; alias it to memoryview on Python 3
# so isinstance checks against byte-like objects work on both.
try:
    buffer
except NameError:
    # py3k
    buffer = memoryview

# Mapping from mongodb-style query operators to their SQL spellings.
# Tuple values are (comparison, joiner) pairs used to expand list
# arguments ($in/$nin) into OR/AND chains of single comparisons.
operators = {
    '$lt' : "<",
    '$gt' : ">",
    # null is handled weird with ==,!=
    '$eq' : "=",
    '$ne' : "!=",
    '$lte': "<=",
    '$gte': ">=",
    '$in' : ('=', ' OR '),
    '$nin': ('!=', ' AND '),
    # '$all': None,
    # '$mod': None,
    # '$exists' : None
}

# SQL comparisons against NULL need IS / IS NOT instead of = / !=.
null_operators = {
    '=' : "IS NULL",
    '!=' : "IS NOT NULL",
}
def _adapt_dict(d):
    """sqlite adapter: serialize a dict to a JSON string for storage."""
    # date_default makes datetime values JSON-serializable
    return json.dumps(d, default=date_default)
def _convert_dict(ds):
if ds is None:
return ds
else:
if isinstance(ds, bytes):
# If I understand the sqlite doc correctly, this will always be utf8
ds = ds.decode('utf8')
return extract_dates(json.loads(ds))
def _adapt_bufs(bufs):
    """sqlite adapter: pickle a list of byte buffers into a single BLOB."""
    # this is *horrible*
    # copy buffers into single list and pickle it:
    if bufs and isinstance(bufs[0], (bytes, buffer)):
        # NOTE(review): relies on py2 ``map`` returning a list; the py3
        # port of this module wraps it in list() for that reason.
        return sqlite3.Binary(pickle.dumps(map(bytes, bufs),-1))
    elif bufs:
        # non-bytes contents: store as-is
        return bufs
    else:
        # empty / None input stores as SQL NULL
        return None
def _convert_bufs(bs):
if bs is None:
return []
else:
return pickle.loads(bytes(bs))
#-----------------------------------------------------------------------------
# SQLiteDB class
#-----------------------------------------------------------------------------
class SQLiteDB(BaseDB):
    """SQLite3 TaskRecord backend."""

    filename = Unicode('tasks.db', config=True,
        help="""The filename of the sqlite task database. [default: 'tasks.db']""")
    location = Unicode('', config=True,
        help="""The directory containing the sqlite task database. The default
        is to use the cluster_dir location.""")
    table = Unicode("", config=True,
        help="""The SQLite Table to use for storing tasks for this session. If unspecified,
        a new table will be created with the Hub's IDENT. Specifying the table will result
        in tasks from previous sessions being available via Clients' db_query and
        get_result methods.""")
    # fall back to a plain None attribute when sqlite3 is unavailable;
    # __init__ raises ImportError in that case
    if sqlite3 is not None:
        _db = Instance('sqlite3.Connection')
    else:
        _db = None
    # the ordered list of column names
    _keys = List(['msg_id',
                  'header',
                  'content',
                  'buffers',
                  'submitted',
                  'client_uuid',
                  'engine_uuid',
                  'started',
                  'completed',
                  'resubmitted',
                  'received',
                  'result_header',
                  'result_content',
                  'result_buffers',
                  'queue',
                  'pyin',
                  'pyout',
                  'pyerr',
                  'stdout',
                  'stderr',
                  ])
    # sqlite datatypes for checking that db is current format
    _types = Dict({'msg_id' : 'text',
                   'header' : 'dict text',
                   'content' : 'dict text',
                   'buffers' : 'bufs blob',
                   'submitted' : 'timestamp',
                   'client_uuid' : 'text',
                   'engine_uuid' : 'text',
                   'started' : 'timestamp',
                   'completed' : 'timestamp',
                   'resubmitted' : 'text',
                   'received' : 'timestamp',
                   'result_header' : 'dict text',
                   'result_content' : 'dict text',
                   'result_buffers' : 'bufs blob',
                   'queue' : 'text',
                   'pyin' : 'text',
                   'pyout' : 'text',
                   'pyerr' : 'text',
                   'stdout' : 'text',
                   'stderr' : 'text',
                   })

    def __init__(self, **kwargs):
        super(SQLiteDB, self).__init__(**kwargs)
        if sqlite3 is None:
            raise ImportError("SQLiteDB requires sqlite3")
        if not self.table:
            # use session, and prefix _, since starting with # is illegal
            self.table = '_'+self.session.replace('-','_')
        if not self.location:
            # get current profile
            from IPython.core.application import BaseIPythonApplication
            if BaseIPythonApplication.initialized():
                app = BaseIPythonApplication.instance()
                if app.profile_dir is not None:
                    self.location = app.profile_dir.location
                else:
                    self.location = u'.'
            else:
                self.location = u'.'
        self._init_db()
        # register db commit as 2s periodic callback
        # to prevent clogging pipes
        # assumes we are being run in a zmq ioloop app
        loop = ioloop.IOLoop.instance()
        pc = ioloop.PeriodicCallback(self._db.commit, 2000, loop)
        pc.start()

    def _defaults(self, keys=None):
        """create an empty record"""
        d = {}
        keys = self._keys if keys is None else keys
        for key in keys:
            d[key] = None
        return d

    def _check_table(self):
        """Ensure that an incorrect table doesn't exist
        If a bad (old) table does exist, return False
        """
        cursor = self._db.execute("PRAGMA table_info(%s)"%self.table)
        lines = cursor.fetchall()
        if not lines:
            # table does not exist
            return True
        types = {}
        keys = []
        for line in lines:
            keys.append(line[1])
            types[line[1]] = line[2]
        if self._keys != keys:
            # key mismatch
            self.log.warn('keys mismatch')
            return False
        for key in self._keys:
            if types[key] != self._types[key]:
                self.log.warn(
                    'type mismatch: %s: %s != %s'%(key,types[key],self._types[key])
                )
                return False
        return True

    def _init_db(self):
        """Connect to the database and get new session number."""
        # register adapters
        sqlite3.register_adapter(dict, _adapt_dict)
        sqlite3.register_converter('dict', _convert_dict)
        sqlite3.register_adapter(list, _adapt_bufs)
        sqlite3.register_converter('bufs', _convert_bufs)
        # connect to the db
        dbfile = os.path.join(self.location, self.filename)
        self._db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES,
            # isolation_level = None)#,
            cached_statements=64)
        # print dir(self._db)
        # if the existing table doesn't match the expected schema,
        # keep appending _N suffixes until an unused/compatible name is found
        first_table = previous_table = self.table
        i=0
        while not self._check_table():
            i+=1
            self.table = first_table+'_%i'%i
            self.log.warn(
                "Table %s exists and doesn't match db format, trying %s"%
                (previous_table, self.table)
            )
            previous_table = self.table
        self._db.execute("""CREATE TABLE IF NOT EXISTS %s
                (msg_id text PRIMARY KEY,
                header dict text,
                content dict text,
                buffers bufs blob,
                submitted timestamp,
                client_uuid text,
                engine_uuid text,
                started timestamp,
                completed timestamp,
                resubmitted text,
                received timestamp,
                result_header dict text,
                result_content dict text,
                result_buffers bufs blob,
                queue text,
                pyin text,
                pyout text,
                pyerr text,
                stdout text,
                stderr text)
                """%self.table)
        self._db.commit()

    def _dict_to_list(self, d):
        """turn a mongodb-style record dict into a list."""
        return [ d[key] for key in self._keys ]

    def _list_to_dict(self, line, keys=None):
        """Inverse of dict_to_list"""
        keys = self._keys if keys is None else keys
        d = self._defaults(keys)
        for key,value in zip(keys, line):
            d[key] = value
        return d

    def _render_expression(self, check):
        """Turn a mongodb-style search dict into an SQL query."""
        expressions = []
        args = []
        # column names are interpolated into SQL with %, but they are
        # validated against self._keys first, so no injection is possible
        skeys = set(check.keys())
        skeys.difference_update(set(self._keys))
        skeys.difference_update(set(['buffers', 'result_buffers']))
        if skeys:
            raise KeyError("Illegal testing key(s): %s"%skeys)
        for name,sub_check in check.iteritems():
            if isinstance(sub_check, dict):
                for test,value in sub_check.iteritems():
                    try:
                        op = operators[test]
                    except KeyError:
                        raise KeyError("Unsupported operator: %r"%test)
                    if isinstance(op, tuple):
                        op, join = op
                    if value is None and op in null_operators:
                        # comparisons with NULL need IS / IS NOT syntax
                        expr = "%s %s" % (name, null_operators[op])
                    else:
                        expr = "%s %s ?"%(name, op)
                        if isinstance(value, (tuple,list)):
                            if op in null_operators and any([v is None for v in value]):
                                # equality tests don't work with NULL
                                raise ValueError("Cannot use %r test with NULL values on SQLite backend"%test)
                            expr = '( %s )'%( join.join([expr]*len(value)) )
                            args.extend(value)
                        else:
                            args.append(value)
                    expressions.append(expr)
            else:
                # it's an equality check
                if sub_check is None:
                    expressions.append("%s IS NULL" % name)
                else:
                    expressions.append("%s = ?"%name)
                    args.append(sub_check)
        expr = " AND ".join(expressions)
        return expr, args

    def add_record(self, msg_id, rec):
        """Add a new Task Record, by msg_id."""
        d = self._defaults()
        d.update(rec)
        d['msg_id'] = msg_id
        line = self._dict_to_list(d)
        tups = '(%s)'%(','.join(['?']*len(line)))
        self._db.execute("INSERT INTO %s VALUES %s"%(self.table, tups), line)
        # self._db.commit()

    def get_record(self, msg_id):
        """Get a specific Task Record, by msg_id."""
        cursor = self._db.execute("""SELECT * FROM %s WHERE msg_id==?"""%self.table, (msg_id,))
        line = cursor.fetchone()
        if line is None:
            raise KeyError("No such msg: %r"%msg_id)
        return self._list_to_dict(line)

    def update_record(self, msg_id, rec):
        """Update the data in an existing record."""
        query = "UPDATE %s SET "%self.table
        sets = []
        keys = sorted(rec.keys())
        values = []
        for key in keys:
            sets.append('%s = ?'%key)
            values.append(rec[key])
        query += ', '.join(sets)
        query += ' WHERE msg_id == ?'
        values.append(msg_id)
        self._db.execute(query, values)
        # self._db.commit()

    def drop_record(self, msg_id):
        """Remove a record from the DB."""
        self._db.execute("""DELETE FROM %s WHERE msg_id==?"""%self.table, (msg_id,))
        # self._db.commit()

    def drop_matching_records(self, check):
        """Remove all records matching a query dict from the DB."""
        expr,args = self._render_expression(check)
        query = "DELETE FROM %s WHERE %s"%(self.table, expr)
        self._db.execute(query,args)
        # self._db.commit()

    def find_records(self, check, keys=None):
        """Find records matching a query dict, optionally extracting subset of keys.
        Returns list of matching records.
        Parameters
        ----------
        check: dict
            mongodb-style query argument
        keys: list of strs [optional]
            if specified, the subset of keys to extract. msg_id will *always* be
            included.
        """
        if keys:
            bad_keys = [ key for key in keys if key not in self._keys ]
            if bad_keys:
                raise KeyError("Bad record key(s): %s"%bad_keys)
        if keys:
            # ensure msg_id is present and first:
            if 'msg_id' in keys:
                keys.remove('msg_id')
            keys.insert(0, 'msg_id')
            req = ', '.join(keys)
        else:
            req = '*'
        expr,args = self._render_expression(check)
        query = """SELECT %s FROM %s WHERE %s"""%(req, self.table, expr)
        cursor = self._db.execute(query, args)
        matches = cursor.fetchall()
        records = []
        for line in matches:
            rec = self._list_to_dict(line, keys)
            records.append(rec)
        return records

    def get_history(self):
        """get all msg_ids, ordered by time submitted."""
        query = """SELECT msg_id FROM %s ORDER by submitted ASC"""%self.table
        cursor = self._db.execute(query)
        # will be a list of length 1 tuples
        return [ tup[0] for tup in cursor.fetchall()]
__all__ = ['SQLiteDB'] | {
"repo_name": "sodafree/backend",
"path": "build/ipython/build/lib.linux-i686-2.7/IPython/parallel/controller/sqlitedb.py",
"copies": "3",
"size": "13828",
"license": "bsd-3-clause",
"hash": 2962140669798790000,
"line_mean": 32.5655339806,
"line_max": 110,
"alpha_frac": 0.5013017067,
"autogenerated": false,
"ratio": 4.260012322858903,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6261314029558903,
"avg_score": null,
"num_lines": null
} |
"""A TaskRecord backend using sqlite3"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
try:
import cPickle as pickle
except ImportError:
import pickle
from datetime import datetime
try:
import sqlite3
except ImportError:
sqlite3 = None
from zmq.eventloop import ioloop
from IPython.utils.traitlets import Unicode, Instance, List, Dict
from .dictdb import BaseDB
from IPython.utils.jsonutil import date_default, extract_dates, squash_dates
from IPython.utils.py3compat import iteritems
#-----------------------------------------------------------------------------
# SQLite operators, adapters, and converters
#-----------------------------------------------------------------------------
# ``buffer`` only exists on Python 2; alias it to memoryview on Python 3
# so isinstance checks against byte-like objects work on both.
try:
    buffer
except NameError:
    # py3k
    buffer = memoryview

# Mapping from mongodb-style query operators to their SQL spellings.
# Tuple values are (comparison, joiner) pairs used to expand list
# arguments ($in/$nin) into OR/AND chains of single comparisons.
operators = {
    '$lt': "<",
    '$gt': ">",
    # null is handled weird with ==,!=
    '$eq': "=",
    '$ne': "!=",
    '$lte': "<=",
    '$gte': ">=",
    '$in': ('=', ' OR '),
    '$nin': ('!=', ' AND '),
    # '$all': None,
    # '$mod': None,
    # '$exists' : None
}

# SQL comparisons against NULL need IS / IS NOT instead of = / !=.
null_operators = {
    '=': "IS NULL",
    '!=': "IS NOT NULL",
}
def _adapt_dict(d):
    """sqlite adapter: serialize a dict to a JSON string for storage."""
    # date_default makes datetime values JSON-serializable
    return json.dumps(d, default=date_default)
def _convert_dict(ds):
if ds is None:
return ds
else:
if isinstance(ds, bytes):
# If I understand the sqlite doc correctly, this will always be
# utf8
ds = ds.decode('utf8')
return extract_dates(json.loads(ds))
def _adapt_bufs(bufs):
# this is *horrible*
# copy buffers into single list and pickle it:
if bufs and isinstance(bufs[0], (bytes, buffer)):
return sqlite3.Binary(pickle.dumps(list(map(bytes, bufs)), -1))
elif bufs:
return bufs
else:
return None
def _convert_bufs(bs):
if bs is None:
return []
else:
return pickle.loads(bytes(bs))
#-----------------------------------------------------------------------------
# SQLiteDB class
#-----------------------------------------------------------------------------
class SQLiteDB(BaseDB):
"""SQLite3 TaskRecord backend."""
filename = Unicode('tasks.db', config=True,
help="""The filename of the sqlite task database. [default: 'tasks.db']""")
location = Unicode('', config=True,
help="""The directory containing the sqlite task database. The default
is to use the cluster_dir location.""")
table = Unicode("ipython-tasks", config=True,
help="""The SQLite Table to use for storing tasks for this session. If unspecified,
a new table will be created with the Hub's IDENT. Specifying the table will result
in tasks from previous sessions being available via Clients' db_query and
get_result methods.""")
if sqlite3 is not None:
_db = Instance('sqlite3.Connection')
else:
_db = None
# the ordered list of column names
_keys = List(['msg_id',
'header',
'metadata',
'content',
'buffers',
'submitted',
'client_uuid',
'engine_uuid',
'started',
'completed',
'resubmitted',
'received',
'result_header',
'result_metadata',
'result_content',
'result_buffers',
'queue',
'execute_input',
'execute_result',
'error',
'stdout',
'stderr',
])
# sqlite datatypes for checking that db is current format
_types = Dict({'msg_id': 'text',
'header': 'dict text',
'metadata': 'dict text',
'content': 'dict text',
'buffers': 'bufs blob',
'submitted': 'timestamp',
'client_uuid': 'text',
'engine_uuid': 'text',
'started': 'timestamp',
'completed': 'timestamp',
'resubmitted': 'text',
'received': 'timestamp',
'result_header': 'dict text',
'result_metadata': 'dict text',
'result_content': 'dict text',
'result_buffers': 'bufs blob',
'queue': 'text',
'execute_input': 'text',
'execute_result': 'text',
'error': 'text',
'stdout': 'text',
'stderr': 'text',
})
def __init__(self, **kwargs):
super(SQLiteDB, self).__init__(**kwargs)
if sqlite3 is None:
raise ImportError("SQLiteDB requires sqlite3")
if not self.table:
# use session, and prefix _, since starting with # is illegal
self.table = '_' + self.session.replace('-', '_')
if not self.location:
# get current profile
from IPython.core.application import BaseIPythonApplication
if BaseIPythonApplication.initialized():
app = BaseIPythonApplication.instance()
if app.profile_dir is not None:
self.location = app.profile_dir.location
else:
self.location = u'.'
else:
self.location = u'.'
self._init_db()
# register db commit as 2s periodic callback
# to prevent clogging pipes
# assumes we are being run in a zmq ioloop app
loop = ioloop.IOLoop.instance()
pc = ioloop.PeriodicCallback(self._db.commit, 2000, loop)
pc.start()
def _defaults(self, keys=None):
"""create an empty record"""
d = {}
keys = self._keys if keys is None else keys
for key in keys:
d[key] = None
return d
def _check_table(self):
"""Ensure that an incorrect table doesn't exist
If a bad (old) table does exist, return False
"""
cursor = self._db.execute("PRAGMA table_info('%s')" % self.table)
lines = cursor.fetchall()
if not lines:
# table does not exist
return True
types = {}
keys = []
for line in lines:
keys.append(line[1])
types[line[1]] = line[2]
if self._keys != keys:
# key mismatch
self.log.warn('keys mismatch')
return False
for key in self._keys:
if types[key] != self._types[key]:
self.log.warn(
'type mismatch: %s: %s != %s' % (
key, types[key], self._types[key])
)
return False
return True
def _init_db(self):
"""Connect to the database and get new session number."""
# register adapters
sqlite3.register_adapter(dict, _adapt_dict)
sqlite3.register_converter('dict', _convert_dict)
sqlite3.register_adapter(list, _adapt_bufs)
sqlite3.register_converter('bufs', _convert_bufs)
# connect to the db
dbfile = os.path.join(self.location, self.filename)
self._db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES,
# isolation_level = None)#,
cached_statements=64)
# print dir(self._db)
first_table = previous_table = self.table
i = 0
while not self._check_table():
i += 1
self.table = first_table + '_%i' % i
self.log.warn(
"Table %s exists and doesn't match db format, trying %s" %
(previous_table, self.table)
)
previous_table = self.table
self._db.execute("""CREATE TABLE IF NOT EXISTS '%s'
(msg_id text PRIMARY KEY,
header dict text,
metadata dict text,
content dict text,
buffers bufs blob,
submitted timestamp,
client_uuid text,
engine_uuid text,
started timestamp,
completed timestamp,
resubmitted text,
received timestamp,
result_header dict text,
result_metadata dict text,
result_content dict text,
result_buffers bufs blob,
queue text,
execute_input text,
execute_result text,
error text,
stdout text,
stderr text)
""" % self.table)
self._db.commit()
def _dict_to_list(self, d):
"""turn a mongodb-style record dict into a list."""
return [d[key] for key in self._keys]
def _list_to_dict(self, line, keys=None):
"""Inverse of dict_to_list"""
keys = self._keys if keys is None else keys
d = self._defaults(keys)
for key, value in zip(keys, line):
d[key] = value
return d
def _render_expression(self, check):
"""Turn a mongodb-style search dict into an SQL query."""
expressions = []
args = []
skeys = set(check.keys())
skeys.difference_update(set(self._keys))
skeys.difference_update(set(['buffers', 'result_buffers']))
if skeys:
raise KeyError("Illegal testing key(s): %s" % skeys)
for name, sub_check in iteritems(check):
if isinstance(sub_check, dict):
for test, value in iteritems(sub_check):
try:
op = operators[test]
except KeyError:
raise KeyError("Unsupported operator: %r" % test)
if isinstance(op, tuple):
op, join = op
if value is None and op in null_operators:
expr = "%s %s" % (name, null_operators[op])
else:
expr = "%s %s ?" % (name, op)
if isinstance(value, (tuple, list)):
if op in null_operators and any([v is None for v in value]):
# equality tests don't work with NULL
raise ValueError(
"Cannot use %r test with NULL values on SQLite backend" % test)
expr = '( %s )' % (join.join([expr] * len(value)))
args.extend(value)
else:
args.append(value)
expressions.append(expr)
else:
# it's an equality check
if sub_check is None:
expressions.append("%s IS NULL" % name)
else:
expressions.append("%s = ?" % name)
args.append(sub_check)
expr = " AND ".join(expressions)
return expr, args
def add_record(self, msg_id, rec):
    """Add a new Task Record, by msg_id."""
    record = self._defaults()
    record.update(rec)
    record['msg_id'] = msg_id
    row = self._dict_to_list(record)
    placeholders = '(%s)' % ','.join('?' * len(row))
    self._db.execute("INSERT INTO '%s' VALUES %s" %
                     (self.table, placeholders), row)
    # No commit here — the original deliberately left it commented out,
    # presumably so commits are batched elsewhere; confirm before relying
    # on immediate durability.
def get_record(self, msg_id):
    """Get a specific Task Record, by msg_id.

    Raises KeyError when no record with that msg_id exists.
    """
    row = self._db.execute(
        """SELECT * FROM '%s' WHERE msg_id==?""" % self.table, (msg_id,)
    ).fetchone()
    if row is None:
        raise KeyError("No such msg: %r" % msg_id)
    return self._list_to_dict(row)
def update_record(self, msg_id, rec):
    """Update the data in an existing record."""
    # Sort the keys so the generated SQL is deterministic.
    columns = sorted(rec.keys())
    assignments = ', '.join('%s = ?' % col for col in columns)
    query = "UPDATE '%s' SET %s WHERE msg_id == ?" % (self.table, assignments)
    params = [rec[col] for col in columns] + [msg_id]
    self._db.execute(query, params)
    # No commit here — deferred, as in the original.
def drop_record(self, msg_id):
    """Remove a record from the DB."""
    query = """DELETE FROM '%s' WHERE msg_id==?""" % self.table
    self._db.execute(query, (msg_id,))
    # No commit here — deferred, as in the original.
def drop_matching_records(self, check):
    """Remove all records matching a mongodb-style query dict."""
    where_clause, params = self._render_expression(check)
    self._db.execute(
        "DELETE FROM '%s' WHERE %s" % (self.table, where_clause), params)
    # No commit here — deferred, as in the original.
def find_records(self, check, keys=None):
    """Find records matching a query dict, optionally extracting subset of keys.

    Returns list of matching records.

    Parameters
    ----------
    check: dict
        mongodb-style query argument
    keys: list of strs [optional]
        if specified, the subset of keys to extract. msg_id will *always* be
        included.
    """
    if keys:
        bad_keys = [key for key in keys if key not in self._keys]
        if bad_keys:
            raise KeyError("Bad record key(s): %s" % bad_keys)
        # Work on a copy: the original implementation mutated the caller's
        # list while forcing msg_id to the front.
        keys = list(keys)
        # ensure msg_id is present and first:
        if 'msg_id' in keys:
            keys.remove('msg_id')
        keys.insert(0, 'msg_id')
        req = ', '.join(keys)
    else:
        req = '*'
    expr, args = self._render_expression(check)
    query = """SELECT %s FROM '%s' WHERE %s""" % (req, self.table, expr)
    cursor = self._db.execute(query, args)
    # One record dict per matching row; keys=None falls back to self._keys
    # inside _list_to_dict.
    return [self._list_to_dict(line, keys) for line in cursor.fetchall()]
def get_history(self):
    """get all msg_ids, ordered by time submitted."""
    query = """SELECT msg_id FROM '%s' ORDER by submitted ASC""" % self.table
    rows = self._db.execute(query).fetchall()
    # Each row is a length-1 tuple; unpack to plain msg_id values.
    return [msg_id for (msg_id,) in rows]
# Public API of this module: only the SQLiteDB TaskRecord backend.
__all__ = ['SQLiteDB']
| {
"repo_name": "mattvonrocketstein/smash",
"path": "smashlib/ipy3x/parallel/controller/sqlitedb.py",
"copies": "1",
"size": "14541",
"license": "mit",
"hash": 3610325204712102000,
"line_mean": 33.1338028169,
"line_max": 103,
"alpha_frac": 0.4877243656,
"autogenerated": false,
"ratio": 4.431880524230418,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5419604889830417,
"avg_score": null,
"num_lines": null
} |
# ATCA Rapid Response Service
# Jamie.Stevens@csiro.au
# example1.py
# This example script shows how to make a schedule and request time for it from
# the web service.
# The modules we'll need.
import atca_rapid_response_api as arrApi
import cabb_scheduler as cabb
# Example 1.
# The situation is the same as in example 1 of the CABB scheduling library example 1.
# Suppose an event trigger has been received for a flaring magnetar at
# coordinates RA = 01:00:43.1, Dec = -72:11:33.8.
# Make a new schedule.
schedule = cabb.schedule()
# Add a scan to look at the magnetar's coordinates.
# This is also where we set our project code; in this example we'll use
# the code C007 (we have a test authorisation token for this project).
# We'll also set it to be 20 minutes long, with Dwell mode.
scan1 = schedule.addScan(
{ 'source': "magnetar", 'rightAscension': "01:00:43.1", 'declination': "-72:11:33.8",
'freq1': 5500, 'freq2': 9000, 'project': "C007",
'scanLength': "00:20:00", 'scanType': "Dwell" }
)
# Since we definitely want to get onto source as quickly as possible, we tell the
# library not to go to the calibrator first.
schedule.disablePriorCalibration()
# Request a list of nearby calibrators from the ATCA calibrator database.
calList = scan1.findCalibrator()
# Ask for the library to choose the best one for the current array. We first need to
# get the current array from MoniCA.
currentArray = cabb.monica_information.getArray()
# And pass this as the arggument to the calibrator selector.
bestCal = calList.getBestCalibrator(currentArray)
# This should choose 2353-686.
print "Calibrator chosen: %s, %.1f degrees away" % (bestCal['calibrator'].getName(),
bestCal['distance'])
# We add this calibrator to the schedule, attaching it to the scan it
# will be the calibrator for. We'll ask to observe the calibrator for 2
# minutes.
calScan = schedule.addCalibrator(bestCal['calibrator'], scan1, { 'scanLength': "00:02:00" })
# We want the schedule to run for about an hour, so we want another two copies
# of these two scans. Remembering that the library will take care of
# associating a calibrator to each source, we only need to copy the source
# scan.
for i in xrange(0, 2):
schedule.copyScans([ scan1.getId() ])
# Tell the library that we won't be looping, so there will be a calibrator scan at the
# end of the schedule.
schedule.setLooping(False)
# We need to turn this schedule into a string.
schedString = schedule.toString()
# We have our schedule now, so we need to craft the service request to submit it to
# the rapid response service.
rapidObj = { 'schedule': schedString }
# The authentication token needs to go with it, and we point to the file that
# contains the token.
rapidObj['authenticationTokenFile'] = "authorisation_token_test_C007_2016OCT.jwt"
# The name of the main target needs to be specified.
rapidObj['nameTarget'] = "magnetar"
# So does the name of the calibrator.
rapidObj['nameCalibrator'] = bestCal['calibrator'].getName()
# The email address of the requester needs to be there.
rapidObj['email'] = "Jamie.Stevens@csiro.au"
# Because this is a test run, we'll specify a few parameters to just try things out.
rapidObj['test'] = True
rapidObj['emailOnly'] = "Jamie.Stevens@csiro.au"
rapidObj['noTimeLimit'] = True
rapidObj['noScoreLimit'] = True
#rapidObj['noEmail'] = True
# Send the request.
request = arrApi.api(rapidObj)
try:
response = request.send()
except arrApi.responseError as r:
print r.value
| {
"repo_name": "ste616/atca-rapid-response-api",
"path": "python/example1.py",
"copies": "1",
"size": "3549",
"license": "mit",
"hash": 3545998996935254000,
"line_mean": 38,
"line_max": 92,
"alpha_frac": 0.7275288814,
"autogenerated": false,
"ratio": 3.357615894039735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9557544474715733,
"avg_score": 0.005520060144800236,
"num_lines": 91
} |
# ATCA Rapid Response Service
# Jamie.Stevens@csiro.au
# example2.py
# This example script shows how to make a schedule and request time for it from
# the web service.
# The modules we'll need.
import atca_rapid_response_api as arrApi
import cabb_scheduler as cabb
# Example 1.
# The situation is the same as in example 1 of the CABB scheduling library example 1.
# Suppose an event trigger has been received for a flaring magnetar at
# coordinates RA = 01:00:43.1, Dec = -72:11:33.8.
# Make a new schedule.
schedule = cabb.schedule()
# Add a scan to look at the magnetar's coordinates.
# This is also where we set our project code; in this example we'll use
# the code C007 (we have a test authorisation token for this project).
# We'll also set it to be 20 minutes long, with Dwell mode.
scan1 = schedule.addScan(
{ 'source': "magnetar", 'rightAscension': "01:00:43.1", 'declination': "-72:11:33.8",
'freq1': 5500, 'freq2': 9000, 'project': "C007",
'scanLength': "00:20:00", 'scanType': "Dwell" }
)
# Since we definitely want to get onto source as quickly as possible, we tell the
# library not to go to the calibrator first.
schedule.disablePriorCalibration()
# Request a list of nearby calibrators from the ATCA calibrator database.
calList = scan1.findCalibrator()
# Ask for the library to choose the best one for the current array. We first need to
# get the current array from MoniCA.
currentArray = cabb.monica_information.getArray()
# And pass this as the arggument to the calibrator selector.
bestCal = calList.getBestCalibrator(currentArray)
# This should choose 2353-686.
print "Calibrator chosen: %s, %.1f degrees away" % (bestCal['calibrator'].getName(),
bestCal['distance'])
# We add this calibrator to the schedule, attaching it to the scan it
# will be the calibrator for. We'll ask to observe the calibrator for 2
# minutes.
calScan = schedule.addCalibrator(bestCal['calibrator'], scan1, { 'scanLength': "00:02:00" })
# We want the schedule to run for about an hour, so we want another two copies
# of these two scans. Remembering that the library will take care of
# associating a calibrator to each source, we only need to copy the source
# scan.
for i in xrange(0, 2):
schedule.copyScans([ scan1.getId() ])
# Tell the library that we won't be looping, so there will be a calibrator scan at the
# end of the schedule.
schedule.setLooping(False)
# We need to turn this schedule into a string.
schedString = schedule.toString()
# We have our schedule now, so we need to craft the service request to submit it to
# the rapid response service.
rapidObj = { 'schedule': schedString }
# The authentication token needs to go with it, and we point to the file that
# contains the token.
rapidObj['authenticationTokenFile'] = "authorisation_token_test_C007_2019APR.jwt"
# The name of the main target needs to be specified.
rapidObj['nameTarget'] = "magnetar"
# So does the name of the calibrator.
rapidObj['nameCalibrator'] = bestCal['calibrator'].getName()
# The email address of the requester needs to be there.
rapidObj['email'] = "Jamie.Stevens@csiro.au"
# We want to use whatever frequencies are running at the time.
rapidObj['usePreviousFrequencies'] = True
# We only ask for times more than 0.2 hours.
rapidObj['minimumTime'] = 0.2
# Because this is a test run, we'll specify a few parameters to just try things out.
rapidObj['test'] = True
rapidObj['emailOnly'] = "Jamie.Stevens@csiro.au"
#rapidObj['noTimeLimit'] = True
rapidObj['maxTime'] = 0.5
rapidObj['noScoreLimit'] = True
#rapidObj['noEmail'] = True
# Send the request.
request = arrApi.api(rapidObj)
try:
response = request.send()
except arrApi.responseError as r:
print r.value
| {
"repo_name": "ste616/atca-rapid-response-api",
"path": "python/example2.py",
"copies": "1",
"size": "3756",
"license": "mit",
"hash": 4618140915194058000,
"line_mean": 38.125,
"line_max": 92,
"alpha_frac": 0.7281682641,
"autogenerated": false,
"ratio": 3.3475935828877006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9547971457759741,
"avg_score": 0.005558077845591891,
"num_lines": 96
} |
# ATCA Rapid Response Service
# Jamie.Stevens@csiro.au
# example3_multifreq.py

# Build a schedule with more than one frequency setup for a flaring magnetar,
# request time for it from the rapid response web service, and save the
# schedule the service actually accepted.

import atca_rapid_response_api as arrApi
import cabb_scheduler as cabb

# Scenario (same as CABB scheduling library example 1): an event trigger for
# a flaring magnetar.
schedule = cabb.schedule()

# A 20-minute Dwell scan at 5.5/9 GHz, under project code C006 (a test
# authorisation token exists for this project).
scan1 = schedule.addScan(
    { 'source': "magnetar", 'rightAscension': "08:00:43.1", 'declination': "-72:11:33.8",
      'freq1': 5500, 'freq2': 9000, 'project': "C006",
      'scanLength': "00:20:00", 'scanType': "Dwell" }
)

# Pick the best nearby calibrator for the array currently in use (the array
# configuration comes from MoniCA). Expected choice: 2353-686.
currentArray = cabb.monica_information.getArray()
bestCal = scan1.findCalibrator().getBestCalibrator(currentArray)
print ("Calibrator chosen: %s, %.1f degrees away" % (bestCal['calibrator'].getName(),
                                                     bestCal['distance']))

# Attach a 2-minute calibrator scan to the first source scan.
calScan = schedule.addCalibrator(bestCal['calibrator'], scan1, { 'scanLength': "00:02:00" })

# A second, 10-minute scan on the same source at a higher frequency pair.
scan2 = schedule.addScan(
    { 'source': "magnetar", 'rightAscension': "08:00:43.1", 'declination': "-72:11:33.8",
      'freq1': 17000, 'freq2': 19000, 'project': "C006", 'scanLength': "00:10:00",
      'scanType': "Dwell" }
)

# A separate calibrator search and scan for the mm-frequency setup.
best_mm_Cal = scan2.findCalibrator().getBestCalibrator(currentArray)
print ("MM Calibrator chosen: %s, %.1f degrees away" % (best_mm_Cal['calibrator'].getName(),
                                                        best_mm_Cal['distance']))
cal_mm_Scan = schedule.addCalibrator(best_mm_Cal['calibrator'], scan2, { 'scanLength': "00:02:00" })

# Extend the schedule with two more copies of each source scan; the library
# re-associates a calibrator with each copy automatically.
for i in range(0, 2):
    schedule.copyScans([ scan1.getId() ])
    schedule.copyScans([ scan2.getId() ])

# Not looping, so a calibrator scan ends the schedule; then let the library
# insert any focus scans the schedule needs.
schedule.setLooping(False)
schedule.completeSchedule()

# Serialize the schedule and keep a local copy of what we are sending.
schedString = schedule.toString()
schedule.write("example3.sch")

# Craft the service request: schedule text, authorisation token file, the
# names of the target and calibrator, and the requester's email address,
# plus test-mode flags so this run only exercises the service.
rapidObj = {
    'schedule': schedString,
    'authenticationTokenFile': "authorisation_token_test_C006_2020APR.jwt",
    'nameTarget': "magnetar",
    'nameCalibrator': bestCal['calibrator'].getName(),
    'email': "Jamie.Stevens@csiro.au",
    # Use the frequencies in the schedule, not whatever is currently running.
    'usePreviousFrequencies': False,
    # Only accept allocations longer than 0.2 hours.
    'minimumTime': 0.2,
    # Test-run parameters.
    'test': True,
    'emailOnly': "Jamie.Stevens@csiro.au",
    'maxTime': 10,
    'noScoreLimit': True
}
# Optional extras, disabled here:
#rapidObj['noTimeLimit'] = True
#rapidObj['noEmail'] = True

# Send the request and report any error the service returns.
request = arrApi.api(rapidObj)
try:
    response = request.send()
except arrApi.responseError as r:
    print (r.value)

# Write out the altered schedule to check what happened to it.
asched = cabb.schedule()
#print ("%s" % response['schedule']['altered'])
asched.parse(response['schedule']['altered'])
asched.write("example3_altered.sch")
| {
"repo_name": "ste616/atca-rapid-response-api",
"path": "python/example3_multifreq.py",
"copies": "1",
"size": "4690",
"license": "mit",
"hash": -5498181470124600000,
"line_mean": 39.0854700855,
"line_max": 100,
"alpha_frac": 0.710021322,
"autogenerated": false,
"ratio": 3.2797202797202796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44897416017202796,
"avg_score": null,
"num_lines": null
} |
"""atcui URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic.base import RedirectView
# URL routing table: order matters, but every pattern here has a distinct
# prefix. Regex patterns are unchanged from the original.
urlpatterns = [
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
    # Django ATC API.
    url(r'^api/v1/', include('atc_api.urls')),
    # Django ATC Demo UI.
    url(r'^atc_demo_ui/', include('atc_demo_ui.urls')),
    # Django ATC profile storage.
    url(r'^api/v1/profiles/', include('atc_profile_storage.urls')),
    # Site root redirects (non-permanently) to the demo UI.
    url(r'^$', RedirectView.as_view(url='atc_demo_ui', permanent=False)),
]
| {
"repo_name": "duydb2/ZTC",
"path": "atcui/atcui/urls.py",
"copies": "1",
"size": "1129",
"license": "bsd-3-clause",
"hash": 4885653333462414000,
"line_mean": 37.9310344828,
"line_max": 77,
"alpha_frac": 0.684676705,
"autogenerated": false,
"ratio": 3.2819767441860463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9466653449186047,
"avg_score": 0,
"num_lines": 29
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.