text stringlengths 0 1.05M | meta dict |
|---|---|
from functools import reduce
import operator
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models import Q
from autorski.models import Joke
def __add_pages(request, jokes):
    """Paginate `jokes` at 15 items per page using the request's `page` param.

    Falls back to the first page for a non-numeric page value and to the
    last page when the requested page is out of range.
    """
    paginator = Paginator(jokes, 15)
    requested_page = request.GET.get('page')
    try:
        return paginator.page(requested_page)
    except PageNotAnInteger:
        # Non-integer page value -> deliver the first page.
        return paginator.page(1)
    except EmptyPage:
        # Page out of range (e.g. 9999) -> deliver the last page of results.
        return paginator.page(paginator.num_pages)
def __add_user(request, context):
    """Add display name, username and moderator flag for the current user.

    Adds to `context`:
      user_fullname -- "first last", first name, or username; None if anonymous.
      username      -- the login name, or None if anonymous.
      moderator     -- True when the user belongs to the 'Moderator' group.
    """
    user = request.user
    # NOTE(review): callable is_authenticated() is the pre-Django-1.10 form;
    # kept as-is for compatibility with this project's Django version.
    if user.is_authenticated():
        if user.first_name:
            name = user.first_name
            if user.last_name:
                name += ' ' + user.last_name
        else:
            name = user.username
        username = user.username
    else:
        name = None
        username = None
    context.update({'user_fullname': name, 'username': username})
    # .exists() issues an EXISTS query instead of fetching the whole group
    # queryset just to test its truthiness (was: True if qs else False).
    moderator = user.groups.filter(name='Moderator').exists()
    context.update({'moderator': moderator})
def all_jokes(request, pages=True):
    """Build the template context for the joke list view.

    GET parameters:
      sort     -- Joke attribute to sort on (default 'date').
      reversed -- the string 'true' disables reverse sorting; any other
                  value (or absence) enables it.
                  NOTE(review): this mapping looks inverted, but it is
                  preserved exactly as the original behaved.
      q        -- optional search; every word must appear in the joke body.
      page     -- page number, consumed by __add_pages when pages=True.
    """
    sort = request.GET.get('sort', 'date')
    # Default (True) is != 'true', so sorting is reversed unless the client
    # explicitly passes reversed=true.
    reverse = request.GET.get('reversed', True) != 'true'

    context = {}
    jokes = Joke.objects.all()

    search = request.GET.get('q', '')
    if search.strip():
        # AND together one icontains filter per search term.
        # (renamed from `filter`, which shadowed the builtin)
        query = reduce(operator.and_, (Q(body__icontains=word) for word in search.split()))
        jokes = jokes.filter(query)
        context.update({'search': search})

    # attrgetter replaces the non-idiomatic joke.__getattribute__(sort).
    jokes = sorted(jokes, key=operator.attrgetter(sort), reverse=reverse)
    if pages:
        jokes = __add_pages(request, jokes)
    context.update({'jokes': jokes})
    __add_user(request, context)
    return context
def one_joke(request, id):
    """Build the template context for a single joke detail page.

    :param id: primary key of the Joke to display.
    Raises Joke.DoesNotExist when no joke has that primary key.
    """
    joke = Joke.objects.get(pk=id)
    context = {'joke': joke}
    __add_user(request, context)
    # The original final line carried extraction residue ("| {") that made
    # the statement invalid; restored to a plain return.
    return context
"repo_name": "jchmura/suchary-django",
"path": "autorski/extras/prepare_view.py",
"copies": "1",
"size": "2075",
"license": "mit",
"hash": -6517948922761374000,
"line_mean": 26.68,
"line_max": 88,
"alpha_frac": 0.6202409639,
"autogenerated": false,
"ratio": 3.685612788632327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9801400985158211,
"avg_score": 0.0008905534748231377,
"num_lines": 75
} |
from functools import reduce
import operator
from django.db.models import CharField, Value as V
from django.db.models.functions import Concat
from pontoon.base.models import User, TranslatedResource
from pontoon.machinery.utils import (
get_google_translate_data,
get_translation_memory_data,
)
def get_translations(entity, locale):
    """Return pretranslation candidates for an (entity, locale) pair.

    Perfect (100% quality) translation-memory matches take priority; when
    none exist, Google Translate is consulted for supported locales.

    :arg Entity entity: the Entity object
    :arg Locale locale: the Locale object
    :returns: list of (translation, plural_form, user) tuples, where user is
        the translation-memory or Google Translate system account
    """
    tm_user = User.objects.get(email="pontoon-tm@example.com")
    gt_user = User.objects.get(email="pontoon-gt@example.com")

    plural_forms = range(locale.nplurals or 1)

    # Keep only exact translation-memory matches.
    tm_matches = [
        t
        for t in get_translation_memory_data(text=entity.string, locale=locale)
        if int(t["quality"]) == 100
    ]
    if tm_matches:
        best = tm_matches[0]["target"]
        if entity.string_plural == "":
            return [(best, None, tm_user)]
        return [(best, form, tm_user) for form in plural_forms]

    # Fall back to machine translation when the locale supports it.
    if locale.google_translate_code:
        gt_response = get_google_translate_data(
            text=entity.string, locale_code=locale.google_translate_code
        )
        if gt_response["status"]:
            translation = gt_response["translation"]
            if entity.string_plural == "":
                return [(translation, None, gt_user)]
            return [(translation, form, gt_user) for form in plural_forms]

    return []
def update_changed_instances(tr_filter, tr_dict, translations):
    """
    Update the latest activity and stats for changed Locales, ProjectLocales
    & TranslatedResources

    :param tr_filter: iterable of Q objects, one per changed
        (locale, resource) pair
    :param tr_dict: maps a "<locale_id>-<resource_id>" key to an index
        into `translations`
    :param translations: Translation objects whose latest-activity info
        should be refreshed
    """
    tr_filter = tuple(tr_filter)
    # Combine all generated filters with an OR operator.
    # `operator.ior` is the '|' Python operator, which turns into a logical OR
    # when used between django ORM query objects.
    tr_query = reduce(operator.ior, tr_filter)

    # Annotate each row with a "<locale_id>-<resource_id>" key so it can be
    # matched against tr_dict below.
    translatedresources = TranslatedResource.objects.filter(tr_query).annotate(
        locale_resource=Concat(
            "locale_id", V("-"), "resource_id", output_field=CharField()
        )
    )

    translatedresources.update_stats()

    for tr in translatedresources:
        index = tr_dict[tr.locale_resource]
        translation = translations[index]
        translation.update_latest_translation()
| {
"repo_name": "mozilla/pontoon",
"path": "pontoon/pretranslation/pretranslate.py",
"copies": "3",
"size": "2692",
"license": "bsd-3-clause",
"hash": -1319611961148128500,
"line_mean": 32.2345679012,
"line_max": 86,
"alpha_frac": 0.6526745914,
"autogenerated": false,
"ratio": 4.017910447761194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004448772078657828,
"num_lines": 81
} |
from functools import reduce
import os
import struct
import numpy as np
class LABEL:
    """Voxel label codes as stored in '.bin.labels' annotation files."""

    NONE = 0        # unassigned / unknown voxel
    VESSEL = 1
    BACKGROUND = 2
    PADDING = 3
class IMG:
    """Image-space voxel codes.

    NOTE: the numeric values differ from LABEL (BACKGROUND is 0 here but 2
    in LABEL), so the two enumerations must not be mixed.
    """

    BACKGROUND = 0
    VESSEL = 1
    NONE = 255
def is_eq(a):
    """Return a predicate that tests whether its argument equals `a`.

    The returned callable works element-wise on numpy arrays as well as on
    scalars. (Was a lambda bound to a name; PEP 8 E731.)
    """
    return lambda b: a == b


def is_neq(a):
    """Return a predicate that tests whether its argument differs from `a`."""
    return lambda b: a != b
def read_image(filename, dtype=np.float32):
    """Load a 3D OCT scan from a '.bin' file.

    The file begins with three int32 values giving the volume shape,
    followed by one `dtype` value per voxel.
    """
    with open(filename, 'rb') as fh:
        # Header: the three volume dimensions.
        dims = np.fromfile(fh, dtype=np.int32, count=3)
        # Body: exactly prod(dims) voxels, reshaped to the advertised shape.
        voxels = np.fromfile(fh, dtype=dtype, count=int(np.prod(dims)))
    return voxels.reshape(*dims)
def read_labels(filename):
    """Load a 3D labels volume from a '.bin.labels' file.

    Same layout as `read_image`, except the voxel data type is int32.
    """
    return read_image(filename, dtype=np.int32)
def write_image(image, filename, dtype=np.float32):
    """Write a 3D volume to `filename` in the '.bin' format.

    Layout (little-endian): the shape as uint32 values, the voxel data
    (float32 or uint32 depending on `dtype`), then a palette section
    listing the known label classes with their colors.
    """
    data = image.astype(dtype)
    voxel_count = np.prod(data.shape)
    palette = (
        # (NAME, (R, G, B)),
        ('NONE', (0x00, 0x00, 0x00)),
        ('VESSEL', (0xff, 0x26, 0x26)),
        ('BACKGROUND', (0x00, 0x33, 0xff)),
    )
    with open(filename, 'wb') as fh:
        fh.write(struct.pack('<%sI' % data.ndim, *data.shape))
        value_format = '<%sf' if dtype == np.float32 else '<%sI'
        fh.write(struct.pack(value_format % voxel_count, *data.flatten()))
        fh.write(struct.pack('<I', len(palette)))
        for index, (label_name, rgb) in enumerate(palette):
            fh.write(struct.pack('<I', index))
            fh.write(struct.pack('<I', len(label_name)))
            fh.write(label_name.encode())
            # Colors are stored BGR, followed by a fixed 0xff alpha byte.
            fh.write(struct.pack('<%sB' % len(rgb), *reversed(rgb)))
            fh.write(struct.pack('<B', 0xff))
def write_labels(labels, filename):
    """Write a labels volume using the int32 flavour of `write_image`."""
    return write_image(labels, filename, dtype=np.int32)
def cache_targets(targets, filename):
    """Serialize an (n, 3) array of target coordinates to `filename`.

    Layout (little-endian): uint32 `n`, then 3*n int16 values stored
    coordinate-major (all first components, then all second, then third).
    """
    # Coordinate-major layout: row k of the transposed array holds the k-th
    # component of every target.
    targets = np.transpose(targets)
    # Only create parent directories when the path actually has one;
    # os.makedirs('') raises FileNotFoundError for bare filenames.
    directory = os.path.dirname(filename)
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(filename, 'wb') as openfile:
        openfile.write(struct.pack('<I', targets.shape[1]))
        openfile.write(struct.pack('<%sh' % targets.size, *targets.flatten()))
def read_cached_targets(filename):
    """Inverse of `cache_targets`: return the stored (n, 3) int32 array."""
    with open(filename, 'rb') as fh:
        # Leading int32: number of cached targets.
        count = int(np.fromfile(fh, dtype=np.int32, count=1))
        # Body: 3*count int16 values, coordinate-major.
        flat = np.fromfile(fh, dtype=np.int16, count=3 * count)
    # Undo the coordinate-major transpose applied by cache_targets.
    return np.transpose(flat.reshape(3, count).astype(np.int32))
def path(filepath):
    """Resolve `filepath` relative to the directory containing this module."""
    base = os.path.normpath(os.path.dirname(__file__))
    return os.path.join(base, filepath)
def preprocess_patch(patch):
    """Zero-mean, unit-variance (ZMUV) normalization of a single patch."""
    centered = patch - patch.mean()
    return centered / patch.std()
def preprocess_batch(batch):
    """Apply ZMUV normalization independently to every patch in `batch`.

    Axis 0 indexes patches; the mean and std are computed over each patch's
    remaining axes. Uses `keepdims=True` instead of the original manual
    reshape-for-broadcasting, which is equivalent but far more readable.
    """
    per_patch_axes = tuple(range(1, batch.ndim))
    mean = np.mean(batch, axis=per_patch_axes, keepdims=True)
    std = np.std(batch, axis=per_patch_axes, keepdims=True)
    return (batch - mean) / std
def merge_labels(labels):
    """Merge several annotators' label volumes into a consensus volume.

    A voxel becomes VESSEL when no annotation calls it background, NONE when
    every annotation leaves it unassigned or when the annotations conflict
    (some vessel, some background), and BACKGROUND otherwise.
    """
    reference = labels[0]
    assert all(lbl.shape == reference.shape for lbl in labels)
    assert all(lbl.dtype == reference.dtype for lbl in labels)

    # Start from an all-background consensus.
    merged = np.full(reference.shape, LABEL.BACKGROUND, dtype=reference.dtype)

    # Voxels that no annotation calls background become vessels.
    all_non_background = reduce(
        np.logical_and, (lbl != LABEL.BACKGROUND for lbl in labels)
    )
    merged[all_non_background] = LABEL.VESSEL

    # Voxels left unassigned by everyone, or claimed as both vessel and
    # background by different annotators, become unassigned.
    all_unassigned = reduce(
        np.logical_and, (lbl == LABEL.NONE for lbl in labels)
    )
    any_vessel = reduce(
        np.logical_or, (lbl == LABEL.VESSEL for lbl in labels)
    )
    any_background = reduce(
        np.logical_or, (lbl == LABEL.BACKGROUND for lbl in labels)
    )
    conflict = np.logical_and(any_vessel, any_background)
    merged[np.logical_or(all_unassigned, conflict)] = LABEL.NONE

    return merged
| {
"repo_name": "molski/pmag",
"path": "utils.py",
"copies": "1",
"size": "4995",
"license": "mit",
"hash": 3394424271241360400,
"line_mean": 26.1467391304,
"line_max": 80,
"alpha_frac": 0.5627627628,
"autogenerated": false,
"ratio": 3.593525179856115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9654085753406825,
"avg_score": 0.00044043784985813966,
"num_lines": 184
} |
from functools import reduce
import os
import time
from collections import deque
import pickle
import warnings
import gym
import numpy as np
import tensorflow as tf
import tensorflow.contrib as tc
from mpi4py import MPI
from stable_baselines import logger
from stable_baselines.common import tf_util, OffPolicyRLModel, SetVerbosity, TensorboardWriter
from stable_baselines.common.vec_env import VecEnv
from stable_baselines.common.mpi_adam import MpiAdam
from stable_baselines.common.buffers import ReplayBuffer
from stable_baselines.common.math_util import unscale_action, scale_action
from stable_baselines.common.mpi_running_mean_std import RunningMeanStd
from stable_baselines.ddpg.policies import DDPGPolicy
def normalize(tensor, stats):
    """Standardize `tensor` with the running statistics in `stats`.

    :param tensor: (TensorFlow Tensor) value to normalize
    :param stats: (RunningMeanStd) running mean/std; when None the input is
        returned untouched
    :return: (TensorFlow Tensor) the standardized tensor
    """
    if stats is None:
        return tensor
    centered = tensor - stats.mean
    return centered / stats.std
def denormalize(tensor, stats):
    """Invert `normalize`: map a standardized tensor back to original scale.

    :param tensor: (TensorFlow Tensor) the normalized tensor
    :param stats: (RunningMeanStd) running mean/std; when None the input is
        returned untouched
    :return: (TensorFlow Tensor) the restored tensor
    """
    if stats is None:
        return tensor
    rescaled = tensor * stats.std
    return rescaled + stats.mean
def reduce_std(tensor, axis=None, keepdims=False):
    """Standard deviation of a tensor, computed as sqrt of `reduce_var`.

    :param tensor: (TensorFlow Tensor) the input tensor
    :param axis: (int or [int]) the axis to iterate the std over
    :param keepdims: (bool) keep the other dimensions the same
    :return: (TensorFlow Tensor) the std of the tensor
    """
    variance = reduce_var(tensor, axis=axis, keepdims=keepdims)
    return tf.sqrt(variance)
def reduce_var(tensor, axis=None, keepdims=False):
    """Population variance of a tensor: mean of squared deviations.

    :param tensor: (TensorFlow Tensor) the input tensor
    :param axis: (int or [int]) the axis to iterate the variance over
    :param keepdims: (bool) keep the other dimensions the same
    :return: (TensorFlow Tensor) the variance of the tensor
    """
    # keepdims=True so the mean broadcasts back against `tensor`.
    mean = tf.reduce_mean(tensor, axis=axis, keepdims=True)
    squared_deviation = tf.square(tensor - mean)
    return tf.reduce_mean(squared_deviation, axis=axis, keepdims=keepdims)
def get_target_updates(_vars, target_vars, tau, verbose=0):
    """Build the hard-init and Polyak soft-update ops for a target network.

    :param _vars: ([TensorFlow Tensor]) the initial variables
    :param target_vars: ([TensorFlow Tensor]) the target variables
    :param tau: (float) the soft update coefficient (keep old values, between 0 and 1)
    :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
    :return: (TensorFlow Operation, TensorFlow Operation) initial update, soft update
    """
    if verbose >= 2:
        logger.info('setting up target updates ...')
    assert len(_vars) == len(target_vars)
    init_updates = []
    soft_updates = []
    for source_var, target_var in zip(_vars, target_vars):
        if verbose >= 2:
            logger.info('  {} <- {}'.format(target_var.name, source_var.name))
        # Hard copy for initialization; Polyak averaging for training.
        init_updates.append(tf.assign(target_var, source_var))
        soft_updates.append(tf.assign(target_var, (1. - tau) * target_var + tau * source_var))
    assert len(init_updates) == len(_vars)
    assert len(soft_updates) == len(_vars)
    return tf.group(*init_updates), tf.group(*soft_updates)
def get_perturbable_vars(scope):
    """Trainable variables in `scope` eligible for parameter noise.

    LayerNorm parameters are excluded so normalization statistics are never
    perturbed.

    :param scope: (str) tensorflow scope of the variables
    :return: ([tf.Variables])
    """
    return list(
        filter(lambda var: 'LayerNorm' not in var.name,
               tf_util.get_trainable_vars(scope))
    )
def get_perturbed_actor_updates(actor, perturbed_actor, param_noise_stddev, verbose=0):
    """
    Get the actor update, with noise.

    Copies every actor variable into the perturbed actor; perturbable
    variables additionally receive Gaussian noise of the given stddev.

    :param actor: (str) scope of the actor
    :param perturbed_actor: (str) scope of the perturbed actor
    :param param_noise_stddev: (float) the std of the parameter noise
    :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
    :return: (TensorFlow Operation) the update function
    """
    assert len(tf_util.get_globals_vars(actor)) == len(tf_util.get_globals_vars(perturbed_actor))
    assert len(get_perturbable_vars(actor)) == len(get_perturbable_vars(perturbed_actor))

    updates = []
    for var, perturbed_var in zip(tf_util.get_globals_vars(actor), tf_util.get_globals_vars(perturbed_actor)):
        if var in get_perturbable_vars(actor):
            if verbose >= 2:
                logger.info('  {} <- {} + noise'.format(perturbed_var.name, var.name))
            # Add Gaussian noise to the parameter
            updates.append(tf.assign(perturbed_var,
                                     var + tf.random_normal(tf.shape(var), mean=0., stddev=param_noise_stddev)))
        else:
            # Non-perturbable variables (e.g. LayerNorm) are copied verbatim.
            if verbose >= 2:
                logger.info('  {} <- {}'.format(perturbed_var.name, var.name))
            updates.append(tf.assign(perturbed_var, var))
    assert len(updates) == len(tf_util.get_globals_vars(actor))
    return tf.group(*updates)
class DDPG(OffPolicyRLModel):
"""
Deep Deterministic Policy Gradient (DDPG) model
DDPG: https://arxiv.org/pdf/1509.02971.pdf
:param policy: (DDPGPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, LnMlpPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) the discount factor
:param memory_policy: (ReplayBuffer) the replay buffer
(if None, default to baselines.deepq.replay_buffer.ReplayBuffer)
.. deprecated:: 2.6.0
This parameter will be removed in a future version
:param eval_env: (Gym Environment) the evaluation environment (can be None)
:param nb_train_steps: (int) the number of training steps
:param nb_rollout_steps: (int) the number of rollout steps
:param nb_eval_steps: (int) the number of evaluation steps
:param param_noise: (AdaptiveParamNoiseSpec) the parameter noise type (can be None)
:param action_noise: (ActionNoise) the action noise type (can be None)
:param param_noise_adaption_interval: (int) apply param noise every N steps
:param tau: (float) the soft update coefficient (keep old values, between 0 and 1)
:param normalize_returns: (bool) should the critic output be normalized
:param enable_popart: (bool) enable pop-art normalization of the critic output
(https://arxiv.org/pdf/1602.07714.pdf), normalize_returns must be set to True.
:param normalize_observations: (bool) should the observation be normalized
:param batch_size: (int) the size of the batch for learning the policy
:param observation_range: (tuple) the bounding values for the observation
:param return_range: (tuple) the bounding values for the critic output
:param critic_l2_reg: (float) l2 regularizer coefficient
:param actor_lr: (float) the actor learning rate
:param critic_lr: (float) the critic learning rate
:param clip_norm: (float) clip the gradients (disabled if None)
:param reward_scale: (float) the value the reward should be scaled by
:param render: (bool) enable rendering of the environment
:param render_eval: (bool) enable rendering of the evaluation environment
:param memory_limit: (int) the max number of transitions to store, size of the replay buffer
.. deprecated:: 2.6.0
Use `buffer_size` instead.
:param buffer_size: (int) the max number of transitions to store, size of the replay buffer
:param random_exploration: (float) Probability of taking a random action (as in an epsilon-greedy strategy)
This is not needed for DDPG normally but can help exploring when using HER + DDPG.
This hack was present in the original OpenAI Baselines repo (DDPG + HER)
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
    def __init__(self, policy, env, gamma=0.99, memory_policy=None, eval_env=None, nb_train_steps=50,
                 nb_rollout_steps=100, nb_eval_steps=100, param_noise=None, action_noise=None,
                 normalize_observations=False, tau=0.001, batch_size=128, param_noise_adaption_interval=50,
                 normalize_returns=False, enable_popart=False, observation_range=(-5., 5.), critic_l2_reg=0.,
                 return_range=(-np.inf, np.inf), actor_lr=1e-4, critic_lr=1e-3, clip_norm=None, reward_scale=1.,
                 render=False, render_eval=False, memory_limit=None, buffer_size=50000, random_exploration=0.0,
                 verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,
                 full_tensorboard_log=False, seed=None, n_cpu_tf_sess=1):
        """Initialize the DDPG model; see the class docstring for parameter details."""
        super(DDPG, self).__init__(policy=policy, env=env, replay_buffer=None,
                                   verbose=verbose, policy_base=DDPGPolicy,
                                   requires_vec_env=False, policy_kwargs=policy_kwargs,
                                   seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)

        # Parameters.
        self.gamma = gamma
        self.tau = tau

        # TODO: remove this param in v3.x.x
        if memory_policy is not None:
            warnings.warn("memory_policy will be removed in a future version (v3.x.x) "
                          "it is now ignored and replaced with ReplayBuffer", DeprecationWarning)

        if memory_limit is not None:
            warnings.warn("memory_limit will be removed in a future version (v3.x.x) "
                          "use buffer_size instead", DeprecationWarning)
            # Deprecated alias still wins over buffer_size when provided.
            buffer_size = memory_limit

        self.normalize_observations = normalize_observations
        self.normalize_returns = normalize_returns
        self.action_noise = action_noise
        self.param_noise = param_noise
        self.return_range = return_range
        self.observation_range = observation_range
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr
        self.clip_norm = clip_norm
        self.enable_popart = enable_popart
        self.reward_scale = reward_scale
        self.batch_size = batch_size
        self.critic_l2_reg = critic_l2_reg
        self.eval_env = eval_env
        self.render = render
        self.render_eval = render_eval
        self.nb_eval_steps = nb_eval_steps
        self.param_noise_adaption_interval = param_noise_adaption_interval
        self.nb_train_steps = nb_train_steps
        self.nb_rollout_steps = nb_rollout_steps
        self.memory_limit = memory_limit
        self.buffer_size = buffer_size
        self.tensorboard_log = tensorboard_log
        self.full_tensorboard_log = full_tensorboard_log
        self.random_exploration = random_exploration

        # init
        # The attributes below are TF graph pieces and bookkeeping populated
        # by setup_model(); declared here so the full attribute set of an
        # instance is visible in one place.
        self.graph = None
        self.stats_sample = None
        self.replay_buffer = None
        self.policy_tf = None
        self.target_init_updates = None
        self.target_soft_updates = None
        self.critic_loss = None
        self.critic_grads = None
        self.critic_optimizer = None
        self.sess = None
        self.stats_ops = None
        self.stats_names = None
        self.perturbed_actor_tf = None
        self.perturb_policy_ops = None
        self.perturb_adaptive_policy_ops = None
        self.adaptive_policy_distance = None
        self.actor_loss = None
        self.actor_grads = None
        self.actor_optimizer = None
        self.old_std = None
        self.old_mean = None
        self.renormalize_q_outputs_op = None
        self.obs_rms = None
        self.ret_rms = None
        self.target_policy = None
        self.actor_tf = None
        self.normalized_critic_tf = None
        self.critic_tf = None
        self.normalized_critic_with_actor_tf = None
        self.critic_with_actor_tf = None
        self.target_q = None
        self.obs_train = None
        self.action_train_ph = None
        self.obs_target = None
        self.action_target = None
        self.obs_noise = None
        self.action_noise_ph = None
        self.obs_adapt_noise = None
        self.action_adapt_noise = None
        self.terminals_ph = None
        self.rewards = None
        self.actions = None
        self.critic_target = None
        self.param_noise_stddev = None
        self.param_noise_actor = None
        self.adaptive_param_noise_actor = None
        self.params = None
        self.summary = None
        self.tb_seen_steps = None
        self.target_params = None
        self.obs_rms_params = None
        self.ret_rms_params = None

        if _init_setup_model:
            self.setup_model()
    def _get_pretrain_placeholders(self):
        """Return (obs placeholder, action placeholder, deterministic action op) for pretraining."""
        policy = self.policy_tf
        # Rescale from the policy's normalized action range to the env's.
        deterministic_action = unscale_action(self.action_space, self.actor_tf)
        return policy.obs_ph, self.actions, deterministic_action
    def setup_model(self):
        """Build the full TF graph: policies, target nets, losses, optimizers and summaries."""
        with SetVerbosity(self.verbose):
            assert isinstance(self.action_space, gym.spaces.Box), \
                "Error: DDPG cannot output a {} action space, only spaces.Box is supported.".format(self.action_space)
            assert issubclass(self.policy, DDPGPolicy), "Error: the input policy for the DDPG model must be " \
                "an instance of DDPGPolicy."

            self.graph = tf.Graph()
            with self.graph.as_default():
                self.set_random_seed(self.seed)
                self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)

                self.replay_buffer = ReplayBuffer(self.buffer_size)

                with tf.variable_scope("input", reuse=False):
                    # Observation normalization.
                    if self.normalize_observations:
                        with tf.variable_scope('obs_rms'):
                            self.obs_rms = RunningMeanStd(shape=self.observation_space.shape)
                    else:
                        self.obs_rms = None

                    # Return normalization.
                    if self.normalize_returns:
                        with tf.variable_scope('ret_rms'):
                            self.ret_rms = RunningMeanStd()
                    else:
                        self.ret_rms = None

                    self.policy_tf = self.policy(self.sess, self.observation_space, self.action_space, 1, 1, None,
                                                 **self.policy_kwargs)

                    # Create target networks.
                    self.target_policy = self.policy(self.sess, self.observation_space, self.action_space, 1, 1, None,
                                                     **self.policy_kwargs)
                    self.obs_target = self.target_policy.obs_ph
                    self.action_target = self.target_policy.action_ph

                    # Observations are normalized and clipped to observation_range.
                    normalized_obs = tf.clip_by_value(normalize(self.policy_tf.processed_obs, self.obs_rms),
                                                      self.observation_range[0], self.observation_range[1])
                    normalized_next_obs = tf.clip_by_value(normalize(self.target_policy.processed_obs, self.obs_rms),
                                                           self.observation_range[0], self.observation_range[1])

                    if self.param_noise is not None:
                        # Configure perturbed actor.
                        self.param_noise_actor = self.policy(self.sess, self.observation_space, self.action_space, 1, 1,
                                                             None, **self.policy_kwargs)
                        self.obs_noise = self.param_noise_actor.obs_ph
                        self.action_noise_ph = self.param_noise_actor.action_ph

                        # Configure separate copy for stddev adoption.
                        self.adaptive_param_noise_actor = self.policy(self.sess, self.observation_space,
                                                                      self.action_space, 1, 1, None,
                                                                      **self.policy_kwargs)
                        self.obs_adapt_noise = self.adaptive_param_noise_actor.obs_ph
                        self.action_adapt_noise = self.adaptive_param_noise_actor.action_ph

                    # Inputs.
                    self.obs_train = self.policy_tf.obs_ph
                    self.action_train_ph = self.policy_tf.action_ph
                    self.terminals_ph = tf.placeholder(tf.float32, shape=(None, 1), name='terminals')
                    self.rewards = tf.placeholder(tf.float32, shape=(None, 1), name='rewards')
                    self.actions = tf.placeholder(tf.float32, shape=(None,) + self.action_space.shape, name='actions')
                    self.critic_target = tf.placeholder(tf.float32, shape=(None, 1), name='critic_target')
                    self.param_noise_stddev = tf.placeholder(tf.float32, shape=(), name='param_noise_stddev')

                # Create networks and core TF parts that are shared across setup parts.
                with tf.variable_scope("model", reuse=False):
                    self.actor_tf = self.policy_tf.make_actor(normalized_obs)
                    self.normalized_critic_tf = self.policy_tf.make_critic(normalized_obs, self.actions)
                    self.normalized_critic_with_actor_tf = self.policy_tf.make_critic(normalized_obs,
                                                                                      self.actor_tf,
                                                                                      reuse=True)
                # Noise setup
                if self.param_noise is not None:
                    self._setup_param_noise(normalized_obs)

                with tf.variable_scope("target", reuse=False):
                    critic_target = self.target_policy.make_critic(normalized_next_obs,
                                                                   self.target_policy.make_actor(normalized_next_obs))

                with tf.variable_scope("loss", reuse=False):
                    # Critic outputs are denormalized (and clipped) back to the
                    # true return scale before being used.
                    self.critic_tf = denormalize(
                        tf.clip_by_value(self.normalized_critic_tf, self.return_range[0], self.return_range[1]),
                        self.ret_rms)

                    self.critic_with_actor_tf = denormalize(
                        tf.clip_by_value(self.normalized_critic_with_actor_tf,
                                         self.return_range[0], self.return_range[1]),
                        self.ret_rms)

                    # One-step TD target: r + gamma * (1 - done) * Q'(s', pi'(s')).
                    q_next_obs = denormalize(critic_target, self.ret_rms)
                    self.target_q = self.rewards + (1. - self.terminals_ph) * self.gamma * q_next_obs

                    tf.summary.scalar('critic_target', tf.reduce_mean(self.critic_target))
                    if self.full_tensorboard_log:
                        tf.summary.histogram('critic_target', self.critic_target)

                # Set up parts.
                if self.normalize_returns and self.enable_popart:
                    self._setup_popart()
                self._setup_stats()
                self._setup_target_network_updates()

                with tf.variable_scope("input_info", reuse=False):
                    tf.summary.scalar('rewards', tf.reduce_mean(self.rewards))
                    tf.summary.scalar('param_noise_stddev', tf.reduce_mean(self.param_noise_stddev))

                    if self.full_tensorboard_log:
                        tf.summary.histogram('rewards', self.rewards)
                        tf.summary.histogram('param_noise_stddev', self.param_noise_stddev)
                        if len(self.observation_space.shape) == 3 and self.observation_space.shape[0] in [1, 3, 4]:
                            tf.summary.image('observation', self.obs_train)
                        else:
                            tf.summary.histogram('observation', self.obs_train)

                with tf.variable_scope("Adam_mpi", reuse=False):
                    self._setup_actor_optimizer()
                    self._setup_critic_optimizer()
                    tf.summary.scalar('actor_loss', self.actor_loss)
                    tf.summary.scalar('critic_loss', self.critic_loss)

                self.params = tf_util.get_trainable_vars("model") \
                    + tf_util.get_trainable_vars('noise/') + tf_util.get_trainable_vars('noise_adapt/')

                self.target_params = tf_util.get_trainable_vars("target")
                self.obs_rms_params = [var for var in tf.global_variables()
                                       if "obs_rms" in var.name]
                self.ret_rms_params = [var for var in tf.global_variables()
                                       if "ret_rms" in var.name]

                with self.sess.as_default():
                    self._initialize(self.sess)

                self.summary = tf.summary.merge_all()
def _setup_target_network_updates(self):
"""
set the target update operations
"""
init_updates, soft_updates = get_target_updates(tf_util.get_trainable_vars('model/'),
tf_util.get_trainable_vars('target/'), self.tau,
self.verbose)
self.target_init_updates = init_updates
self.target_soft_updates = soft_updates
    def _setup_param_noise(self, normalized_obs):
        """
        Setup the parameter noise operations

        Builds a perturbed copy of the actor (used for exploration), a second
        adaptive copy (used only to tune the noise stddev), and the distance
        op comparing the adaptive actor's output to the unperturbed actor's.

        :param normalized_obs: (TensorFlow Tensor) the normalized observation
        """
        assert self.param_noise is not None

        with tf.variable_scope("noise", reuse=False):
            self.perturbed_actor_tf = self.param_noise_actor.make_actor(normalized_obs)

        with tf.variable_scope("noise_adapt", reuse=False):
            adaptive_actor_tf = self.adaptive_param_noise_actor.make_actor(normalized_obs)

        with tf.variable_scope("noise_update_func", reuse=False):
            if self.verbose >= 2:
                logger.info('setting up param noise')
            self.perturb_policy_ops = get_perturbed_actor_updates('model/pi/', 'noise/pi/', self.param_noise_stddev,
                                                                  verbose=self.verbose)

            self.perturb_adaptive_policy_ops = get_perturbed_actor_updates('model/pi/', 'noise_adapt/pi/',
                                                                           self.param_noise_stddev,
                                                                           verbose=self.verbose)
            # RMS distance between clean and adaptively-perturbed actions.
            self.adaptive_policy_distance = tf.sqrt(tf.reduce_mean(tf.square(self.actor_tf - adaptive_actor_tf)))
def _setup_actor_optimizer(self):
"""
setup the optimizer for the actor
"""
if self.verbose >= 2:
logger.info('setting up actor optimizer')
self.actor_loss = -tf.reduce_mean(self.critic_with_actor_tf)
actor_shapes = [var.get_shape().as_list() for var in tf_util.get_trainable_vars('model/pi/')]
actor_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in actor_shapes])
if self.verbose >= 2:
logger.info(' actor shapes: {}'.format(actor_shapes))
logger.info(' actor params: {}'.format(actor_nb_params))
self.actor_grads = tf_util.flatgrad(self.actor_loss, tf_util.get_trainable_vars('model/pi/'),
clip_norm=self.clip_norm)
self.actor_optimizer = MpiAdam(var_list=tf_util.get_trainable_vars('model/pi/'), beta1=0.9, beta2=0.999,
epsilon=1e-08)
    def _setup_critic_optimizer(self):
        """
        setup the optimizer for the critic

        The loss is the MSE between the (normalized) critic output and the
        normalized TD target, optionally augmented with L2 weight
        regularization on the critic's hidden layers.
        """
        if self.verbose >= 2:
            logger.info('setting up critic optimizer')
        # TD targets are normalized into the same space as the critic output.
        normalized_critic_target_tf = tf.clip_by_value(normalize(self.critic_target, self.ret_rms),
                                                       self.return_range[0], self.return_range[1])
        self.critic_loss = tf.reduce_mean(tf.square(self.normalized_critic_tf - normalized_critic_target_tf))
        if self.critic_l2_reg > 0.:
            # Regularize hidden weights only: biases and the output layer
            # are excluded by name.
            critic_reg_vars = [var for var in tf_util.get_trainable_vars('model/qf/')
                               if 'bias' not in var.name and 'qf_output' not in var.name and 'b' not in var.name]
            if self.verbose >= 2:
                for var in critic_reg_vars:
                    logger.info('  regularizing: {}'.format(var.name))
                logger.info('  applying l2 regularization with {}'.format(self.critic_l2_reg))
            critic_reg = tc.layers.apply_regularization(
                tc.layers.l2_regularizer(self.critic_l2_reg),
                weights_list=critic_reg_vars
            )
            self.critic_loss += critic_reg
        critic_shapes = [var.get_shape().as_list() for var in tf_util.get_trainable_vars('model/qf/')]
        critic_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in critic_shapes])
        if self.verbose >= 2:
            logger.info('  critic shapes: {}'.format(critic_shapes))
            logger.info('  critic params: {}'.format(critic_nb_params))
        self.critic_grads = tf_util.flatgrad(self.critic_loss, tf_util.get_trainable_vars('model/qf/'),
                                             clip_norm=self.clip_norm)
        self.critic_optimizer = MpiAdam(var_list=tf_util.get_trainable_vars('model/qf/'), beta1=0.9, beta2=0.999,
                                        epsilon=1e-08)
    def _setup_popart(self):
        """
        setup pop-art normalization of the critic output

        See https://arxiv.org/pdf/1602.07714.pdf for details:
        "Preserving Outputs Precisely, while Adaptively Rescaling Targets".

        Rescales the last critic layer (of both the model and the target
        network) so its denormalized output is unchanged when the return
        statistics (ret_rms) are updated.
        """
        self.old_std = tf.placeholder(tf.float32, shape=[1], name='old_std')
        new_std = self.ret_rms.std
        self.old_mean = tf.placeholder(tf.float32, shape=[1], name='old_mean')
        new_mean = self.ret_rms.mean

        self.renormalize_q_outputs_op = []
        for out_vars in [[var for var in tf_util.get_trainable_vars('model/qf/') if 'qf_output' in var.name],
                         [var for var in tf_util.get_trainable_vars('target/qf/') if 'qf_output' in var.name]]:
            assert len(out_vars) == 2
            # weight and bias of the last layer
            weight, bias = out_vars
            assert 'kernel' in weight.name
            assert 'bias' in bias.name
            assert weight.get_shape()[-1] == 1
            assert bias.get_shape()[-1] == 1
            self.renormalize_q_outputs_op += [weight.assign(weight * self.old_std / new_std)]
            self.renormalize_q_outputs_op += [bias.assign((bias * self.old_std + self.old_mean - new_mean) / new_std)]
def _setup_stats(self):
"""
Setup the stat logger for DDPG.
"""
ops = [
tf.reduce_mean(self.critic_tf),
reduce_std(self.critic_tf),
tf.reduce_mean(self.critic_with_actor_tf),
reduce_std(self.critic_with_actor_tf),
tf.reduce_mean(self.actor_tf),
reduce_std(self.actor_tf)
]
names = [
'reference_Q_mean',
'reference_Q_std',
'reference_actor_Q_mean',
'reference_actor_Q_std',
'reference_action_mean',
'reference_action_std'
]
if self.normalize_returns:
ops += [self.ret_rms.mean, self.ret_rms.std]
names += ['ret_rms_mean', 'ret_rms_std']
if self.normalize_observations:
ops += [tf.reduce_mean(self.obs_rms.mean), tf.reduce_mean(self.obs_rms.std)]
names += ['obs_rms_mean', 'obs_rms_std']
if self.param_noise:
ops += [tf.reduce_mean(self.perturbed_actor_tf), reduce_std(self.perturbed_actor_tf)]
names += ['reference_perturbed_action_mean', 'reference_perturbed_action_std']
self.stats_ops = ops
self.stats_names = names
def _policy(self, obs, apply_noise=True, compute_q=True):
"""
Get the actions and critic output, from a given observation
:param obs: ([float] or [int]) the observation
:param apply_noise: (bool) enable the noise
:param compute_q: (bool) compute the critic output
:return: ([float], float) the action and critic value
"""
obs = np.array(obs).reshape((-1,) + self.observation_space.shape)
feed_dict = {self.obs_train: obs}
if self.param_noise is not None and apply_noise:
actor_tf = self.perturbed_actor_tf
feed_dict[self.obs_noise] = obs
else:
actor_tf = self.actor_tf
if compute_q:
action, q_value = self.sess.run([actor_tf, self.critic_with_actor_tf], feed_dict=feed_dict)
else:
action = self.sess.run(actor_tf, feed_dict=feed_dict)
q_value = None
action = action.flatten()
if self.action_noise is not None and apply_noise:
noise = self.action_noise()
action += noise
action = np.clip(action, -1, 1)
return action, q_value
    def _store_transition(self, obs, action, reward, next_obs, done, info):
        """
        Store a transition in the replay buffer

        :param obs: ([float] or [int]) the last observation
        :param action: ([float]) the action
        :param reward: (float) the reward
        :param next_obs: ([float] or [int]) the current observation
        :param done: (bool) Whether the episode is over
        :param info: (dict) extra values used to compute reward when using HER
        """
        # Scale the reward before storage so sampled batches already carry it.
        reward *= self.reward_scale
        self.replay_buffer_add(obs, action, reward, next_obs, done, info)
        if self.normalize_observations:
            # Keep the running observation statistics up to date.
            self.obs_rms.update(np.array([obs]))
    def _train_step(self, step, writer, log=False):
        """
        run a step of training from batch

        :param step: (int) the current step iteration
        :param writer: (TensorFlow Summary.writer) the writer for tensorboard
        :param log: (bool) whether or not to log to metadata
        :return: (float, float) critic loss, actor loss
        """
        # Get a batch
        obs, actions, rewards, next_obs, terminals = self.replay_buffer.sample(batch_size=self.batch_size,
                                                                               env=self._vec_normalize_env)
        # Reshape to match previous behavior and placeholder shape
        rewards = rewards.reshape(-1, 1)
        terminals = terminals.reshape(-1, 1)
        if self.normalize_returns and self.enable_popart:
            # Pop-Art path: compute the target Q and the *old* return stats,
            # update the stats with the new targets, then rescale the critic
            # output layers so predictions stay consistent (see _setup_popart).
            old_mean, old_std, target_q = self.sess.run([self.ret_rms.mean, self.ret_rms.std, self.target_q],
                                                        feed_dict={
                                                            self.obs_target: next_obs,
                                                            self.rewards: rewards,
                                                            self.terminals_ph: terminals
                                                        })
            self.ret_rms.update(target_q.flatten())
            self.sess.run(self.renormalize_q_outputs_op, feed_dict={
                self.old_std: np.array([old_std]),
                self.old_mean: np.array([old_mean]),
            })
        else:
            target_q = self.sess.run(self.target_q, feed_dict={
                self.obs_target: next_obs,
                self.rewards: rewards,
                self.terminals_ph: terminals
            })
        # Get all gradients and perform a synced update.
        ops = [self.actor_grads, self.actor_loss, self.critic_grads, self.critic_loss]
        td_map = {
            self.obs_train: obs,
            self.actions: actions,
            self.action_train_ph: actions,
            self.rewards: rewards,
            self.critic_target: target_q,
            self.param_noise_stddev: 0 if self.param_noise is None else self.param_noise.current_stddev
        }
        if writer is not None:
            # run loss backprop with summary if the step_id was not already logged (can happen with the right
            # parameters as the step value is only an estimate)
            if self.full_tensorboard_log and log and step not in self.tb_seen_steps:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                summary, actor_grads, actor_loss, critic_grads, critic_loss = \
                    self.sess.run([self.summary] + ops, td_map, options=run_options, run_metadata=run_metadata)
                writer.add_run_metadata(run_metadata, 'step%d' % step)
                self.tb_seen_steps.append(step)
            else:
                summary, actor_grads, actor_loss, critic_grads, critic_loss = self.sess.run([self.summary] + ops,
                                                                                            td_map)
            writer.add_summary(summary, step)
        else:
            actor_grads, actor_loss, critic_grads, critic_loss = self.sess.run(ops, td_map)
        # Apply the gradients with the MPI-synchronized optimizers.
        self.actor_optimizer.update(actor_grads, learning_rate=self.actor_lr)
        self.critic_optimizer.update(critic_grads, learning_rate=self.critic_lr)
        return critic_loss, actor_loss
    def _initialize(self, sess):
        """
        initialize the model parameters and optimizers

        :param sess: (TensorFlow Session) the current TensorFlow session
        """
        self.sess = sess
        self.sess.run(tf.global_variables_initializer())
        # Sync the optimizer state across MPI workers before touching the targets.
        self.actor_optimizer.sync()
        self.critic_optimizer.sync()
        # Hard-copy the freshly initialized weights into the target networks.
        self.sess.run(self.target_init_updates)
    def _update_target_net(self):
        """
        run target soft update operation

        Moves the target networks a small step towards the online networks.
        """
        self.sess.run(self.target_soft_updates)
def _get_stats(self):
"""
Get the mean and standard deviation of the model's inputs and outputs
:return: (dict) the means and stds
"""
if self.stats_sample is None:
# Get a sample and keep that fixed for all further computations.
# This allows us to estimate the change in value for the same set of inputs.
obs, actions, rewards, next_obs, terminals = self.replay_buffer.sample(batch_size=self.batch_size,
env=self._vec_normalize_env)
self.stats_sample = {
'obs': obs,
'actions': actions,
'rewards': rewards,
'next_obs': next_obs,
'terminals': terminals
}
feed_dict = {
self.actions: self.stats_sample['actions']
}
for placeholder in [self.action_train_ph, self.action_target, self.action_adapt_noise, self.action_noise_ph]:
if placeholder is not None:
feed_dict[placeholder] = self.stats_sample['actions']
for placeholder in [self.obs_train, self.obs_target, self.obs_adapt_noise, self.obs_noise]:
if placeholder is not None:
feed_dict[placeholder] = self.stats_sample['obs']
values = self.sess.run(self.stats_ops, feed_dict=feed_dict)
names = self.stats_names[:]
assert len(names) == len(values)
stats = dict(zip(names, values))
if self.param_noise is not None:
stats = {**stats, **self.param_noise.get_stats()}
return stats
    def _adapt_param_noise(self):
        """
        calculate the adaptation for the parameter noise

        :return: (float) the mean distance for the parameter noise
        """
        if self.param_noise is None:
            return 0.
        # Perturb a separate copy of the policy to adjust the scale for the next "real" perturbation.
        obs, *_ = self.replay_buffer.sample(batch_size=self.batch_size, env=self._vec_normalize_env)
        self.sess.run(self.perturb_adaptive_policy_ops, feed_dict={
            self.param_noise_stddev: self.param_noise.current_stddev,
        })
        # Distance between the unperturbed and the adaptively-perturbed policy outputs.
        distance = self.sess.run(self.adaptive_policy_distance, feed_dict={
            self.obs_adapt_noise: obs, self.obs_train: obs,
            self.param_noise_stddev: self.param_noise.current_stddev,
        })
        # Average the distance over all MPI workers before adapting the stddev.
        mean_distance = MPI.COMM_WORLD.allreduce(distance, op=MPI.SUM) / MPI.COMM_WORLD.Get_size()
        self.param_noise.adapt(mean_distance)
        return mean_distance
    def _reset(self):
        """
        Reset internal state after an episode is complete.
        """
        if self.action_noise is not None:
            self.action_noise.reset()
        if self.param_noise is not None:
            # Re-sample the parameter-space perturbation for the next episode.
            self.sess.run(self.perturb_policy_ops, feed_dict={
                self.param_noise_stddev: self.param_noise.current_stddev,
            })
    def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="DDPG",
              reset_num_timesteps=True, replay_wrapper=None):
        """
        Train the DDPG agent.

        :param total_timesteps: (int) number of environment steps to train for
        :param callback: (BaseCallback or callable or None) called at every environment step
        :param log_interval: (int) number of rollout/train cycles between stat dumps
        :param tb_log_name: (str) the name of the run for tensorboard logging
        :param reset_num_timesteps: (bool) whether to reset the current timestep counter
        :param replay_wrapper: (callable or None) wraps the replay buffer (used e.g. by HER)
        :return: (DDPG) self, the trained model
        """
        new_tb_log = self._init_num_timesteps(reset_num_timesteps)
        callback = self._init_callback(callback)
        if replay_wrapper is not None:
            self.replay_buffer = replay_wrapper(self.replay_buffer)
        with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
                as writer:
            self._setup_learn()
            # a list for tensorboard logging, to prevent logging with the same step number, if it already occured
            self.tb_seen_steps = []
            rank = MPI.COMM_WORLD.Get_rank()
            if self.verbose >= 2:
                logger.log('Using agent with the following configuration:')
                logger.log(str(self.__dict__.items()))
            eval_episode_rewards_history = deque(maxlen=100)
            episode_rewards_history = deque(maxlen=100)
            episode_successes = []
            with self.sess.as_default(), self.graph.as_default():
                # Prepare everything.
                self._reset()
                obs = self.env.reset()
                # Retrieve unnormalized observation for saving into the buffer
                if self._vec_normalize_env is not None:
                    obs_ = self._vec_normalize_env.get_original_obs().squeeze()
                eval_obs = None
                if self.eval_env is not None:
                    eval_obs = self.eval_env.reset()
                episode_reward = 0.
                episode_step = 0
                episodes = 0
                step = 0
                total_steps = 0
                start_time = time.time()
                epoch_episode_rewards = []
                epoch_episode_steps = []
                epoch_actor_losses = []
                epoch_critic_losses = []
                epoch_adaptive_distances = []
                eval_episode_rewards = []
                eval_qs = []
                epoch_actions = []
                epoch_qs = []
                epoch_episodes = 0
                epoch = 0
                callback.on_training_start(locals(), globals())
                # Outer loop: repeats rollout/train/eval cycles until the
                # timestep budget is exhausted (exit happens via `return` below).
                while True:
                    for _ in range(log_interval):
                        callback.on_rollout_start()
                        # Perform rollouts.
                        for _ in range(self.nb_rollout_steps):
                            if total_steps >= total_timesteps:
                                callback.on_training_end()
                                return self
                            # Predict next action.
                            action, q_value = self._policy(obs, apply_noise=True, compute_q=True)
                            assert action.shape == self.env.action_space.shape
                            # Execute next action.
                            if rank == 0 and self.render:
                                self.env.render()
                            # Randomly sample actions from a uniform distribution
                            # with a probability self.random_exploration (used in HER + DDPG)
                            if np.random.rand() < self.random_exploration:
                                # actions sampled from action space are from range specific to the environment
                                # but algorithm operates on tanh-squashed actions therefore simple scaling is used
                                unscaled_action = self.action_space.sample()
                                action = scale_action(self.action_space, unscaled_action)
                            else:
                                # inferred actions need to be transformed to environment action_space before stepping
                                unscaled_action = unscale_action(self.action_space, action)
                            new_obs, reward, done, info = self.env.step(unscaled_action)
                            self.num_timesteps += 1
                            callback.update_locals(locals())
                            if callback.on_step() is False:
                                callback.on_training_end()
                                return self
                            step += 1
                            total_steps += 1
                            if rank == 0 and self.render:
                                self.env.render()
                            # Book-keeping.
                            epoch_actions.append(action)
                            epoch_qs.append(q_value)
                            # Store only the unnormalized version
                            if self._vec_normalize_env is not None:
                                new_obs_ = self._vec_normalize_env.get_original_obs().squeeze()
                                reward_ = self._vec_normalize_env.get_original_reward().squeeze()
                            else:
                                # Avoid changing the original ones
                                obs_, new_obs_, reward_ = obs, new_obs, reward
                            self._store_transition(obs_, action, reward_, new_obs_, done, info)
                            obs = new_obs
                            # Save the unnormalized observation
                            if self._vec_normalize_env is not None:
                                obs_ = new_obs_
                            episode_reward += reward_
                            episode_step += 1
                            if writer is not None:
                                ep_rew = np.array([reward_]).reshape((1, -1))
                                ep_done = np.array([done]).reshape((1, -1))
                                tf_util.total_episode_reward_logger(self.episode_reward, ep_rew, ep_done,
                                                                    writer, self.num_timesteps)
                            if done:
                                # Episode done.
                                epoch_episode_rewards.append(episode_reward)
                                episode_rewards_history.append(episode_reward)
                                epoch_episode_steps.append(episode_step)
                                episode_reward = 0.
                                episode_step = 0
                                epoch_episodes += 1
                                episodes += 1
                                maybe_is_success = info.get('is_success')
                                if maybe_is_success is not None:
                                    episode_successes.append(float(maybe_is_success))
                                self._reset()
                                if not isinstance(self.env, VecEnv):
                                    obs = self.env.reset()
                        callback.on_rollout_end()
                        # Train.
                        epoch_actor_losses = []
                        epoch_critic_losses = []
                        epoch_adaptive_distances = []
                        for t_train in range(self.nb_train_steps):
                            # Not enough samples in the replay buffer
                            if not self.replay_buffer.can_sample(self.batch_size):
                                break
                            # Adapt param noise, if necessary.
                            if len(self.replay_buffer) >= self.batch_size and \
                                    t_train % self.param_noise_adaption_interval == 0:
                                distance = self._adapt_param_noise()
                                epoch_adaptive_distances.append(distance)
                            # weird equation to deal with the fact the nb_train_steps will be different
                            # to nb_rollout_steps
                            step = (int(t_train * (self.nb_rollout_steps / self.nb_train_steps)) +
                                    self.num_timesteps - self.nb_rollout_steps)
                            critic_loss, actor_loss = self._train_step(step, writer, log=t_train == 0)
                            epoch_critic_losses.append(critic_loss)
                            epoch_actor_losses.append(actor_loss)
                            self._update_target_net()
                        # Evaluate.
                        eval_episode_rewards = []
                        eval_qs = []
                        if self.eval_env is not None:
                            eval_episode_reward = 0.
                            for _ in range(self.nb_eval_steps):
                                if total_steps >= total_timesteps:
                                    return self
                                eval_action, eval_q = self._policy(eval_obs, apply_noise=False, compute_q=True)
                                unscaled_action = unscale_action(self.action_space, eval_action)
                                eval_obs, eval_r, eval_done, _ = self.eval_env.step(unscaled_action)
                                if self.render_eval:
                                    self.eval_env.render()
                                eval_episode_reward += eval_r
                                eval_qs.append(eval_q)
                                if eval_done:
                                    if not isinstance(self.env, VecEnv):
                                        eval_obs = self.eval_env.reset()
                                    eval_episode_rewards.append(eval_episode_reward)
                                    eval_episode_rewards_history.append(eval_episode_reward)
                                    eval_episode_reward = 0.
                    mpi_size = MPI.COMM_WORLD.Get_size()
                    # Not enough samples in the replay buffer
                    if not self.replay_buffer.can_sample(self.batch_size):
                        continue
                    # Log stats.
                    # XXX shouldn't call np.mean on variable length lists
                    duration = time.time() - start_time
                    stats = self._get_stats()
                    combined_stats = stats.copy()
                    combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)
                    combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
                    combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
                    combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
                    combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)
                    combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)
                    combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)
                    if len(epoch_adaptive_distances) != 0:
                        combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
                    combined_stats['total/duration'] = duration
                    combined_stats['total/steps_per_second'] = float(step) / float(duration)
                    combined_stats['total/episodes'] = episodes
                    combined_stats['rollout/episodes'] = epoch_episodes
                    combined_stats['rollout/actions_std'] = np.std(epoch_actions)
                    # Evaluation statistics.
                    if self.eval_env is not None:
                        combined_stats['eval/return'] = np.mean(eval_episode_rewards)
                        combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
                        combined_stats['eval/Q'] = np.mean(eval_qs)
                        combined_stats['eval/episodes'] = len(eval_episode_rewards)
                    def as_scalar(scalar):
                        """
                        check and return the input if it is a scalar, otherwise raise ValueError

                        :param scalar: (Any) the object to check
                        :return: (Number) the scalar if x is a scalar
                        """
                        if isinstance(scalar, np.ndarray):
                            assert scalar.size == 1
                            return scalar[0]
                        elif np.isscalar(scalar):
                            return scalar
                        else:
                            raise ValueError('expected scalar, got %s' % scalar)
                    # Average every stat across MPI workers before logging.
                    combined_stats_sums = MPI.COMM_WORLD.allreduce(
                        np.array([as_scalar(x) for x in combined_stats.values()]))
                    combined_stats = {k: v / mpi_size for (k, v) in zip(combined_stats.keys(), combined_stats_sums)}
                    # Total statistics.
                    combined_stats['total/epochs'] = epoch + 1
                    combined_stats['total/steps'] = step
                    for key in sorted(combined_stats.keys()):
                        logger.record_tabular(key, combined_stats[key])
                    if len(episode_successes) > 0:
                        logger.logkv("success rate", np.mean(episode_successes[-100:]))
                    logger.dump_tabular()
                    logger.info('')
                    logdir = logger.get_dir()
                    if rank == 0 and logdir:
                        # Persist the (evaluation) environment state, if supported.
                        if hasattr(self.env, 'get_state'):
                            with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as file_handler:
                                pickle.dump(self.env.get_state(), file_handler)
                        if self.eval_env and hasattr(self.eval_env, 'get_state'):
                            with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as file_handler:
                                pickle.dump(self.eval_env.get_state(), file_handler)
def predict(self, observation, state=None, mask=None, deterministic=True):
observation = np.array(observation)
vectorized_env = self._is_vectorized_observation(observation, self.observation_space)
observation = observation.reshape((-1,) + self.observation_space.shape)
actions, _, = self._policy(observation, apply_noise=not deterministic, compute_q=False)
actions = actions.reshape((-1,) + self.action_space.shape) # reshape to the correct action shape
actions = unscale_action(self.action_space, actions) # scale the output for the prediction
if not vectorized_env:
actions = actions[0]
return actions, None
def action_probability(self, observation, state=None, mask=None, actions=None, logp=False):
_ = np.array(observation)
if actions is not None:
raise ValueError("Error: DDPG does not have action probabilities.")
# here there are no action probabilities, as DDPG does not use a probability distribution
warnings.warn("Warning: action probability is meaningless for DDPG. Returning None")
return None
def get_parameter_list(self):
return (self.params +
self.target_params +
self.obs_rms_params +
self.ret_rms_params)
    def save(self, save_path, cloudpickle=False):
        """
        Save the model's hyper-parameters and weights to ``save_path``.

        :param save_path: (str or file-like) where to save the model
        :param cloudpickle: (bool) use cloudpickle instead of pickle for the data
        """
        # All constructor arguments / configuration needed to rebuild the model.
        data = {
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "nb_eval_steps": self.nb_eval_steps,
            "param_noise_adaption_interval": self.param_noise_adaption_interval,
            "nb_train_steps": self.nb_train_steps,
            "nb_rollout_steps": self.nb_rollout_steps,
            "verbose": self.verbose,
            "param_noise": self.param_noise,
            "action_noise": self.action_noise,
            "gamma": self.gamma,
            "tau": self.tau,
            "normalize_returns": self.normalize_returns,
            "enable_popart": self.enable_popart,
            "normalize_observations": self.normalize_observations,
            "batch_size": self.batch_size,
            "observation_range": self.observation_range,
            "return_range": self.return_range,
            "critic_l2_reg": self.critic_l2_reg,
            "actor_lr": self.actor_lr,
            "critic_lr": self.critic_lr,
            "clip_norm": self.clip_norm,
            "reward_scale": self.reward_scale,
            "memory_limit": self.memory_limit,
            "buffer_size": self.buffer_size,
            "random_exploration": self.random_exploration,
            "policy": self.policy,
            "n_envs": self.n_envs,
            "n_cpu_tf_sess": self.n_cpu_tf_sess,
            "seed": self.seed,
            "_vectorize_action": self._vectorize_action,
            "policy_kwargs": self.policy_kwargs
        }
        # Trainable + target + normalization variables (see get_parameter_list).
        params_to_save = self.get_parameters()
        self._save_to_file(save_path,
                           data=data,
                           params=params_to_save,
                           cloudpickle=cloudpickle)
    @classmethod
    def load(cls, load_path, env=None, custom_objects=None, **kwargs):
        """
        Load a DDPG model from a saved file.

        :param load_path: (str or file-like) the location of the saved data
        :param env: (Gym Environment) the environment to run the loaded model on
        :param custom_objects: (dict) objects to substitute while deserializing
        :param kwargs: extra arguments overriding the saved attributes
        :return: (DDPG) the loaded model
        """
        data, params = cls._load_from_file(load_path, custom_objects=custom_objects)
        if 'policy_kwargs' in kwargs and kwargs['policy_kwargs'] != data['policy_kwargs']:
            raise ValueError("The specified policy kwargs do not equal the stored policy kwargs. "
                             "Stored kwargs: {}, specified kwargs: {}".format(data['policy_kwargs'],
                                                                              kwargs['policy_kwargs']))
        # Build an uninitialized model, restore its attributes, then rebuild the graph.
        model = cls(None, env, _init_setup_model=False)
        model.__dict__.update(data)
        model.__dict__.update(kwargs)
        model.set_env(env)
        model.setup_model()
        # Patch for version < v2.6.0, duplicated keys where saved
        if len(params) > len(model.get_parameter_list()):
            n_params = len(model.params)
            n_target_params = len(model.target_params)
            n_normalisation_params = len(model.obs_rms_params) + len(model.ret_rms_params)
            # Check that the issue is the one from
            # https://github.com/hill-a/stable-baselines/issues/363
            assert len(params) == 2 * (n_params + n_target_params) + n_normalisation_params,\
                "The number of parameter saved differs from the number of parameters"\
                " that should be loaded: {}!={}".format(len(params), len(model.get_parameter_list()))
            # Remove duplicates
            params_ = params[:n_params + n_target_params]
            if n_normalisation_params > 0:
                params_ += params[-n_normalisation_params:]
            params = params_
        model.load_parameters(params)
        return model
| {
"repo_name": "hill-a/stable-baselines",
"path": "stable_baselines/ddpg/ddpg.py",
"copies": "1",
"size": "56965",
"license": "mit",
"hash": 8262858876242834000,
"line_mean": 47.4791489362,
"line_max": 120,
"alpha_frac": 0.5513052332,
"autogenerated": false,
"ratio": 4.220419352448692,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5271724585648692,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import os.path as osp
import collections
import numpy as np
import gc
from rllab.envs.mujoco.mujoco_env import MujocoEnv
from rllab import spaces
from rllab.misc.overrides import overrides
from rllab.misc import logger
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
BIG = 1e6
class MujocoEnv_ObsInit(MujocoEnv):
    """
    - add plot_visitation (possibly used by robots moving in 2D). Compatible with latents.
    - get_ori() base method, to implement in each robot
    - Cached observation_space at initialization to speed up training (x2)
    """
    def __init__(self,
                 visit_axis_bound=None,
                 *args, **kwargs):
        """
        :param visit_axis_bound: (number or None) minimum half-width of the
            visitation-plot axes; when None the plot is sized from the data.
        """
        super(MujocoEnv_ObsInit, self).__init__(*args, **kwargs)
        # Cache the observation space once; rebuilding it on every access is slow.
        shp = self.get_current_obs().shape
        ub = BIG * np.ones(shp)
        self._observation_space = spaces.Box(ub * -1, ub)
        self.visit_axis_bound = visit_axis_bound
    @property
    @overrides
    def observation_space(self):
        # Return the space cached in __init__ instead of rebuilding it.
        return self._observation_space
    def get_ori(self):
        # Orientation of the robot; concrete environments must implement this.
        raise NotImplementedError
    def plot_visitations(self, paths, mesh_density=20, visit_prefix='', visit_axis_bound=None, maze=None, scaling=2):
        """
        Save a 2D visitation heat-map of the robot center of mass to the
        snapshot directory, and log visitation statistics.

        :param paths: (list of dict) rollouts; each needs path['env_infos']['com']
        :param mesh_density: (int) grid cells per unit of distance
        :param visit_prefix: (str) prefix for the output file and tabular keys
        :param visit_axis_bound: (number or None) override for the axis bound
        :param maze: unused here (historical maze-drawing code kept below)
        :param scaling: unused here (historical maze-drawing code kept below)
        """
        if 'env_infos' not in paths[0].keys() or 'com' not in paths[0]['env_infos'].keys():
            raise KeyError("No 'com' key in your path['env_infos']: please change you step function")
        fig, ax = plt.subplots()
        # now we will grid the space and check how much of it the policy is covering
        # NOTE: use builtin int() -- the np.int alias was deprecated in NumPy 1.20
        # and removed in NumPy 1.24.
        x_max = int(np.ceil(np.max(np.abs(np.concatenate([path["env_infos"]['com'][:, 0] for path in paths])))))
        y_max = int(np.ceil(np.max(np.abs(np.concatenate([path["env_infos"]['com'][:, 1] for path in paths])))))
        furthest = max(x_max, y_max)
        print('THE FUTHEST IT WENT COMPONENT-WISE IS: x_max={}, y_max={}'.format(x_max, y_max))
        if visit_axis_bound is None:
            visit_axis_bound = self.visit_axis_bound
        if visit_axis_bound and visit_axis_bound >= furthest:
            furthest = max(furthest, visit_axis_bound)
        # if maze:
        #     x_max = max(scaling * len(
        #         maze) / 2. - 1, x_max)  # maze enlarge plot to include the walls. ASSUME ROBOT STARTS IN CENTER!
        #     y_max = max(scaling * len(maze[0]) / 2. - 1, y_max)  # the max here should be useless...
        # print("THE MAZE LIMITS ARE: x_max={}, y_max={}".format(x_max, y_max))
        delta = 1. / mesh_density
        y, x = np.mgrid[-furthest:furthest + delta:delta, -furthest:furthest + delta:delta]
        if 'agent_infos' in list(paths[0].keys()) and (('latents' in list(paths[0]['agent_infos'].keys())
                                                        and np.size(paths[0]['agent_infos']['latents'])) or
                                                       ('selectors' in list(paths[0]['agent_infos'].keys())
                                                        and np.size(paths[0]['agent_infos']['selectors']))):
            selectors_name = 'selectors' if 'selectors' in list(paths[0]['agent_infos'].keys()) else 'latents'
            dict_visit = collections.OrderedDict()  # keys: latents, values: np.array with number of visitations
            num_latents = np.size(paths[0]["agent_infos"][selectors_name][0])
            # set all the labels for the latents and initialize the entries of dict_visit
            for i in range(num_latents):  # use integer to define the latents
                dict_visit[i] = np.zeros((2 * furthest * mesh_density + 1, 2 * furthest * mesh_density + 1))
            # keep track of the overlap
            overlap = 0
            # now plot all the paths
            for path in paths:
                lats = [np.argmax(lat, axis=-1) for lat in path['agent_infos'][selectors_name]]  # list of all lats by idx
                com_x = np.ceil(((np.array(path['env_infos']['com'][:, 0]) + furthest) * mesh_density)).astype(int)
                com_y = np.ceil(((np.array(path['env_infos']['com'][:, 1]) + furthest) * mesh_density)).astype(int)
                coms = list(zip(com_x, com_y))
                for i, com in enumerate(coms):
                    dict_visit[lats[i]][com] += 1
            # fix the colors for each latent
            num_colors = num_latents + 2  # +2 for the 0 and Repetitions NOT COUNTING THE WALLS
            cmap = plt.get_cmap('nipy_spectral', num_colors)  # add one color for the walls
            # create a matrix with entries corresponding to the latent that was there (or other if several/wall/nothing)
            visitation_by_lat = np.zeros((2 * furthest * mesh_density + 1, 2 * furthest * mesh_density + 1))
            for i, visit in dict_visit.items():
                lat_visit = np.where(visit == 0, visit, i + 1)  # transform the map into 0 or i+1
                visitation_by_lat += lat_visit
                # NOTE(review): np.where with a single argument returns index
                # arrays, so this sums indices rather than counting overlapping
                # cells -- confirm whether np.count_nonzero(...) was intended.
                overlap += np.sum(np.where(visitation_by_lat > lat_visit))  # add the overlaps of this latent
                visitation_by_lat = np.where(visitation_by_lat <= i + 1, visitation_by_lat,
                                             num_colors - 1)  # mark overlaps
            # if maze:  # remember to also put a +1 for cmap!!
            #     for row in range(len(maze)):
            #         for col in range(len(maze[0])):
            #             if maze[row][col] == 1:
            #                 wall_min_x = max(0, (row - 0.5) * mesh_density * scaling)
            #                 wall_max_x = min(2 * furthest * mesh_density * scaling + 1,
            #                                  (row + 0.5) * mesh_density * scaling)
            #                 wall_min_y = max(0, (col - 0.5) * mesh_density * scaling)
            #                 wall_max_y = min(2 * furthest * mesh_density * scaling + 1,
            #                                  (col + 0.5) * mesh_density * scaling)
            #                 visitation_by_lat[wall_min_x: wall_max_x,
            #                 wall_min_y: wall_max_y] = num_colors
            #     gx_min, gfurthest, gy_min, gfurthest = self._find_goal_range()
            #     ax.add_patch(patches.Rectangle(
            #         (gx_min, gy_min),
            #         gfurthest - gx_min,
            #         gfurthest - gy_min,
            #         edgecolor='g', fill=False, linewidth=2,
            #     ))
            #     ax.annotate('G', xy=(0.5*(gx_min+gfurthest), 0.5*(gy_min+gfurthest)), color='g', fontsize=20)
            map_plot = ax.pcolormesh(x, y, visitation_by_lat, cmap=cmap, vmin=0.1,
                                     vmax=num_latents + 1)  # before 1 (will it affect when no walls?)
            color_len = (num_colors - 1.) / num_colors
            ticks = np.arange(color_len / 2., num_colors - 1, color_len)
            cbar = fig.colorbar(map_plot, ticks=ticks)
            latent_tick_labels = ['latent: ' + str(i) for i in list(dict_visit.keys())]
            cbar.ax.set_yticklabels(
                ['No visitation'] + latent_tick_labels + ['Repetitions'])  # horizontal colorbar
            # still log the total visitation
            visitation_all = reduce(np.add, [visit for visit in dict_visit.values()])
        else:
            visitation_all = np.zeros((2 * furthest * mesh_density + 1, 2 * furthest * mesh_density + 1))
            for path in paths:
                com_x = np.ceil(((np.array(path['env_infos']['com'][:, 0]) + furthest) * mesh_density)).astype(int)
                com_y = np.ceil(((np.array(path['env_infos']['com'][:, 1]) + furthest) * mesh_density)).astype(int)
                coms = list(zip(com_x, com_y))
                for com in coms:
                    visitation_all[com] += 1
            plt.pcolormesh(x, y, visitation_all, vmax=mesh_density)
            overlap = np.sum(np.where(visitation_all > 1, visitation_all, 0))  # sum of all visitations larger than 1
        ax.set_xlim([x[0][0], x[0][-1]])
        ax.set_ylim([y[0][0], y[-1][0]])
        log_dir = logger.get_snapshot_dir()
        exp_name = log_dir.split('/')[-1] if log_dir else '?'
        ax.set_title(visit_prefix + 'visitation: ' + exp_name)
        plt.savefig(osp.join(log_dir, visit_prefix + 'visitation.png'))  # this saves the current figure, here f
        plt.close()
        with logger.tabular_prefix(visit_prefix):
            total_visitation = np.count_nonzero(visitation_all)
            logger.record_tabular('VisitationTotal', total_visitation)
            logger.record_tabular('VisitationOverlap', overlap)
        ####
        # This was giving some problem with matplotlib and maximum number of colors
        ####
        # # now downsample the visitation
        # for down in [5, 10, 20]:
        #     visitation_down = np.zeros(tuple((i//down for i in visitation_all.shape)))
        #     delta_down = delta * down
        #     y_down, x_down = np.mgrid[-furthest:furthest+delta_down:delta_down, -furthest:furthest+delta_down:delta_down]
        #     for i, row in enumerate(visitation_down):
        #         for j, v in enumerate(row):
        #             visitation_down[i, j] = np.sum(visitation_all[down*i:down*(1+i), down*j:down*(j+1)])
        #     plt.figure()
        #     plt.pcolormesh(x_down, y_down, visitation_down, vmax=mesh_density)
        #     plt.title('Visitation_down')
        #     plt.xlim([x_down[0][0], x_down[0][-1]])
        #     plt.ylim([y_down[0][0], y_down[-1][0]])
        #     plt.title('visitation_down{}: {}'.format(down, exp_name))
        #     plt.savefig(osp.join(log_dir, 'visitation_down{}.png'.format(down)))
        #     plt.close()
        #
        #     total_visitation_down = np.count_nonzero(visitation_down)
        #     overlap_down = np.sum(np.where(visitation_down > 1, 1, 0))  # sum of all visitations larger than 1
        #     logger.record_tabular('VisitationTotal_down{}'.format(down), total_visitation_down)
        #     logger.record_tabular('VisitationOverlap_down{}'.format(down), overlap_down)
        # Release figure resources: repeated plotting otherwise leaks memory.
        plt.cla()
        plt.clf()
        plt.close('all')
        # del fig, ax, cmap, cbar, map_plot
        gc.collect()
| {
"repo_name": "florensacc/snn4hrl",
"path": "envs/mujoco/mujoco_env.py",
"copies": "1",
"size": "10179",
"license": "mit",
"hash": -2979853239775346700,
"line_mean": 54.3206521739,
"line_max": 123,
"alpha_frac": 0.5491698595,
"autogenerated": false,
"ratio": 3.4123365739188736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9417658840702334,
"avg_score": 0.008769518543307828,
"num_lines": 184
} |
from functools import reduce
import pprint
import time
import shodan
import requests
import api_key
from google_api import query_google_api
from shodan_api import query_shodan_api
from mxtoolbox_api import query_mxtoolbox_api
from dnsbl import query_dnsbl_list
from utils import assoc_default_score, combine_scores
# Shared pretty-printer for dumping the combined score dicts in main().
pp = pprint.PrettyPrinter(indent=2)
def main():
    """
    Fetch a list of known-bad IPs, score them against several reputation
    sources, print the combined scores and how long the run took, and
    return the combined results.
    """
    start_time = time.time()
    # No more than 10 requests
    ips = get_some_ips()
    scored_ips = assoc_default_score(ips)
    shodan_scores = query_shodan_api(ips)
    google_scores = query_google_api(ips)
    # NOTE(review): dnsbl_scores is computed but never combined into the
    # results below -- confirm whether it should be added to the reduce().
    dnsbl_scores = query_dnsbl_list(ips)
    # Limited number of requests... Be careful
    # mx_toolbox_scores = query_mxtoolbox_api(ips)
    results = reduce(combine_scores, [scored_ips, shodan_scores, google_scores])
    pp.pprint(results)
    print("--------- %s seconds -------" % (time.time() - start_time))
    return results
def get_some_ips(timeout=30):
    """
    Download the ZeuS tracker bad-IP blocklist.

    :param timeout: (float) seconds to wait for the HTTP response. Previously
        no timeout was set, so a stalled server could hang the program forever.
    :return: (list of str) the lines of the blocklist that look like IPs
        (non-empty and starting with a digit, skipping comment lines)
    """
    req = requests.get("https://zeustracker.abuse.ch/blocklist.php?download=badips",
                       timeout=timeout)
    return [line for line in req.text.split('\n') if line and line[0].isdigit()]
def get_bad_ips():
    """Read the newline-separated IP list from ``bad_ips.txt``, dropping empty lines."""
    with open("bad_ips.txt", "r") as file:
        return [line for line in file.read().split("\n") if line != '']
if __name__ == "__main__":
main()
| {
"repo_name": "Dominionized/anonymoustre",
"path": "anonymoustre/main.py",
"copies": "1",
"size": "1300",
"license": "mit",
"hash": 4594693178398562000,
"line_mean": 25,
"line_max": 84,
"alpha_frac": 0.6746153846,
"autogenerated": false,
"ratio": 3.2338308457711444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44084462303711447,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import random
from string import ascii_letters, digits
import os
import teca.filesystem as tecafs
from six import next as _next
import logging
def _d(n, k):
    """Number of k-permutations of n items: n * (n-1) * ... * (n-k+1); 1 when k == 0."""
    product = 1
    for factor in range(n - k + 1, n + 1):
        product *= factor
    return product
def generateToken(list_of_tokens=None, lenght=7, alphabet=ascii_letters+digits):
    """
    Generate a random token of ``lenght`` distinct characters drawn from
    ``alphabet`` that is not already present in ``list_of_tokens``.

    Raises ValueError when every possible token has already been taken
    (there are only _d(len(alphabet), lenght) of them, since characters
    are sampled without replacement).
    """
    if not list_of_tokens:
        list_of_tokens = dict()
    # anti endless loop: bail out once the token space is exhausted
    if len(list_of_tokens) == _d(len(alphabet), lenght):
        raise ValueError("it's not possible to generate a new token!")
    while True:
        candidate = "".join(random.sample(alphabet, lenght))
        if candidate not in list_of_tokens:
            return candidate
def chooseImage(path, files, cfg):
    """
    Pick a random image filename to represent the directory *path*.

    Falls back to the first subfolder listing (via tecafs.walk) when *files*
    is empty, and to ``cfg.default_image`` when nothing is found at all.

    :param path: directory being rendered
    :param files: candidate image filenames inside *path*
    :param cfg: Teca configuration object -- assumed to expose
        ``default_image``; TODO confirm against the config module
    """
    try:
        filename = random.choice(files)
    except IndexError:
        #there are no files, so we have to look to a file in a subfolder
        logging.debug("[choose_image] there are no images in {0}".format(path))
        try:
            # NOTE(review): only the first entry yielded by walk() is used;
            # presumably it covers the deep listing -- verify.
            lower_file_list = list(tecafs.walk(path, cfg, deep=True))[0]
            filename = random.choice(lower_file_list.filenames)
        except IndexError:
            filename = cfg.default_image
    # Historical recursive implementation, kept for reference:
    # try:
    #     chosen_dir = random.choice(dirs)
    #     try:
    #         logging.debug("[choose_image] the path we're looking files into: {0}".format(os.path.join(path, chosen_dir)))
    #         _, _, files = _next(tecafs.walk(os.path.join(path, chosen_dir)))
    #         filename = os.path.join(os.path.join(path, chosen_dir), random.choice(files))
    #     except IndexError:
    #         #there are directories, but no files. we should go deeper.
    #         filename = chooseImage(os.path.join(path, chosen_dir), files, cfg)
    # except IndexError:
    #     #no files and no dirs... let's provide a default image.
    #     filename = cfg.default_image
    return filename
def chooseCoverImage(path, files, cfg):
    """
    Return the cover image path for *path*: the configured cover image when
    one exists, otherwise a randomly chosen image (possibly from a subfolder).
    """
    cover_name = cfg.cover_image_name(path)
    if not cover_name:
        cover_name = chooseImage(os.path.join(cfg.starting_path, path), files, cfg)
    return os.path.join(path, cover_name)
| {
"repo_name": "alfateam123/Teca",
"path": "teca/generation.py",
"copies": "1",
"size": "2138",
"license": "mit",
"hash": 4460659562477127000,
"line_mean": 37.1785714286,
"line_max": 126,
"alpha_frac": 0.631898971,
"autogenerated": false,
"ratio": 3.5280528052805282,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4659951776280528,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import random
# Sizes of the first presentation groups. Any teams beyond the sum of these
# sizes end up together in one extra, final group (see to_groups).
GROUP_SIZES = [10, 10]
def to_groups(seq, group_sizes):
    """Split *seq* into named groups of the given sizes.

    Returns a dict mapping "Group 1", "Group 2", ... to consecutive
    slices of *seq*.  If the requested sizes do not cover the whole
    sequence, one extra group receives the remaining elements.

    Fixes over the previous version:
    - iteration started at index 1, silently dropping seq[0];
    - the caller's *group_sizes* list was mutated (the extra remainder
      size was appended to it).
    """
    groups = {}
    seq_index = 0  # start at the first element (was 1: dropped seq[0])
    sizes = list(group_sizes)  # work on a copy; don't mutate the caller's list
    total_sizes = sum(sizes)
    if total_sizes < len(seq):
        # Leftover elements form one final catch-all group.
        sizes.append(len(seq) - total_sizes)
    for group_index, size in enumerate(sizes, start=1):
        group_name = "Group {}".format(group_index)
        groups[group_name] = []
        elements_taken = 0
        while elements_taken < size and seq_index < len(seq):
            groups[group_name].append(seq[seq_index])
            seq_index += 1
            elements_taken += 1
    return groups
def print_stats(teams):
    """Print how many teams there are and how long presenting will take."""
    count = len(teams)
    minutes = count * 6  # six minutes of presenting per team
    print("Total number of teams: {}".format(count))
    print("Total minutes of presenting: {}".format(minutes))
    print("Total hours of presenting: {}".format(minutes / 60))
# Read one team name per line from the "teams" file, dropping blank lines
# and surrounding whitespace.
teams = open("teams").read().split("\n")
teams = [team.strip() for team in teams if team.strip() != ""]
# Randomise the presentation order before splitting into groups.
random.shuffle(teams)
groups = to_groups(teams, GROUP_SIZES)
print_stats(teams)
# Render the schedule as markdown: one "## Group N" heading per group,
# followed by one bullet per team in that group.
result = []
for key in groups:
    result.append("## " + key)
    result.append("\n".join(list(map(lambda x: "* " + x, groups[key]))))
handler = open("schedule.md", "w")
handler.write("\n".join(result))
handler.close()
| {
"repo_name": "Hackfmi/HackFMI-4",
"path": "help-scripts/order.py",
"copies": "1",
"size": "1448",
"license": "mit",
"hash": 1874390520432823800,
"line_mean": 23.5423728814,
"line_max": 72,
"alpha_frac": 0.6201657459,
"autogenerated": false,
"ratio": 3.383177570093458,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9503343315993458,
"avg_score": 0,
"num_lines": 59
} |
from functools import reduce
import sqlite3
def insertion(cursor, table, data):
    """Insert data into table with given cursor.

    *data* is a sequence of equally sized row tuples.  Raises ValueError
    on a missing cursor, empty data, or inconsistent row lengths;
    operational SQL errors are reported on stdout instead of raised.
    """
    if cursor is None:
        raise ValueError("Cursor is None!")
    if len(data) == 0:
        raise ValueError("Data is empty!")
    width = len(data[0])
    if any(len(row) != width for row in data):
        raise ValueError("Data rows have inconsistent length!")
    # One "?" placeholder per column, e.g. "(?, ?, ?)".
    placeholders = "({})".format(", ".join("?" * width))
    query_str = "INSERT INTO {} values{}".format(table, placeholders)
    try:
        cursor.executemany(query_str, data)
    except sqlite3.OperationalError as err:
        print("Failed to insert data into", table)
        print("Error message:")
        print(err)
# Demo script: create a small inventory database, bulk-insert rows,
# update one row, then read the Ford stock back.
with sqlite3.connect("cars.db") as conn:
    c = conn.cursor()
    c.execute("""CREATE TABLE IF NOT EXISTS inventory
                 (make TEXT, model TEXT, quantity INT)
              """)
    data = [
        ("Ford", "Ichi", 5),
        ("Ford", "Ni", 250),
        ("Ford", "San", 625000),
        ("Honda", "Uno", 6),
        ("Honda", "Dos", 360)
    ]
    insertion(c, "inventory", data)
    # Correct the Ford Ichi stock level; report (not raise) on failure,
    # matching insertion()'s error-handling style.
    try:
        c.execute("""UPDATE inventory SET quantity = 50
                     WHERE make = 'Ford' AND model = 'Ichi'""")
    except sqlite3.OperationalError as err:
        print("Failed to update Ford Ichi quantity")
        print("Error message:")
        print(err)
    c.execute("SELECT * FROM inventory WHERE make = 'Ford'")
    rows = c.fetchall()
    for make, model, quantity in rows:
        print("{} {}: {} in stock".format(make, model, quantity))
| {
"repo_name": "imajunryou/RealPython2",
"path": "sql/06_sql_homework.py",
"copies": "1",
"size": "1741",
"license": "mit",
"hash": -487593062018831550,
"line_mean": 29.5438596491,
"line_max": 67,
"alpha_frac": 0.5491097071,
"autogenerated": false,
"ratio": 4.020785219399538,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5069894926499537,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import string
from django.db.models import Q
from django.views import generic
from gallery import models
class BasePatientView(object):
    """Responsible for generating basic patient context data"""

    def get_context_data(self) -> dict:
        """Build the context shared by all patient pages.

        Returns "in_memoriam" (whether any deceased patients exist) and
        "pet_categories" (the uppercase first letters for which living
        patients exist).
        """
        living = models.Patient.objects.filter(deceased=False)
        return {
            "in_memoriam": models.Patient.objects.filter(
                deceased=True
            ).exists(),
            "pet_categories": [
                letter
                for letter in string.ascii_uppercase
                if living.filter(first_letter=letter).exists()
            ],
        }
class GalleryIndexView(BasePatientView, generic.base.TemplateView):
    """Gallery landing page: shared patient context plus featured patients."""

    template_name = "gallery/index.html"

    def get_context_data(self):
        context = super(GalleryIndexView, self).get_context_data()
        context["featured_pets"] = models.Patient.objects.filter(featured=True)
        return context
class PetListView(BasePatientView, generic.base.TemplateView):
    """List living patients whose first letter matches the URL argument."""

    template_name = "gallery/pet-list.html"

    def get_context_data(self, first_letter, *args, **kwargs):
        category = first_letter.upper()
        context = super(PetListView, self).get_context_data(*args, **kwargs)
        context["category"] = category
        # Deceased patients are shown on the memoriam page instead.
        context["pets"] = models.Patient.objects.filter(
            first_letter__iexact=category
        ).exclude(deceased=True)
        return context
class PetMemoriamView(BasePatientView, generic.base.TemplateView):
    """List every deceased patient under an "In Memoriam" heading."""

    template_name = "gallery/pet-list.html"

    def get_context_data(self, *args, **kwargs):
        context = super(PetMemoriamView, self).get_context_data(*args, **kwargs)
        context["category"] = "In Memoriam"
        context["pets"] = models.Patient.objects.filter(deceased=True)
        return context
class PatientSearchView(BasePatientView, generic.base.TemplateView):
    """Search patients by first name via the ?q= query parameter."""

    template_name = "gallery/search.html"

    def get_context_data(self, *args, **kwargs):
        context = super(PatientSearchView, self).get_context_data(
            *args, **kwargs
        )
        search_term = self.request.GET.get("q")
        context["query"] = search_term
        if search_term:
            # Every whitespace-separated word must appear in the first name.
            criteria = Q()
            for word in search_term.split():
                criteria &= Q(first_name__icontains=word)
            context["pets"] = models.Patient.objects.filter(criteria)
        else:
            context["pets"] = []
        return context
| {
"repo_name": "cdriehuys/chmvh-website",
"path": "chmvh_website/gallery/views.py",
"copies": "1",
"size": "2902",
"license": "mit",
"hash": 4272160273594268000,
"line_mean": 25.8703703704,
"line_max": 76,
"alpha_frac": 0.6033769814,
"autogenerated": false,
"ratio": 4.116312056737589,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 108
} |
from functools import reduce
import sys
import os
import git
import glob
from astropy.table import Table
from tqdm import tqdm
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.ticker as ticker
import subprocess
import datetime
def analyse_file(filename, repo):
    """Summarise ownership of *filename* in *repo*.

    Returns ``[author_name, commit_count, date_of_first_listed_commit]``
    when the file was touched by exactly one author, otherwise None
    (no commits, or more than one author).
    """
    history = list(repo.iter_commits(paths=filename))
    if not history:
        return None
    authors = {commit.author.name for commit in history}
    if len(authors) > 1:
        return None
    return [
        str(authors.pop()),
        len(history),
        "{:%Y-%m-%d}".format(history[0].authored_datetime),
    ]
def piechart(total, unique, project):
    """Save a pie chart ("<project>_total.png") of *total* vs *unique*."""
    fig1, ax1 = plt.subplots()
    ax1.set_title("{}".format(project))
    ax1.pie(
        [total, unique],
        labels=['ok', 'no ok'],
        autopct='%1.1f%%',
        shadow=True,
        startangle=90,
    )
    # Equal aspect ratio ensures that pie is drawn as a circle.
    ax1.axis('equal')
    fig1.tight_layout()
    fig1.savefig("{}_total.png".format(project))
def plot_topusers(author_commits, author_lastdate, project):
    """Save a horizontal bar chart ("<project>_authors.png") of files per author.

    Bars are coloured by how long ago each author last committed, using
    *author_lastdate* (author -> days since last commit).  Fixes over the
    previous version: the dangling no-op statement ``ax1.set_title``
    (missing call parentheses, did nothing) and the unused ``cb1`` local
    were removed.
    """
    # Days -> years, normalised to [0, 1] for the viridis colormap.
    colors_author = np.array([author_lastdate[x] for x in author_commits['author']]) / 365
    color_max = 1 if colors_author.max() < 1 else colors_author.max()
    colors_author = colors_author / color_max
    color = [cm.viridis(ca) for ca in colors_author]
    fig = plt.figure()
    ax = fig.add_axes([0.30, 0.25, 0.65, 0.65])
    ax.set_title("{}".format(project))
    if len(author_commits) > 5:
        y_pos = np.arange(len(author_commits))
        y_lab = author_commits['author']
        ax.barh(y_pos, author_commits['commits'], align='center',
                color=color, ecolor='black')
    else:
        # Pad with blank rows so the chart always shows five slots.
        n = 5 - len(author_commits)
        y_pos = np.arange(5)
        y_lab = [' '] * n + [a for a in author_commits['author']]
        ax.barh(y_pos[n:], author_commits['commits'], align='center',
                color=color, ecolor='black')
    if max(author_commits['commits']) < 10:
        tick_spacing = 2
        ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
    ax.set_yticks(y_pos)
    ax.set_yticklabels(y_lab)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel('# Files')
    maxcommits = max(author_commits['commits'])
    ax.set_xlim(0, maxcommits + 0.5 if maxcommits > 5 else 5.5)
    ax.set_title(project)
    # Horizontal colorbar legend mapping bar colour to years since last commit.
    ax1 = fig.add_axes([0.30, 0.1, 0.65, 0.05])
    mpl.colorbar.ColorbarBase(ax1, cmap=cm.viridis,
                              norm=colors.Normalize(0, color_max),
                              orientation='horizontal', label='Years')
    fig.savefig("{}_authors.png".format(project))
def topusers(table, top=5):
    """Return the *top* authors with the most files (all authors if None)."""
    # Aggregate the per-file rows by author, counting rows per author,
    # then rank ascending by count.
    grouped = table.group_by('author')
    counts = grouped.groups.aggregate(len)
    counts.sort(['commits'])
    # The last *top* rows of the ascending sort are the most prolific
    # authors; slicing with None returns everything.
    cutoff = -top if top is not None else None
    return counts[cutoff:]
def get_last_commit_of(authors):
    """Map each author name to the number of days since their last commit.

    Shells out to ``git log`` in the current working directory.  When a
    ``.mailmap`` file exists, the author name is first resolved to its
    canonical form via grep/awk (first match wins).

    NOTE(review): author names are interpolated into shell=True command
    strings — a name containing a single quote would break or inject into
    the command.  Tolerable only because names come from the local repo;
    verify before reusing elsewhere.
    """
    author_dict = dict()
    for author in authors:
        author_git = author
        if os.path.exists(".mailmap"):
            # Resolve the canonical author name via .mailmap.
            author_git = subprocess.check_output("grep '{}' .mailmap".format(author) +
                                                 "| awk -F'<' '{print $1}'| head -n1", shell=True)
            # it comes with a `\n` - why??
            author_git = author_git.strip().decode()
            # check if this name returns anything, if not use previous name
            if author_git == '':
                author_git = author
        # Most recent commit date for this author, ISO formatted.
        last_date = subprocess.check_output(("git log --use-mailmap --author='{}' --date=iso --pretty=format:'%cd' "
                                             "| head -n1").format(author_git), shell=True)
        date_diff = (datetime.datetime.today() -
                     datetime.datetime.strptime(last_date.decode()[:19],
                                                "%Y-%m-%d %H:%M:%S"))
        author_dict[author] = date_diff.days
    return author_dict
def main():
    """Analyse single-author ("bus factor") files in the repo at argv[1].

    Writes <project>_critic.txt (the table of at-risk files),
    <project>_total.png (pie chart) and <project>_authors.png (bar chart)
    into the current directory.
    """
    # Collect the source files of interest, recursively.
    files = reduce(lambda l1, l2: l1 + l2,
                   [glob.glob(sys.argv[1] + "/**/*." + e, recursive=True)
                    for e in ['py', 'f', 'c', 'h', 'pyx']])
    # Boilerplate files are effectively owned by everyone; skip them.
    not_wanted = ['__init__.py', 'setup_package.py']
    files = [f for f in files if f.split('/')[-1] not in not_wanted]
    t = Table([[], [], [], []],
              names=('filename', 'author', 'commits', 'last date'),
              dtype=('S100', 'S100', 'i4', 'S10'))
    t.convert_bytestring_to_unicode()
    gitrepo = git.Repo()
    project = gitrepo.working_dir.split(os.sep)[-1]
    # Keep only files with exactly one author (analyse_file returns None
    # for unborn or multi-author files).
    for file_i in tqdm(files):
        row = analyse_file(file_i, gitrepo)
        if row:
            t.add_row([file_i] + row)
    t.sort(['last date', 'filename'])
    t.write('{}_critic.txt'.format(project), format='ascii.fixed_width', overwrite=True)
    piechart(len(files), len(t), project)
    authors_commit = topusers(t, top=None)
    author_dict = get_last_commit_of(authors_commit['author'])
    plot_topusers(authors_commit, author_dict, project)
    print(authors_commit)
# What else I want to do?
## DONE: sort table by date
## DONE: Find last commit from these critic authors (are they still contributing?)
## DONE: Plot pie chart with files vs unique // also in lines of code?
## DONE: Plot user ranking vs files (lines of code)
## ALMOST DONE: Accept list of files to ignore, e.g.: __init__.py, setup_package.py, ...
## TODO: Set up so it downloads the repo (from user/repo on GitHub or a URL), runs everything, and creates a slide/report
| {
"repo_name": "dpshelio/busfactor",
"path": "busfactor/counter.py",
"copies": "1",
"size": "5723",
"license": "mit",
"hash": 1482251426150112000,
"line_mean": 36.1623376623,
"line_max": 116,
"alpha_frac": 0.5951424078,
"autogenerated": false,
"ratio": 3.4663840096910965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45615264174910963,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import torch
from torch._utils import _accumulate
from ..function import Function, InplaceFunction
class Index(Function):
    """Differentiable indexing: forward selects with a fixed index,
    backward scatters the gradient into a zero tensor of the input size."""

    def __init__(self, index):
        super(Index, self).__init__()
        self.index = index

    def forward(self, i):
        # Remember the input size so backward can rebuild a full-size grad.
        self.input_size = i.size()
        result = i.index(self.index)
        # The output may alias the input's storage; tell the engine.
        self.mark_shared_storage((i, result))
        return result

    def backward(self, grad_output):
        grad_input = grad_output.new(self.input_size).zero_()
        # Only the indexed positions receive gradient; the rest stay zero.
        grad_input._set_index(self.index, grad_output)
        return grad_input
class SetItem(InplaceFunction):
    """In-place __setitem__: assigns a scalar (fixed at construction) or a
    tensor (passed to forward) at a fixed index.

    Backward zeroes the gradient at the overwritten positions; when a
    tensor was assigned, it also returns the gradient flowing into it.
    """

    def __init__(self, index, value=None):
        super(SetItem, self).__init__(True)
        self.index = index
        self.value = value

    def forward(self, i, value=None):
        # Always mutates its input tensor.
        self.mark_dirty(i)
        if value is None:  # value is scalar
            value = self.value
        else:  # value is Tensor
            # Remember the shape so backward can reshape the grad slice.
            self.value_size = value.size()
        i._set_index(self.index, value)
        return i

    def backward(self, grad_output):
        if self.value is None:  # value is Tensor
            grad_input = grad_output.clone()
            # Overwritten positions carry no gradient w.r.t. the input.
            grad_input._set_index(self.index, 0)
            grad_value = grad_output.index(self.index).clone()
            grad_value = grad_value.view(self.value_size)
            return grad_input, grad_value
        else:
            grad_input = grad_output.clone()
            grad_input._set_index(self.index, 0)
            return grad_input
class NoGrad(Function):
    """Identity that detaches its output from the autograd graph.

    The result is marked non-differentiable, so backward must never run.
    """

    def forward(self, i):
        result = i.new(i)
        self.mark_non_differentiable(result)
        # The copy shares the input's storage.
        self.mark_shared_storage((i, result))
        return result

    def backward(self, grad_output):
        assert False, "backward of NoGrad should never be called"

    def _do_forward(self, *args, **kwargs):
        # After running, flag this node as not requiring grad so the
        # engine stops traversal here.
        result = super(NoGrad, self)._do_forward(*args, **kwargs)
        self.requires_grad = False
        return result

    __call__ = _do_forward
class Transpose(Function):
    """Differentiable transpose of two dimensions.

    Transposing is self-inverse for a fixed dim pair, so backward simply
    applies the same transpose to the gradient.
    """

    def __init__(self, *dims):
        super(Transpose, self).__init__()
        assert len(dims) == 2
        self.dims = dims

    def forward(self, i):
        result = i.transpose(*self.dims)
        # transpose() returns a view over the same storage.
        self.mark_shared_storage((i, result))
        return result

    def backward(self, grad_output):
        return grad_output.transpose(*self.dims)
class View(Function):
    """Differentiable reshape to a size fixed at construction."""

    def __init__(self, *sizes):
        super(View, self).__init__()
        self.sizes = sizes

    def forward(self, i):
        # Remember the original size to undo the reshape in backward.
        self.input_size = i.size()
        result = i.view(*self.sizes)
        # view() aliases the input's storage.
        self.mark_shared_storage((i, result))
        return result

    def backward(self, grad_output):
        # TODO: not sure if this clone is necessary
        # contiguous() because grad_output may not be directly viewable.
        return grad_output.contiguous().view(self.input_size)
class Expand(Function):
    """Differentiable broadcast (expand) to a larger size.

    Backward sums the gradient over every broadcast dimension, restoring
    the input's original size.
    """

    def __init__(self, sizes):
        super(Expand, self).__init__()
        self.sizes = sizes
        self.expanded_dims = []

    def forward(self, i):
        result = i.expand(*self.sizes)
        # Dimensions prepended on the left of the input's shape...
        self.num_unsqueezed = len(self.sizes) - i.dim()
        # ...plus existing dimensions whose extent changed (were size 1).
        self.expanded_dims = [dim for dim, (expanded, original)
                              in enumerate(zip(self.sizes[self.num_unsqueezed:], i.size()))
                              if expanded != original]
        # expand() returns a stride-0 view over the same storage.
        self.mark_shared_storage((i, result))
        return result

    def backward(self, grad_output):
        grad_input = grad_output
        # Collapse the prepended dims first (sum then drop the size-1 dim).
        for i in range(self.num_unsqueezed):
            grad_input = grad_input.sum(0).squeeze(0)
        # Then sum over the in-place broadcast dims; presumably this torch
        # version's sum(dim) keeps the reduced dim as size 1 — TODO confirm.
        for dim in self.expanded_dims:
            grad_input = grad_input.sum(dim)
        return grad_input
class Type(Function):
    """Differentiable tensor type cast; backward casts the gradient back
    to the input's original type."""

    def __init__(self, dest_type):
        super(Type, self).__init__()
        self.dest_type = dest_type

    def forward(self, i):
        # Casting to the same type would be a no-op; callers must avoid it.
        assert self.dest_type != type(i)
        self.input_type = type(i)
        return i.type(self.dest_type)

    def backward(self, grad_output):
        return grad_output.type(self.input_type)
class CudaTransfer(Function):
    """Differentiable host->CUDA transfer; backward moves the gradient back
    to wherever the input originally lived (CPU or its CUDA device).

    NOTE(review): ``async`` became a reserved keyword in Python 3.7, so this
    class is a SyntaxError on modern interpreters; PyTorch later renamed
    the argument to ``non_blocking``.
    """

    def __init__(self, device_id=None, async=False):
        super(CudaTransfer, self).__init__()
        self.device_id = device_id
        self.async = async

    def forward(self, i):
        # -1 encodes "input was on CPU"; otherwise its CUDA device index.
        self.source_device = -1 if not i.is_cuda else i.get_device()
        self.source_was_cuda = i.is_cuda
        if self.device_id:
            return i.cuda(self.device_id, async=self.async)
        else:
            return i.cuda(async=self.async)

    def backward(self, grad_output):
        if self.source_device != -1:
            # Input was on a specific CUDA device: send the grad there.
            return grad_output.cuda(self.source_device)
        elif self.source_was_cuda:
            return grad_output
        else:
            return grad_output.cpu()
class Permute(Function):
    """Differentiable dimension permutation; backward applies the inverse
    permutation to the gradient."""

    def __init__(self, dim_indices):
        super(Permute, self).__init__()
        self.dim_indices = dim_indices
        # Invert the permutation: if dim d moves to position p in forward,
        # then position d of the inverse holds p.
        self.rev_dim_indices = [None] * len(dim_indices)
        for position, dim_idx in enumerate(dim_indices):
            self.rev_dim_indices[dim_idx] = position

    def forward(self, i):
        permuted = i.permute(*self.dim_indices)
        # permute() returns a view sharing the input's storage.
        self.mark_shared_storage((i, permuted))
        return permuted

    def backward(self, grad_output):
        return grad_output.permute(*self.rev_dim_indices)
class IndexAdd(InplaceFunction):
    """Differentiable index_add_: tensor1[index] += tensor2 along `dim`."""

    def __init__(self, dim, inplace=False):
        super(IndexAdd, self).__init__(inplace)
        self.dim = dim

    def forward(self, tensor1, index, tensor2):
        # The integer index itself is not differentiable.
        assert not self.needs_input_grad[1]
        if self.needs_input_grad[2]:
            # Only needed to route gradient back to tensor2.
            self.save_for_backward(index)
        if not self.inplace:
            tensor1 = tensor1.clone()
        else:
            self.mark_dirty(tensor1)
        return tensor1.index_add_(self.dim, index, tensor2)

    def backward(self, grad_output):
        grad_tensor1 = grad_tensor2 = None
        if self.needs_input_grad[0]:
            # Addition: tensor1's gradient passes through unchanged.
            grad_tensor1 = grad_output
        if self.needs_input_grad[2]:
            index, = self.saved_tensors
            grad_tensor2 = grad_output.index_select(self.dim, index)
        return grad_tensor1, None, grad_tensor2
class IndexCopy(InplaceFunction):
    """Differentiable index_copy_: tensor1[index] = tensor2 along `dim`."""

    def __init__(self, dim, inplace=False):
        super(IndexCopy, self).__init__(inplace)
        self.dim = dim

    def forward(self, tensor1, index, tensor2):
        # The integer index itself is not differentiable.
        assert not self.needs_input_grad[1]
        if any(self.needs_input_grad):
            self.save_for_backward(index)
        if not self.inplace:
            tensor1 = tensor1.clone()
        else:
            self.mark_dirty(tensor1)
        return tensor1.index_copy_(self.dim, index, tensor2)

    def backward(self, grad_output):
        grad_tensor1 = grad_tensor2 = None
        if any(self.needs_input_grad):
            index, = self.saved_tensors
            if self.needs_input_grad[0]:
                # Copied-over positions carry no grad w.r.t. tensor1.
                grad_tensor1 = grad_output.clone().index_fill_(self.dim, index, 0)
            if self.needs_input_grad[2]:
                grad_tensor2 = grad_output.index_select(self.dim, index)
        return grad_tensor1, None, grad_tensor2
class IndexFill(InplaceFunction):
    """Differentiable index_fill_: writes a constant at `index` along `dim`."""

    def __init__(self, dim, value, inplace=False):
        super(IndexFill, self).__init__(inplace)
        self.dim = dim
        self.value = value

    def forward(self, tensor, index):
        # The integer index itself is not differentiable.
        assert not self.needs_input_grad[1]
        if self.needs_input_grad[0]:
            self.save_for_backward(index)
        if not self.inplace:
            tensor = tensor.clone()
        else:
            self.mark_dirty(tensor)
        return tensor.index_fill_(self.dim, index, self.value)

    def backward(self, grad_output):
        grad_tensor = None
        if self.needs_input_grad[0]:
            index, = self.saved_tensors
            # Filled positions were overwritten by a constant: zero grad there.
            grad_tensor = grad_output.clone().index_fill_(self.dim, index, 0)
        return grad_tensor, None
class IndexSelect(Function):
    """Differentiable index_select along `dim`; backward index_add's the
    gradient back into a zero tensor of the input size."""

    def __init__(self, dim):
        super(IndexSelect, self).__init__()
        self.dim = dim

    def forward(self, tensor, index):
        # The integer index itself is not differentiable.
        assert not self.needs_input_grad[1]
        if self.needs_input_grad[0]:
            self.save_for_backward(index)
            self.input_size = tensor.size()
        return tensor.index_select(self.dim, index)

    def backward(self, grad_output):
        grad_tensor = None
        if self.needs_input_grad[0]:
            index, = self.saved_tensors
            grad_tensor = grad_output.new(*self.input_size).zero_()
            # index_add_ accumulates, which handles repeated indices correctly.
            grad_tensor.index_add_(self.dim, index, grad_output)
        return grad_tensor, None
class Concat(Function):
    """Differentiable torch.cat along a fixed dimension; backward splits
    the gradient back into the original input extents."""

    def __init__(self, dim):
        super(Concat, self).__init__()
        self.dim = dim

    def forward(self, *inputs):
        # Remember each input's extent along dim so backward can split.
        self.input_sizes = [t.size(self.dim) for t in inputs]
        return torch.cat(inputs, self.dim)

    def backward(self, grad_output):
        grads = []
        offset = 0
        for size in self.input_sizes:
            grads.append(grad_output.narrow(self.dim, offset, size))
            offset += size
        return tuple(grads)
class Resize(Function):
    """Differentiable reshape that must preserve the total element count."""

    def __init__(self, *sizes):
        super(Resize, self).__init__()
        self.sizes = sizes
        # Total elements the target shape implies.
        self.numel = reduce(lambda x, y: x * y, sizes, 1)

    def forward(self, tensor):
        if tensor.numel() != self.numel:
            raise RuntimeError(("requested resize to {} ({} elements in total), "
                                "but the given tensor has a size of {} ({} elements). "
                                "autograd's resize can only change the shape of a given "
                                "tensor, while preserving the number of elements. ").format(
                'x'.join(map(str, self.sizes)), self.numel,
                'x'.join(map(str, tensor.size())), tensor.numel()))
        self.input_sizes = tensor.size()
        result = tensor.new(tensor).resize_(*self.sizes)
        # The resized copy shares the original's storage.
        self.mark_shared_storage((tensor, result))
        return result

    def backward(self, grad_output):
        assert grad_output.numel() == self.numel
        # Reshape the gradient back to the input's original size.
        return grad_output.new(grad_output).resize_(self.input_sizes)
class Clone(Function):
    """Differentiable clone: the copy is elementwise identity, so the
    gradient passes through unchanged."""

    def forward(self, input):
        return input.clone()

    def backward(self, grad_output):
        return grad_output
class Squeeze(Function):
    """Differentiable squeeze (drop size-1 dims, optionally only `dim`);
    backward reshapes the gradient back to the original size."""

    def __init__(self, dim=None):
        super(Squeeze, self).__init__()
        self.dim = dim

    def forward(self, input):
        # Remember size/numel so backward can restore (and sanity-check).
        self.input_size = input.size()
        self.numel = input.numel()
        if self.dim is not None:
            result = input.squeeze(self.dim)
        else:
            result = input.squeeze()
        # squeeze() returns a view over the same storage.
        self.mark_shared_storage((input, result))
        return result

    def backward(self, grad_output):
        assert grad_output.numel() == self.numel
        return grad_output.contiguous().view(self.input_size)
class Unsqueeze(Function):
    """Differentiable insertion of a size-1 dimension at `dim`; backward
    drops that dimension again."""

    def __init__(self, dim):
        super(Unsqueeze, self).__init__()
        self.dim = dim

    def forward(self, input):
        result = input.unsqueeze(self.dim)
        # unsqueeze() returns a view over the same storage.
        self.mark_shared_storage((input, result))
        return result

    def backward(self, grad_output):
        return grad_output.squeeze(self.dim)
class MaskedCopy(InplaceFunction):
    """Differentiable masked_copy_: copies tensor2 into tensor1 where mask
    is set.

    NOTE(review): reads ``self.inplace`` but defines no __init__, so it
    relies on InplaceFunction's default — presumably inplace=False;
    confirm against the base class.
    """

    def forward(self, tensor1, mask, tensor2):
        assert not self.needs_input_grad[1], "MaskedCopy can't differentiate " \
            "the mask"
        if not self.inplace:
            tensor1 = tensor1.clone()
        else:
            self.mark_dirty(tensor1)
        self.save_for_backward(mask)
        return tensor1.masked_copy_(mask, tensor2)

    def backward(self, grad_output):
        mask, = self.saved_tensors
        grad_tensor1 = grad_tensor2 = None
        if self.needs_input_grad[0]:
            # Masked positions were overwritten: zero grad w.r.t. tensor1.
            grad_tensor1 = grad_output.clone().masked_fill_(mask, 0)
        if self.needs_input_grad[2]:
            grad_tensor2 = grad_output.masked_select(mask)
        return grad_tensor1, None, grad_tensor2
class MaskedFill(InplaceFunction):
    """Differentiable masked_fill_: writes a constant where mask is set."""

    def __init__(self, value, inplace=False):
        super(MaskedFill, self).__init__(inplace)
        self.value = value

    def forward(self, tensor, mask):
        assert not self.needs_input_grad[1], "MaskedFill can't differentiate " \
            "the mask"
        if not self.inplace:
            tensor = tensor.clone()
        else:
            self.mark_dirty(tensor)
        self.save_for_backward(mask)
        return tensor.masked_fill_(mask, self.value)

    def backward(self, grad_output):
        mask, = self.saved_tensors
        grad_tensor = None
        if self.needs_input_grad[0]:
            # Filled positions were overwritten by a constant: zero grad.
            grad_tensor = grad_output.clone().masked_fill_(mask, 0)
        return grad_tensor, None
class MaskedSelect(Function):
    """Differentiable masked_select; backward scatters the gradient back
    into the masked positions of a zero tensor."""

    def forward(self, tensor, mask):
        assert not self.needs_input_grad[1], "MaskedSelect can't differentiate " \
            "the mask"
        self.input_size = tensor.size()
        self.save_for_backward(mask)
        return tensor.masked_select(mask)

    def backward(self, grad_output):
        mask, = self.saved_tensors
        grad_tensor = None
        if self.needs_input_grad[0]:
            # TODO: remove zero
            grad_tensor = grad_output.new(self.input_size).zero_()
            grad_tensor.masked_copy_(mask, grad_output)
        return grad_tensor, None
class _MultiSelectionFunction(Function):
    """Shared base for selection ops returning (values, indices), e.g.
    sort/topk.

    Subclasses set ``self.args`` in forward; the tensor method to call is
    derived from the lowercased subclass name via getattr.  Backward
    scatters the gradient back to the selected positions.
    """

    def __init__(self, dim, return_indices):
        super(_MultiSelectionFunction, self).__init__()
        self.dim = dim
        self.return_indices = return_indices

    def forward(self, input):
        # Dispatch to e.g. input.sort(...) or input.topk(...).
        fn = getattr(input, self.__class__.__name__.lower())
        self.input_size = input.size()
        output, indices = fn(*self.args)
        if self.return_indices:
            self.save_for_backward(indices)
            self.mark_non_differentiable(indices)
            return output, indices
        else:
            # Keep indices around for backward without exposing them.
            self.indices = indices
            return output

    def backward(self, grad_output, grad_indices=None):
        grad_input = grad_output.new(self.input_size).zero_()
        if self.return_indices:
            indices, = self.saved_tensors
        else:
            indices = self.indices
        # Default to the last dimension when none was specified.
        dim = self.dim if self.dim is not None else grad_output.dim() - 1
        return grad_input.scatter_(dim, indices, grad_output)
class Sort(_MultiSelectionFunction):
    """Differentiable sort along a dimension (default: the last one)."""

    def __init__(self, dim=None, descending=False, return_indices=True):
        super(Sort, self).__init__(dim, return_indices)
        self.descending = descending

    def forward(self, input):
        # Default to the last dimension when none was given.
        sort_dim = input.dim() - 1 if self.dim is None else self.dim
        self.args = (sort_dim, self.descending)
        return super(Sort, self).forward(input)
class Topk(_MultiSelectionFunction):
    """Differentiable top-k selection along a dimension (default: last)."""

    def __init__(self, k, dim=None, largest=True, sort=True, return_indices=True):
        super(Topk, self).__init__(dim, return_indices)
        self.k = k
        self.largest = largest
        self.sort = sort

    def forward(self, input):
        # Default to the last dimension when none was given.
        selection_dim = input.dim() - 1 if self.dim is None else self.dim
        self.args = (self.k, selection_dim, self.largest, self.sort)
        return super(Topk, self).forward(input)
class Chunk(Function):
    """Differentiable chunk: splits a tensor into pieces along `dim`;
    backward copies each piece's gradient back into one full tensor."""

    def __init__(self, num_chunks, dim=0):
        super(Chunk, self).__init__()
        self.num_chunks = num_chunks
        self.dim = dim

    def forward(self, i):
        self.input_size = i.size()
        result = i.chunk(self.num_chunks, self.dim)
        # Every chunk is a view over the input's storage.
        self.mark_shared_storage(*((i, chunk) for chunk in result))
        return result

    def backward(self, *grad_output):
        grad_input = grad_output[0].new(self.input_size)
        offset = 0
        # Reassemble the chunk gradients contiguously along dim.
        for grad in grad_output:
            grad_size = grad.size(self.dim)
            grad_input.narrow(self.dim, offset, grad_size).copy_(grad)
            offset += grad_size
        return grad_input
class Gather(Function):
    """Differentiable gather along `dim`; backward scatters the gradient
    back to the gathered positions."""

    def __init__(self, dim):
        super(Gather, self).__init__()
        self.dim = dim

    def forward(self, input, index):
        assert not self.needs_input_grad[1], "Gather can't differentiate " \
            "the index"
        self.input_size = input.size()
        self.save_for_backward(index)
        return input.gather(self.dim, index)

    def backward(self, grad_output):
        index, = self.saved_tensors
        grad_input = grad_output.new(self.input_size).zero_()
        # scatter_ is the adjoint of gather for a fixed index.
        return grad_input.scatter_(self.dim, index, grad_output), None
class Scatter(InplaceFunction):
    """Differentiable scatter_ along `dim`: writes `source` into `input`
    at positions given by `index`."""

    def __init__(self, dim, inplace=False):
        super(Scatter, self).__init__(inplace)
        self.dim = dim

    def forward(self, input, index, source):
        assert not self.needs_input_grad[1], "Scatter can't differentiate " \
            "the index"
        if self.inplace:
            self.mark_dirty(input)
        else:
            input = input.clone()
        self.save_for_backward(index)
        return input.scatter_(self.dim, index, source)

    def backward(self, grad_output):
        index, = self.saved_tensors
        grad_input = grad_source = None
        if self.needs_input_grad[0]:
            # Scattered-over positions carry no grad w.r.t. the input.
            grad_input = grad_output.clone()
            grad_input.scatter_(self.dim, index, 0)
        if self.needs_input_grad[2]:
            # gather is the adjoint of scatter for a fixed index.
            grad_source = grad_output.gather(self.dim, index)
        return grad_input, None, grad_source
class Repeat(Function):
    """Differentiable tensor.repeat; backward sums the gradient chunks
    corresponding to the copies along each repeated dimension."""

    def __init__(self, repeats):
        super(Repeat, self).__init__()
        self.repeats = repeats

    def forward(self, input):
        return input.repeat(self.repeats)

    def backward(self, grad_output):
        grad_input = grad_output
        for dim, repeat in enumerate(self.repeats):
            if repeat != 1:
                # Every copy along this dim saw the same input value, so
                # their gradients accumulate.
                grad_input = sum(grad_input.chunk(repeat, dim))
        return grad_input
class Cumsum(Function):
    """Differentiable cumulative sum along `dim`.

    The gradient of cumsum is a *reversed* cumulative sum:
    grad_input[i] = sum of grad_output[j] for j >= i.
    """

    def __init__(self, dim):
        super(Cumsum, self).__init__()
        self.dim = dim

    def forward(self, input):
        return torch.cumsum(input, dim=self.dim)

    def backward(self, grad_output):
        # cumsum(-g)[i] = -sum_{j<=i} g[j]
        grad_input = torch.cumsum(-grad_output, dim=self.dim)
        # Subtract the (broadcast) total: -sum_{j<=i} g + sum_j g = sum_{j>i} g
        end_idx = grad_input.size(self.dim) - 1
        grad_sum = grad_input.narrow(self.dim, end_idx, 1)
        grad_input -= grad_sum.expand_as(grad_input)
        # Include the diagonal term: sum_{j>i} g + g[i] = sum_{j>=i} g.
        grad_input += grad_output
        return grad_input
# TODO: unfold
| {
"repo_name": "RPGOne/Skynet",
"path": "pytorch-master/torch/autograd/_functions/tensor.py",
"copies": "1",
"size": "18346",
"license": "bsd-3-clause",
"hash": 5408063517656617000,
"line_mean": 28.9282218597,
"line_max": 92,
"alpha_frac": 0.589174752,
"autogenerated": false,
"ratio": 3.746375331835818,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4835550083835818,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import torch
import torch._utils
from ..function import Function
class Type(Function):
    """Differentiable dtype/device cast (new-style, ctx-based Function).

    Backward casts the gradient back to the input's original type, on the
    input's original CUDA device when applicable; the second return is
    None because `dest_type` is not differentiable.
    """

    @staticmethod
    def forward(ctx, i, dest_type):
        ctx.input_type = type(i)
        # -1 encodes "input was on CPU"; otherwise its CUDA device index.
        ctx.input_device = -1 if not i.is_cuda else i.get_device()
        return i.type(dest_type)

    @staticmethod
    def backward(ctx, grad_output):
        if ctx.input_device == -1:
            return grad_output.type(ctx.input_type), None
        else:
            # Restore the gradient on the device the input lived on.
            with torch.cuda.device(ctx.input_device):
                return grad_output.type(ctx.input_type), None
# TODO: deprecate this
class Resize(Function):
    """Differentiable reshape (ctx-based) that must preserve the total
    element count; backward reshapes the gradient back."""

    @staticmethod
    def forward(ctx, tensor, sizes):
        ctx.sizes = sizes
        # Total elements the target shape implies.
        ctx.numel = reduce(lambda x, y: x * y, sizes, 1)
        if tensor.numel() != ctx.numel:
            raise RuntimeError(("requested resize to {} ({} elements in total), "
                                "but the given tensor has a size of {} ({} elements). "
                                "autograd's resize can only change the shape of a given "
                                "tensor, while preserving the number of elements. ").format(
                'x'.join(map(str, sizes)), ctx.numel,
                'x'.join(map(str, tensor.size())), tensor.numel()))
        ctx.input_sizes = tensor.size()
        if tensor.is_contiguous():
            result = tensor.new(tensor).contiguous().view(*sizes)
            return result
        else:
            return tensor.contiguous().view(*sizes)

    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.numel() == ctx.numel
        # Second return is None: `sizes` is not differentiable.
        return grad_output.contiguous().view(ctx.input_sizes), None
| {
"repo_name": "ryfeus/lambda-packs",
"path": "pytorch/source/torch/autograd/_functions/tensor.py",
"copies": "1",
"size": "1691",
"license": "mit",
"hash": 6015001933895220000,
"line_mean": 34.2291666667,
"line_max": 92,
"alpha_frac": 0.5771732703,
"autogenerated": false,
"ratio": 4.064903846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5142077116453846,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import urllib
import json
import urllib.request
import lxml.html
from PIL import Image
from django.core import serializers
from django.core.files.base import ContentFile
from django.db import transaction
from django.http import HttpResponse
from django.db.models import Q
import operator
from urllib.request import urlopen
from io import BytesIO
from library.f_lib import *
from library.models import *
def addItem(request):
    """Create a BookItem copy of an existing book, owned and held by the
    current user.

    JSON body: {"isbn", "val"}.  Replies {"info": 4} when not logged in,
    otherwise {"info": 1, "biid": ..., "owner": ...}.
    """
    payload = json.loads(str(request.body.decode()))
    isbn = payload["isbn"]
    value = payload["val"]
    context = isauth(request)
    if registrRevers(context):
        # Not authenticated / not fully registered.
        return HttpResponse(json.dumps({"info": 4}))
    person = context["person"]
    item = BookItem(
        book=Book.objects.get(pk=isbn),
        owner=person,
        reader=person,
        value=value,
        status=1,
    )
    item.save()
    return HttpResponse(json.dumps({"info": 1, "biid": item.id, "owner": person.natural_key()}))
def addOpinion(request):
    """Store the current user's rating and review text for a book.

    JSON body: {"isbn", "opiniontext", "rating"}.  Replies {"info": 4}
    when not logged in, {"info": 1} on success.
    """
    payload = json.loads(str(request.body.decode()))
    isbn = payload["isbn"]
    text = payload["opiniontext"]
    rating = payload["rating"]
    context = isauth(request)
    if registrRevers(context):
        return HttpResponse(json.dumps({"info": 4}))
    Opinion(
        person=context["person"],
        book=Book.objects.get(pk=isbn),
        date=timezone.now(),
        rating=rating,
        text=text,
    ).save()
    return HttpResponse(json.dumps({"info": 1}))
def checkBook(request):
    """Report whether a book with the given ISBN already exists.

    JSON body: {"type", "isbn"}.  Only query type 1 is implemented;
    replies {"info": 1, "book": isbn} when found, {"info": 2} otherwise.
    """
    payload = json.loads(str(request.body.decode()))
    query_type = payload["type"]
    isbn = payload["isbn"]
    if query_type == 1:
        matches = Book.objects.filter(isbn=isbn)
        if matches:
            return HttpResponse(json.dumps({"info": 1, "book": matches[0].isbn}))
    return HttpResponse(json.dumps({"info": 2, "books": ""}))
def getExp(name):
    """Return *name*'s file extension, normalised to a supported type.

    Anything other than png/jpg/gif (including names without a dot)
    falls back to "jpg".
    """
    # Extension is whatever follows the last dot; a dotless name yields
    # the whole name, which then hits the fallback below.
    candidate = name.rsplit('.', 1)[-1]
    return candidate if candidate in ("png", "jpg", "gif") else "jpg"
def upldBI(img, exp, person):
    """Shrink *img* to at most 200x300, store it under tmp/ with the given
    extension, link it to *person*, and return the storage file name."""
    img.thumbnail((200, 300), Image.ANTIALIAS)
    file_name = default_storage.get_available_name('tmp/book.' + exp)
    # Write straight into the storage backend's filesystem location.
    img.save(str(default_storage.location) + '/' + file_name)
    PersonImage(person=person, image=file_name).save()
    return file_name
def uploadBI(request):
    """Handle a multipart book-image upload for the session's person.

    Replies {"info": 2} when no decodable image was sent, otherwise
    {"info": 1, "path": stored_file_name}.
    """
    if "file" not in request.FILES:
        return HttpResponse(json.dumps({"info": 2}))
    data = request.FILES['file']
    try:
        img = Image.open(data)
    except BaseException:
        # Not a decodable image.
        return HttpResponse(json.dumps({"info": 2}))
    exp = getExp(data.name)
    person = Person.objects.get(pk=request.session["person_id"])
    stored_name = upldBI(img, exp, person)
    return HttpResponse(json.dumps({"info": 1, "path": stored_name}))
def uplBLLinkPerson(link, person):
    """Download an image from *link* and store it for *person*.

    Returns the stored file name, or None when downloading or decoding
    fails.
    """
    try:
        stream = BytesIO(urlopen(link).read())
        img = Image.open(stream)
    except BaseException:
        return None
    return upldBI(img, getExp(link), person)
def loadImgByLink(request):
    """Fetch an image by URL and store it for the session's person.

    JSON body: {"link"}.  Replies {"info": 2} on failure, otherwise
    {"info": 1, "path": stored_file_name}.
    """
    payload = json.loads(str(request.body.decode()))
    link = payload["link"]
    person = Person.objects.get(pk=request.session["person_id"])
    stored_name = uplBLLinkPerson(link, person)
    if not stored_name:
        return HttpResponse(json.dumps({"info": 2}))
    return HttpResponse(json.dumps({"info": 1, "path": stored_name}))
@transaction.commit_on_success
def addbajax(request):
    """Create a new book (plus the creator's own BookItem) from a JSON payload.

    Replies {"info": 2} when the ISBN already exists, {"info": 4} when not
    logged in, {"info": 1, "biid": ...} on success.
    NOTE(review): ``transaction.commit_on_success`` was removed in
    Django 1.8 — ``transaction.atomic`` is its replacement.
    """
    query = json.loads(str(request.body.decode()))
    isbn = query["isbn"]
    book = Book.objects.filter(isbn=isbn)
    if book:  # book exists
        return HttpResponse(json.dumps({"info": 2}))
    link = query["link"]
    title = query["title"]
    lang = query["lang"]
    desc = query["desc"]
    val = query["val"]
    image = query["image"]
    authors = set(query["authors"])
    keywords = set(query["keywords"])
    context = isauth(request)
    if registrRevers(context):
        return HttpResponse(json.dumps({"info": 4}))
    person = context["person"]
    language, created = Language.objects.get_or_create(language=lang)
    path = None
    # Copy the previously uploaded temp image into book_image/ (best effort;
    # a missing/broken image simply leaves the book without a cover).
    try:
        if len(image) > 0:
            photo = default_storage.open(image)
            exp = getExp(photo.name)
            file_name = default_storage.get_available_name('book_image/book.' + exp)
            path = default_storage.save(file_name, ContentFile(photo.read()))
    except BaseException:
        pass
    book = Book(isbn=isbn, ozon=link, title=title, language=language, description=desc, date=timezone.now())
    if path:
        book.image = path
    book.save()
    # Authors arrive as "First Last" strings; split once on the first space.
    for auth in authors:
        author = auth.split(" ", 1)
        author, created = Author.objects.get_or_create(fname=author[0], lname=author[1])
        book.authors.add(author)
    for key in keywords:
        keyw, created = Keyword.objects.get_or_create(word=key)
        book.keywords.add(keyw)
    # The creator also owns (and currently holds) the first copy.
    bi = BookItem(book=book, owner=person, reader=person, value=val, status=1)
    bi.save()
    return HttpResponse(json.dumps({"info": 1, "biid": bi.id}))
@transaction.commit_on_success
def editbajax(request):
    """Update an existing book's metadata from an AJAX JSON payload.

    Replaces the cover image when a new one is supplied, and rebuilds the
    author/keyword relations from scratch.
    Response codes: 4 = not authorized, 1 = updated.
    """
    query = json.loads(str(request.body.decode()))
    isbn = query["isbn"]
    book = Book.objects.filter(isbn=isbn)[0]
    link = query["link"]
    title = query["title"]
    lang = query["lang"]
    desc = query["desc"]
    image = query["image"]
    authors = set(query["authors"])
    keywords = set(query["keywords"])
    context = isauth(request)
    if registrRevers(context):
        return HttpResponse(json.dumps({"info": 4}))
    person = context["person"]  # kept for parity with addbajax (currently unused)
    language, created = Language.objects.get_or_create(language=lang)
    if image != book.image:
        # Best-effort delete of the old cover, then store the new one;
        # any storage failure leaves the previous image reference in place.
        try:
            default_storage.delete(book.image)
        except BaseException:
            pass
        try:
            if len(image) > 0:
                photo = default_storage.open(image)
                exp = getExp(photo.name)
                file_name = default_storage.get_available_name('book_image/book.' + exp)
                path = default_storage.save(file_name, ContentFile(photo.read()))
                book.image = path
        except BaseException:
            pass
    book.ozon = link
    book.title = title
    book.language = language
    book.description = desc
    # Clear M2M relations; they are rebuilt from the payload below.
    book.authors = []
    book.keywords = []
    # BUG FIX: the original called book.save() twice in a row; one save suffices.
    book.save()
    for auth in authors:
        author = auth.split(" ", 1)
        author, created = Author.objects.get_or_create(fname=author[0], lname=author[1])
        book.authors.add(author)
    for key in keywords:
        keyw, created = Keyword.objects.get_or_create(word=key)
        book.keywords.add(keyw)
    return HttpResponse(json.dumps({"info": 1}))
def getbooks(request):
    """Paginated, sorted, full-text-ish book search for the catalog page.

    The JSON payload carries page size/number, a search word (matched with
    icontains against isbn/title/language/author names/keywords), a sort
    column (prefixed with "-" for descending), and an optional person filter
    ('reading' or 'owning'). Returns the page of books plus the total count.
    Response code 4 = not authorized.
    """
    query = json.loads(str(request.body.decode()))
    pageS = int(query["page"]["size"])
    pageN = int(query["page"]["num"])
    # Offset of the first record of the requested page (pages are 1-based).
    start = (pageN - 1) * pageS
    context = isauth(request)
    if registrRevers(context):
        return HttpResponse(json.dumps({"info": 4}))
    person = context["person"]
    sWord = query["search"]["word"]
    orderF = query["sort"]["column"]
    if query["sort"]["type"] == 1:
        orderF = "-" + orderF
    # OR together one icontains filter per searchable field.
    qList = [Q(isbn__icontains=sWord),
             Q(title__icontains=sWord),
             Q(language__language__icontains=sWord),
             Q(authors__fname__icontains=sWord),
             Q(authors__lname__icontains=sWord),
             Q(keywords__word__icontains=sWord)]
    addQuery = {}
    if (query["search"]["person"] == 'reading'):
        addQuery = {'bookitem__reader': person}
    elif (query["search"]["person"] == 'owning'):
        addQuery = {'bookitem__owner': person}
    # distinct() is required because the author/keyword joins duplicate rows.
    bookslist = Book.objects.filter(reduce(operator.or_, qList), **addQuery).distinct().order_by(orderF)[
                start:start + pageS]
    count = Book.objects.filter(reduce(operator.or_, qList), **addQuery).distinct().count()
    # Serialize by hand (Django-serializer-like shape) so computed fields
    # such as rating/owners/item_count can be included.
    bookslist = [{'pk':book.isbn,'fields':
                      {'ozon':book.ozon,
                       'isbn':book.isbn,
                       'title':book.title,
                       'language':book.language.language,
                       'rating':book.rating,
                       'owners':book.owners,
                       'item_count':book.item_count,
                       'authors':[author.natural_key() for author in book.authors.all()],
                       'keywords':[kw.word for kw in book.keywords.all()],
                       }} for book in bookslist]
    return HttpResponse(json.dumps({"info": "yes", "count": count, "books": bookslist}))
def getlastbooks(request):
    """Return the most recently added books, serialized as JSON.

    The request body carries {"count": N}; the N newest books (by date)
    are serialized with their natural keys.
    """
    payload = json.loads(str(request.body.decode()))
    limit = int(payload["count"])
    recent = Book.objects.order_by("-date")[:limit]
    serialized = serializers.serialize("json", recent, use_natural_keys=True,
                                       fields=('isbn', 'title', 'language', 'authors'))
    return HttpResponse(serialized)
def loadItems(request):
    """List the available copies (status=1 items) of one book as JSON."""
    payload = json.loads(str(request.body.decode()))
    book = Book.objects.get(pk=payload["isbn"])
    bilist = [item.getValues() for item in book.bookitem_set.filter(status=1)]
    return HttpResponse(json.dumps({"info": 1, "bilist": bilist}))
def loadTextFormatBooks(request):
    """Export the session person's owned book items in a text-friendly shape.

    Each entry carries the library name (from the latest SysSetting), the
    book title, the item id and the owner's natural key.
    """
    person = Person.objects.get(pk=request.session["person_id"])
    ss = SysSetting.objects.latest('id')
    owned = BookItem.objects.filter(owner=person)
    blUpdate = [{'libname': str(ss.libname),
                 'book_title': str(item.book.title),
                 'biid': str(item.id),
                 'owner': item.owner.natural_key()} for item in owned]
    return HttpResponse(json.dumps({"info": 1, "books": blUpdate}))
def loadFromOzon(request):
    """Scrape book metadata (title, authors, language, ISBN, cover, ...) from an ozon.ru page.

    The page is fetched and parsed with lxml; the cover image is downloaded
    and stored for the session's person.
    Response: {"info": 1, "book": {...}} on success, {"info": 2} when the
    page cannot be fetched or any expected element is missing.
    """
    query = json.loads(str(request.body.decode()))
    link = query["link"]
    try:
        response = urllib.request.urlopen(link)
    except BaseException:
        return HttpResponse(json.dumps({"info": 2}))
    person = Person.objects.get(pk=request.session["person_id"])
    # ozon.ru serves cp1251-encoded pages.
    text = response.read().decode('windows-1251')
    page = lxml.html.fromstring(text)
    try:
        content = page.xpath('//div[@class="l l-content"]')[0]
        title = content.xpath('div/div/h1/text()')[0]
        # Kept although unused: its [0] doubles as a page-shape sanity check.
        product_detail = content.xpath('div[@class="product-detail"]')[0]
        authors = [auth.strip() for auth in page.xpath('//p[contains(text(),"Автор")]/a/text()')[0].split(',')]
        # BUG FIX: this assignment used to be duplicated verbatim on two lines.
        language = page.xpath('//p[contains(text(),"Язык")]/text()')[0].split(':')[1][1:]
        isbn_year = page.xpath('//p[contains(text(),"ISBN")]/text()')[0].split(',')
        isbn = isbn_year[0].split(';')[0][5:]
        year = isbn_year[-1][-7:-3]
        description = '\n'.join([desc.replace('\r', '').replace('\t', '').strip() for desc in
                                 page.xpath('//div[@itemprop="description"]/table/tr/td[1]/text()')[2:]])
        kwords = page.xpath('//div/ul[contains(li/text(),"Метки:")]/li/a/text()')
        # src is protocol-relative ("//host/..."); force http.
        img_link = "http://" + page.xpath('//img[@id="js_article_picture"]/@src')[0][2:]
        img = uplBLLinkPerson(img_link, person)
    except BaseException:
        return HttpResponse(json.dumps({"info": 2}))
    return HttpResponse(json.dumps({"info": 1, 'book': {
        'link': link,
        'isbn': isbn,
        'title': title,
        'language': language,
        'authors': authors,
        'kwords': kwords,
        'description': description,
        'img': img}}))
def removeOpinion(request):
    """Delete an opinion, but only if it belongs to the requesting person.

    Response codes: 4 = not authorized, 1 = done (also returned when the
    opinion belonged to someone else and was left untouched).
    """
    payload = json.loads(str(request.body.decode()))
    context = isauth(request)
    if registrRevers(context):
        return HttpResponse(json.dumps({"info": 4}))
    person = context["person"]
    opinion = Opinion.objects.get(pk=payload["opinion_id"])
    if opinion.person == person:
        opinion.delete()
    return HttpResponse(json.dumps({"info": 1}))
def searchById(request):
    """Resolve a BookItem id to its book's info-page URL.

    Response: {"info": 1, "location": ...} when found, {"info": 2} otherwise.
    """
    payload = json.loads(str(request.body.decode()))
    matches = BookItem.objects.filter(id=payload["id"])
    if not matches:
        return HttpResponse(json.dumps({"info": 2}))
    location = "/book/info/?isbn=" + matches[0].book.isbn
    return HttpResponse(json.dumps({"info": 1, "location": location}))
| {
"repo_name": "artbart/DistributedLibrary",
"path": "library/pac_views/v_books.py",
"copies": "1",
"size": "12238",
"license": "mit",
"hash": 1597173270698162400,
"line_mean": 30.3333333333,
"line_max": 111,
"alpha_frac": 0.6130932897,
"autogenerated": false,
"ratio": 3.5236447520184546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9627057993297445,
"avg_score": 0.0019360096842017734,
"num_lines": 390
} |
from functools import reduce
import urllib.request as urllib2
import json
import time
import os
# Travis API endpoint and per-build configuration pulled from the environment.
travis_entry = 'https://api.travis-ci.org'
build_id = os.getenv("TRAVIS_BUILD_ID")
# Seconds between polls of the build matrix (default 5).
polling_interval = int(os.getenv("POLLING_INTERVAL", '5'))
gh_token = os.getenv("GITHUB_TOKEN")
job_number = os.getenv("TRAVIS_JOB_NUMBER")
# By convention the leader is the first job of the matrix ("<build>.1").
is_leader = lambda job: job.endswith('.1')
if not job_number:
    # seems even for builds with only one job, this won't get here
    print("Don't use defining leader for build without matrix")
    exit(1)
elif not is_leader(job_number):
    # Non-leader jobs exit immediately; only the leader waits and gates.
    print("not the leader")
    exit(1)
class MatrixElement(object):
    """Lightweight snapshot of one job from the Travis build-matrix JSON."""

    def __init__(self, json_raw):
        number = json_raw['number']
        self.number = number
        self.is_leader = is_leader(number)
        # A job has finished once Travis records a 'finished_at' timestamp.
        self.is_finished = json_raw['finished_at'] is not None
        # Travis encodes success as result == 0.
        self.is_succeeded = json_raw['result'] == 0
def matrix_snapshot(travis_token):
    """
    Fetch the current build from the Travis API and return its job matrix
    as MatrixElement objects, with the leader job filtered out.
    :return: Matrix List
    """
    headers = {'content-type': 'application/json', 'Authorization': 'token {}'.format(travis_token)}
    req = urllib2.Request("{0}/builds/{1}".format(travis_entry, build_id), headers=headers)
    response = urllib2.urlopen(req).read().decode("utf-8")
    raw_json = json.loads(response)
    matrix_without_leader = [MatrixElement(job) for job in raw_json["matrix"] if not is_leader(job['number'])]
    return matrix_without_leader
def wait_others_to_finish(travis_token):
    """Block until every non-leader job in the build matrix has finished.

    Polls the Travis API every `polling_interval` seconds, printing the
    still-running job numbers so the build does not hit a silence timeout.
    """
    def others_finished():
        """
        Dumps others to finish
        Leader cannot finish, it is working now
        :return: tuple(True or False, List of not finished jobs)
        """
        snapshot = matrix_snapshot(travis_token)
        finished = [job.is_finished for job in snapshot if not job.is_leader]
        waiting = [job.number for job in snapshot
                   if not job.is_leader and not job.is_finished]
        # ROBUSTNESS FIX: all() handles an empty job list (no non-leader
        # jobs) gracefully, whereas reduce() without an initializer raised
        # TypeError in that case.
        return all(finished), waiting

    while True:
        finished, waiting_list = others_finished()
        if finished:
            break
        print("Leader waits for minions {0}...".format(waiting_list))  # just in case do not get "silence timeout"
        time.sleep(polling_interval)
def get_token():
    """Exchange the GitHub token for a Travis API access token."""
    assert gh_token, 'GITHUB_TOKEN is not set'
    payload = json.dumps({"github_token": gh_token}).encode("utf-8")
    request = urllib2.Request("{0}/auth/github".format(travis_entry), payload,
                              {'content-type': 'application/json'})
    body = urllib2.urlopen(request).read().decode("utf-8")
    return json.loads(body).get('access_token')
# Authenticate, wait for every other matrix job, then gate on their results.
token = get_token()
wait_others_to_finish(token)
final_snapshot = matrix_snapshot(token)
print("Final Results: {0}".format([(e.number, e.is_succeeded) for e in final_snapshot]))
others_snapshot = [el for el in final_snapshot if not el.is_leader]
# Exit 0 (continue the leader's after-steps) only if every other job succeeded.
if reduce(lambda a, b: a and b, [e.is_succeeded for e in others_snapshot]):
    print("Succeeded – continue.")
    exit(0)
| {
"repo_name": "beavyHQ/beavy",
"path": ".infrastructure/travis/scripts/travis_after_all.py",
"copies": "1",
"size": "2977",
"license": "mpl-2.0",
"hash": -1325974866063924700,
"line_mean": 33.1954022989,
"line_max": 114,
"alpha_frac": 0.6531092437,
"autogenerated": false,
"ratio": 3.5,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.964716939205358,
"avg_score": 0.0011879703292841165,
"num_lines": 87
} |
from functools import reduce
# Activation functions selectable by name.
activation_func = {
    # Step (threshold) function: 1 for positive input, 0 otherwise
    'transition': lambda x: 1 if x > 0 else 0,
    # Identity (linear) function
    'linear': lambda x: x
}
class Perceptron(object):
    """A single perceptron with trainable weights and a bias term."""

    def __init__(self, input_num, activator):
        """Create a perceptron with `input_num` inputs.

        `activator` is the activation function (float -> float).
        Weights and bias start at zero.
        """
        self.activator = activator
        self.weights = [0.0] * input_num
        self.bias = 0.0

    def __str__(self):
        """Human-readable dump of the learned weights and bias."""
        return 'weights\t:%s\nbias\t:%f' % (self.weights, self.bias)

    def predict(self, input_vec):
        """Return the activation of the weighted input sum plus bias."""
        weighted = sum(x * w for x, w in zip(input_vec, self.weights))
        return self.activator(weighted + self.bias)

    def train(self, input_vecs, labels, iteration, rate):
        """Run `iteration` passes of the perceptron rule at learning rate `rate`."""
        for _ in range(iteration):
            self._one_iteration(input_vecs, labels, rate)

    def _one_iteration(self, input_vecs, labels, rate):
        """One full pass over the training set."""
        for sample, expected in zip(input_vecs, labels):
            output = self.predict(sample)
            self._update_weights(sample, output, expected, rate)

    def _update_weights(self, input_vec, output, label, rate):
        """Perceptron learning rule: w += rate * (label - output) * x."""
        delta = label - output
        self.weights = [w + rate * delta * x
                        for x, w in zip(input_vec, self.weights)]
        self.bias += rate * delta
def get_training_dataset():
    """Build training data from the AND truth table.

    Returns (input_vecs, labels): labels[i] is inputs[i][0] AND inputs[i][1].
    """
    samples = [[1, 1], [0, 0], [1, 0], [0, 1]]
    expected = [1, 0, 0, 0]
    return samples, expected
def train_and_perceptron():
    """Train a 2-input perceptron on the AND truth table and return it."""
    # Two inputs because AND is binary; step function as the activator.
    perceptron = Perceptron(2, activation_func.get('transition'))
    samples, expected = get_training_dataset()
    # 10 epochs at learning rate 0.1 are enough for AND to converge.
    perceptron.train(samples, expected, 10, 0.1)
    return perceptron
if __name__ == '__main__':
    # Train the AND perceptron
    and_perception = train_and_perceptron()
    # Print the learned weights and bias
    print(and_perception)
    # Verify the full AND truth table
    print('1 and 1 = %d' % and_perception.predict([1, 1]))
    print('0 and 0 = %d' % and_perception.predict([0, 0]))
    print('1 and 0 = %d' % and_perception.predict([1, 0]))
    print('0 and 1 = %d' % and_perception.predict([0, 1]))
| {
"repo_name": "ycaxgjd/VIL",
"path": "ANN/perceptron.py",
"copies": "1",
"size": "3810",
"license": "mit",
"hash": 15477771638392836,
"line_mean": 26.7657657658,
"line_max": 107,
"alpha_frac": 0.5304996755,
"autogenerated": false,
"ratio": 2.032981530343008,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.30634812058430083,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
class ActionChecker:
    """Stateless rule checker for poker betting actions (fold/call/raise).

    All methods are classmethods operating on a list of player objects that
    expose .stack, .paid_sum() and .action_histories.
    """

    @classmethod
    def correct_action(self, players, player_pos, sb_amount, action, amount=None):
        """Normalize a declared action: cap an all-in at the player's total
        chips, and downgrade any illegal action to a fold with amount 0."""
        if self.is_allin(players[player_pos], action, amount):
            amount = players[player_pos].stack + players[player_pos].paid_sum()
        elif self.__is_illegal(players, player_pos, sb_amount, action, amount):
            action, amount = "fold", 0
        return action, amount

    @classmethod
    def is_allin(self, player, action, bet_amount):
        """True when the bet commits the player's whole remaining stack.

        A call is all-in at or above the stack; a raise only at exactly it.
        """
        if action == 'call':
            return bet_amount >= player.stack + player.paid_sum()
        elif action == 'raise':
            return bet_amount == player.stack + player.paid_sum()
        else:
            return False

    @classmethod
    def need_amount_for_action(self, player, amount):
        """Chips the player still has to put in to reach `amount`."""
        return amount - player.paid_sum()

    @classmethod
    def agree_amount(self, players):
        """The amount every player must match: the largest bet so far (0 if none)."""
        last_raise = self.__fetch_last_raise(players)
        return last_raise["amount"] if last_raise else 0

    @classmethod
    def legal_actions(self, players, player_pos, sb_amount):
        """Describe the player's legal actions with their amount bounds.

        A raise range of min == max == -1 signals that raising is impossible;
        the call amount is capped at the player's total chips (all-in call).
        """
        min_raise = self.__min_raise_amount(players, sb_amount)
        max_raise = players[player_pos].stack + players[player_pos].paid_sum()
        agree_amount = self.agree_amount(players)
        if max_raise < min_raise:
            # Can't meet the minimum raise: allow an undersized all-in raise
            # only if it still exceeds the current agree amount.
            if max_raise > agree_amount:
                min_raise = max_raise
            else:
                min_raise = max_raise = -1
        return [
            { "action" : "fold" , "amount" : 0 },
            { "action" : "call" , "amount" : min(agree_amount, players[player_pos].stack + players[player_pos].paid_sum()) },
            { "action" : "raise", "amount" : { "min": min_raise, "max": max_raise } }
        ]

    @classmethod
    def _is_legal(self, players, player_pos, sb_amount, action, amount=None):
        """Public convenience inverse of the private __is_illegal check."""
        return not self.__is_illegal(players, player_pos, sb_amount, action, amount)

    @classmethod
    def __is_illegal(self, players, player_pos, sb_amount, action, amount=None):
        # Folding is always legal; calls must match the agree amount and be
        # affordable; raises must be affordable and at least the minimum raise.
        if action == 'fold':
            return False
        elif action == 'call':
            return self.__is_short_of_money(players[player_pos], amount)\
                    or self.__is_illegal_call(players, amount)
        elif action == 'raise':
            return self.__is_short_of_money(players[player_pos], amount) \
                    or self.__is_illegal_raise(players, amount, sb_amount)

    @classmethod
    def __is_illegal_call(self, players, amount):
        # A call must exactly match the current agree amount.
        return amount != self.agree_amount(players)

    @classmethod
    def __is_illegal_raise(self, players, amount, sb_amount):
        return self.__min_raise_amount(players, sb_amount) > amount

    @classmethod
    def __min_raise_amount(self, players, sb_amount):
        # Minimum legal raise = largest bet so far + largest raise increment
        # so far (at least the big blind, i.e. 2 * small blind).
        raise_histories = self.__raise_histories(players)
        min_add_amount = sb_amount*2
        max_amount = 0
        for h in raise_histories:
            min_add_amount = max(min_add_amount, h['add_amount'])
            max_amount = max(max_amount, h['amount'])
        return max_amount + min_add_amount

    @classmethod
    def __is_short_of_money(self, player, amount):
        # True when the player's stack cannot cover the additional chips needed.
        return player.stack < amount - player.paid_sum()

    @classmethod
    def __fetch_last_raise(self, players, by='amount'):
        # Largest raise/blind entry across all players' action histories.
        all_histories = [p.action_histories for p in players]
        all_histories = reduce(lambda acc, e: acc + e, all_histories)  # flatten
        raise_histories = [h for h in all_histories if h["action"] in ["RAISE", "SMALLBLIND", "BIGBLIND"]]
        if len(raise_histories) == 0:
            return None
        else:
            return max(raise_histories, key=lambda h: h[by])  # maxby

    @classmethod
    def __raise_histories(self, players):
        # All raise/blind entries across all players' action histories.
        all_histories = [p.action_histories for p in players]
        all_histories = reduce(lambda acc, e: acc + e, all_histories)  # flatten
        raise_histories = [h for h in all_histories if h["action"] in ["RAISE", "SMALLBLIND", "BIGBLIND"]]
        return raise_histories
| {
"repo_name": "sberbank-ai/holdem-challenge",
"path": "PyPokerEngine/pypokerengine/engine/action_checker.py",
"copies": "1",
"size": "3715",
"license": "mit",
"hash": -130873296404262190,
"line_mean": 34.380952381,
"line_max": 121,
"alpha_frac": 0.6522207268,
"autogenerated": false,
"ratio": 3.3989021043000913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9449024229202756,
"avg_score": 0.020419720379467092,
"num_lines": 105
} |
from functools import reduce
class BaseNegotiator:
    """Abstract base class for negotiation agents.

    The framework calls initialize() once per round and utility() to score
    the last offer. Subclasses implement make_offer() and may override the
    receive_* hooks. The only required fields are self.preferences and
    self.offer.
    """

    def __init__(self):
        self.preferences = []
        self.offer = []
        self.iter_limit = 0

    def initialize(self, preferences, iter_limit):
        """Per-round setup: store the item preference ordering (most
        preferable first) and the iteration cap."""
        self.preferences = preferences
        self.iter_limit = iter_limit

    def make_offer(self, offer):
        """Given the opponent's last offer (an ordered list), return a new one.

        Returning the same offer accepts it and ends negotiations.
        Implementations must store a copy of their offer in self.offer.
        """
        pass

    def utility(self):
        """Score the last offer: positional payoff minus displacement penalty.

        Do not modify this method.
        """
        total = len(self.preferences)
        util = 0
        for item in self.offer:
            placed = self.offer.index(item)
            wanted = self.preferences.index(item)
            util += (total / (placed + 1)) - abs(placed - wanted)
        return util

    def receive_utility(self, utility):
        """Hook: the utility the opponent received from their last offer."""
        pass

    def receive_results(self, results):
        """Hook: results of the finished negotiation (success, points, etc.)."""
        pass
"repo_name": "A-Beck/AINegotiaor",
"path": "negotiator_base.py",
"copies": "1",
"size": "1937",
"license": "mit",
"hash": -7350407524437980000,
"line_mean": 46.2682926829,
"line_max": 169,
"alpha_frac": 0.6675271038,
"autogenerated": false,
"ratio": 4.183585313174946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006377936569635221,
"num_lines": 41
} |
from functools import reduce
class BaseNegotiator:
    """Abstract base class for negotiation agents.

    The framework calls initialize() once per round and utility() to score
    the last offer. Subclasses implement make_offer() and may override the
    receive_* hooks. The only required fields are self.preferences and
    self.offer.
    """

    def __init__(self):
        self.preferences = []
        self.offer = []
        self.iter_limit = 0

    def initialize(self, preferences, iter_limit):
        """Per-round setup: store the item preference ordering (most
        preferable first) and the iteration cap."""
        self.preferences = preferences
        self.iter_limit = iter_limit

    def make_offer(self, offer):
        """Given the opponent's last offer (an ordered list), return a new one.

        Returning the same offer accepts it and ends negotiations.
        Implementations must store a copy of their offer in self.offer.
        """
        pass

    def utility(self):
        """Score the last offer: positional payoff minus displacement penalty.

        Do not modify this method.
        """
        total = len(self.preferences)
        score = 0
        for item in self.offer:
            placed = self.offer.index(item)
            wanted = self.preferences.index(item)
            score += (total / (placed + 1)) - abs(placed - wanted)
        return score

    def receive_utility(self, utility):
        """Hook: the utility the opponent received from their last offer."""
        pass

    def receive_results(self, results):
        """Hook: results of the finished negotiation (success, points, etc.)."""
        pass
| {
"repo_name": "nealp9084/hw3",
"path": "negotiator_base.py",
"copies": "1",
"size": "1915",
"license": "mit",
"hash": -3475747217960891400,
"line_mean": 46.875,
"line_max": 169,
"alpha_frac": 0.6710182768,
"autogenerated": false,
"ratio": 4.163043478260869,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02959161430368029,
"num_lines": 40
} |
from functools import reduce
class BaseNegotiator:
    """Abstract base class for negotiation agents.

    The framework calls initialize() once per round and utility() to score
    the last offer. Subclasses implement make_offer() and may override the
    receive_* hooks. The only required fields are self.preferences and
    self.offer.
    """

    def __init__(self):
        self.preferences = []
        self.offer = []
        self.iter_limit = 0

    def initialize(self, preferences, iter_limit):
        """Per-round setup: store the item preference ordering (most
        preferable first) and the iteration cap."""
        self.preferences = preferences
        self.iter_limit = iter_limit

    def make_offer(self, offer):
        """Given the opponent's last offer (an ordered list), return a new one.

        Returning the same offer accepts it and ends negotiations.
        Implementations must store a copy of their offer in self.offer.
        """
        pass

    def utility(self):
        """Score the last offer: positional payoff minus displacement penalty.

        Do not modify this method.
        """
        total = len(self.preferences)
        score = 0
        for item in self.offer:
            placed = self.offer.index(item)
            wanted = self.preferences.index(item)
            score += (total / (placed + 1)) - abs(placed - wanted)
        return score

    def receive_utility(self, utility):
        """Hook: the utility the opponent received from their last offer."""
        pass

    def receive_results(self, results):
        """Hook: results of the finished negotiation (success, points, etc.)."""
        pass
| {
"repo_name": "zws3mb/CS4710HW3",
"path": "negotiator_base.py",
"copies": "4",
"size": "1917",
"license": "mit",
"hash": 4916590863553692000,
"line_mean": 48.1538461538,
"line_max": 169,
"alpha_frac": 0.6703182055,
"autogenerated": false,
"ratio": 4.167391304347826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6837709509847827,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
class Perceptron(object):
    """A single perceptron with trainable weights and a bias term."""

    def __init__(self, input_num, activator):
        """Create a perceptron with `input_num` inputs.

        `activator` is the activation function (float -> float).
        Weights and bias start at zero.
        """
        self.activator = activator
        self.weights = [0.0 for _ in range(input_num)]
        self.bias = 0.0

    def __str__(self):
        """Human-readable dump of the learned weights and bias."""
        return 'weights\t:%s\nbias\t:%f\n' % (self.weights, self.bias)

    def predict(self, input_vec):
        """Return the activation of the weighted input sum plus bias."""
        weighted = sum(x * w for x, w in zip(input_vec, self.weights))
        return self.activator(weighted + self.bias)

    def train(self, input_vecs, labels, iteration, rate):
        """Run `iteration` passes of the perceptron rule at learning rate `rate`."""
        for _ in range(iteration):
            self._one_iteration(input_vecs, labels, rate)

    def _one_iteration(self, input_vecs, labels, rate):
        """One full pass over the training set."""
        for sample, expected in zip(input_vecs, labels):
            output = self.predict(sample)
            self._update_weights(sample, output, expected, rate)

    def _update_weights(self, input_vec, output, label, rate):
        """Perceptron learning rule: w += rate * (label - output) * x."""
        delta = label - output
        self.weights = [w + rate * delta * x
                        for x, w in zip(input_vec, self.weights)]
        self.bias += rate * delta
"repo_name": "thanple/ThinkingInPython",
"path": "pythinking/ml/common.py",
"copies": "1",
"size": "2528",
"license": "apache-2.0",
"hash": 8004354193364071000,
"line_mean": 32.5573770492,
"line_max": 86,
"alpha_frac": 0.5097751711,
"autogenerated": false,
"ratio": 2.207119741100324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8197808353393932,
"avg_score": 0.0038173117612785436,
"num_lines": 61
} |
from functools import reduce
class ScopedString (object):
    """A dotted path assembled from a stack of name frames."""

    def __init__(self):
        self._stack = []

    def push(self, frame):
        """Enter a scope: append `frame` as the innermost name."""
        self._stack.append(frame)

    def pop(self):
        """Leave a scope: remove and return the innermost name."""
        return self._stack.pop()

    def __str__(self):
        return '.'.join(self._stack)
class ScopedList (object):
    """A list assembled from a stack of scope frames.

    Appends go to the innermost frame; iteration and str() flatten all
    frames in push order.
    """

    def __init__(self, stack=None):
        if stack:
            self._stack = stack
        else:
            self._stack = []
            self.push()

    def push(self):
        """Open a new (innermost) scope frame."""
        self._stack.append([])

    def pop(self):
        """Discard the innermost frame; the global frame cannot be popped."""
        if (len(self._stack) <= 1):
            raise IndexError("Attempt to pop global scope")
        self._stack.pop()

    def append(self, val):
        """Add `val` to the innermost frame."""
        self._stack[-1].append(val)

    def _normalize(self):
        # Flatten the frames, outermost first.
        return reduce(lambda x, y: x + y, self._stack, [])

    def __str__(self):
        return str(self._normalize())

    def __repr__(self):
        # BUG FIX: previously returned "ScopedDict(...)" (copy-paste from ScopedDict).
        return "ScopedList(" + repr(self._stack) + ")"

    def __iter__(self):
        return self._normalize().__iter__()
class ScopedDict (object):
    """A dict with lexical scoping: lookups search the innermost frame first."""

    def __init__(self, stack=None):
        if stack:
            self._stack = stack
        else:
            self._stack = []
            self.push ()

    def push(self):
        """Open a new innermost frame."""
        self._stack.insert(0, {})

    def pop(self):
        """Remove and return the innermost frame; the global frame stays."""
        if (len(self._stack) <= 1):
            raise IndexError("Attempt to pop global scope")
        innermost = self._stack[0]
        del self._stack[0]
        return innermost

    def _normalize(self):
        # Merge all frames into one dict; innermost entries win.
        merged = {}
        for frame in self._stack:
            for key, value in frame.items():
                merged.setdefault(key, value)
        return merged

    def __getitem__(self, key):
        for frame in self._stack:
            if key in frame:
                return frame[key]
        raise KeyError(key)

    def __setitem__(self, key, value):
        # Writes always target the innermost frame.
        self._stack[0][key] = value

    def __contains__(self, key):
        return any(key in frame for frame in self._stack)

    def __str__(self):
        return str(self._normalize())

    def __repr__(self):
        return "ScopedDict(" + repr(self._stack) + ")"

    def __iter__(self):
        return iter(self._normalize())

    def items(self):
        return self._normalize().items()

    def keys(self):
        return self._normalize().keys()

    def values(self):
        return self._normalize().values()
| {
"repo_name": "doffm/dbuf",
"path": "src/dbuf/util.py",
"copies": "1",
"size": "3108",
"license": "bsd-3-clause",
"hash": 7000044123501045000,
"line_mean": 28.046728972,
"line_max": 72,
"alpha_frac": 0.4057271557,
"autogenerated": false,
"ratio": 5.037277147487845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020504305848244097,
"num_lines": 107
} |
from functools import reduce
# constants used in the multGF2 function
mask1 = mask2 = polyred = None
def setGF2(degree, irPoly):
    """Define parameters of binary finite field GF(2^m)/g(x)

    - degree: extension degree of binary field
    - irPoly: coefficients of irreducible polynomial g(x), packed as an int

    Sets the module-level mask1, mask2 and polyred used by multGF2.
    """
    global mask1, mask2, polyred

    def to_bits(value):
        """Most-significant-first list of the bits of `value`."""
        return [(value >> i) & 1
                for i in reversed(range(value.bit_length()))]

    mask1 = 1 << degree
    mask2 = mask1 - 1
    # Drop the leading coefficient and re-pack the remainder as an int.
    polyred = reduce(lambda acc, bit: (acc << 1) + bit, to_bits(irPoly)[1:])
def multGF2(p1, p2):
    """Multiply two polynomials in GF(2^m)/g(x).

    Relies on the module globals (mask1, mask2, polyred) set by setGF2.
    """
    product = 0
    while p2:
        if p2 & 1:
            product ^= p1      # add (XOR) the current shift of p1
        p1 <<= 1
        if p1 & mask1:         # reduce modulo g(x) on degree overflow
            p1 ^= polyred
        p2 >>= 1
    return product & mask2
if __name__ == "__main__":
    # Define the binary field GF(2^127) / (x^127 + x^63 + 1)
    setGF2(127, 2**127 + 2**63 + 1)
    # Multiply two sample field elements and print the product in hex.
    # BUG FIX: the literals carried a Python 2 'L' long suffix, which is a
    # SyntaxError under Python 3 (the rest of this file is Python 3 code).
    print("{:02x}".format(multGF2(0x3f7e0000000000000000000000000000,
                                  0x3f7e00000000000000000000)))
"repo_name": "srijs/hwsl2-core",
"path": "calc.py",
"copies": "1",
"size": "1141",
"license": "mit",
"hash": -1679128641056926200,
"line_mean": 28.2820512821,
"line_max": 101,
"alpha_frac": 0.5609114812,
"autogenerated": false,
"ratio": 3.1174863387978142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4178397819997814,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
def format_comment_title(product):
    """Produce a Markdown-formatted title for a "product" string.

    A product is a browser identifier optionally followed by a colon and a
    release channel (e.g. "firefox" or "chrome:dev"). The generated title
    is used both to create new comments and to locate (and subsequently
    update) previously-submitted ones.
    """
    pieces = product.split(":")
    heading = pieces[0].title()
    if len(pieces) > 1:
        heading = "%s (%s)" % (heading, pieces[1])
    return "# %s #" % heading
def markdown_adjust(s):
    """Escape problematic markdown sequences in a single translation pass."""
    # Whitespace escapes become literal backslash sequences; backticks are
    # stripped; pipes are escaped so they survive inside a markdown table.
    return s.translate(str.maketrans({
        '\t': u'\\t',
        '\n': u'\\n',
        '\r': u'\\r',
        '`': None,
        '|': u'\\|',
    }))
def table(headings, data, log):
    """Log `headings` and `data` rows as a markdown table via `log`."""
    cols = range(len(headings))
    assert all(len(item) == len(cols) for item in data)
    # Column width = widest cell in that column, plus two padding spaces.
    max_widths = [len(h) + 2 for h in headings]
    for row in data:
        for i in cols:
            max_widths[i] = max(max_widths[i], len(row[i]) + 2)
    log("|%s|" % "|".join(item.center(max_widths[i]) for i, item in enumerate(headings)))
    log("|%s|" % "|".join("-" * max_widths[i] for i in cols))
    for row in data:
        log("|%s|" % "|".join(" %s" % row[i].ljust(max_widths[i] - 1) for i in cols))
    log("")
| {
"repo_name": "Acidburn0zzz/servo",
"path": "tests/wpt/web-platform-tests/tools/wpt/markdown.py",
"copies": "43",
"size": "1622",
"license": "mpl-2.0",
"hash": 6199813624525708000,
"line_mean": 37.619047619,
"line_max": 89,
"alpha_frac": 0.5351418002,
"autogenerated": false,
"ratio": 3.711670480549199,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
def is_trivial(dependency):
    """A dependency X-->Y is trivial when Y is contained in X."""
    left, right = dependency
    return right <= left
def is_superkey(key, dependency):
    """True when `key` is contained in the determinant X of X-->Y."""
    determinant, _ = dependency
    return key <= determinant
def is_bcnf(dependency, key):
    """A dependency satisfies BCNF when its determinant is a superkey,
    or the dependency is trivial."""
    return is_superkey(key, dependency) or is_trivial(dependency)
def str_dependency(dependency):
    """Render X-->Y stripping the quote marks of set reprs."""
    determinant, determined = dependency
    return '{}-->{}'.format(determinant, determined).replace("'", '')
def str_relation(key, subset):
    """Render a relation with its key attributes bolded, e.g. (**A**, B)."""
    non_key = subset - key
    bolded = ["**" + str(attr) + "**" for attr in key]
    body = ", ".join(bolded + list(non_key)).replace("'", '')
    return "(" + body + ")"
def print_header(point, stream):
    """Write the markdown header for exercise `point` to `stream`."""
    stream.write('# Base de Datos 1\n')
    stream.write('## Práctica 3\n')
    stream.write('### {})\n'.format(point))
def print_dependencies(dependencies, stream):
    """Write the functional dependencies as a numbered fenced block."""
    stream.write('**Dependencias funcionales:**\n')
    stream.write('```\n')
    for index, dependency in enumerate(dependencies, start=1):
        stream.write('df{}: {}\n'.format(index, str_dependency(dependency)))
    stream.write('```\n')
def get_key(relation, dependencies):
    """Candidate-key heuristic: strip every determined attribute set
    from the relation; what remains must be part of every key."""
    candidate = relation
    for _, determined in dependencies:
        candidate = candidate - determined
    return candidate
def print_ck(key, stream):
    """Write the candidate key as a fenced block (set quotes stripped)."""
    stream.write('**Clave Candidata:**\n')
    stream.write('```\n')
    stream.write('CC: {}\n'.format(key).replace("'", ''))
    stream.write('```\n')
def print_block(text, stream):
    """Write `text` on its own line, surrounded by blank lines."""
    stream.write('\n{}\n\n'.format(text))
def print_list_block(list, stream):
    """Write the items of `list`, one per line, surrounded by blank lines."""
    joined = '\n'.join(str(element) for element in list)
    stream.write('\n' + joined + '\n\n')
def print_md_block(list, stream):
    """Write `list` as a fenced markdown code block."""
    fenced = ['```markdown'] + list + ['```']
    print_list_block(fenced, stream)
def plus(subset, dependencies):
    """Attribute closure (X+) of `subset` under `dependencies`.

    Repeatedly adds the determined side of every dependency whose
    determinant is already contained, until a fixed point is reached.

    BUG FIX: the accumulator used to be rebuilt from `result` for each
    dependency (`newResult = result | y`), so within one pass only the
    last applicable dependency survived — e.g. {A}+ under A-->B, A-->C
    came out as {A, C} instead of {A, B, C}.
    """
    result = None
    newResult = subset
    while newResult != result:
        result = newResult
        for (x, y) in dependencies:
            if(x <= newResult):
                # Accumulate into newResult so every applicable dependency
                # in this pass contributes.
                newResult = newResult | y
    return result
def loose_dependency(dependency, schema, dependencies):
    """Check whether dependency X-->Y is LOST by the decomposition `schema`.

    Runs the standard preservation test: repeatedly compute the closure of
    (result ∩ Ri) and project it back onto each relation Ri, accumulating
    attributes until a fixed point. The dependency is preserved iff Y ends
    up contained; returns True when it is lost.

    FIX: removed the unused local `llego`, which also duplicated the
    (pure) plus() computation on every iteration.
    """
    x, y = dependency
    result = None
    newResult = x
    while newResult != result:
        result = newResult
        for relation in schema:
            # Closure of the shared attributes, projected onto this relation.
            closure = plus(result & relation, dependencies)
            newResult = result | (closure & relation)
            if(y <= newResult):
                return False
    return not (y <= result)
def loose_dependencies(schema, dependencies):
    """Return the list of dependencies lost by the decomposition *schema*."""
    return [dependency for dependency in dependencies
            if loose_dependency(dependency, schema, dependencies)]
class Partition:
    """A relation fragment produced during normalization.

    Holds the key attributes, the remaining attributes and a sequence
    number used to name the relation (R0, R1, ...).
    """
    def __init__(self, key, attrs, number=0):
        self.key = key          # set of key attributes
        self.attrs = attrs      # set of non-key attributes
        self.number = number    # ordinal used in the markdown name
    def __str__(self):
        return f'R{self.number}: {str_relation(self.key, self.attrs)}'
    def as_set(self):
        """All attributes of the fragment (key and non-key)."""
        return self.key | self.attrs
    def __sub__(self, subset):
        """Remove *subset* from both the key and the attributes."""
        return Partition(self.key - subset, self.attrs - subset)
    def denay_bcnf(self, dependencies):
        """Return one dependency of this fragment violating BCNF, or None."""
        for dependency in dependencies:
            if dependency in self and not is_bcnf(dependency, self.key):
                return dependency
        return None
    def __contains__(self, dependency):
        # A dependency belongs to the fragment when all its attributes do.
        (x, y) = dependency
        return (x | y) <= self.as_set()
    def print_md_block(self, stream):
        """Write this fragment fenced as a markdown code block."""
        stream.write('```markdown\n')
        stream.write(str(self) + '\n')
        stream.write('```\n')
    def md_name(self):
        return f' `R{self.number}` '
def part(relation, dependency):
    """Split *relation* by dependency X-->Y into (fragment X∪Y, remainder)."""
    determinant, determined = dependency
    fragment = Partition(determinant, determined)
    remainder = relation - determined
    return (fragment, remainder)
def to_3nf(original, dependencies, file=None):
    """Decompose *original* into 3NF: one fragment per functional dependency,
    plus a fragment carrying the key when no dependency already provides it.

    Writes the resulting schemas to *file* as markdown and returns them.
    """
    print_block(f'Como se pierde información al particionar {original.md_name()}, se lleva la misma a 3NF', file)
    parts = []
    for (x, y) in dependencies:
        parts.append(Partition(x, y, original.number + len(parts) + 1))
    if (len([ p for p in parts if (p.key == original.key) ]) < 1):
        # Bug fix: use an empty *set* here -- the previous ``{}`` literal is a
        # dict, and Partition.as_set() would raise TypeError on ``set | dict``.
        parts.append(Partition(original.key, set(), original.number + len(parts) + 1))
    print_md_block(parts, file)
    return parts
def to_bcnf(relation, dependencies, key=None, file=None):
    """Decompose *relation* into BCNF, writing a step-by-step markdown trace
    to *file*.

    Recursively splits any fragment holding a dependency that violates BCNF;
    when a split would lose functional dependencies, the fragment is taken to
    3NF instead. Accepted fragments accumulate in ``schema`` and are printed
    at the end. (User-facing strings are Spanish report text, left untouched.)
    """
    if(key == None):
        key = get_key(relation, dependencies)
    # Working copy of the dependencies; each is consumed when used for a split.
    dependencyBag = [ d for d in dependencies ]
    schema = []  # fragments already accepted as being in BCNF
    def loose_information(p1, p2, dependency):
        # Report whether the p1/p2 split loses information or functional
        # dependencies; True means the split is NOT acceptable.
        x, y = dependency
        text = ['**a) ¿Perdí información?**']
        if(x <= p1.as_set() & p2.as_set()):
            # Lossless join: the shared attributes contain the determinant.
            text.append('No, ya que la interseccion entre las dos relaciones da el determinate de la dependencia funcional')
            text.append('')
            text.append('**b) ¿Perdí dependencias funcionales?**')
            deps = loose_dependencies([part.as_set() for part in schema + [p1, p2]], dependencies)
            if( len(deps) > 0 ):
                text.append('Si, se perdieron las dependencias funcionales: ' + " ".join([
                    f'`{str_dependency(dep)}`' for dep in deps
                ]))
                print_list_block(text, file)
                return True
            text.append("No se perdio ninguna dependencia funcional")
            print_list_block(text, file)
            return False
        text.append("Si ya que las particiones no comparten al determinante.")
        print_list_block(text, file)
        return True
    def bcnf_process(original):
        # Recursively check *original*; split while a violating dependency exists.
        dependency = original.denay_bcnf(dependencyBag)
        text = [f'**¿{original.md_name()} está en bcnf?**']
        if( dependency != None ):
            text.append(f'No, exite la dependencia funciona `{str_dependency(dependency)}` que no es trivail ni superclave')
            text.append('Se realiza entonces el particionado de esta relación por medio de dicha dependencia funcioanl')
            dependencyBag.remove(dependency)
            (p1, p2) = part(original, dependency)
            p1.number = original.number + 1
            p2.number = original.number + 2
            text.append('')
            text.append('Se proponen los siguientes esquemas:')
            print_list_block(text, file)
            print_md_block([p1, p2], file)
            if(loose_information(p1, p2, dependency)):
                # Split rejected: fall back to a 3NF decomposition instead.
                for p in to_3nf(original, dependencies, file):
                    bcnf_process(p)
            else:
                bcnf_process(p1)
                bcnf_process(p2)
        elif len(dependencies) > 0:
            # NOTE(review): this branch and the else below are identical, so
            # the len(dependencies) test currently has no effect.
            text.append(f'Si, todas sus dependencias funcionales son o triviales o sus determinantes son superclave')
            print_list_block(text, file)
            schema.append(original)
        else:
            text.append(f'Si, todas sus dependencias funcionales son o triviales o sus determinantes son superclave')
            print_list_block(text, file)
            schema.append(original)
    p0 = Partition(key, relation, 0)
    print_block(f'Se plantea ahora una nueva relacion {p0.md_name()} para realizar el proceso de normalizacion:',file)
    p0.print_md_block(file)
    bcnf_process(p0)
    print_block('El esquema final en en bcnf sera:', file)
    print_md_block(schema, file)
def normalize(relation, dependencies, filename, point=0):
    """Write the full normalization report (header, dependency list,
    candidate key and BCNF decomposition) for *relation* to *filename*
    as a markdown document."""
    with open(filename, 'w') as file:
        file.seek(0)  # redundant: mode 'w' already truncates to position 0
        print_header(point, file)
        print_block('Se buscand dependencias funcionales en la relacion para luego determinar las claves candidatas, y se encuentran:', file)
        print_dependencies(dependencies, file)
        key = get_key(relation, dependencies)
        print_ck(key, file)
        to_bcnf(relation, dependencies, key, file)
# Bug fix: the guard compared __name__ against '__init__', which is never
# the value of __name__ when a script is executed, so this demo could never
# run. The correct sentinel is '__main__'.
if(__name__ == '__main__'):
    relation = { 'nombreBuque', 'nYApDueño', 'dniDueño', 'tipoBuque', 'tonelaje', 'tipoCasco', '#Viaje', 'puertoOrigen',
                 'puertoDestino', 'puertoIntermedio', 'nomPaísPuertoDestino', 'nombrePaisPuertoOrigen',
                 'nombrePaisPuertoIntermedio', 'posicionActual', 'fechaPosicionActual', 'nYApPasajero',
                 'dniPasajero', 'dirPasajero', 'puertoInicioPasajero', 'puertoFinalPasajero' }
    dependencies = [
        ({'tipoBuque'}, {'tonelaje', 'tipoCasco'}),
        ({'nombreBuque'}, {'tipoBuque'}),
        ({'dniDueño'}, {'nYApDueño'}),
        ({'puertoOrigen'}, {'nombrePaisPuertoOrigen'}),
        ({'puertoDestino'}, {'nomPaísPuertoDestino'}),
        ({'puertoIntermedio'}, {'nombrePaisPuertoIntermedio'}),
        ({'nombreBuque', '#Viaje'}, {'puertoOrigen', 'puertoDestino'}),
        ({'dniPasajero'}, {'nYApPasajero', 'dirPasajero'}),
        ({'nombreBuque', '#Viaje', 'dniPasajero'}, {'puertoInicioPasajero', 'puertoFinalPasajero'}),
        ({'nombreBuque', 'fechaPosicionActual'}, {'posicionActual'})
    ]
    normalize(relation, dependencies, 'prueba.md')
| {
"repo_name": "matias-pierobon/informatica-unlp",
"path": "db1/normalizer/normalizer.py",
"copies": "1",
"size": "8822",
"license": "mit",
"hash": -6561618493799354000,
"line_mean": 33.9444444444,
"line_max": 141,
"alpha_frac": 0.5935725642,
"autogenerated": false,
"ratio": 3.398687765341567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4492260329541567,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
def largest_product_sequence(grid, sequence_length):
    """Return the largest product obtainable from any run of
    *sequence_length* adjacent cells in *grid*.

    args:
        grid: the 2d array to sequence over.
        sequence_length: the length of the run used for each product.
    """
    def multiply(values):
        total = 1
        for value in values:
            total *= value
        return total
    return max(multiply(seq) for seq in sequences(grid, sequence_length))
def sequences(grid, sequence_length):
    """Generator yielding every run of *sequence_length* adjacent cells of
    *grid*: left-to-right rows, top-to-bottom columns, and both diagonals.

    Bug fix: the horizontal and vertical bounds predicates used ``<`` where
    ``<=`` is required, so runs ending exactly at the last column or last row
    were never yielded (a 2x2 grid with sequence_length 2 produced no
    sequences at all).
    """
    col_length = len(grid)
    row_length = len(grid[0])
    def upward_seq_p(x, y):
        # A run going up must not pass above row 0.
        return y - (sequence_length - 1) >= 0
    def forward_seq_p(x, y):
        # <= so a run may end at the last column (was <, an off-by-one).
        return x + sequence_length <= row_length
    def forward_seq(x, y):
        return [grid[y][x + index] for index in range(sequence_length)]
    def downward_seq_p(x, y):
        # <= so a run may end at the last row (was <, an off-by-one).
        return y + sequence_length <= col_length
    def downward_seq(x, y):
        return [grid[y + index][x] for index in range(sequence_length)]
    def up_diag_p(x, y):
        return forward_seq_p(x, y) and upward_seq_p(x, y)
    def up_diag(x, y):
        return [grid[y - index][x + index] for index in range(sequence_length)]
    def down_diag_p(x, y):
        return forward_seq_p(x, y) and downward_seq_p(x, y)
    def down_diag(x, y):
        return [grid[y + index][x + index] for index in range(sequence_length)]
    for y in range(col_length):
        for x in range(row_length):
            if forward_seq_p(x, y):
                yield forward_seq(x, y)
            if downward_seq_p(x, y):
                yield downward_seq(x, y)
            if up_diag_p(x, y):
                yield up_diag(x, y)
            if down_diag_p(x, y):
                yield down_diag(x, y)
| {
"repo_name": "PurityControl/uchi-komi-python",
"path": "problems/euler/0011-largest-product-in-grid/ichi/largest_product.py",
"copies": "1",
"size": "2058",
"license": "mit",
"hash": 2022208254553614000,
"line_mean": 27.1917808219,
"line_max": 72,
"alpha_frac": 0.563654033,
"autogenerated": false,
"ratio": 3.597902097902098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4661556130902098,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
def p_empty_or_comment(line, comment='#'):
    """Predicate matching strings that are non-empty and do not start with a
    comment character. The line should already have its leading whitespace
    stripped for best results."""
    starts_comment = line.startswith(comment)
    return len(line) and not starts_comment
def mreplace(s, replacements=()):
    """Apply multiple replace operations to the string *s*.

    *replacements* is an iterable of (old, new) tuples, applied in order.

    Bug fix: the default was a mutable list (``[]``) -- a classic shared
    mutable-default hazard; an immutable empty tuple is equivalent for all
    callers and cannot leak state between calls.
    """
    return reduce(lambda text, pair: text.replace(*pair), replacements, s)
def split_and_replace(s, split=',', replace={}):
    """Split *s* on *split* and expand ``${var}`` placeholders from *replace*.

    Returns the single element when the split produces exactly one item,
    otherwise the list of items. *replace* is only read, never mutated, so
    the shared default dict is safe here.

    Bug fixes: ``dict.iteritems()`` does not exist on Python 3 (use
    ``.items()``, which also works on Python 2); ``len(...) is 1`` relied on
    CPython small-int identity (use ``== 1``); the local named ``list``
    shadowed the builtin.
    """
    # variable replacements use the format ${var}. Here, we build
    # (old, new) tuples out of the replace dict for mreplace:
    replacements = [("${%s}" % key, value)
                    for key, value in replace.items()]
    items = [mreplace(item, replacements) for item in s.split(split)]
    return items[0] if len(items) == 1 else items
def resolve_continuation(l, char='\\'):
    """Concatenates elements of the given string list with the following
    one, if they end with the continuation character.

    Raises EOFError when the final element still expects a continuation.
    """
    merged = []
    pending = ''
    for line in l:
        if line.endswith(char):
            # strip the trailing backslash (and padding) and keep accumulating
            pending += line.rstrip(" \t\\")
        else:
            merged.append(pending + line if pending else line)
            pending = ''
    if pending:
        raise EOFError(pending)
    return merged
def parse_properties(fhandle, replace={}, assign='=', comment='#', split=','):
    """Reads a .properties file and returns its contents as a
    dict. If the values of this dict contain the split character, a
    str.split operation on this character is automatically applied,
    resulting in a list of strings. Otherwise, values are
    strings. Optionally, a dictionary of replacements can be given to
    automatically expand variables. This expansion is applied last
    (i. e. after the comment/assign/split parsing), so that variables
    cannot 'inject' syntax elements.

    The kwargs 'assign' and 'comment' can be used, if your .properties
    file uses a different syntax."""
    import io
    # Bug fix: the ``file`` builtin was removed in Python 3, so the old
    # ``type(fhandle) != file`` check raised NameError. Validate against
    # io.IOBase instead (covers open() handles and io.StringIO alike).
    if not isinstance(fhandle, io.IOBase):
        raise ValueError("file expected")
    # read all lines, strip them, ignore comments and resolve
    # continuations (lines ending with '\')
    lines = resolve_continuation(
        filter(lambda x: p_empty_or_comment(x, comment),
               [line.strip() for line in fhandle]))
    # split on the first assignment character, build a list of tuples
    kv_tuples = [tuple(l.split(assign, 1)) for l in lines]
    # strip key and value again and apply replacements
    return dict([(key.strip(), split_and_replace(value.strip(), split, replace))
                 for (key, value) in kv_tuples])
| {
"repo_name": "MnM/tomcat-ear",
"path": "lib/properties.py",
"copies": "1",
"size": "2746",
"license": "bsd-3-clause",
"hash": -3454626335900183000,
"line_mean": 41.2461538462,
"line_max": 80,
"alpha_frac": 0.6613255645,
"autogenerated": false,
"ratio": 4.244204018547141,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0056158277127307185,
"num_lines": 65
} |
from functools import reduce
def pf1(n, facts):
    """Append the prime factors of *n* (ascending) to the list *facts*.

    Mutates *facts* in place and returns None; n < 2 appends nothing.
    (Iterative restatement of the original recursive trial division.)
    """
    divisor = 2
    while divisor <= n:
        if n % divisor == 0:
            facts.append(divisor)
            n //= divisor
            divisor = 2
        else:
            divisor += 1
def pf2(n):
    """Return the prime factorisation of *n* as an ascending list."""
    factors = []
    candidate = 2
    while candidate <= n:
        if n % candidate != 0:
            candidate += 1
            continue
        factors.append(candidate)
        if n == candidate:
            break
        n //= candidate
        candidate = 2  # restart trial division, mirroring the original
    return factors
def allfacts(n):
    """Return all divisors of *n* in ascending order (1 and n included).

    Bug fix: the previous implementation appended ``n`` unconditionally, so
    primes came back with a duplicate (allfacts(7) == [1, 7, 7]) and
    allfacts(1) == [1, 1]. Collecting divisor pairs up to sqrt(n) into a set
    removes the duplicates and replaces the O(n) scan with O(sqrt n).
    """
    divisors = set()
    d = 1
    while d * d <= n:
        if n % d == 0:
            divisors.add(d)
            divisors.add(n // d)
        d += 1
    return sorted(divisors)
def lcm(nums):
    """Return the least common multiple of the integers in *nums*.

    Bug fix: the previous version ignored its parameter and re-parsed the
    module-level ``nums_str`` string (a NameError when called as a library
    function), so the argument never had any effect. Folding with
    lcm(a, b) = a * b // gcd(a, b) uses the parameter directly and avoids
    re-factorising every number.
    """
    from math import gcd
    return reduce(lambda acc, value: acc * value // gcd(acc, value), nums)
def hcf(nums):
    """Return the highest common factor (GCD) of the integers in *nums*.

    Replaces the quadratic intersect-all-divisor-lists approach with a
    linear fold over math.gcd; the result is identical for positive ints.
    """
    from math import gcd
    return reduce(gcd, nums)
if __name__ == "__main__":
nums_str = input('Input nubers separated by space: ')
nums = [int(n) for n in nums_str.split()]
print(f'LCM is {lcm(nums)} HCF is {hcf(nums)}')
| {
"repo_name": "mitesh1112/smallpy",
"path": "LCMHCF.py",
"copies": "1",
"size": "1510",
"license": "unlicense",
"hash": -4395737029726131000,
"line_mean": 18.1139240506,
"line_max": 60,
"alpha_frac": 0.3894039735,
"autogenerated": false,
"ratio": 3.069105691056911,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8908051159901205,
"avg_score": 0.010091700931141302,
"num_lines": 79
} |
from functools import reduce
from boto3.dynamodb.conditions import Attr
# Values stored in each item's 'type' attribute.
IMAGE_TYPE = 'image'
VIDEO_TYPE = 'video'
class MediaDB(object):
    """Abstract interface for a media-file store.

    Concrete backends (e.g. DynamoMediaDB below) override every method;
    these base implementations are intentional no-ops.
    """
    def list_media_files(self, label=None):
        # Return stored media records, optionally filtered by label.
        pass
    def add_media_file(self, name, media_type, labels=None):
        # Store a media record under *name* with its type and labels.
        pass
    def get_media_file(self, name):
        # Return the record stored under *name*.
        pass
    def delete_media_file(self, name):
        # Remove the record stored under *name*.
        pass
class DynamoMediaDB(MediaDB):
    """MediaDB backend backed by a DynamoDB Table resource."""
    def __init__(self, table_resource):
        self._table = table_resource
    def list_media_files(self, startswith=None, media_type=None, label=None):
        """Scan the table, optionally filtering by name prefix, type and label."""
        conditions = []
        if startswith is not None:
            conditions.append(Attr('name').begins_with(startswith))
        if media_type is not None:
            conditions.append(Attr('type').eq(media_type))
        if label is not None:
            conditions.append(Attr('labels').contains(label))
        scan_params = {}
        if conditions:
            # AND the conditions together in the same order as before.
            combined = conditions[0]
            for condition in conditions[1:]:
                combined = combined & condition
            scan_params['FilterExpression'] = combined
        response = self._table.scan(**scan_params)
        return response['Items']
    def add_media_file(self, name, media_type, labels=None):
        """Insert (or overwrite) the item keyed by *name*."""
        item = {
            'name': name,
            'type': media_type,
            'labels': [] if labels is None else labels,
        }
        self._table.put_item(Item=item)
    def get_media_file(self, name):
        """Return the item keyed by *name*, or None when absent."""
        response = self._table.get_item(Key={'name': name})
        return response.get('Item')
    def delete_media_file(self, name):
        """Delete the item keyed by *name* (no error if it does not exist)."""
        self._table.delete_item(Key={'name': name})
    def _add_to_filter_expression(self, expression, condition):
        # AND *condition* onto *expression*, which may still be None.
        return condition if expression is None else expression & condition
| {
"repo_name": "aws-samples/chalice-workshop",
"path": "code/media-query/07-videos/chalicelib/db.py",
"copies": "6",
"size": "2117",
"license": "apache-2.0",
"hash": -1289366893132646400,
"line_mean": 26.4935064935,
"line_max": 77,
"alpha_frac": 0.5507794048,
"autogenerated": false,
"ratio": 4.192079207920792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7742858612720792,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from ckeditor_uploader import views as cku_views
from django.conf import settings
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.urls import include, path
from django.views.i18n import JavaScriptCatalog
from drf_spectacular.views import (
SpectacularJSONAPIView, SpectacularSwaggerView, SpectacularRedocView
)
from modoboa.admin.views import user as user_views
from modoboa.core import signals as core_signals, views as core_views
from modoboa.core.extensions import exts_pool
urlpatterns = [
    # i18n catalog consumed by in-page javascript
    path('jsi18n/', JavaScriptCatalog.as_view(), name="javascript-catalog"),
    # CKEditor upload/browse endpoints, restricted to logged-in users
    path('ckeditor/upload/', login_required(cku_views.upload),
         name="ckeditor_upload"),
    path('ckeditor/browse/', login_required(cku_views.browse),
         name="ckeditor_browse"),
    # Core application routes
    path('', include("modoboa.core.urls")),
    path('admin/', include("modoboa.admin.urls")),
    path('dnstools/', include("modoboa.dnstools.urls")),
    path('stats/', include("modoboa.maillog.urls")),
    path('user/forward/', user_views.forward, name="user_forward"),
    # Password reset flow (email link and SMS code variants)
    path('accounts/password_reset/', core_views.PasswordResetView.as_view(),
         name="password_reset"),
    path('accounts/password_reset/done/',
         auth_views.PasswordResetDoneView.as_view(),
         name="password_reset_done"),
    path('reset/confirm_code/',
         core_views.VerifySMSCodeView.as_view(),
         name="password_reset_confirm_code"),
    path('reset/resend_code/',
         core_views.ResendSMSCodeView.as_view(),
         name="password_reset_resend_code"),
    path('reset/<uidb64>/<token>/',
         auth_views.PasswordResetConfirmView.as_view(),
         name="password_reset_confirm"),
    path('reset/done/', auth_views.PasswordResetCompleteView.as_view(),
         name="password_reset_complete"),
]
# Load every installed extension and mount its routes.
exts_pool.load_all()
urlpatterns += exts_pool.get_urls()
# Plugins may contribute extra user-preference routes via this signal;
# each receiver returns a list of routes which are flattened here.
extra_routes = core_signals.extra_uprefs_routes.send(sender="urls")
if extra_routes:
    extra_routes = reduce(
        lambda a, b: a + b, [route[1] for route in extra_routes])
    urlpatterns += extra_routes
# API urls
urlpatterns += [
    # FIXME: legacy, to remove ASAP
    path('docs/openapi.json',
         SpectacularJSONAPIView.as_view(api_version="v1"), name='schema-v1-legacy'),
    # OpenAPI schema plus swagger/redoc browsers for API v1
    path('api/schema-v1/',
         SpectacularJSONAPIView.as_view(api_version="v1"), name='schema-v1'),
    path('api/schema-v1/swagger/',
         SpectacularSwaggerView.as_view(url_name='schema-v1'),
         name='docs-index-v1'),
    path('api/schema-v1/redoc/',
         SpectacularRedocView.as_view(url_name='schema-v1')),
    # OpenAPI schema plus swagger/redoc browsers for API v2
    path('api/schema-v2/',
         SpectacularJSONAPIView.as_view(api_version="v2"), name='schema-v2'),
    path('api/schema-v2/swagger/',
         SpectacularSwaggerView.as_view(url_name='schema-v2'),
         name='docs-index-v2'),
    path('api/schema-v2/redoc/',
         SpectacularRedocView.as_view(url_name='schema-v2')),
    path('api/v1/', include("modoboa.urls_api_v1", namespace="v1")),
    path('api/v2/', include("modoboa.urls_api_v2", namespace="v2")),
]
# Serve static and media files directly when running the dev server.
if settings.DEBUG:
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    from django.conf.urls.static import static
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(
        settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
    )
| {
"repo_name": "modoboa/modoboa",
"path": "modoboa/urls.py",
"copies": "1",
"size": "3428",
"license": "isc",
"hash": -9107362390492353000,
"line_mean": 36.2608695652,
"line_max": 84,
"alpha_frac": 0.6843640607,
"autogenerated": false,
"ratio": 3.3542074363992174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45385714970992175,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from ...combinators import Lazy
from ...AST.expressions.arithmetic import IntAexp, VarAexp, BinopAexp
def any_operator_in_list(ops):
    """
    Returns a parser accepting any keyword from the operator list *ops*.

    Builds one keyword parser per operator and folds them together with
    the alternation combinator (|).
    """
    from ..common import keyword
    keyword_parsers = [keyword(op) for op in ops]
    return reduce(lambda left, right: left | right, keyword_parsers)
""" Precedence levels for arithmetic operations. """
aexp_precedence_levels = [
['*', '/', '%'],
['+', '-'],
]
def precedence(value_parser, precedence_levels, combine):
    """
    Prioritizes operations (brackets) according to the precedence levels.
    Example:
        Input: 4 * a + b / 2 - (6 + c)
        1) E0(4) * E0(a) + E0(b) / E0(2) - E0(6+c)
        2) E1(4*a) + E1(b/2) - E1(6+c)
        3) E2((4*a)+(b/2)-(6+c))
    """
    def level_parser(operators):
        # Parser for any operator of one level, mapped through *combine*.
        return any_operator_in_list(operators) ^ combine
    first = value_parser * level_parser(precedence_levels[0])
    # Left-fold the remaining (looser) levels onto the tightest one.
    return reduce(lambda parser, level: parser * level_parser(level),
                  precedence_levels[1:], first)
def aexp_value():
    """
    Converts the values returned by 'num' and 'id' to the object of AST classes.
    First of all, try to parse integer, if unsuccessful, try to parse as a variable (via Alternate combinator).
    """
    from ..common import id, boolean, num
    from ..expressions import objects, arrays, call
    # Alternation order matters: object/array/call forms are tried before
    # the literal and bare-identifier fallbacks.
    return objects.object_val() | arrays.el_exp() | call.fun_call_stmt() | \
           ((boolean | num) ^ (lambda i: IntAexp(i))) | \
           (id ^ (lambda v: VarAexp(v)))
def process_group(parsed):
    """
    Removes parentheses and returns the expression between them.

    The parse tree for "( expr )" nests as ((left_paren, expr), right_paren).
    """
    inner_pair, _right_paren = parsed
    _left_paren, expression = inner_pair
    return expression
def aexp_group():
    """
    Parse the arithmetic expression in parentheses.
    """
    from ..common import keyword
    from ..expressions import logical
    # Lazy defers parser construction (presumably to break the recursive
    # cycle with bexp/aexp -- see the combinators module); process_group
    # then strips the surrounding parentheses from the result.
    return keyword('(') + (Lazy(logical.bexp) | Lazy(aexp)) + keyword(')') ^ process_group
def aexp_term():
    """
    Parse one arithmetic term: a plain value first, otherwise a
    parenthesised group of arithmetic expressions.
    """
    value = aexp_value()
    group = aexp_group()
    return value | group
def process_binop(operator):
    """
    Build the combiner for a binary arithmetic operator: returns a function
    wrapping its two operands in a BinopAexp AST node.
    """
    def combine(left, right):
        return BinopAexp(operator, left, right)
    return combine
def aexp():
    """
    Main arithmetic expressions parser: terms combined by operators
    according to the precedence table.
    """
    term = aexp_term()
    return precedence(term, aexp_precedence_levels, process_binop)
| {
"repo_name": "PetukhovVictor/compiler",
"path": "src/Parser/Parsers/expressions/arithmetic.py",
"copies": "1",
"size": "2601",
"license": "mit",
"hash": -291049085711451460,
"line_mean": 26.670212766,
"line_max": 111,
"alpha_frac": 0.6289888504,
"autogenerated": false,
"ratio": 3.5975103734439835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47264992238439835,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from ...combinators import Rep, Exp
from ...AST.common import CompoundStatement
def stmt():
    """
    Main statement parser.

    Tries each statement form in order via the alternation combinator:
    assignment, conditionals, loops, return, write, calls, object methods,
    skip, and finally a bare arithmetic expression.
    """
    from ..expressions import arithmetic, objects, call
    from ..statements import conditional, assignment, loop, return_, skip, write
    # Alternation order matters: more specific forms must come first.
    return assignment.assign_stmt() | \
           conditional.if_stmt() | \
           loop.while_stmt() | \
           loop.repeat_stmt() | \
           loop.for_stmt() | \
           return_.return_stmt() | \
           write.write_stmt() | \
           call.fun_call_stmt() | \
           objects.object_method() | \
           skip.skip_stmt() | \
           arithmetic.aexp()
def stmt_list():
    """
    Parsing statement list (by ';' separator).
    Example:
        x := 56;
        y := 12;
        z := 512
    """
    from ..common import keyword
    from ..declarations import function
    def process_stmt(x):
        # Combiner for the ';' separator: folds two neighbouring statements
        # into one CompoundStatement (x, the parsed separator, is unused).
        return lambda l, r: CompoundStatement(l, r)
    def process(parsed_list):
        # Collapse the repeated results into a single AST node: a one-element
        # list passes through, longer lists are left-folded into
        # CompoundStatement pairs.
        if len(parsed_list) == 1:
            return parsed_list[0]
        return reduce(lambda stmt1, stmt2: CompoundStatement(stmt1, stmt2), parsed_list)
    return Rep(function.fun() | Exp(stmt(), keyword(';') ^ process_stmt)) ^ process
| {
"repo_name": "PetukhovVictor/compiler",
"path": "src/Parser/Parsers/statements/base.py",
"copies": "1",
"size": "1345",
"license": "mit",
"hash": -4895065253277678000,
"line_mean": 27.0208333333,
"line_max": 88,
"alpha_frac": 0.5977695167,
"autogenerated": false,
"ratio": 3.9098837209302326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007392999161101532,
"num_lines": 48
} |
from functools import reduce
from dateutil.parser import parse
from django.db.models import Case, Count, F, IntegerField, Manager, Max, Sum, When
from kolibri.auth.models import FacilityUser
from kolibri.content.content_db_router import default_database_is_attached, get_active_content_database
from kolibri.content.models import ContentNode
from kolibri.logger.models import ContentSummaryLog
from le_utils.constants import content_kinds
from rest_framework import serializers
from .utils.return_users import get_members_or_user
class UserReportSerializer(serializers.ModelSerializer):
    """Per-user report row: progress and last-active time for the content
    node named in the view kwargs (``content_node_id``)."""
    progress = serializers.SerializerMethodField()
    last_active = serializers.SerializerMethodField()
    class Meta:
        model = FacilityUser
        fields = (
            'pk', 'username', 'full_name', 'progress', 'last_active',
        )
    def get_progress(self, target_user):
        """Progress summaries for *target_user* on the node: per-kind
        aggregates for a topic, a single detail row for a leaf node."""
        content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])
        # progress details for a topic node and everything under it
        if content_node.kind == content_kinds.TOPIC:
            kind_counts = content_node.get_descendant_kind_counts()
            topic_details = ContentSummaryLog.objects \
                .filter_by_topic(content_node) \
                .filter(user=target_user) \
                .values('kind') \
                .annotate(total_progress=Sum('progress')) \
                .annotate(log_count_total=Count('pk')) \
                .annotate(log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())))
            # evaluate queryset so we can add data for kinds that do not have logs
            topic_details = list(topic_details)
            for kind in topic_details:
                del kind_counts[kind['kind']]
            # remaining kinds had no logs at all; emit zeroed entries for them
            for key in kind_counts:
                topic_details.append({'kind': key, 'total_progress': 0.0, 'log_count_total': 0, 'log_count_complete': 0})
            return topic_details
        else:
            # progress details for a leaf node (exercise, video, etc.)
            leaf_details = ContentSummaryLog.objects \
                .filter(user=target_user) \
                .filter(content_id=content_node.content_id) \
                .annotate(total_progress=F('progress')) \
                .values('kind', 'time_spent', 'total_progress')
            return leaf_details if leaf_details else [{'kind': content_node.kind, 'time_spent': 0, 'total_progress': 0.0}]
    def get_last_active(self, target_user):
        """Most recent ``end_timestamp`` for the user on this node (or its
        descendants for a topic); None when no summary log exists."""
        content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])
        try:
            if content_node.kind == content_kinds.TOPIC:
                return ContentSummaryLog.objects \
                    .filter_by_topic(content_node) \
                    .filter(user=target_user) \
                    .latest('end_timestamp').end_timestamp
            else:
                return ContentSummaryLog.objects \
                    .filter(user=target_user) \
                    .get(content_id=content_node.content_id).end_timestamp
        except ContentSummaryLog.DoesNotExist:
            return None
def sum_progress_dicts(total_progress, progress_dict):
    """Reducer: add a progress dict's 'total_progress' (default 0.0) to the accumulator."""
    increment = progress_dict.get('total_progress', 0.0)
    return total_progress + increment
def get_progress_and_last_active(target_nodes, **kwargs):
    """Batch-compute progress summaries and last-active timestamps for
    *target_nodes* (a ContentNode queryset or a single ContentNode).

    Returns (progress_by_content_id, last_active_by_content_id). Expects
    kwargs 'collection_kind', 'collection_id' and optionally
    'last_active_time'.

    Bug fix: the result of ``progress_query.filter(...)`` for the
    last_active_time condition was discarded -- Django querysets are
    immutable, so the filter was silently a no-op; it is now assigned back.
    """
    # Prepare dictionaries to output the progress and last active, keyed by content_id
    output_progress_dict = {}
    output_last_active_dict = {}
    # Get a list of all the users that we are querying
    users = list(get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))
    # Get a list of all content ids for all target nodes and their descendants
    content_ids = target_nodes.get_descendants(include_self=True).order_by().values_list("content_id", flat=True)
    # get all summary logs for the current user that correspond to the content nodes and descendant content nodes
    if default_database_is_attached():  # if possible, do a direct join between the content and default databases
        channel_alias = get_active_content_database()
        SummaryLogManager = ContentSummaryLog.objects.using(channel_alias)
    else:  # otherwise, convert the leaf queryset into a flat list of ids and use that
        SummaryLogManager = ContentSummaryLog.objects
        content_ids = list(content_ids)
    # Filter by users and the content ids
    progress_query = SummaryLogManager \
        .filter(user__in=users, content_id__in=content_ids)
    # Conditionally filter by last active time (assign back: querysets are immutable)
    if kwargs.get('last_active_time'):
        progress_query = progress_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))
    # Get an annotated list of dicts of type:
    # {
    #   'content_id': <content_id>,
    #   'kind': <kind>,
    #   'total_progress': <sum of all progress for this content>,
    #   'log_count_total': <number of summary logs for this content>,
    #   'log_count_complete': <number of complete summary logs for this content>,
    #   'last_active': <most recent end_timestamp for this content>,
    # }
    progress_list = progress_query.values('content_id', 'kind').annotate(
        total_progress=Sum('progress'),
        log_count_total=Count('pk'),
        log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())),
        last_active=Max('end_timestamp'))
    # Evaluate query and make a loop dict of all progress
    progress_dict = {item.get('content_id'): item for item in progress_list}
    if isinstance(target_nodes, ContentNode):
        # Have been passed an individual model
        target_nodes = [target_nodes]
    # Loop through each node to add progress and last active information to the output dicts
    for target_node in target_nodes:
        # In the case of a topic, we need to look at the progress and last active from each of its descendants
        if target_node.kind == content_kinds.TOPIC:
            # Get all the content_ids and kinds of each leaf node as a tuple
            # (about half the size of the dict from 'values' method)
            # Remove topics in generator comprehension, rather than using .exclude as kind is not indexed
            # Use set to remove repeated content
            leaf_nodes = set(node for node in target_node.get_descendants(include_self=False).order_by().values_list(
                'content_id', 'kind') if node[1] != content_kinds.TOPIC)
            # Get a unique set of all non-topic content kinds
            leaf_kinds = sorted(set(leaf_node[1] for leaf_node in leaf_nodes))
            # Create a list of progress summary dicts for each content kind
            progress = [{
                # For total progress sum across all the progress dicts for the descendant content leaf nodes
                'total_progress': reduce(
                    # Reduce with a function that just adds the total_progress of the passed in dict to the accumulator
                    sum_progress_dicts,
                    # Get all dicts of progress for every leaf_id that has some progress recorded
                    # and matches the kind we are aggregating over
                    (progress_dict.get(leaf_node[0]) for leaf_node in leaf_nodes\
                        if leaf_node[0] in progress_dict and leaf_node[1] == kind),
                    # Pass in an initial value of total_progress as zero to initialize the reduce
                    0.0,
                ),
                'kind': kind,
                # Count the number of leaf nodes of this particular kind
                'node_count': reduce(lambda x, y: x + int(y[1] == kind), leaf_nodes, 0)
            } for kind in leaf_kinds]
            # Set the output progress for this topic to this list of progress dicts
            output_progress_dict[target_node.content_id] = progress
            # Create a generator of last active times for the leaf_ids
            last_active_times = map(
                # Return the last active time for this leaf_node
                lambda leaf_node: progress_dict[leaf_node[0]]['last_active'],
                filter(
                    # Filter leaf_nodes to those that are in the progress_dict
                    lambda leaf_node: leaf_node[0] in progress_dict,
                    leaf_nodes))
            # Max does not handle empty iterables, so try this
            try:
                # If it is not empty, great!
                output_last_active_dict[target_node.content_id] = max(last_active_times)
            except (ValueError, TypeError):
                # If it is empty, catch the value error and set the last active time to None
                # If they are all none, catch the TypeError and also set to None
                output_last_active_dict[target_node.content_id] = None
        else:
            if target_node.content_id in progress_dict:
                progress = progress_dict.pop(target_node.content_id)
                output_last_active_dict[target_node.content_id] = progress.pop('last_active')
                # return as array for consistency in api
                output_progress_dict[target_node.content_id] = [{
                    'total_progress': progress['total_progress'],
                    'log_count_total': progress['log_count_total'],
                    'log_count_complete': progress['log_count_complete'],
                }]
            elif target_node.content_id not in output_progress_dict:
                # Not in the progress dict, but also not in our output, so supply default values
                output_last_active_dict[target_node.content_id] = None
                output_progress_dict[target_node.content_id] = [{
                    'total_progress': 0.0,
                    'log_count_total': 0,
                    'log_count_complete': 0,
                }]
    return output_progress_dict, output_last_active_dict
class ContentReportListSerializer(serializers.ListSerializer):
    """List serializer that computes progress/last-active for all nodes in
    one batch and hands them to the child serializer per item."""
    def to_representation(self, data):
        if not data:
            return data
        if 'request' not in self.context:
            progress_dict = {}
            # Bug fix: last_active_dict was previously left undefined on this
            # branch, raising NameError in the comprehension below whenever
            # the serializer ran without a request in its context.
            last_active_dict = {}
        else:
            kwargs = self.context['view'].kwargs
            progress_dict, last_active_dict = get_progress_and_last_active(data, **kwargs)
        # Dealing with nested relationships, data can be a Manager,
        # so, first get a queryset from the Manager if needed
        iterable = data.all() if isinstance(data, Manager) else data
        return [
            self.child.to_representation(
                item,
                progress=progress_dict.get(item.content_id),
                last_active=last_active_dict.get(item.content_id)) for item in iterable
        ]
class ContentReportSerializer(serializers.ModelSerializer):
    """Serializes a ContentNode along with its progress and last-active data."""
    class Meta:
        model = ContentNode
        fields = (
            'pk', 'content_id', 'title', 'kind',
        )
        # Delegate list rendering so progress can be computed in one batch.
        list_serializer_class = ContentReportListSerializer
    def to_representation(self, instance, progress=None, last_active=None):
        """Render *instance*; when *progress*/*last_active* were not supplied
        by the list serializer, compute them for this single node."""
        if progress is None:
            if 'request' not in self.context:
                # No request context (e.g. schema generation): default zeros.
                progress = [{'total_progress': 0, 'log_count_total': 0, 'log_count_complete': 0}]
            else:
                kwargs = self.context['view'].kwargs
                progress_dict, last_active_dict = get_progress_and_last_active(instance, **kwargs)
                progress = progress_dict.get(instance.content_id)
                last_active = last_active_dict.get(instance.content_id)
        value = super(ContentReportSerializer, self).to_representation(instance)
        value['progress'] = progress
        value['last_active'] = last_active
        return value
class ContentSummarySerializer(ContentReportSerializer):
    """ContentReportSerializer plus the ancestor breadcrumb and the number
    of users in the reported collection."""
    ancestors = serializers.SerializerMethodField()
    num_users = serializers.SerializerMethodField()
    class Meta:
        model = ContentNode
        fields = (
            'pk', 'content_id', 'title', 'kind', 'ancestors', 'num_users',
        )
        list_serializer_class = ContentReportListSerializer
    def get_ancestors(self, target_node):
        """
        in descending order (root ancestor first, immediate parent last)
        """
        return target_node.get_ancestors().values('pk', 'title')
    def get_num_users(self, target_node):
        # Size of the collection (or single user) named in the view kwargs.
        kwargs = self.context['view'].kwargs
        return get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']).count()
| {
"repo_name": "rtibbles/kolibri",
"path": "kolibri/plugins/coach/serializers.py",
"copies": "1",
"size": "12487",
"license": "mit",
"hash": -6937374488170230000,
"line_mean": 49.1485943775,
"line_max": 122,
"alpha_frac": 0.6213662209,
"autogenerated": false,
"ratio": 4.260320709655407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017399210627979862,
"num_lines": 249
} |
from functools import reduce
from dateutil.parser import parse
from django.db.models import Case
from django.db.models import Count
from django.db.models import F
from django.db.models import IntegerField
from django.db.models import Manager
from django.db.models import Max
from django.db.models import Sum
from django.db.models import When
from le_utils.constants import content_kinds
from rest_framework import serializers
from .utils.return_users import get_members_or_user
from kolibri.auth.models import FacilityUser
from kolibri.content.models import ContentNode
from kolibri.content.serializers import ContentNodeSerializer
from kolibri.core.lessons.models import Lesson
from kolibri.logger.models import ContentSummaryLog
class UserReportSerializer(serializers.ModelSerializer):
    """Serializes a FacilityUser together with progress and last-active info
    for the content node identified by the view's ``content_node_id`` kwarg.
    """
    progress = serializers.SerializerMethodField()
    last_active = serializers.SerializerMethodField()
    class Meta:
        model = FacilityUser
        fields = (
            'pk', 'username', 'full_name', 'progress', 'last_active',
        )
    def get_progress(self, target_user):
        """Return a list of per-kind progress dicts for the user on this node."""
        content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])
        # progress details for a topic node and everything under it
        if content_node.kind == content_kinds.TOPIC:
            # BUG FIX: materialize as a list -- QuerySets have no .remove(),
            # so the loop below raised AttributeError.
            kinds = list(content_node.get_descendants().values_list('kind', flat=True).distinct())
            topic_details = ContentSummaryLog.objects \
                .filter_by_topic(content_node) \
                .filter(user=target_user) \
                .values('kind') \
                .annotate(total_progress=Sum('progress')) \
                .annotate(log_count_total=Count('pk')) \
                .annotate(log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())))
            # evaluate queryset so we can add data for kinds that do not have logs
            topic_details = list(topic_details)
            for kind in topic_details:
                kinds.remove(kind['kind'])
            # Pad with zeroed entries for kinds the user never touched.
            for kind in kinds:
                topic_details.append({'kind': kind, 'total_progress': 0.0, 'log_count_total': 0, 'log_count_complete': 0})
            return topic_details
        else:
            # progress details for a leaf node (exercise, video, etc.)
            leaf_details = ContentSummaryLog.objects \
                .filter(user=target_user) \
                .filter(content_id=content_node.content_id) \
                .annotate(total_progress=F('progress')) \
                .values('kind', 'time_spent', 'total_progress')
            return leaf_details if leaf_details else [{'kind': content_node.kind, 'time_spent': 0, 'total_progress': 0.0}]
    def get_last_active(self, target_user):
        """Return the most recent end_timestamp the user logged for this node
        (or any descendant, for topics); None when no log exists."""
        content_node = ContentNode.objects.get(pk=self.context['view'].kwargs['content_node_id'])
        try:
            if content_node.kind == content_kinds.TOPIC:
                return ContentSummaryLog.objects \
                    .filter_by_topic(content_node) \
                    .filter(user=target_user) \
                    .latest('end_timestamp').end_timestamp
            else:
                return ContentSummaryLog.objects \
                    .filter(user=target_user) \
                    .get(content_id=content_node.content_id).end_timestamp
        except ContentSummaryLog.DoesNotExist:
            return None
def sum_progress_dicts(total_progress, progress_dict):
    """Reduce helper: add a progress dict's 'total_progress' (0.0 when
    absent) onto the running total and return the new total."""
    increment = progress_dict.get('total_progress', 0.0)
    return total_progress + increment
def get_progress_and_last_active(target_nodes, **kwargs):
    """Aggregate per-kind progress and last-activity timestamps for content.

    ``target_nodes`` is either a single ContentNode or a queryset of them.
    ``kwargs`` must carry ``collection_kind`` and ``collection_id`` (used to
    select the users aggregated over) and may carry ``last_active_time`` to
    restrict logs to recent activity.

    Returns a pair of dicts keyed by content_id:
    ``(progress_by_content_id, last_active_by_content_id)`` where each
    progress value is a list of summary dicts and each last-active value is
    the most recent ``end_timestamp`` or None.
    """
    # Prepare dictionaries to output the progress and last active, keyed by content_id
    output_progress_dict = {}
    output_last_active_dict = {}
    # Get a list of all the users that we are querying
    users = list(get_members_or_user(kwargs['collection_kind'], kwargs['collection_id']))
    # Get a list of all content ids for all target nodes and their descendants
    content_ids = target_nodes.get_descendants(include_self=True).order_by().values_list("content_id", flat=True)
    # get all summary logs for the current user that correspond to the content nodes and descendant content nodes
    # Filter by users and the content ids
    progress_query = ContentSummaryLog.objects\
        .filter(user__in=users, content_id__in=content_ids)
    # Conditionally filter by last active time
    if kwargs.get('last_active_time'):
        progress_query = progress_query.filter(end_timestamp__gte=parse(kwargs.get('last_active_time')))
    # Get an annotated list of dicts of type:
    # {
    #   'content_id': <content_id>,
    #   'kind': <kind>,
    #   'total_progress': <sum of all progress for this content>,
    #   'log_count_total': <number of summary logs for this content>,
    #   'log_count_complete': <number of complete summary logs for this content>,
    #   'last_active': <most recent end_timestamp for this content>,
    # }
    progress_list = progress_query.values('content_id', 'kind').annotate(
        total_progress=Sum('progress'),
        log_count_total=Count('pk'),
        log_count_complete=Sum(Case(When(progress=1, then=1), default=0, output_field=IntegerField())),
        last_active=Max('end_timestamp'))
    # Evaluate query and make a loop dict of all progress
    progress_dict = {item.get('content_id'): item for item in progress_list}
    if isinstance(target_nodes, ContentNode):
        # Have been passed an individual model
        target_nodes = [target_nodes]
    # Loop through each node to add progress and last active information to the output dicts
    for target_node in target_nodes:
        # In the case of a topic, we need to look at the progress and last active from each of its descendants
        if target_node.kind == content_kinds.TOPIC:
            # Get all the content_ids and kinds of each leaf node as a tuple
            # (about half the size of the dict from 'values' method)
            # Remove topics in generator comprehension, rather than using .exclude as kind is not indexed
            # Use set to remove repeated content
            leaf_nodes = set(node for node in target_node.get_descendants(include_self=False).order_by().values_list(
                'content_id', 'kind') if node[1] != content_kinds.TOPIC)
            # Get a unique set of all non-topic content kinds
            leaf_kinds = sorted(set(leaf_node[1] for leaf_node in leaf_nodes))
            # Create a list of progress summary dicts for each content kind
            progress = [{
                # For total progress sum across all the progress dicts for the descendant content leaf nodes
                'total_progress': reduce(
                    # Reduce with a function that just adds the total_progress of the passed in dict to the accumulator
                    sum_progress_dicts,
                    # Get all dicts of progress for every leaf_id that has some progress recorded
                    # and matches the kind we are aggregating over
                    (progress_dict.get(leaf_node[0]) for leaf_node in leaf_nodes\
                        if leaf_node[0] in progress_dict and leaf_node[1] == kind),
                    # Pass in an initial value of total_progress as zero to initialize the reduce
                    0.0,
                ),
                'kind': kind,
                # Count the number of leaf nodes of this particular kind
                'node_count': reduce(lambda x, y: x + int(y[1] == kind), leaf_nodes, 0)
            } for kind in leaf_kinds]
            # Set the output progress for this topic to this list of progress dicts
            output_progress_dict[target_node.content_id] = progress
            # Create a generator of last active times for the leaf_ids
            last_active_times = map(
                # Return the last active time for this leaf_node
                lambda leaf_node: progress_dict[leaf_node[0]]['last_active'],
                filter(
                    # Filter leaf_nodes to those that are in the progress_dict
                    lambda leaf_node: leaf_node[0] in progress_dict,
                    leaf_nodes))
            # Max does not handle empty iterables, so try this
            try:
                # If it is not empty, great!
                output_last_active_dict[target_node.content_id] = max(last_active_times)
            except (ValueError, TypeError):
                # If it is empty, catch the value error and set the last active time to None
                # If they are all none, catch the TypeError and also set to None
                output_last_active_dict[target_node.content_id] = None
        else:
            if target_node.content_id in progress_dict:
                progress = progress_dict.pop(target_node.content_id)
                output_last_active_dict[target_node.content_id] = progress.pop('last_active')
                # return as array for consistency in api
                output_progress_dict[target_node.content_id] = [{
                    'total_progress': progress['total_progress'],
                    'log_count_total': progress['log_count_total'],
                    'log_count_complete': progress['log_count_complete'],
                }]
            elif target_node.content_id not in output_progress_dict:
                # Not in the progress dict, but also not in our output, so supply default values
                output_last_active_dict[target_node.content_id] = None
                output_progress_dict[target_node.content_id] = [{
                    'total_progress': 0.0,
                    'log_count_total': 0,
                    'log_count_complete': 0,
                }]
    return output_progress_dict, output_last_active_dict
class ContentReportListSerializer(serializers.ListSerializer):
    def to_representation(self, data):
        """Serialize a collection of ContentNodes, annotating each child with
        the progress/last-active info aggregated for the view's collection.
        """
        if not data:
            return data
        if 'request' not in self.context:
            # No request context (e.g. nested usage): fall back to empty
            # lookups so every child gets None progress/last_active.
            progress_dict = {}
            # BUG FIX: last_active_dict was left undefined on this branch,
            # raising a NameError in the comprehension below.
            last_active_dict = {}
        else:
            kwargs = self.context['view'].kwargs
            progress_dict, last_active_dict = get_progress_and_last_active(data, **kwargs)
        # Dealing with nested relationships, data can be a Manager,
        # so, first get a queryset from the Manager if needed
        iterable = data.all() if isinstance(data, Manager) else data
        return [
            self.child.to_representation(
                item,
                progress=progress_dict.get(item.content_id),
                last_active=last_active_dict.get(item.content_id)) for item in iterable
        ]
class ContentReportSerializer(serializers.ModelSerializer):
    # Used both directly and as the child of ContentReportListSerializer,
    # which precomputes progress/last_active and passes them in.
    class Meta:
        model = ContentNode
        fields = (
            'pk', 'content_id', 'title', 'kind',
        )
        list_serializer_class = ContentReportListSerializer
    def to_representation(self, instance, progress=None, last_active=None):
        # When progress was not precomputed (direct use), derive it from the
        # view kwargs; without a request context fall back to zeroed counts.
        if progress is None:
            if 'request' not in self.context:
                progress = [{'total_progress': 0, 'log_count_total': 0, 'log_count_complete': 0}]
            else:
                kwargs = self.context['view'].kwargs
                progress_dict, last_active_dict = get_progress_and_last_active(instance, **kwargs)
                progress = progress_dict.get(instance.content_id)
                last_active = last_active_dict.get(instance.content_id)
        value = super(ContentReportSerializer, self).to_representation(instance)
        value['progress'] = progress
        value['last_active'] = last_active
        # Delegate the coach-content count to the content app's serializer.
        value['num_coach_contents'] = ContentNodeSerializer(instance, context=self.context).data['num_coach_contents']
        return value
class ContentSummarySerializer(ContentReportSerializer):
    """Report serializer extended with the node's ancestry and learner count."""
    ancestors = serializers.SerializerMethodField()
    num_users = serializers.SerializerMethodField()
    class Meta:
        model = ContentNode
        fields = (
            'pk', 'content_id', 'title', 'kind', 'ancestors', 'num_users',
        )
        list_serializer_class = ContentReportListSerializer
    def get_ancestors(self, target_node):
        """
        in descending order (root ancestor first, immediate parent last)
        """
        return target_node.get_ancestors().values('pk', 'title')
    def get_num_users(self, target_node):
        # Size of the collection addressed by the view kwargs.
        view_kwargs = self.context['view'].kwargs
        collection = get_members_or_user(view_kwargs['collection_kind'], view_kwargs['collection_id'])
        return collection.count()
class LessonReportSerializer(serializers.ModelSerializer):
    """
    Annotates a Lesson with a 'progress' array, which maps 1-to-1 with Lesson.resources.
    Each entry in the 'progress' array gives the total number of Learners who have
    been assigned the Lesson and have 'mastered' the Resource.
    """
    progress = serializers.SerializerMethodField()
    total_learners = serializers.SerializerMethodField()
    class Meta:
        model = Lesson
        fields = ('id', 'title', 'progress', 'total_learners',)
    def get_progress(self, instance):
        learners = instance.get_all_learners()
        # BUG FIX: compare with == rather than `is` -- identity comparison
        # with an int literal relies on CPython small-int caching and raises
        # a SyntaxWarning on modern Pythons.
        if learners.count() == 0:
            return []
        return [self._resource_progress(r, learners) for r in instance.resources]
    def get_total_learners(self, instance):
        return instance.get_all_learners().count()
    def _resource_progress(self, resource, learners):
        """Count how many of ``learners`` fully completed (progress == 1.0)
        the given resource dict."""
        response = {
            'contentnode_id': resource['contentnode_id'],
            'num_learners_completed': 0,
        }
        completed_content_logs = ContentSummaryLog.objects \
            .filter(
                content_id=resource['content_id'],
                user__in=learners,
                progress=1.0,
            ) \
            .values('content_id') \
            .annotate(total=Count('pk'))
        # If no logs for the Content Item,
        # BUG FIX: same `is 0` identity comparison as above.
        if completed_content_logs.count() == 0:
            return response
        else:
            response['num_learners_completed'] = completed_content_logs[0]['total']
            return response
| {
"repo_name": "jonboiser/kolibri",
"path": "kolibri/plugins/coach/serializers.py",
"copies": "1",
"size": "13991",
"license": "mit",
"hash": 8160696601574850000,
"line_mean": 46.2668918919,
"line_max": 122,
"alpha_frac": 0.6213994711,
"autogenerated": false,
"ratio": 4.2371290127195635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5358528483819563,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from dict_utils.public import DictUtils
class BlueprintResolver():
    """
    takes care of resolving the dependencies of blueprints into one independent document
    """
    class ResolvingException(Exception):
        """
        is thrown, in case an error occurs, during resolving a blueprint
        """
        pass
    def __init__(self, base_blueprints):
        """
        is initialized with a set of base blueprints, which are used to resolve the given blueprints
        :param base_blueprints: set of base blueprints
        :type base_blueprints: dict
        """
        self.base_blueprints = base_blueprints
    def resolve(self, blueprint):
        """
        resolves the given blueprint with all its referencing dependencies into one single blueprint
        :param blueprint: this either is a blueprint dict, or a string id, referencing the parent directly
        :type blueprint: dict or str
        :return: resolved dict (a new dict; inputs are never mutated)
        :rtype: dict
        :raises BlueprintResolver.ResolvingException: raised in case the blueprint is not valid and therefore can't be
        resolved
        """
        if isinstance(blueprint, str):
            return self._clean_representation(self._get_base_blueprint_by_id(blueprint))
        if isinstance(blueprint, dict):
            parents = self._get_parents(blueprint)
            return self._clean_representation(
                reduce(DictUtils.merge_dicts, [blueprint] + parents) if parents else blueprint
            )
        raise BlueprintResolver.ResolvingException(
            'following blueprint is not valid: {blueprint}'.format(blueprint=blueprint)
        )
    def _get_parents(self, blueprint):
        """
        returns all parents for a given blueprint
        :param blueprint: blueprint to get the parents for
        :type blueprint: dict
        :return: list of all parents. It is sorted with the nearest parent coming first
        :rtype: list
        """
        if 'parent' in blueprint:
            parent_blueprint = self._get_base_blueprint_by_id(blueprint['parent'])
            return [parent_blueprint] + self._get_parents(parent_blueprint)
        return []
    def _get_base_blueprint_by_id(self, id):
        """
        returns a base blueprint by id and validates the id
        :param id: the id of the blueprint to get
        :type id: str
        :return: the base blueprint
        :rtype: dict
        :raises BlueprintResolver.ResolvingException: raise if id is not found
        """
        if id not in self.base_blueprints:
            raise BlueprintResolver.ResolvingException(
                'blueprint is referencing a non existing base blueprint: {parent_blueprint_id}'.format(
                    parent_blueprint_id=id
                )
            )
        return self.base_blueprints[id]
    def _clean_representation(self, blueprint):
        """
        clean the blueprint of data, not relevant for the final representation
        :param blueprint: blueprint to clean
        :type: dict
        :return: cleaned blueprint (a shallow copy of the input)
        :rtype: dict
        """
        # BUG FIX: operate on a shallow copy -- popping 'parent' from the
        # original dict mutated the caller's data and, on the str-id path,
        # the shared base blueprints, truncating the parent chain for later
        # resolutions referencing the same blueprint.
        cleaned = dict(blueprint)
        cleaned.pop('parent', None)
        return cleaned
| {
"repo_name": "jdepoix/goto_cloud",
"path": "goto_cloud/migration_plan_parsing/blueprint_resolving.py",
"copies": "1",
"size": "3237",
"license": "mit",
"hash": -1883769820102194000,
"line_mean": 34.1847826087,
"line_max": 118,
"alpha_frac": 0.6184738956,
"autogenerated": false,
"ratio": 4.86036036036036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.597883425596036,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from django.apps.registry import apps
from django.conf.urls import url as conf_url, include
from django.contrib.auth import get_permission_codename
from django.core.exceptions import ImproperlyConfigured
import declarative_urlconf as urlconf
from . import views
from .registry import RegistryMixin
class LabeledURLs(urlconf.URLs):
    """URL collection mounted beneath a label prefix ('<label>/...')."""
    label = None
    verbose_name = None
    def __init__(self):
        # Resolve label/verbose_name through the hooks so subclasses can
        # derive them dynamically (e.g. from an app config).
        self.label = self.get_label()
        self.verbose_name = self.get_verbose_name()
        super().__init__()
    def get_label(self):
        return self.label
    def get_verbose_name(self):
        return self.verbose_name
    def as_urls(self, extra_urls=None):
        if extra_urls is None:
            extra_urls = []
        # Prefix the urls with the label
        prefix = r'^{label}/'.format(label=self.label)
        return [conf_url(prefix, include(super().as_urls() + extra_urls))]
#
# Sections
#
class AdminSection(RegistryMixin, LabeledURLs):
    """A labeled group of admins registered on a site."""
    index = urlconf.URL(r'^$', views.SectionIndexView, name='{section}_index')
    def __init__(self, site=None):
        self.site = site
        super().__init__()
        site.register(self)
    def get_namespace(self):
        return self.site.namespace
    def register(self, admin_class):
        super().register(self.init_admin(admin_class))
    def init_admin(self, admin_class):
        # Instantiate the admin class bound to this section.
        return admin_class(section=self)
    def has_permission(self, request):
        # Sections are visible to everybody by default; subclasses restrict.
        return True
    @property
    def admins(self):
        return self.values()
    def admins_as_urls(self):
        # Concatenate the url lists of all registered admins.
        url_lists = [admin.as_urls() for admin in self.admins]
        return reduce(lambda acc, urls: acc + urls, url_lists)
    def get_view_kwargs(self, name):
        return {'site': self.site, 'section': self}
    def get_view_name_fields(self, name):
        return {'section': self.label}
    def as_urls(self, extra_urls=None):
        if extra_urls is None:
            extra_urls = []
        return super().as_urls(extra_urls=self.admins_as_urls() + extra_urls)
class AppAdminSection(AdminSection):
    """Admin section backed by a Django app; label and verbose name default
    to the app config's when not set explicitly."""
    app = None
    def __init__(self, app_label=None, **kwargs):
        self.app = apps.get_app_config(app_label)
        super().__init__(**kwargs)
    def get_label(self):
        explicit = super().get_label()
        if explicit is not None:
            return explicit
        return self.app.label
    def get_verbose_name(self):
        explicit = super().get_verbose_name()
        if explicit is not None:
            return explicit
        return self.app.verbose_name
    def has_permission(self, request):
        return request.user.has_module_perms(self.app.label)
#
# Admin within a Section
#
class Admin(LabeledURLs):
    """Base admin attached to a section; permission hooks are abstract."""
    def __init__(self, section=None):
        self.section = section
        super().__init__()
    @property
    def site(self):
        return self.section.site
    def has_add_permission(self, request):
        """
        Returns True if the given request has permission to add an object.
        Can be overridden by the user in subclasses.
        """
        raise NotImplementedError
    def has_change_permission(self, request, obj=None):
        """
        Returns True if the given request may change objects of this type.
        The default contract ignores `obj`; subclasses may use it to decide
        per-instance. With `obj` None, return True if the request may change
        *any* object of the given type.
        """
        raise NotImplementedError
    def has_delete_permission(self, request, obj=None):
        """
        Returns True if the given request may delete objects of this type.
        The default contract ignores `obj`; subclasses may use it to decide
        per-instance. With `obj` None, return True if the request may delete
        *any* object of the given type.
        """
        raise NotImplementedError
    def get_permissions(self, request):
        """
        Returns a dict mapping the actions ``add``, ``change`` and ``delete``
        to True/False for this request.
        """
        checks = (
            ('add', self.has_add_permission(request)),
            ('change', self.has_change_permission(request)),
            ('delete', self.has_delete_permission(request)),
        )
        return dict(checks)
class ModelAdmin(Admin):
    """Admin exposing CRUD views (list/create/detail/update/delete) for one
    Django model."""
    model = None
    list_display = ('__str__',)
    fields = '__all__'  # The admin shows all fields by default... for now
    index = urlconf.URL(r'^$', views.ListView, name='{app}_{model}_index')
    create = urlconf.URL(r'^add/$', views.CreateView, name='{app}_{model}_create')
    detail = urlconf.URL(r'^(?P<pk>.+)/preview/$', views.DetailView, name='{app}_{model}_detail')
    update = urlconf.URL(r'^(?P<pk>.+)/edit/$', views.UpdateView, name='{app}_{model}_update')
    delete = urlconf.URL(r'^(?P<pk>.+)/delete/$', views.DeleteView, name='{app}_{model}_delete')
    def __init__(self, model=None, **kwargs):
        # The model may come from the constructor or from a class attribute.
        if model is not None:
            self.model = model
        if self.model is None:
            raise ImproperlyConfigured('Model class is not set on ModelAdmin or in constructor')
        self.opts = self.model._meta
        super().__init__(**kwargs)
    #
    # URLs
    #
    def get_namespace(self):
        return self.section.site.namespace
    def get_view_kwargs(self, name):
        view_kwargs = {
            'model': self.model,
            'site': self.site,
            'admin': self,
        }
        # Form views additionally need to know which fields to expose.
        if name in ('update', 'create'):
            view_kwargs['fields'] = self.fields
        return view_kwargs
    def get_view_name_fields(self, name):
        return {
            'app': self.section.label,
            'model': self.opts.model_name,
        }
    #
    # Labels and naming
    #
    def get_label(self):
        explicit = super().get_label()
        return self.opts.model_name if explicit is None else explicit
    def get_verbose_name(self):
        explicit = super().get_verbose_name()
        return self.opts.verbose_name_plural if explicit is None else explicit
    #
    # Permissions
    #
    def _has_model_perm(self, request, action):
        # Shared check against the standard '<app_label>.<action>_<model>' codename.
        codename = get_permission_codename(action, self.opts)
        return request.user.has_perm("%s.%s" % (self.opts.app_label, codename))
    def has_add_permission(self, request):
        return self._has_model_perm(request, 'add')
    def has_change_permission(self, request, obj=None):
        return self._has_model_perm(request, 'change')
    def has_delete_permission(self, request, obj=None):
        return self._has_model_perm(request, 'delete')
| {
"repo_name": "bpeschier/reek-admin",
"path": "admin/admins.py",
"copies": "1",
"size": "7321",
"license": "mit",
"hash": -4863488015621554000,
"line_mean": 30.0211864407,
"line_max": 97,
"alpha_frac": 0.6139871602,
"autogenerated": false,
"ratio": 4.049225663716814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003731451594537353,
"num_lines": 236
} |
from functools import reduce
from django.contrib.auth.models import User
from approver.models import Response, Question, Project, Choice
from approver.constants import answer_submit_names, answer_response_names
from approver.utils import get_current_user_gatorlink, after_approval
def add_update_response(post_data, request):
    """
    This function is responsible for updating responses as a user is
    filling out the project approver form, ajax-style.
    This is important because we want the user to not lose work as
    they are going along but we also dont want the page refreshing constantly
    Also this function is used by the form update so we dont duplicate code
    Returns a dict describing the saved response (entity ids plus whether a
    new Response row was created).
    """
    api_response = {}
    question_id = int(post_data.get(answer_submit_names.get('question_id')))
    project_id = int(post_data.get(answer_submit_names.get('project_id')))
    choice_id = int(post_data.get(answer_submit_names.get('choice_id')))
    editing_user = request.user
    question = Question.objects.get(id=question_id)
    project = Project.objects.get(id=project_id)
    choice = Choice.objects.get(id=choice_id)
    response = Response.objects.filter(question=question, project=project, user=editing_user)
    # BUG FIX: compare counts with == instead of `is`; identity comparison
    # with int literals relies on CPython small-int caching and raises a
    # SyntaxWarning on modern Pythons.
    if len(response) == 0:
        new_response = Response(question=question, project=project, choice=choice, user=editing_user)
        new_response.save(editing_user)
        api_response[answer_response_names.get('response_id')] = new_response.id
        api_response[answer_response_names.get('newly_created')] = 'true'
    elif len(response) == 1:
        response[0].choice = choice
        response[0].save(editing_user)
        api_response[answer_response_names.get('response_id')] = response[0].id
        api_response[answer_response_names.get('newly_created')] = 'false'
    api_response[answer_response_names.get('user_id')] = editing_user.id
    api_response[answer_response_names.get('question_id')] = question_id
    api_response[answer_response_names.get('choice_id')] = choice_id
    api_response[answer_response_names.get('project_id')] = project_id
    return api_response
def save_project_with_form(project, question_form, request):
    """
    Calls the api method to add responses
    Builds a proper call from a project, question_form, and session
    """
    for field_name in question_form.keys():
        # Only keys of the form 'question_<id>' carry answers.
        if 'question' not in str(field_name):
            continue
        payload = {
            answer_submit_names['question_id']: str(field_name).split('_')[1],
            answer_submit_names['choice_id']: question_form[str(field_name)],
            answer_submit_names['project_id']: project.id,
        }
        add_update_response(payload, request)
    return project
def approve_or_next_steps(project, user):
    """
    Checks the project for correct question survey responses and
    whether there is an advisor on the project
    ...if QI is required for the project
    """
    responses = project.response.all()
    all_answers_correct = False
    if len(responses) > 0:
        if __response_count_matches_question_count(responses):
            all_answers_correct = all(
                response.is_correct_response() for response in responses
            )
    # A project is only approved if the self certification questions were answered
    # correctly and the project does not require an advisor (based on if a QI project
    # is required for the PQIs training program)
    if all_answers_correct and (project.get_need_advisor() is False):
        project.approve(user)
    return after_approval(project)
def __response_count_matches_question_count(response_list):
    """
    This function takes a list of responses, finds the sections in which
    the questions came from, then returns a boolean telling whether the
    amount of responses matches the amount of total questions in all of
    the sections
    """
    # Collect the distinct sections the answered questions belong to.
    section_ids = {response.question.section.id for response in response_list}
    # Total number of questions across those sections.
    question_total = sum(
        len(Question.objects.filter(section=section_id)) for section_id in section_ids
    )
    # Only a full set of answers counts as a match.
    return question_total == len(response_list)
| {
"repo_name": "PFWhite/qipr_approver",
"path": "qipr_approver/approver/workflows/approve_workflow.py",
"copies": "1",
"size": "4445",
"license": "apache-2.0",
"hash": 7523950253024121000,
"line_mean": 39.7798165138,
"line_max": 119,
"alpha_frac": 0.694488189,
"autogenerated": false,
"ratio": 3.9616755793226384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5156163768322638,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import F, Sum
from . import exceptions
class QuotaLimitField(models.IntegerField):
    """ Django virtual model field.
        Could be used to manage quotas transparently as model fields.
    """
    concrete = False
    def __init__(self, quota_field=None, *args, **kwargs):
        super(QuotaLimitField, self).__init__(*args, **kwargs)
        self._quota_field = quota_field
    def db_type(self, connection):
        # virtual field -- ignore in migrations
        return None
    def contribute_to_class(self, cls, name):
        self.model = cls
        self.name = self.attname = name
        # setting column as none will tell django to not consider this a concrete field
        self.column = None
        # connect myself as the descriptor for this field
        setattr(cls, name, property(self._get_func(), self._set_func()))
        cls._meta.add_field(self, private=True)
    def deconstruct(self, *args, **kwargs):
        # BUG FIX: super() was bound to the unrelated QuotaField class, which
        # raises "TypeError: super(type, obj) ..." at runtime because
        # instances of this class are not QuotaField instances. Bind to this
        # class instead.
        name, path, args, kwargs = super(QuotaLimitField, self).deconstruct(*args, **kwargs)
        return (name, path, args, {'default': kwargs.get('default'), 'to': None})
    def _get_func(self):
        # retrieve quota limit from related object
        def func(instance, quota_field=self._quota_field):
            if instance is None:
                raise AttributeError("Can only be accessed via instance")
            try:
                return instance.quotas.get(name=quota_field).limit
            except instance.quotas.model.DoesNotExist:
                # No quota row yet -- report the field's configured default.
                return quota_field.default_limit
        return func
    def _set_func(self):
        # store quota limit as related object
        def func(instance, value, quota_field=self._quota_field):
            # a hook to properly init quota after object saved to DB
            quota_field.scope_default_limit(instance, value)
            instance.set_quota_limit(quota_field, value)
        return func
class FieldsContainerMeta(type):
    """ Initiates quota fields names.
        Quota fields should be located in a class with FieldsContainerMeta
        metaclass. Example:
            example_quota = QuotaField()  # this quota field will have name 'example_quota'
    """
    def __new__(self, name, bases, attrs):
        # Stamp every QuotaField with the attribute name it is bound to.
        for attr_name, attr_value in attrs.items():
            if isinstance(attr_value, QuotaField):
                attr_value.name = attr_name
        return type.__new__(self, name, bases, attrs)
class QuotaField:
    """ Base quota field.
        Links quota to its scope right after its creation.
        Allows to define:
         - default_limit
         - default_usage
         - is_backend - is quota represents backend limitation. It is impossible to modify backend quotas.
         - creation_condition - function that receive quota scope and return True if quota should be created
           for given scope. Quota will be created automatically if creation_condition is None.
        Default limit and usage can be defined as callable function.
        Example:
            quota_name = QuotaField(default_limit=lambda scope: scope.attr)
    """
    def __init__(
        self,
        default_limit=-1,
        default_usage=0,
        is_backend=False,
        creation_condition=None,
    ):
        self.default_limit = default_limit
        self.default_usage = default_usage
        self.is_backend = is_backend
        self.creation_condition = creation_condition
    def is_connected_to_scope(self, scope):
        # No condition configured means the quota applies to every scope.
        if self.creation_condition is None:
            return True
        return self.creation_condition(scope)
    def scope_default_limit(self, scope, value=None):
        """Get (or, when ``value`` is given, set) the per-scope default limit.
        The override is cached on the scope object via a dynamically named
        attribute; with no override, fall back to the field-level default
        (which may be a callable of the scope).
        """
        attr_name = '_default_quota_limit_%s' % self.name
        if value is not None:
            setattr(scope, attr_name, value)
        try:
            return getattr(scope, attr_name)
        except AttributeError:
            return (
                self.default_limit(scope)
                if callable(self.default_limit)
                else self.default_limit
            )
    def get_or_create_quota(self, scope):
        """Return ``(quota, created)`` for this field on ``scope``.
        Raises CreationConditionFailedQuotaError when the creation condition
        rejects the scope.
        """
        if not self.is_connected_to_scope(scope):
            raise exceptions.CreationConditionFailedQuotaError(
                'Wrong scope: Cannot create quota "%s" for scope "%s".'
                % (self.name, scope)
            )
        defaults = {
            'limit': self.scope_default_limit(scope),
            'usage': self.default_usage(scope)
            if callable(self.default_usage)
            else self.default_usage,
        }
        return scope.quotas.get_or_create(name=self.name, defaults=defaults)
    def get_aggregator_quotas(self, quota):
        """ Fetch ancestors quotas that have the same name and are registered as aggregator quotas. """
        ancestors = quota.scope.get_quota_ancestors()
        aggregator_quotas = []
        for ancestor in ancestors:
            for ancestor_quota_field in ancestor.get_quotas_fields(
                field_class=AggregatorQuotaField
            ):
                if ancestor_quota_field.get_child_quota_name() == quota.name:
                    aggregator_quotas.append(
                        ancestor.quotas.get(name=ancestor_quota_field)
                    )
        return aggregator_quotas
    def __str__(self):
        return self.name
    def recalculate(self, scope):
        # Skip scopes this quota does not apply to.
        if not self.is_connected_to_scope(scope):
            return
        self.recalculate_usage(scope)
    def recalculate_usage(self, scope):
        # Hook for subclasses; base quotas have nothing to recompute.
        pass
class CounterQuotaField(QuotaField):
    """ Provides limitation on target models instances count.
        Automatically increases/decreases usage on target instances creation/deletion.
        By default usage is increased by 1. You may tweak this delta by defining get_delta function,
        which accepts target instance and returns number.
        Example:
            # This quota will increase/decrease own values on any resource creation/deletion
            nc_resource_count = CounterQuotaField(
                target_models=lambda: Resource.get_all_models(),  # list or function that return list of target models
                path_to_scope='service_project_link.project',  # path from target model to scope
            )
        It is possible to define trickier calculation by passing `get_current_usage` function as parameter.
        Function should accept two parameters:
            - models - list of target models
            - scope - quota scope
        And return count of current usage.
    """
    def __init__(
        self,
        target_models,
        path_to_scope,
        get_current_usage=None,
        get_delta=None,
        **kwargs
    ):
        # target_models may be a list or a zero-argument callable returning
        # one; resolution is deferred to the ``target_models`` property.
        self._raw_target_models = target_models
        self._raw_get_current_usage = get_current_usage
        self._raw_get_delta = get_delta
        self.path_to_scope = path_to_scope
        super(CounterQuotaField, self).__init__(**kwargs)
    def get_delta(self, target_instance):
        # Usage change caused by one target instance; defaults to 1.
        if not self._raw_get_delta:
            return 1
        return self._raw_get_delta(target_instance)
    def get_current_usage(self, models, scope):
        if self._raw_get_current_usage is not None:
            return self._raw_get_current_usage(models, scope)
        else:
            # Default: count instances of every target model that reference
            # the scope through ``path_to_scope``.
            filter_path_to_scope = self.path_to_scope.replace('.', '__')
            return sum(
                [
                    m.objects.filter(**{filter_path_to_scope: scope}).count()
                    for m in models
                ]
            )
    @property
    def target_models(self):
        # Resolve and memoize the target model list on first access.
        if not hasattr(self, '_target_models'):
            self._target_models = (
                self._raw_target_models()
                if callable(self._raw_target_models)
                else self._raw_target_models
            )
        return self._target_models
    def recalculate_usage(self, scope):
        # Recompute usage from scratch and store it on the scope's quota.
        current_usage = self.get_current_usage(self.target_models, scope)
        scope.set_quota_usage(self.name, current_usage)
    def add_usage(self, target_instance, delta):
        """Adjust the owning scope's usage by ``delta`` target instances."""
        try:
            scope = self._get_scope(target_instance)
        except ObjectDoesNotExist:
            # ignore as scope has been deleted
            return
        delta *= self.get_delta(target_instance)
        if self.is_connected_to_scope(scope):
            scope.add_quota_usage(self.name, delta, validate=True)
    def _get_scope(self, target_instance):
        # Walk the dotted attribute path from the instance to its quota scope.
        return reduce(getattr, self.path_to_scope.split('.'), target_instance)
class TotalQuotaField(CounterQuotaField):
    """
    Sums up the value of one field across all child objects.

    For example, it allows to compute total volume size for the project.

    class Quotas(quotas_models.QuotaModelMixin.Quotas):
        nc_volume_size = quotas_fields.TotalQuotaField(
            target_models=lambda: Volume.get_all_models(),
            path_to_scope='project',
            target_field='size',
        )
    """

    def __init__(self, target_models, path_to_scope, target_field):
        self.target_field = target_field
        super(TotalQuotaField, self).__init__(target_models, path_to_scope)

    def get_current_usage(self, models, scope):
        # Aggregate the tracked field over every target model in the scope.
        lookup = {self.path_to_scope.replace('.', '__'): scope}
        total = 0
        for model in models:
            rows = model.objects.filter(**lookup)
            subtotal = rows.values(self.target_field).aggregate(
                total_usage=Sum(self.target_field)
            )['total_usage']
            if subtotal:
                total += subtotal
        return total

    def get_delta(self, target_instance):
        # The delta equals the tracked field's value on the instance.
        return getattr(target_instance, self.target_field)
class AggregatorQuotaField(QuotaField):
    """ Aggregates sum of quota scope children with the same name.
    Automatically increases/decreases usage if corresponding child quota <aggregation_field> changed.
    Example:
        # This quota will store sum of all customer projects resources
        nc_resource_count = quotas_fields.UsageAggregatorQuotaField(
            get_children=lambda customer: customer.projects.all(),
        )
    """
    # Name of the child quota attribute to sum ('usage' or 'limit');
    # subclasses must override this.
    aggregation_field = NotImplemented
    def __init__(self, get_children, child_quota_name=None, **kwargs):
        # get_children: callable(scope) -> iterable of child scopes.
        self.get_children = get_children
        self._child_quota_name = child_quota_name
        super(AggregatorQuotaField, self).__init__(**kwargs)
    def get_child_quota_name(self):
        # Child quota name defaults to this field's own name.
        return (
            self._child_quota_name if self._child_quota_name is not None else self.name
        )
    def recalculate_usage(self, scope):
        # Full recompute: sum the aggregation_field of every child's quota.
        children = self.get_children(scope)
        current_usage = 0
        for child in children:
            child_quota = child.quotas.get(name=self.get_child_quota_name())
            current_usage += getattr(child_quota, self.aggregation_field)
        scope.set_quota_usage(self.name, current_usage)
    def post_child_quota_save(self, scope, child_quota, created=False):
        # Incremental update: apply only the child's delta, atomically,
        # via an F() expression.
        current_value = getattr(child_quota, self.aggregation_field)
        if created:
            diff = current_value
        else:
            # NOTE(review): child_quota.tracker is presumably a field
            # tracker (e.g. django-model-utils) -- confirm it records the
            # pre-save value of aggregation_field.
            diff = current_value - child_quota.tracker.previous(self.aggregation_field)
        if diff:
            scope.quotas.filter(name=self.name).update(usage=F('usage') + diff)
    def pre_child_quota_delete(self, scope, child_quota):
        # Subtract the child's contribution before it disappears.
        diff = getattr(child_quota, self.aggregation_field)
        if diff:
            scope.quotas.filter(name=self.name).update(usage=F('usage') - diff)
class UsageAggregatorQuotaField(AggregatorQuotaField):
    """ Aggregates sum children quotas usages.
    Note! It is impossible to aggregate usage of another usage aggregator quotas.
    This restriction was added to avoid calls duplications on quota usage field update.
    """
    # Sum the children's ``usage`` values.
    aggregation_field = 'usage'
class LimitAggregatorQuotaField(AggregatorQuotaField):
    """ Aggregates sum children quotas limits. """
    # Sum the children's ``limit`` values.
    aggregation_field = 'limit'
# TODO: Implement GlobalQuotaField and GlobalCounterQuotaField
| {
"repo_name": "opennode/nodeconductor-assembly-waldur",
"path": "src/waldur_core/quotas/fields.py",
"copies": "1",
"size": "12173",
"license": "mit",
"hash": -2991388658199226000,
"line_mean": 34.6979472141,
"line_max": 114,
"alpha_frac": 0.6251540294,
"autogenerated": false,
"ratio": 4.2847588877155935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5409912917115594,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from django.core.management.base import BaseCommand
from chat.serializers import LessonSerializer
from chat.models import Chat
class Command(BaseCommand):
    """
    Update all Chats progress.
    """

    def handle(self, *args, **options):
        """Recompute and persist each chat's completion percentage by
        matching its divider messages to the ordered unit lessons."""
        for chat in Chat.objects.all():
            messages = chat.message_set.filter(contenttype='chatdivider', is_additional=False)
            # Lessons of the chat's course unit, in display order.
            lessons = list(
                chat.enroll_code.courseUnit.unit.unitlesson_set.filter(
                    order__isnull=False
                ).order_by('order')
            )
            for each in messages:
                try:
                    if each.content.unitlesson in lessons:
                        lessons[lessons.index(each.content.unitlesson)].message = each.id
                    elif each.content.unitlesson and each.content.unitlesson.kind != 'answers':
                        lesson = each.content.unitlesson
                        lesson.message = each.id
                        lessons.append(lesson)
                except Exception:
                    # Best effort: skip messages whose content has no
                    # unitlesson (was a bare except, which also swallowed
                    # KeyboardInterrupt/SystemExit).
                    pass
            lessons_dict = LessonSerializer(many=True).to_representation(lessons)
            if lessons_dict and chat.state:
                # sum() replaces the reduce(lambda x, y: x + y, ...) fold.
                done = sum(x['isDone'] for x in lessons_dict)
                progress = round(float(done) / len(lessons_dict), 2)
            else:
                # if no lessons passed yet - return 1
                progress = 1
            if chat.progress != progress * 100:
                chat.progress = progress * 100
                chat.save()
        self.stdout.write('Updated.')
| {
"repo_name": "cjlee112/socraticqs2",
"path": "mysite/chat/management/commands/update_progress.py",
"copies": "1",
"size": "1651",
"license": "apache-2.0",
"hash": 2815920373167082500,
"line_mean": 36.5227272727,
"line_max": 95,
"alpha_frac": 0.5481526348,
"autogenerated": false,
"ratio": 4.4144385026737964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022200133651702407,
"num_lines": 44
} |
from functools import reduce
from django.db import models
from django.core.validators import MinLengthValidator
from .validators import (
IsDigitValidator, ControlNumberValidation
)
class SNILS(str):
    """Russian personal insurance account number as an 11-digit string.

    Provides pretty-printing helpers and checksum validation: the first
    nine digits are weighted 9..1, summed, reduced modulo 101 when the sum
    exceeds 101, and compared with the two-digit control number.
    """

    # Record numbers up to and including this one cannot be verified.
    RAZOR = '001-001-998'
    # Weights applied to the nine record digits (leftmost digit weighs 9).
    control_mask = range(9, 0, -1)

    @property
    def representation(self):
        """Human-readable form: 'XXX-XXX-XXX YY'."""
        return '%s-%s-%s %s' % (
            self[0:3], self[3:6], self[6:9], self[9:11]
        )

    @property
    def record_number(self):
        """The nine-digit insurance record number."""
        return self[0:9]

    @property
    def control_number(self):
        """The two-digit checksum as an int."""
        return int(self[9:11])

    @property
    def is_valid_control(self):
        """Whether the control number matches the record number.

        Raises:
            ValueError: for numbers at or below RAZOR, which the
                algorithm cannot verify.
        """
        # BUG FIX 1: compare digits against digits -- the old check
        # compared the whole 11-digit value with the dashed RAZOR string,
        # so the guard effectively never fired for '001...' numbers.
        if self.record_number <= self.RAZOR.replace('-', ''):
            # BUG FIX 2: the ValueError used to be *returned* (a truthy
            # object), which made unverifiable numbers look valid.
            raise ValueError('Unverifiable SNILS')
        return self._check_control_number()

    def _check_control_number(self):
        summ = self._get_position_sum()
        if summ > 101:
            summ %= 101
        return self._check_position_summ(summ)

    def _check_position_summ(self, summ):
        if summ < 100:
            return summ == self.control_number
        if summ in (100, 101):
            # Sums of exactly 100 or 101 map to a control number of 00.
            return self.control_number == 0
        return False

    def _get_position_sum(self):
        # Weighted sum of the record digits (sum() replaces reduce()).
        return sum(int(d) * w for d, w in zip(self.record_number, self.control_mask))
class SNILSField(models.CharField):
    """Model field storing a SNILS as an 11-character digit string.

    Forces max_length to 11, attaches length/digit/checksum validators,
    exposes values to Python as :class:`SNILS` instances, and strips
    separator characters before saving.
    """
    description = 'SNILS'
    # A SNILS is always exactly 11 digits.
    DEFAULT_MAX_LENGTH = DEFAULT_MIN_LENGTH = 11
    # Separator characters removed from user input in pre_save().
    STOP_CHARACTER = '- _'
    def __init__(self, *args, **kwargs):
        # max_length is fixed -- any caller-supplied value is overridden.
        kwargs['max_length'] = self.DEFAULT_MAX_LENGTH
        super(SNILSField, self).__init__(*args, **kwargs)
        self.validators.extend([
            MinLengthValidator(self.DEFAULT_MIN_LENGTH),
            IsDigitValidator(),
            ControlNumberValidation()
        ])
    def from_db_value(self, value, expression, connection, context):
        # Wrap DB values in the SNILS str subclass (None stays None).
        if value is None:
            return value
        return SNILS(value)
    def to_python(self, value):
        # Idempotent conversion used by forms/deserialization.
        if isinstance(value, SNILS):
            return value
        if value is None:
            return value
        return SNILS(value)
    def deconstruct(self):
        # max_length is re-added by __init__, so keep it out of migrations.
        name, path, args, kwargs = super(SNILSField, self).deconstruct()
        del kwargs['max_length']
        return name, path, args, kwargs
    def pre_save(self, model_instance, add):
        # Strip separator characters ('-', ' ', '_') before the value is
        # written to the database, and store the cleaned value back on the
        # instance.
        value = getattr(model_instance, self.attname)
        if value:
            value = reduce(
                lambda res, x: res.replace(x, ''),
                self.STOP_CHARACTER,
                value
            )
            setattr(model_instance, self.attname, value)
        return value
| {
"repo_name": "wolfstein9119/django-russian-fields",
"path": "russian_fields/snils.py",
"copies": "1",
"size": "2731",
"license": "bsd-2-clause",
"hash": -3323046112380606000,
"line_mean": 27.1546391753,
"line_max": 99,
"alpha_frac": 0.5759794947,
"autogenerated": false,
"ratio": 3.730874316939891,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48068538116398907,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from django.db import models
from django.db.models import Min
from django.db.models.signals import post_delete, post_save
from django.dispatch.dispatcher import receiver
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from filer.fields.image import FilerImageField
from mptt.models import TreeForeignKey
from mptt.managers import TreeManager
from orderedmodel import OrderedMPTTModel
class PictureCategoryManager(TreeManager):
    """Manager with helpers for fetching only *visible* category trees."""

    def get_visible(self, *args, **kwargs):
        """Return the single matching category, but only when it is shown.

        Raises the model's DoesNotExist otherwise.
        """
        category = self.filter(*args, **kwargs).get()
        if category.is_shown():
            return category
        raise self.model.DoesNotExist

    def whole_tree(self):
        """Visible descendants of every visible root (empty qs when none)."""
        roots = self.filter(parent=None, is_visible=True)
        if roots.exists():
            return reduce(lambda x, y: x | y, (root.get_visible_descendants() for root in roots))
        else:
            return roots

    def show_subtree(self, include_self=True, from_node=None, depth=None):
        """
        If from_node argument is omitted we start from roots
        """
        if from_node is not None:
            nodes = [from_node]
        else:
            nodes = self.filter(parent=None, is_visible=True)
            # BUG FIX: with no visible roots, reduce() over an empty
            # sequence raised TypeError; return the empty queryset
            # instead (mirrors the guard in whole_tree()).
            if not nodes.exists():
                return nodes
        return reduce(lambda x, y: x | y, (
            node.get_visible_descendants(include_self=include_self, depth=depth) for node in nodes))
class PictureCategory(OrderedMPTTModel):
    """A node of the picture-folder tree.

    ``is_visible`` is a denormalized flag maintained by save(): a category
    is visible when it is published and holds pictures and/or visible
    children (see check_visibility()).
    """
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
    title = models.CharField(_(u'Title'), max_length=255)
    description = models.TextField(verbose_name=_(u'Description'), blank=True, default='')
    slug = models.SlugField(unique=True, max_length=255, db_index=True)
    is_published = models.BooleanField(_(u'Published'), default=False, db_index=True)
    # Cached result of check_visibility(); kept in sync by save() and the
    # Picture/PictureCategory signal handlers below.
    is_visible = models.BooleanField(_(u'Visible'), default=False)
    objects = PictureCategoryManager()
    class Meta:
        ordering = ['tree_id', 'lft']
        verbose_name = _('Picture Category')
        verbose_name_plural = _('Picture Categories')
        permissions = (
            ('publish_permission', "Can publish picture categories"),
        )
    def __unicode__(self):
        # Python 2 style string representation (file also uses unicode()).
        return unicode(self.title)
    def has_visible_children(self):
        # True when at least one direct child computes as visible.
        return any(child.check_visibility() for child in self.children.all().iterator())
    def check_visibility(self):
        """
        Returns True when category is published and:
        - it has pictures in it, or
        - its children are visible (published with pictures or subcategories),
        - or both.
        """
        if self.is_leaf_node():
            return self.is_published and self.pictures.exists()
        else:
            return self.is_published and (self.pictures.exists() or self.has_visible_children())
    def save(self, *args, **kwargs):
        # Refresh the cached visibility flag and, when it changed,
        # propagate the recomputation up to the parent (recursively via
        # parent.save()).
        visibility = self.check_visibility()
        changed = False
        if visibility != self.is_visible:
            self.is_visible = visibility
            changed = True
        super(PictureCategory, self).save(*args, **kwargs)
        if changed and self.parent and self.parent.is_visible != self.is_visible:
            self.parent.save()
    def get_cover(self):
        # Cover picture: an explicit is_cover one if present, else the
        # picture with the lowest pk. Returns None for empty categories.
        if self.pictures.exists():
            return self.pictures.order_by('-is_cover', 'pk')[0]
    def get_visible_descendants(self, include_self=True, depth=None):
        """
        Visible descendants are ones that are visible and all their parents are visible too.
        It is important for this method to always return a QuerySet of objects.
        """
        if depth == 0:
            return PictureCategory.objects.filter(pk=self.pk)
        # Level of the shallowest *invisible* descendant -- nothing below
        # it can be shown. 100 acts as an "unbounded depth" sentinel.
        _depth = self.get_descendants().exclude(is_visible=True).aggregate(lvl=Min('level'))['lvl']
        depth, _depth = depth or 100, _depth or 100
        depth = depth + self.level if depth + self.level < _depth else _depth - 1
        return self.get_descendants(include_self=include_self).filter(is_visible=True,
                                                                      level__lte=depth)
    def is_shown(self):
        """
        If self is_visible and all its parents are visible too self is_shown() = True
        """
        return (self.is_visible and
                self.get_ancestors().count() ==
                self.get_ancestors().filter(is_visible=True).count())
@receiver(post_delete, sender=PictureCategory)
def update_visibility_on_delete(sender, instance, **kwargs):
    """
    Recompute the ancestors' visibility after a subcategory is deleted:
    the parent chain may have been visible only because of this subtree.
    """
    parent = instance.parent
    if parent:
        parent.save()
class Picture(models.Model):
    """A single gallery image belonging to a PictureCategory folder."""
    folder = models.ForeignKey(PictureCategory, related_name='pictures')
    image = FilerImageField(related_name='+')
    title = models.CharField(verbose_name=_('Title'), max_length=255, blank=True, default='')
    description = models.TextField(verbose_name=_('Description'), blank=True, default='')
    is_cover = models.BooleanField(default=False, verbose_name=_('Use as cover'))
    class Meta:
        verbose_name = _('Picture')
        verbose_name_plural = _('Pictures')
    def __unicode__(self):
        if self.title:
            return unicode(self.title)
        else:
            return "Picture {}".format(self.pk)
    def __init__(self, *args, **kwargs):
        super(Picture, self).__init__(*args, **kwargs)
        # Remember the folder the instance was loaded with so the
        # post_save signal can detect a move to another folder.
        self._update_current_folder(self)
    def _update_current_folder(self, instance):
        # Cache the current folder id on the instance (not persisted).
        instance._current_folder = self.folder_id
    def save(self, *args, **kwargs):
        # A folder assigned after __init__ leaves the cache at None;
        # refresh it before saving.
        if self._current_folder is None:
            self._update_current_folder(self)
        super(Picture, self).save(*args, **kwargs)
@receiver(post_save, sender=Picture)
def set_category_visibility_on_save(sender, instance, **kwargs):
    """Keep folder visibility in sync when a picture is saved.

    If the picture moved to another folder, re-save the previous folder so
    its visibility is recalculated; then re-save the current folder when
    it is published but not yet marked visible.
    """
    if instance._current_folder != instance.folder_id:
        PictureCategory.objects.get(pk=instance._current_folder).save()
        instance._update_current_folder(instance)
    if not instance.folder.is_visible and instance.folder.is_published:
        instance.folder.save()
@receiver(post_delete, sender=Picture)
def set_category_visibility_on_delete(sender, instance, **kwargs):
    """Re-save the owning folder so its visibility is recomputed after a
    picture is removed; ignore folders that no longer exist."""
    try:
        folder = instance.folder
        if folder.is_published:
            folder.save()
    except PictureCategory.DoesNotExist:
        pass
class MediaPlugin(CMSPlugin):
    """CMS plugin configuration: which skin renders the media gallery."""
    # Available rendering skins.
    MEDIA_SKINS = (
        ('list', _('List view')),
        ('thumbnails', _('Thumbnail view')),
    )
    template = models.CharField(choices=MEDIA_SKINS, max_length=20, default='list')
| {
"repo_name": "MagicSolutions/cmsplugin-media-center",
"path": "cmsplugin_media_center/models.py",
"copies": "1",
"size": "6618",
"license": "mit",
"hash": 3373914652532916700,
"line_mean": 37.0344827586,
"line_max": 100,
"alpha_frac": 0.6429434875,
"autogenerated": false,
"ratio": 4.055147058823529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012902190665033803,
"num_lines": 174
} |
from functools import reduce
from django.db import models
from django.db.models import Sum
import six
from . import exceptions
class QuotaLimitField(models.IntegerField):
    """ Django virtual model field.
    Could be used to manage quotas transparently as model fields.
    """

    concrete = False

    def __init__(self, quota_field=None, *args, **kwargs):
        super(QuotaLimitField, self).__init__(*args, **kwargs)
        self._quota_field = quota_field

    def db_type(self, connection):
        # virtual field -- ignore in migrations
        return None

    def contribute_to_class(self, cls, name):
        self.model = cls
        self.name = self.attname = name
        # setting column as none will tell django to not consider this a concrete field
        self.column = None
        # connect myself as the descriptor for this field
        setattr(cls, name, property(self._get_func(), self._set_func()))
        cls._meta.add_field(self, private=True)

    def deconstruct(self, *args, **kwargs):
        # BUG FIX: this used to call super(QuotaField, self) -- QuotaField
        # is an unrelated class (not in this field's MRO), so the call
        # raised TypeError whenever migrations deconstructed the field.
        name, path, args, kwargs = super(QuotaLimitField, self).deconstruct(*args, **kwargs)
        return (name, path, args, {'default': kwargs.get('default'), 'to': None})

    def _get_func(self):
        # retrieve quota limit from related object
        def func(instance, quota_field=self._quota_field):
            if instance is None:
                raise AttributeError("Can only be accessed via instance")
            try:
                return instance.quotas.get(name=quota_field).limit
            except instance.quotas.model.DoesNotExist:
                return quota_field.default_limit
        return func

    def _set_func(self):
        # store quota limit as related object
        def func(instance, value, quota_field=self._quota_field):
            # a hook to properly init quota after object saved to DB
            quota_field.scope_default_limit(instance, value)
            instance.set_quota_limit(quota_field, value, fail_silently=True)
        return func
class FieldsContainerMeta(type):
    """ Assigns every QuotaField attribute its declaration name.

    Quota fields should live in a class created with this metaclass.
    Example:
        example_quota = QuotaField()  # this quota field will have name 'example_quota'
    """
    def __new__(mcs, name, bases, attrs):
        for key, value in attrs.items():
            if isinstance(value, QuotaField):
                value.name = key
        return type.__new__(mcs, name, bases, attrs)
class QuotaField(object):
    """ Base quota field.
    Links quota to its scope right after its creation.
    Allows to define:
     - default_limit
     - default_usage
     - is_backend - whether the quota represents a backend limitation
       (backend quotas cannot be modified).
     - creation_condition - function that receives the quota scope and
       returns True if the quota should be created for that scope; the
       quota is created unconditionally when creation_condition is None.
    Default limit and usage can be defined as callable function.
    Example:
        quota_name = QuotaField(default_limit=lambda scope: scope.attr)
    """
    def __init__(self, default_limit=-1, default_usage=0, is_backend=False, creation_condition=None):
        self.default_limit = default_limit
        self.default_usage = default_usage
        self.is_backend = is_backend
        self.creation_condition = creation_condition
    def is_connected_to_scope(self, scope):
        # Without an explicit condition the quota applies to every scope.
        if self.creation_condition is None:
            return True
        return self.creation_condition(scope)
    def scope_default_limit(self, scope, value=None):
        # Per-scope limit override cached on the scope instance itself;
        # falls back to the (possibly callable) field default.
        attr_name = '_default_quota_limit_%s' % self.name
        if value is not None:
            setattr(scope, attr_name, value)
        try:
            return getattr(scope, attr_name)
        except AttributeError:
            return self.default_limit(scope) if six.callable(self.default_limit) else self.default_limit
    def get_or_create_quota(self, scope):
        # Materialize the quota row for the scope; defaults are resolved
        # lazily (callables receive the scope).
        if not self.is_connected_to_scope(scope):
            raise exceptions.CreationConditionFailedQuotaError(
                'Wrong scope: Cannot create quota "%s" for scope "%s".' % (self.name, scope))
        defaults = {
            'limit': self.scope_default_limit(scope),
            'usage': self.default_usage(scope) if six.callable(self.default_usage) else self.default_usage,
        }
        return scope.quotas.get_or_create(name=self.name, defaults=defaults)
    def get_aggregator_quotas(self, quota):
        """ Fetch ancestors quotas that have the same name and are registered as aggregator quotas. """
        ancestors = quota.scope.get_quota_ancestors()
        aggregator_quotas = []
        for ancestor in ancestors:
            for ancestor_quota_field in ancestor.get_quotas_fields(field_class=AggregatorQuotaField):
                if ancestor_quota_field.get_child_quota_name() == quota.name:
                    aggregator_quotas.append(ancestor.quotas.get(name=ancestor_quota_field))
        return aggregator_quotas
    def __str__(self):
        return self.name
    def recalculate(self, scope):
        # Skip scopes this quota is not connected to.
        if not self.is_connected_to_scope(scope):
            return
        self.recalculate_usage(scope)
    def recalculate_usage(self, scope):
        # Hook for subclasses; the base quota has nothing to recompute.
        pass
class CounterQuotaField(QuotaField):
    """ Limits the number of instances of the given target models.

    Usage is increased/decreased automatically when target instances are
    created or deleted.  The default per-instance delta of 1 can be tuned
    with a ``get_delta`` callable receiving the target instance.

    Example:
        # This quota will increase/decrease own values on any resource creation/deletion
        nc_resource_count = CounterQuotaField(
            target_models=lambda: Resource.get_all_models(),  # list or function that return list of target models
            path_to_scope='service_project_link.project',  # path from target model to scope
        )

    For trickier calculations pass a ``get_current_usage`` callable taking
    two parameters, (models, scope), and returning the current usage count.
    """

    def __init__(self, target_models, path_to_scope, get_current_usage=None, get_delta=None, **kwargs):
        self._raw_target_models = target_models
        self._raw_get_current_usage = get_current_usage
        self._raw_get_delta = get_delta
        self.path_to_scope = path_to_scope
        super(CounterQuotaField, self).__init__(**kwargs)

    def get_delta(self, target_instance):
        # Default delta is one instance.
        return self._raw_get_delta(target_instance) if self._raw_get_delta else 1

    def get_current_usage(self, models, scope):
        if self._raw_get_current_usage is not None:
            return self._raw_get_current_usage(models, scope)
        # Count matching instances of every target model in the scope.
        lookup = self.path_to_scope.replace('.', '__')
        counts = [m.objects.filter(**{lookup: scope}).count() for m in models]
        return sum(counts)

    @property
    def target_models(self):
        # Resolve (and memoize) the lazily supplied model list.
        if not hasattr(self, '_target_models'):
            raw = self._raw_target_models
            self._target_models = raw() if six.callable(raw) else raw
        return self._target_models

    def recalculate_usage(self, scope):
        scope.set_quota_usage(self.name, self.get_current_usage(self.target_models, scope))

    def add_usage(self, target_instance, delta, fail_silently=False):
        scope = self._get_scope(target_instance)
        delta *= self.get_delta(target_instance)
        if self.is_connected_to_scope(scope):
            scope.add_quota_usage(self.name, delta, fail_silently=fail_silently, validate=True)

    def _get_scope(self, target_instance):
        # Walk the attribute chain, e.g. 'a.b.c' -> target_instance.a.b.c
        scope = target_instance
        for attr in self.path_to_scope.split('.'):
            scope = getattr(scope, attr)
        return scope
class TotalQuotaField(CounterQuotaField):
    """
    Sums the value of one field over all child objects.

    For example, it allows to compute total volume size for the project.

    class Quotas(quotas_models.QuotaModelMixin.Quotas):
        nc_volume_size = quotas_fields.TotalQuotaField(
            target_models=lambda: Volume.get_all_models(),
            path_to_scope='project',
            target_field='size',
        )
    """

    def __init__(self, target_models, path_to_scope, target_field):
        self.target_field = target_field
        super(TotalQuotaField, self).__init__(target_models, path_to_scope)

    def get_current_usage(self, models, scope):
        # Aggregate the tracked field over every target model in the scope.
        lookup = {self.path_to_scope.replace('.', '__'): scope}
        total = 0
        for model in models:
            rows = model.objects.filter(**lookup)
            subtotal = rows.values(self.target_field).aggregate(
                total_usage=Sum(self.target_field))['total_usage']
            if subtotal:
                total += subtotal
        return total

    def get_delta(self, target_instance):
        # Delta equals the tracked field's value on the instance.
        return getattr(target_instance, self.target_field)
class AggregatorQuotaField(QuotaField):
    """ Aggregates sum of quota scope children with the same name.
    Automatically increases/decreases usage if corresponding child quota <aggregation_field> changed.
    Example:
        # This quota will store sum of all customer projects resources
        nc_resource_count = quotas_fields.UsageAggregatorQuotaField(
            get_children=lambda customer: customer.projects.all(),
        )
    """
    # Name of the child quota attribute to sum ('usage' or 'limit');
    # subclasses must override this.
    aggregation_field = NotImplemented
    def __init__(self, get_children, child_quota_name=None, **kwargs):
        # get_children: callable(scope) -> iterable of child scopes.
        self.get_children = get_children
        self._child_quota_name = child_quota_name
        super(AggregatorQuotaField, self).__init__(**kwargs)
    def get_child_quota_name(self):
        # Child quota name defaults to this field's own name.
        return self._child_quota_name if self._child_quota_name is not None else self.name
    def recalculate_usage(self, scope):
        # Full recompute: sum the aggregation_field of every child's quota.
        children = self.get_children(scope)
        current_usage = 0
        for child in children:
            child_quota = child.quotas.get(name=self.get_child_quota_name())
            current_usage += getattr(child_quota, self.aggregation_field)
        scope.set_quota_usage(self.name, current_usage)
    def post_child_quota_save(self, scope, child_quota, created=False):
        # Incremental update with the child's delta.
        # NOTE(review): this read-modify-write (get -> += -> save) is not
        # atomic; concurrent child saves can lose updates -- consider an
        # F() expression update.
        quota = scope.quotas.get(name=self.name)
        current_value = getattr(child_quota, self.aggregation_field)
        if created:
            diff = current_value
        else:
            # NOTE(review): child_quota.tracker is presumably a field
            # tracker recording the pre-save value -- confirm.
            diff = current_value - child_quota.tracker.previous(self.aggregation_field)
        if diff:
            quota.usage += diff
            quota.save()
    def pre_child_quota_delete(self, scope, child_quota):
        # Subtract the child's contribution before it disappears.
        quota = scope.quotas.get(name=self.name)
        diff = getattr(child_quota, self.aggregation_field)
        if diff:
            quota.usage -= diff
            quota.save()
class UsageAggregatorQuotaField(AggregatorQuotaField):
    """ Aggregates sum children quotas usages.
    Note! It is impossible to aggregate usage of another usage aggregator quotas.
    This restriction was added to avoid calls duplications on quota usage field update.
    """
    # Sum the children's ``usage`` values.
    aggregation_field = 'usage'
class LimitAggregatorQuotaField(AggregatorQuotaField):
    """ Aggregates sum children quotas limits. """
    # Sum the children's ``limit`` values.
    aggregation_field = 'limit'
# TODO: Implement GlobalQuotaField and GlobalCounterQuotaField
| {
"repo_name": "opennode/nodeconductor",
"path": "waldur_core/quotas/fields.py",
"copies": "1",
"size": "11711",
"license": "mit",
"hash": -1403234775563532500,
"line_mean": 37.9069767442,
"line_max": 114,
"alpha_frac": 0.6481086158,
"autogenerated": false,
"ratio": 4.176533523537803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010743738400301756,
"num_lines": 301
} |
from functools import reduce
from django.db import models
from crm.models import Company
from products.models import Product
INVOICE_TYPES = (
('ci', 'Client'),
('si', 'Supplier'),
)
INVOICE_STATES = (
('draft', 'Draft'),
('open', 'Open'),
('paid', 'Paid'),
)
class Invoice(models.Model):
    """An accounting document issued to or received from a company."""

    date = models.DateField(auto_now_add=True)
    invoice_type = models.CharField(max_length=2, choices=INVOICE_TYPES)
    related_party = models.ForeignKey(Company, related_name='invoices', on_delete=models.CASCADE)
    state = models.CharField(max_length=5, choices=INVOICE_STATES)

    @property
    def sign(self):
        # Supplier invoices are displayed as negative amounts.
        return '-' if self.invoice_type == 'si' else ''

    @property
    def total(self):
        # Sum of all line totals (0 for an invoice without lines).
        return sum(line.total for line in self.lines.all())

    def __str__(self):
        return 'Invoice #{} - {} ({}{})'.format(self.id, self.related_party, self.sign, self.total)
class InvoiceLine(models.Model):
    """A single product line on an invoice."""

    invoice = models.ForeignKey(Invoice, related_name='lines', on_delete=models.CASCADE)
    product = models.ForeignKey(Product, related_name='+', on_delete=models.CASCADE)
    quantity = models.PositiveIntegerField()
    position = models.PositiveSmallIntegerField(default=0)

    @property
    def total(self):
        # Line total = unit price times quantity.
        return self.product.price * self.quantity

    def __str__(self):
        return '{} X {}'.format(self.quantity, self.product.name)
| {
"repo_name": "drf-forms/ember_sample",
"path": "back/accounting/models.py",
"copies": "1",
"size": "1466",
"license": "mit",
"hash": -6216469767982337000,
"line_mean": 24.7192982456,
"line_max": 99,
"alpha_frac": 0.636425648,
"autogenerated": false,
"ratio": 3.720812182741117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9853447941041891,
"avg_score": 0.0007579779398453645,
"num_lines": 57
} |
from functools import reduce
from django.db.models import Q
from django.http import Http404
from django.shortcuts import render
from django.db import models
# Create your views here.
from django.http import Http404
from dcal.models import DimCalendar
from django.views.generic import TemplateView, ListView
# Create your views here.
class SQLIndexView(TemplateView):
    """
    Index of countries and SQL dialects
    """
    template_name = "buildsql/index.html"

    def get_context_data(self, **kwargs):
        # FIX: use this class in super() (the old super(TemplateView, ...)
        # skipped a level of the MRO) and reuse one DimCalendar instance
        # instead of instantiating it three times.
        context = super(SQLIndexView, self).get_context_data(**kwargs)
        dc = DimCalendar()
        context['calcs'] = dc.calcs
        context['countries'] = dc.countries
        return context
class SQLTableView(TemplateView):
    """
    Tables & indexes
    """
    template_name = "buildsql/table.sql92.sql"

    # Dialect -> template; SQL92 (the class default) is the fallback.
    DIALECT_TEMPLATES = {
        'MYSQL': "buildsql/table.mysql.sql",
        'TSQL': "buildsql/table.tsql.sql",
        'PLPGSQL': "buildsql/table.plpgsql.sql",
    }

    def get_context_data(self, **kwargs):
        # FIX: proper super() target; if-chain replaced by a dispatch dict.
        context = super(SQLTableView, self).get_context_data(**kwargs)
        dc = DimCalendar()
        context['calcs'] = dc.calcs
        context['countries'] = dc.countries
        dialect = (context['dialect'] or 'SQL92').upper()
        self.template_name = self.DIALECT_TEMPLATES.get(dialect, self.template_name)
        return context
class SQLCommonView(TemplateView):
    """
    Insert keys
    """
    template_name = "buildsql/common.sql92.sql"

    # Dialect -> template; SQL92 (the class default) is the fallback.
    DIALECT_TEMPLATES = {
        'MYSQL': "buildsql/common.mysql.sql",
        'TSQL': "buildsql/common.tsql.sql",
        'PLPGSQL': "buildsql/common.plpgsql.sql",
    }

    def get_context_data(self, **kwargs):
        # FIX: proper super() target; if-chain replaced by a dispatch dict.
        context = super(SQLCommonView, self).get_context_data(**kwargs)
        dc = DimCalendar()
        context['calcs'] = dc.calcs
        context['countries'] = dc.countries
        dialect = (context['dialect'] or 'SQL92').upper()
        self.template_name = self.DIALECT_TEMPLATES.get(dialect, self.template_name)
        return context
class SQLCalculationsView(TemplateView):
    """
    SQL Updates for calc_ columns
    """
    template_name = "buildsql/calc.sql92.sql"

    # Dialect -> template; SQL92 (the class default) is the fallback.
    DIALECT_TEMPLATES = {
        'MYSQL': "buildsql/calc.mysql.sql",
        'TSQL': "buildsql/calc.tsql.sql",
        'PLPGSQL': "buildsql/calc.plpgsql.sql",
    }

    def get_queryset(self):
        """Rows with at least one calc_* flag set, ordered by date.

        Raises Http404 when nothing matches.
        """
        dc = DimCalendar()
        # Column names come from DimCalendar.calcs (internal, not user
        # input), so interpolating them into raw SQL is safe here.
        # ' OR '.join replaces the reduce()-based string concatenation.
        predicate = ' OR '.join('calc_{0} = True'.format(cc) for cc in dc.calcs)
        queryset = DimCalendar.objects.raw(
            "SELECT * FROM dim_calendar WHERE " + predicate +
            " ORDER BY calendar_date")
        if list(queryset):
            return queryset
        raise Http404('Oops, no data found')

    def get_context_data(self, **kwargs):
        # FIX: proper super() target; if-chain replaced by a dispatch dict.
        context = super(SQLCalculationsView, self).get_context_data(**kwargs)
        dc = DimCalendar()
        context['calcs'] = dc.calcs
        # bucket the Dim_Calendars by calculation column
        calculations_dict = {}
        for row in self.get_queryset():
            for cc in ('calc_' + short_name for short_name in row.calcs):
                if getattr(row, cc):  # Is this calc column set on this calendar_date?
                    calculations_dict.setdefault(cc, []).append(row.calendar_date)
        context['calculations_dict'] = calculations_dict
        dialect = (context['dialect'] or 'SQL92').upper()
        self.template_name = self.DIALECT_TEMPLATES.get(dialect, self.template_name)
        return context
class SQLDataView(TemplateView):
    """
    SQL Updates for a dim_calendar country
    """
    template_name = "buildsql/data.sql92.sql"

    # Dialect -> template; SQL92 (the class default) is the fallback.
    DIALECT_TEMPLATES = {
        'MYSQL': "buildsql/data.mysql.sql",
        'TSQL': "buildsql/data.tsql.sql",
        'PLPGSQL': "buildsql/data.plpgsql.sql",
    }

    def get_queryset(self):
        """Calendar rows flagged for the requested country, by date.

        DimCalendar only has attributes for populated countries; unknown
        ones raise Http404.
        """
        column = 'hol_' + self.kwargs['country'].lower()
        if not hasattr(DimCalendar, column):
            raise Http404('Oops, no data found')
        return DimCalendar.objects.filter(
            models.Q(**{column: True})).order_by('calendar_date')

    def get_context_data(self, **kwargs):
        # FIX: proper super() target; if-chain replaced by a dispatch dict.
        context = super(SQLDataView, self).get_context_data(**kwargs)
        context['country_col'] = 'hol_' + self.kwargs['country'].lower()
        # bucket the Dim_Calendars by year
        year_dict = {}
        for row in self.get_queryset():
            year_dict.setdefault(row.calendar_date.year, []).append(row)
        context['year_dict'] = year_dict
        dialect = (context['dialect'] or 'SQL92').upper()
        self.template_name = self.DIALECT_TEMPLATES.get(dialect, self.template_name)
        return context
| {
"repo_name": "gregn610/dimcal",
"path": "src/dimcal/buildsql/views.py",
"copies": "1",
"size": "5155",
"license": "unlicense",
"hash": -8593790262031897000,
"line_mean": 31.4213836478,
"line_max": 100,
"alpha_frac": 0.6031037827,
"autogenerated": false,
"ratio": 3.8759398496240602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49790436323240606,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from django.db.models import Q
__all__ = [
'Condition',
'AndCondition', 'OrCondition',
'LookupCondition',
]
class Condition:
    """Base class for query conditions."""

    def __init__(self, query, *args, **kwargs):
        self.query = query

    def annotate(self, queryset):
        """
        Hook for conditions (or filters) that need to annotate the queryset.

        Called before ``filter`` so that every annotation is in place when
        filters are added.

        NOTE:
            This method is at the moment just a workaround for Django's
            missing support for filtering on 'Exists'-querys; it will
            probably be removed when support for that is added.

        :param queryset: The queryset to annotate
        :returns: A queryset with any required annotations added
        """
        return queryset

    def filter(self, queryset):
        """
        Add this condition and return a new queryset

        :param queryset: The queryset to filter
        """
        annotated = self.annotate(queryset)
        return annotated.filter(self.get_filter())
class AndCondition(Condition):
    """Condition satisfied only when all of its sub-conditions match."""

    kind = 'and'

    def __init__(self, *args, conditions=None, **kwargs):
        super().__init__(*args, **kwargs)
        assert isinstance(conditions, list) and len(conditions) > 0, \
            'Conditions must be a list with at least one element'
        self.conditions = [
            self.query.resolve_condition(raw) for raw in conditions
        ]

    def is_valid(self):
        """Validate that every sub-condition holds valid data."""
        validities = [cond.is_valid() for cond in self.conditions]
        return reduce(lambda lhs, rhs: lhs and rhs, validities)

    def annotate(self, queryset):
        """Give each sub-condition a chance to annotate the queryset."""
        for cond in self.conditions:
            queryset = cond.annotate(queryset)
        return queryset

    def get_filter(self):
        """Combine the sub-filters with logical AND."""
        filters = [cond.get_filter() for cond in self.conditions]
        return reduce(lambda lhs, rhs: Q(lhs & rhs), filters)
class OrCondition(Condition):
    """Condition satisfied when at least one of its sub-conditions matches."""

    kind = 'or'

    def __init__(self, *args, conditions=None, **kwargs):
        super().__init__(*args, **kwargs)
        assert isinstance(conditions, list) and len(conditions) > 0, \
            'Conditions must be a list with at least one element'
        self.conditions = [
            self.query.resolve_condition(raw) for raw in conditions
        ]

    def is_valid(self):
        """Validate that every sub-condition holds valid data."""
        validities = [cond.is_valid() for cond in self.conditions]
        return reduce(lambda lhs, rhs: lhs and rhs, validities)

    def annotate(self, queryset):
        """Give each sub-condition a chance to annotate the queryset."""
        for cond in self.conditions:
            queryset = cond.annotate(queryset)
        return queryset

    def get_filter(self):
        """Combine the sub-filters with logical OR."""
        filters = [cond.get_filter() for cond in self.conditions]
        return reduce(lambda lhs, rhs: Q(lhs | rhs), filters)
class LookupCondition(Condition):
    """Condition expressed as a single field lookup: field / lookup / value."""

    kind = 'lookup'

    def __init__(self, *args, field=None, lookup=None, value=None, **kwargs):
        super().__init__(*args, **kwargs)
        assert isinstance(field, str), \
            'field must be specified'
        assert isinstance(lookup, str), \
            'lookup must be specified'
        # Resolve the field name to the field instance on the query object.
        self.field = getattr(self.query, field)
        self.lookup = lookup
        self.value = value

    def is_valid(self):
        """
        Validate that the provided data is valid.

        Returns False when the field rejects the value for this lookup.
        """
        try:
            self.field.validate(self.value, self.lookup)
        except Exception:
            # BUG FIX: the original bare ``except:`` also swallowed
            # SystemExit and KeyboardInterrupt; only treat genuine
            # validation errors as "invalid".
            return False
        return True

    def get_filter(self):
        """Build the Django Q object for this lookup."""
        field = '%s__%s' % (self.field.model_name, self.lookup)
        return Q(**{field: self.field.prepare(self.value, self.lookup)})
| {
"repo_name": "mkonline/django-json-queries",
"path": "django_json_queries/conditions.py",
"copies": "1",
"size": "3973",
"license": "mit",
"hash": 270628688784390370,
"line_mean": 27.1773049645,
"line_max": 79,
"alpha_frac": 0.59023408,
"autogenerated": false,
"ratio": 4.304442036836403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5394676116836403,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext
from pyston.exception import RESTException
from pyston.utils import rfs, LOOKUP_SEP
from pyston.utils.helpers import get_field_or_none, get_method_or_none
from pyston.serializer import get_resource_or_none
from .exceptions import OrderIdentifierError
from .parsers import DefaultOrderParser, OrderParserError
from .utils import DIRECTION
from .sorters import DefaultSorter
def get_allowed_order_fields_rfs_from_model(model):
    # Union of the model's extra order fields and its regular order fields,
    # both declared on the model's ``_rest_meta``.
    return rfs(model._rest_meta.extra_order_fields).join(rfs(model._rest_meta.order_fields))
class ModelOrderManager:
    """
    Order manager is used inside a model resource to order the response queryset
    according to input values.
    This is an abstract class that provides methods to obtain concrete sorters
    from resource and model methods and fields.
    """
    def _get_real_field_name(self, resource, field_name):
        # A resource may expose renamed fields to the API; translate back to
        # the real model field name.
        return resource.renamed_fields.get(field_name, field_name) if resource else field_name
    def _get_sorter_from_method(self, method, identifiers_prefix, identifiers, direction, model, resource, request,
                                order_fields_rfs):
        """
        :param method: method from which we can get order string.
        :param identifiers_prefix: because order strings are recursive, if model relations are followed this
               contains the list of identifiers already consumed while recursing.
        :param identifiers: list of identifiers that conclusively identifies the order string.
        :param direction: direction of ordering ASC or DESC.
        :param model: django model class.
        :param resource: resource object.
        :param request: django HTTP request.
        :param order_fields_rfs: RFS of fields that are allowed for ordering.
        :return: sorter that is obtained from the method.
        """
        if hasattr(method, 'order_by'):
            # If method has order_by attribute the order string is searched according to this value.
            order_identifiers = method.order_by.split(LOOKUP_SEP)
            # Because method must be inside allowed order fields RFS, we must add the method's order_by value
            # to the next RFS.
            next_order_fields_rfs = rfs(order_identifiers)
            return self._get_sorter_recursive(
                identifiers_prefix, order_identifiers, direction, model, resource, request, next_order_fields_rfs
            )
        if not identifiers_prefix and hasattr(method, 'sorter'):
            # A custom sorter on the method is only honoured at the top level
            # (i.e. when there is no identifiers prefix).
            return method.sorter(identifiers_prefix + identifiers, direction)
        raise OrderIdentifierError
    def _get_sorter_from_resource(self, identifiers_prefix, identifiers, direction, model, resource, request,
                                  order_fields_rfs):
        """
        :param identifiers_prefix: because order strings are recursive, if model relations are followed this
               contains the list of identifiers already consumed while recursing.
        :param identifiers: list of identifiers that conclusively identifies the order string.
        :param direction: direction of ordering ASC or DESC.
        :param model: django model class.
        :param resource: resource object.
        :param request: django HTTP request.
        :param order_fields_rfs: RFS of fields that are allowed for ordering.
        :return: sorter that is obtained from the resource object, or None when
                 the resource provides no matching method.
        """
        full_identifiers_string = self._get_real_field_name(resource, LOOKUP_SEP.join(identifiers))
        resource_method = resource.get_method_returning_field_value(full_identifiers_string) if resource else None
        if full_identifiers_string in order_fields_rfs and resource_method:
            return self._get_sorter_from_method(resource_method, identifiers_prefix, identifiers, direction, model,
                                                resource, request, order_fields_rfs)
    def _get_sorter_from_model(self, identifiers_prefix, identifiers, direction, model, resource, request,
                               order_fields_rfs):
        """
        :param identifiers_prefix: because order strings are recursive, if model relations are followed this
               contains the list of identifiers already consumed while recursing.
        :param identifiers: list of identifiers that conclusively identifies the order string.
        :param direction: direction of ordering ASC or DESC.
        :param model: django model class.
        :param resource: resource object.
        :param request: django HTTP request.
        :param order_fields_rfs: RFS of fields that are allowed for ordering.
        :return: sorter that is obtained from model fields or methods, or None.
        """
        current_identifier = self._get_real_field_name(resource, identifiers[0])
        identifiers_suffix = identifiers[1:]
        if current_identifier not in order_fields_rfs:
            raise OrderIdentifierError
        model_field = get_field_or_none(model, current_identifier)
        model_method = get_method_or_none(model, current_identifier)
        # Terminal concrete field (or a relation ordered as a whole).
        if model_field and not identifiers_suffix and (not model_field.is_relation or model_field.related_model):
            return DefaultSorter(identifiers_prefix + identifiers, direction)
        elif model_field and model_field.is_relation and model_field.related_model:
            # Relation: recurse into the related model with the remaining identifiers.
            next_model = model_field.related_model
            # NOTE(review): getattr has no default here; looks like this can
            # raise AttributeError when resource is None -- confirm.
            next_resource = get_resource_or_none(request, next_model, getattr(resource, 'resource_typemapper'))
            return self._get_sorter_recursive(
                identifiers_prefix + [identifiers[0]], identifiers[1:], direction,
                next_model, next_resource, request, order_fields_rfs[current_identifier].subfieldset
            )
        elif model_method and not identifiers_suffix:
            return self._get_sorter_from_method(
                model_method, identifiers_prefix, identifiers, direction, model, resource, request, order_fields_rfs
            )
    def _get_sorter_recursive(self, identifiers_prefix, identifiers, direction, model, resource, request,
                              extra_order_fields_rfs=None):
        """
        :param identifiers_prefix: because order strings are recursive, if model relations are followed this
               contains the list of identifiers already consumed while recursing.
        :param identifiers: list of identifiers that conclusively identifies the order string.
        :param direction: direction of ordering ASC or DESC.
        :param model: django model class.
        :param resource: resource object.
        :param request: django HTTP request.
        :param extra_order_fields_rfs: extra RFS of fields that are allowed for ordering.
        :return: recursively searches for the sorter via the
                 ``_get_sorter_from_resource`` and ``_get_sorter_from_model`` getters.
        """
        extra_order_fields_rfs = rfs() if extra_order_fields_rfs is None else extra_order_fields_rfs
        order_fields_rfs = (
            extra_order_fields_rfs.join(
                resource.get_order_fields_rfs() if resource else get_allowed_order_fields_rfs_from_model(model)
            )
        )
        # Resource-provided sorters take precedence over model-derived ones.
        order_string = (
            self._get_sorter_from_resource(
                identifiers_prefix, identifiers, direction, model, resource, request, order_fields_rfs) or
            self._get_sorter_from_model(
                identifiers_prefix, identifiers, direction, model, resource, request, order_fields_rfs)
        )
        if not order_string:
            raise OrderIdentifierError
        return order_string
    def get_sorter(self, identifiers, direction, resource, request):
        """
        :param identifiers: list of identifiers that conclusively identifies an order string.
        :param direction: direction of ordering ASC or DESC.
        :param resource: resource object.
        :param request: django HTTP request.
        :return: sorter resolved from the input identifiers, resource and request.
        """
        return self._get_sorter_recursive([], identifiers, direction, resource.model, resource, request)
class ParserModelOrderManager(ModelOrderManager):
    """
    Manager that uses a parser to turn the input order data into a list of sorters.
    """
    # Concrete subclasses must provide a parser with a ``parse(request)`` method.
    parser = None
    def _get_sorters(self, parsed_order_terms, resource, request):
        """
        Converts parsed order terms to sorter instances.
        """
        sorters = []
        for ordering_term in parsed_order_terms:
            try:
                sorters.append(self.get_sorter(ordering_term.identifiers, ordering_term.direction, resource, request))
            except OrderIdentifierError:
                # Surface an unknown/forbidden identifier as a REST error.
                raise RESTException(
                    mark_safe(ugettext('Invalid identifier of ordering "{}"').format(ordering_term.source))
                )
        return sorters
    def _convert_order_terms(self, sorters):
        """
        Converts sorters to django query order strings.
        """
        return [sorter.get_order_term() for sorter in sorters]
    def _update_queryset(self, qs, sorters):
        """
        Updates the queryset via each sorter that supports it (used for
        annotations that must be added before ordering).
        """
        return reduce(
            lambda qs, sorter: sorter.update_queryset(qs) if hasattr(sorter, 'update_queryset') else qs, sorters, qs
        )
    def sort(self, resource, qs, request):
        # Parse the request, resolve sorters, annotate, then order.
        try:
            parsed_order_terms = self.parser.parse(request)
            sorters = self._get_sorters(parsed_order_terms or (), resource, request)
            qs = self._update_queryset(qs, sorters)
            return qs.order_by(*self._convert_order_terms(sorters)) if sorters else qs
        except OrderParserError as ex:
            raise RESTException(ex)
class DefaultModelOrderManager(ParserModelOrderManager):
    """
    Default order manager.
    Reads ordering terms from the request via the default order parser.
    """
    parser = DefaultOrderParser()
| {
"repo_name": "druids/django-pyston",
"path": "pyston/order/managers.py",
"copies": "1",
"size": "10096",
"license": "bsd-3-clause",
"hash": -3902591484074683000,
"line_mean": 48.0097087379,
"line_max": 118,
"alpha_frac": 0.6677892235,
"autogenerated": false,
"ratio": 4.527354260089686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002438904690989841,
"num_lines": 206
} |
from functools import reduce
from domain.gameboardrelativepositionstateneuron import GameBoardRelativePositionStateNeuron
from domain.direction import Direction
from domain.neuron import Neuron
from domain.position import Position
from domain.sigmoidneuron import SigmoidNeuron
from domain.synapse import Synapse
class SnakeNeuralNet:
    """Class to represent the neural network of a given genetic encoding. Parts of the network require access to the
    game board. To simulate the snake's perspective, this class requires knowledge of the gamesnake's orientation
    and position. This class used to require the numeric position of the mouse, but that information has since been
    encoded into the game board."""
    def __init__(self, encoding, game_board, gamesnake, mouse):
        # Encoding is a 256*130*1 + 130*3 list of synapse values:
        # 256 inputs -> 130 hidden neurons, then 130 hidden -> 3 outputs.
        self.input = [GameBoardRelativePositionStateNeuron] * 256
        self.hidden = [Neuron] * 130
        self.output = [Neuron] * 3
        self.gamesnake = gamesnake
        # Create input neurons: one per cell of the 16x16 window of relative
        # coordinates (-8..7 in both axes) -- presumably relative to the
        # snake's head; verify against GameBoardRelativePositionStateNeuron.
        i = 0
        for y in range(-8, 8):
            for x in range(-8, 8):
                self.input[i] = GameBoardRelativePositionStateNeuron(game_board, Position(x, y), gamesnake)
                i += 1
        # Hidden neurons: each fully connected to all 256 input neurons.
        for j in range(130):
            synapses = [0] * 256
            for k in range(256):
                synapses[k] = Synapse(self.input[k], encoding[j * 256 + k])
            self.hidden[j] = SigmoidNeuron(synapses)
        # Output neurons: each fully connected to all 130 hidden neurons;
        # their weights start after the 130*256 input->hidden weights.
        for j in range(3):
            synapses = [0] * 130
            for k in range(130):
                synapses[k] = Synapse(self.hidden[k], encoding[130 * 256 + j * 130 + k])
            self.output[j] = SigmoidNeuron(synapses)
    def evaluate(self):
        # Evaluate the 256 input neurons in the snake's frame of reference:
        # the calculate_result_* variant chosen depends on the heading.
        if self.gamesnake.direction == Direction.right:
            for j in range(256):  # 256 input neurons
                self.input[j].calculate_result()
        elif self.gamesnake.direction == Direction.left:
            for j in range(256):  # 256 input neurons
                self.input[j].calculate_result_left()
        elif self.gamesnake.direction == Direction.up:
            for j in range(256):  # 256 input neurons
                self.input[j].calculate_result_up()
        else:
            for j in range(256):  # 256 input neurons
                self.input[j].calculate_result_down()
        for j in range(130):  # 130 hidden neurons
            self.hidden[j].calculate_result()
        # Output neurons
        for j in range(3):  # 3 output neurons
            self.output[j].calculate_result()
        # The three outputs correspond to relative turns; pick the neuron
        # with the strongest activation (ties keep the earlier entry).
        final_values = [(Direction.up, self.output[0]),
                        (Direction.right, self.output[1]),
                        (Direction.down, self.output[2])]
        choice = reduce(lambda x, y: x if x[1].result > y[1].result else y, final_values)
        # Combine the relative choice with the absolute heading, wrapping
        # mod 4 (assumes Direction values are integers 0-3 -- TODO confirm).
        return (self.gamesnake.direction + choice[0]) % 4
| {
"repo_name": "hesslink111/neuralnetworknibbles",
"path": "domain/snakeneuralnet.py",
"copies": "1",
"size": "2950",
"license": "mit",
"hash": -3494377340332166000,
"line_mean": 39.9722222222,
"line_max": 119,
"alpha_frac": 0.6071186441,
"autogenerated": false,
"ratio": 3.975741239892183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009404712743072426,
"num_lines": 72
} |
from functools import reduce
from ..function import Function
class _DimReduceFunction(Function):
    """Base for reductions (sum/mean/...) over all elements or one dimension.

    Subclasses set ``fn_name`` to the tensor method to invoke.
    """

    def __init__(self, dim=None):
        super(_DimReduceFunction, self).__init__()
        # dim is None => reduce every element down to a 1-element tensor.
        self.dim = dim

    def forward(self, input):
        self.input_size = input.size()
        reduce_fn = getattr(input, self.fn_name)
        if self.dim is not None:
            return reduce_fn(self.dim)
        # Full reduction: wrap the scalar result in a new 1-element tensor.
        return input.new((reduce_fn(),))
class Sum(_DimReduceFunction):
    """Sum reduction; the gradient broadcasts grad_output over the input."""

    fn_name = 'sum'

    def backward(self, grad_output):
        if self.dim is None:
            # Every input element contributed once: fill with the scalar grad.
            return grad_output.new(self.input_size).fill_(grad_output[0])
        # Repeat grad_output along the reduced dimension only.
        repeats = [1] * len(self.input_size)
        repeats[self.dim] = self.input_size[self.dim]
        return grad_output.repeat(*repeats),
class Prod(_DimReduceFunction):
    # Product reduction.  The gradient of prod w.r.t. x_i is prod / x_i,
    # which breaks down when x_i == 0; zeros are handled explicitly below.
    def forward(self, input):
        self.input_size = input.size()
        if self.dim is None:
            self.result = input.prod()
            self.save_for_backward(input)
            return input.new((self.result,))
        else:
            output = input.prod(self.dim)
            self.save_for_backward(input, output)
            return output
    def backward(self, grad_output):
        if self.dim is None:
            input, = self.saved_tensors
            zero_idx = (input == 0).nonzero()
            if zero_idx.dim() == 0:
                # No zeros: safe to divide the total product by each element.
                return grad_output.mul(self.result).expand_as(input).div(input)
            elif zero_idx.size(0) > 1:
                # Two or more zeros: every partial product is zero.
                return grad_output.new(self.input_size).zero_()
            else:
                # Exactly one zero: only that position gets a non-zero
                # gradient, equal to the product of all other elements.
                grad_input = grad_output.new(self.input_size).zero_()
                zero_idx = tuple(zero_idx[0].cpu())
                input_copy = input.clone()
                input_copy[zero_idx] = 1.
                grad_input[zero_idx] = grad_output[0] * input_copy.prod()
                return grad_input
        else:
            input, output = self.saved_tensors
            zero_mask = input == 0
            slice_zero_count = zero_mask.sum(self.dim)
            total_zeros = slice_zero_count.sum()
            # Start from the no-zero formula; fix up zero-containing slices.
            grad_input = grad_output.mul(output).expand_as(input).div(input)
            if total_zeros == 0:
                return grad_input
            # Slices with any zero get gradient zero ...
            some_zeros = slice_zero_count.gt(0).expand_as(grad_input)
            grad_input[some_zeros] = 0
            # ... except slices with exactly one zero, where the zero's
            # position receives the product of the remaining elements.
            single_zero_idx = slice_zero_count.eq(1).nonzero()
            for idx in single_zero_idx:
                idx_tuple = tuple(idx.cpu())
                input_idx_tuple = idx_tuple[:self.dim] + (slice(0, None),) + idx_tuple[self.dim + 1:]
                # slice_mask and input_copy are 1D
                slice_mask = zero_mask[input_idx_tuple]
                input_copy = input[input_idx_tuple].clone()
                zero_idx = slice_mask.nonzero()[0, 0]
                input_copy[zero_idx] = 1.
                grad_idx_tuple = idx_tuple[:self.dim] + (zero_idx,) + idx_tuple[self.dim + 1:]
                grad_input[grad_idx_tuple] = grad_output[idx_tuple] * input_copy.prod()
            return grad_input
class Mean(_DimReduceFunction):
    """Mean reduction; the gradient is grad_output spread over n elements / n."""

    fn_name = 'mean'

    def backward(self, grad_output):
        if self.dim is None:
            # Divide the scalar gradient by the total number of elements.
            grad_value = grad_output[0]
            grad_value /= reduce(lambda acc, size: acc * size, self.input_size, 1)
            return grad_output.new(*self.input_size).fill_(grad_value)
        # Repeat along the reduced dimension, scaled down by its length.
        repeats = [1] * len(self.input_size)
        dim_size = self.input_size[self.dim]
        repeats[self.dim] = dim_size
        return grad_output.repeat(*repeats).div_(dim_size)
class _SelectionFunction(Function):
    """Base for selection reductions (max/min/mode/median/kthvalue).

    The tensor method to call is derived from the subclass name.  Subclasses
    that cannot reduce over *all* elements set ``has_all_reduce = False``.
    """

    has_all_reduce = True
    # additional_args is prepended before dim when calling the tensor
    # function. It's a no-op for subclasses other than kthvalue.
    # kthvalue not only requires us to pass a dim, but also preceed it with k.
    additional_args = tuple()

    def __init__(self, dim=None):
        super(_SelectionFunction, self).__init__()
        self.dim = dim

    def forward(self, input):
        fn = getattr(input, type(self).__name__.lower())
        self.input_size = input.size()
        if self.dim is None and self.has_all_reduce:
            value = fn(*self.additional_args)
            # Remember where the selected value lives for the backward pass.
            self.indices = tuple(input.eq(value).nonzero()[0])
            return input.new((value,))
        else:
            # Default to the last dimension when none was given.
            if self.dim is None:
                dim = input.dim() - 1
            else:
                dim = self.dim
            args = (dim,)
            if self.additional_args:
                args = self.additional_args + args
            output, indices = fn(*args)
            self.save_for_backward(indices)
            self.mark_non_differentiable(indices)
            return output, indices

    def backward(self, grad_output, grad_indices=None):
        grad_input = grad_output.new(*self.input_size).zero_()
        if self.dim is None and self.has_all_reduce:
            grad_input[self.indices] = grad_output[0]
        else:
            if self.dim is None:
                # BUG FIX: the original read ``input.dim() - 1`` here, but
                # ``input`` is not in scope in backward (it resolved to the
                # builtin); derive the default dim from the saved size.
                dim = len(self.input_size) - 1
            else:
                dim = self.dim
            indices, = self.saved_tensors
            grad_input.scatter_(dim, indices, grad_output)
        return grad_input
class Max(_SelectionFunction):
    # Supports both all-element and per-dimension selection.
    pass
class Min(_SelectionFunction):
    # Supports both all-element and per-dimension selection.
    pass
class Mode(_SelectionFunction):
    # Mode is only computed along a dimension, never over all elements.
    has_all_reduce = False
class Median(_SelectionFunction):
    # Median is only computed along a dimension, never over all elements.
    has_all_reduce = False
class Kthvalue(_SelectionFunction):
    # kthvalue has no all-element form; k is passed before dim (see base class).
    has_all_reduce = False
    def __init__(self, k, dim=None):
        super(Kthvalue, self).__init__(dim)
        self.additional_args = (k,)
class Norm(Function):
    # p-norm over all elements (dim=None) or along a single dimension.
    def __init__(self, norm_type=2, dim=None):
        super(Norm, self).__init__()
        self.norm_type = norm_type
        self.dim = dim
    def forward(self, input):
        if self.dim is None:
            self.norm = input.norm(self.norm_type)
            self.save_for_backward(input)
            return input.new((self.norm,))
        else:
            output = input.norm(self.norm_type, self.dim)
            self.save_for_backward(input, output)
            return output
    def backward(self, grad_output):
        # Gradient as computed below: x * |x|^(p-2) / ||x||^(p-1), scaled by
        # grad_output; for p=2 this simplifies to x / ||x||_2.
        if self.dim is None:
            input, = self.saved_tensors
            if self.norm_type == 2:
                return input.mul(grad_output[0] / self.norm)
            else:
                pow = input.abs().pow(self.norm_type - 2)
                scale = grad_output[0] / self.norm ** (self.norm_type - 1)
                return input.mul(pow).mul(scale)
        else:
            # Per-dimension case: expand the per-slice norms/grads back to
            # the input's shape before applying the same formula.
            input, output = self.saved_tensors
            big_grad_output = grad_output.expand_as(input)
            if self.norm_type == 2:
                big_output = output.expand_as(input)
                return input.mul(big_grad_output).div(big_output)
            else:
                pow = input.abs().pow(self.norm_type - 2)
                big_output = output.pow(self.norm_type - 1).expand_as(input)
                return input.mul(pow).mul(big_grad_output).div(big_output)
# TODO: renorm
# TODO: std
# TODO: var
| {
"repo_name": "RPGOne/Skynet",
"path": "pytorch-master/torch/autograd/_functions/reduce.py",
"copies": "1",
"size": "7116",
"license": "bsd-3-clause",
"hash": -1041379393362069600,
"line_mean": 32.2523364486,
"line_max": 101,
"alpha_frac": 0.5505902192,
"autogenerated": false,
"ratio": 3.763088313061872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9812938021193607,
"avg_score": 0.00014810221365297896,
"num_lines": 214
} |
from functools import reduce
from git_orm import transaction, GitError
from git_orm.models.query import Q
class ObjCache:
    """Lazy per-model object cache backed by the current git transaction.

    On construction only the set of primary keys is listed; objects are
    loaded (and memoised) on first access via ``__getitem__``.
    """

    def __init__(self, model):
        self.model = model
        self.cache = {}
        pks = transaction.current().list_blobs([model._meta.storage_name])
        self.pks = set(map(model._meta.pk.loads, pks))
        # Names under which the primary key may be referenced in lookups.
        self.pk_names = ('pk', model._meta.pk.attname)

    def __getitem__(self, pk):
        """Return the object for ``pk``, loading it from the blob store once.

        Raises the model's ``DoesNotExist`` when the blob is missing.
        """
        # Idiom fix: ``pk not in`` instead of ``not pk in``.
        if pk not in self.cache:
            obj = self.model(pk=pk)
            try:
                trans = transaction.current()
                content = trans.get_blob(obj.path).decode('utf-8')
            except GitError:
                raise self.model.DoesNotExist(
                    'object with pk {} does not exist'.format(pk))
            obj.loads(content)
            self.cache[pk] = obj
        return self.cache[pk]
class QuerySet:
    """Lazy query over a git-backed model; combinable with ``&``, ``|`` and ``~``."""
    # Maximum number of objects rendered by __repr__ before truncation.
    REPR_MAXLEN = 10
    def __init__(self, model, query=None):
        self.model = model
        if query is None:
            query = Q()
        self.query = query
    def __repr__(self):
        # Fetch one more item than shown so truncation can be detected.
        evaluated = list(self[:self.REPR_MAXLEN+1])
        if len(evaluated) > self.REPR_MAXLEN:
            # NOTE(review): only REPR_MAXLEN-1 items are rendered before the
            # ellipsis; possibly intended to be REPR_MAXLEN -- confirm.
            bits = (repr(bit) for bit in evaluated[:self.REPR_MAXLEN-1])
            return '[{}, ...]'.format(', '.join(bits))
        return repr(evaluated)
    def __eq__(self, other):
        return (
            isinstance(other, self.__class__) and self.model == other.model and
            self.query == other.query)
    def __ne__(self, other):
        return not self == other
    def __or__(self, other):
        return QuerySet(self.model, self.query | other.query)
    def __and__(self, other):
        return QuerySet(self.model, self.query & other.query)
    def __invert__(self):
        return QuerySet(self.model, ~self.query)
    @transaction.wrap()
    def __getitem__(self, key):
        # Slicing stays lazy (returns a new QuerySet); integer indexing
        # evaluates and returns a single model instance.
        if isinstance(key, slice):
            return QuerySet(self.model, self.query[key])
        elif isinstance(key, int):
            try:
                stop = key + 1
                if stop == 0:
                    # key == -1: an end-relative slice must run to the end.
                    stop = None
                return QuerySet(self.model, self.query[key:stop]).get()
            except self.model.DoesNotExist:
                raise IndexError('index out of range')
        else:
            raise TypeError('indices must be integers')
    @transaction.wrap()
    def __iter__(self):
        pks, obj_cache = self._execute()
        return iter([obj_cache[pk] for pk in pks])
    def _execute(self, *args, **kwargs):
        # Combine the stored query with any extra filters and run it against
        # a fresh object cache; returns (pk iterator, cache).
        query = self.query & self._filter(*args, **kwargs)
        obj_cache = ObjCache(self.model)
        return query.execute(obj_cache, obj_cache.pks), obj_cache
    def _filter(self, *args, **kwargs):
        # Fold positional Q objects and keyword lookups into one AND-ed Q.
        return reduce(lambda x, y: x & y, args, Q(**kwargs))
    def all(self):
        return self
    def filter(self, *args, **kwargs):
        query = self.query & self._filter(*args, **kwargs)
        return QuerySet(self.model, query)
    def exclude(self, *args, **kwargs):
        query = self.query & ~self._filter(*args, **kwargs)
        return QuerySet(self.model, query)
    @transaction.wrap()
    def get(self, *args, **kwargs):
        # Return exactly one object; raise DoesNotExist when there is none
        # and MultipleObjectsReturned when there is more than one.
        pks, obj_cache = self._execute(*args, **kwargs)
        try:
            pk = next(pks)
        except StopIteration:
            raise self.model.DoesNotExist('object does not exist')
        try:
            next(pks)
        except StopIteration:
            pass
        else:
            raise self.model.MultipleObjectsReturned(
                'multiple objects returned')
        return obj_cache[pk]
    @transaction.wrap()
    def exists(self, *args, **kwargs):
        # True when at least one pk matches; does not load any object.
        pks, _ = self._execute(*args, **kwargs)
        try:
            next(pks)
        except StopIteration:
            return False
        return True
    @transaction.wrap()
    def count(self, *args, **kwargs):
        # Consume the pk iterator; avoids loading the objects themselves.
        pks, _ = self._execute(*args, **kwargs)
        return sum(1 for _ in pks)
    def order_by(self, *order_by):
        return QuerySet(self.model, self.query.order_by(*order_by))
| {
"repo_name": "natano/python-git-orm",
"path": "git_orm/models/queryset.py",
"copies": "1",
"size": "4081",
"license": "isc",
"hash": -7960725986496986000,
"line_mean": 29.9166666667,
"line_max": 79,
"alpha_frac": 0.550845381,
"autogenerated": false,
"ratio": 3.9315992292870905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9981296768230158,
"avg_score": 0.0002295684113865932,
"num_lines": 132
} |
from functools import reduce
from helpers.spec_helper import *
from interfaces.expr import Bool
from interfaces.spec import Spec
# Specification of a prioritized arbiter with n=2 clients plus one master.
# Inputs are request signals (r_i, rm); outputs are grant signals (g_i, gm).
# Temporal operators come from helpers.spec_helper -- presumably standard
# LTL/CTL naming (G=always, F=eventually, X=next, U=until) -- confirm there.
n = 2
rs_array, r_array = [], []
gs_array, g_array = [], []
rm_s, rm = sig_prop('rm')
gm_s, gm = sig_prop('gm')
for i in range(n):
    rs, r = sig_prop('r_'+str(i))
    gs, g = sig_prop('g_'+str(i))
    rs_array.append(rs)
    r_array.append(r)
    gs_array.append(gs)
    g_array.append(g)
# Mutual exclusion: at any time at most one grant (client or master) is high.
mut_excl = Bool(True)
for i in range(n):
    mut_excl &= G(g_array[i] >>
                  ~(reduce(lambda x,y: x|y, g_array[:i], Bool(False)) |
                    reduce(lambda x,y: x|y, g_array[i+1:], Bool(False)) |
                    gm))
mut_excl &= G(gm >> ~reduce(lambda x,y: x|y, g_array))
# Liveness: every request (client or master) is eventually granted.
req_is_granted = reduce(lambda x,y: x&y,
                        [G(r_array[i] >> F(g_array[i])) for i in range(n)],
                        Bool(True))
req_is_granted &= G(rm >> F(gm))
# Priority: after a master request, no client grant until the master grant.
master_priority = G(rm >> X(U(reduce(lambda x,y: x&y, [~g for g in g_array]), gm)))
# Resettability: from every state a grant-free continuation is reachable.
resettability = AG(EFG(~reduce(lambda x,y: x|y, g_array + [gm])))
formula = A(GF(~rm) >> (mut_excl & req_is_granted & master_priority)) & resettability
inputs = rs_array + [rm_s]
outputs = gs_array + [gm_s]
spec = Spec(inputs, outputs, formula)
| {
"repo_name": "5nizza/party-elli",
"path": "experiments-for-synt/prioritized_arbiter_2.py",
"copies": "1",
"size": "1275",
"license": "mit",
"hash": -2886802942676356600,
"line_mean": 16.4657534247,
"line_max": 85,
"alpha_frac": 0.5458823529,
"autogenerated": false,
"ratio": 2.634297520661157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8595766166125314,
"avg_score": 0.0168827414871686,
"num_lines": 73
} |
from functools import reduce
from helpers.spec_helper import *
from interfaces.expr import Bool
from interfaces.spec import Spec
# Specification of a postponed arbiter with n=3 clients (no master).
# Temporal operators come from helpers.spec_helper -- presumably standard
# LTL/CTL naming (G=always, F=eventually, X=next) -- confirm there.
n = 3
# Request (input) and grant (output) signal/proposition pairs per client.
rs_array, r_array = [], []
gs_array, g_array = [], []
for i in range(n):
    rs, r = sig_prop('r_'+str(i))
    gs, g = sig_prop('g_'+str(i))
    rs_array.append(rs)
    r_array.append(r)
    gs_array.append(gs)
    g_array.append(g)
# Mutual exclusion: at most one grant is high at any time.
mut_excl = Bool(True)
for i in range(n):
    mut_excl &= G(g_array[i] >>
                  ~(reduce(lambda x,y: x|y, g_array[:i], Bool(False)) |
                    reduce(lambda x,y: x|y, g_array[i+1:], Bool(False))))
# Liveness: every request is eventually granted.
req_is_granted = reduce(lambda x,y: x&y,
                        [G(r_array[i] >> F(g_array[i])) for i in range(n)])
# Initially no grants are raised.
no_init_grants = reduce(lambda x,y: x&y, [~g for g in g_array])
# Postponing: from any state, each client can stay requesting yet ungranted
# for several consecutive steps, i.e. grants can be postponed.
postponing = AG(reduce(lambda x,y: x&y,
                       [EF(~g_array[i] & r_array[i] & X(r_array[i] & ~g_array[i] & X(~g_array[i])))
                        for i in range(n)]))
formula = A(mut_excl & req_is_granted) & no_init_grants & postponing
inputs = rs_array
outputs = gs_array
spec = Spec(inputs, outputs, formula)
| {
"repo_name": "5nizza/party-elli",
"path": "experiments-for-synt/postponed_arbiter_3.py",
"copies": "1",
"size": "1119",
"license": "mit",
"hash": -2262745207517796900,
"line_mean": 23.8666666667,
"line_max": 99,
"alpha_frac": 0.5549597855,
"autogenerated": false,
"ratio": 2.632941176470588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36879009619705877,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from helpers.spec_helper import *
from interfaces.expr import Bool
from interfaces.spec import Spec
# Specification of a prioritized arbiter with n=3 clients plus one master.
# Identical in structure to prioritized_arbiter_2, only n differs.
# Temporal operators come from helpers.spec_helper -- presumably standard
# LTL/CTL naming (G=always, F=eventually, X=next, U=until) -- confirm there.
n = 3
rs_array, r_array = [], []
gs_array, g_array = [], []
rm_s, rm = sig_prop('rm')
gm_s, gm = sig_prop('gm')
for i in range(n):
    rs, r = sig_prop('r_'+str(i))
    gs, g = sig_prop('g_'+str(i))
    rs_array.append(rs)
    r_array.append(r)
    gs_array.append(gs)
    g_array.append(g)
# Mutual exclusion: at any time at most one grant (client or master) is high.
mut_excl = Bool(True)
for i in range(n):
    mut_excl &= G(g_array[i] >>
                  ~(reduce(lambda x,y: x|y, g_array[:i], Bool(False)) |
                    reduce(lambda x,y: x|y, g_array[i+1:], Bool(False)) |
                    gm))
mut_excl &= G(gm >> ~reduce(lambda x,y: x|y, g_array))
# Liveness: every request (client or master) is eventually granted.
req_is_granted = reduce(lambda x,y: x&y,
                        [G(r_array[i] >> F(g_array[i])) for i in range(n)],
                        Bool(True))
req_is_granted &= G(rm >> F(gm))
# Priority: after a master request, no client grant until the master grant.
master_priority = G(rm >> X(U(reduce(lambda x,y: x&y, [~g for g in g_array]), gm)))
# Resettability: from every state a grant-free continuation is reachable.
resettability = AG(EFG(~reduce(lambda x,y: x|y, g_array + [gm])))
formula = A(GF(~rm) >> (mut_excl & req_is_granted & master_priority)) & resettability
inputs = rs_array + [rm_s]
outputs = gs_array + [gm_s]
spec = Spec(inputs, outputs, formula)
| {
"repo_name": "5nizza/party-elli",
"path": "experiments-for-synt/prioritized_arbiter_3.py",
"copies": "1",
"size": "1275",
"license": "mit",
"hash": -2582711146591164000,
"line_mean": 16.4657534247,
"line_max": 85,
"alpha_frac": 0.5458823529,
"autogenerated": false,
"ratio": 2.634297520661157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3680179873561157,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from helpers.spec_helper import *
from interfaces.expr import Bool
from interfaces.spec import Spec
# Specification of a postponed arbiter with n=4 clients (no master).
# Identical in structure to postponed_arbiter_3, only n differs.
# Temporal operators come from helpers.spec_helper -- presumably standard
# LTL/CTL naming (G=always, F=eventually, X=next) -- confirm there.
n = 4
# Request (input) and grant (output) signal/proposition pairs per client.
rs_array, r_array = [], []
gs_array, g_array = [], []
for i in range(n):
    rs, r = sig_prop('r_'+str(i))
    gs, g = sig_prop('g_'+str(i))
    rs_array.append(rs)
    r_array.append(r)
    gs_array.append(gs)
    g_array.append(g)
# Mutual exclusion: at most one grant is high at any time.
mut_excl = Bool(True)
for i in range(n):
    mut_excl &= G(g_array[i] >>
                  ~(reduce(lambda x,y: x|y, g_array[:i], Bool(False)) |
                    reduce(lambda x,y: x|y, g_array[i+1:], Bool(False))))
# Liveness: every request is eventually granted.
req_is_granted = reduce(lambda x,y: x&y,
                        [G(r_array[i] >> F(g_array[i])) for i in range(n)])
# Initially no grants are raised.
no_init_grants = reduce(lambda x,y: x&y, [~g for g in g_array])
# Postponing: from any state, each client can stay requesting yet ungranted
# for several consecutive steps, i.e. grants can be postponed.
postponing = AG(reduce(lambda x,y: x&y,
                       [EF(~g_array[i] & r_array[i] & X(r_array[i] & ~g_array[i] & X(~g_array[i])))
                        for i in range(n)]))
formula = A(mut_excl & req_is_granted) & no_init_grants & postponing
inputs = rs_array
outputs = gs_array
spec = Spec(inputs, outputs, formula)
| {
"repo_name": "5nizza/party-elli",
"path": "experiments-for-synt/postponed_arbiter_4.py",
"copies": "1",
"size": "1119",
"license": "mit",
"hash": 7659840922421147000,
"line_mean": 23.8666666667,
"line_max": 99,
"alpha_frac": 0.5549597855,
"autogenerated": false,
"ratio": 2.632941176470588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36879009619705877,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from .homogeneous import Translation, UniformScale, Rotation, Affine, Homogeneous
def transform_about_centre(obj, transform):
    r"""
    Return a Transform that applies ``transform`` about the centre of
    ``obj``: the object is translated so its centre sits at the origin,
    transformed, then translated back to its original position.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    transform : :map:`ComposableTransform`
        A composable transform.

    Returns
    -------
    transform : :map:`Homogeneous`
        A homogeneous transform that implements the scaling.
    """
    to_origin = Translation(-obj.centre(), skip_checks=True)
    back_to_centre = Translation(obj.centre(), skip_checks=True)
    if isinstance(transform, Homogeneous):
        # Fast path: compose in-place so only a single matrix is returned.
        chain = to_origin.compose_before(transform)
        return chain.compose_before(back_to_centre)
    # Fallback: build a lazy transform chain.
    steps = [to_origin, transform, back_to_centre]
    return reduce(lambda acc, step: acc.compose_before(step), steps)
def scale_about_centre(obj, scale):
    r"""
    Return a Homogeneous Transform that scales an object about its centre.

    The given object must be transformable and must implement a method to
    provide the object centre.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    scale : `float` or ``(n_dims,)`` `ndarray`
        The scale factor as defined in the :map:`Scale` documentation.

    Returns
    -------
    transform : :map:`Homogeneous`
        A homogeneous transform that implements the scaling.
    """
    scaling = UniformScale(scale, obj.n_dims, skip_checks=True)
    return transform_about_centre(obj, scaling)
def rotate_ccw_about_centre(obj, theta, degrees=True):
    r"""
    Return a Homogeneous Transform that rotates an object
    counter-clockwise about its centre.

    The given object must be transformable and must implement a method to
    provide the object centre. Only 2D objects are supported.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    theta : `float`
        The angle of rotation clockwise about the origin.
    degrees : `bool`, optional
        If ``True`` theta is interpreted as degrees. If ``False``, theta is
        interpreted as radians.

    Returns
    -------
    transform : :map:`Homogeneous`
        A homogeneous transform that implements the rotation.
    """
    # Guard clause: only 2D rotation is implemented here
    if obj.n_dims != 2:
        raise ValueError("CCW rotation is currently only supported for " "2D objects")
    rotation = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees)
    return transform_about_centre(obj, rotation)
def shear_about_centre(obj, phi, psi, degrees=True):
    r"""
    Return an affine transform that shears (distorts) an object about
    its centre.

    The given object must be transformable and must implement a method to
    provide the object centre. Only 2D objects are supported.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    phi : `float`
        The angle of shearing in the X direction.
    psi : `float`
        The angle of shearing in the Y direction.
    degrees : `bool`, optional
        If ``True``, then phi and psi are interpreted as degrees. If ``False``
        they are interpreted as radians.

    Returns
    -------
    transform : :map:`Affine`
        An affine transform that implements the shearing.

    Raises
    ------
    ValueError
        Shearing can only be applied on 2D objects
    """
    # Guard clause: shearing is only defined here for 2D objects
    if obj.n_dims != 2:
        raise ValueError("Shearing is currently only supported for 2D objects")
    shear = Affine.init_from_2d_shear(phi, psi, degrees=degrees)
    return transform_about_centre(obj, shear)
| {
"repo_name": "patricksnape/menpo",
"path": "menpo/transform/compositions.py",
"copies": "2",
"size": "4180",
"license": "bsd-3-clause",
"hash": 4948414588822811000,
"line_mean": 33.8333333333,
"line_max": 86,
"alpha_frac": 0.6669856459,
"autogenerated": false,
"ratio": 4.278403275332651,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.594538892123265,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from .homogeneous import (Translation, UniformScale, Rotation, Affine,
Homogeneous)
def transform_about_centre(obj, transform):
    r"""
    Return a Transform applying ``transform`` about the centre of ``obj``.

    The object is translated to the origin (according to its centre),
    transformed, and then translated back to its previous position.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    transform : :map:`ComposableTransform`
        A composable transform.

    Returns
    -------
    transform : :map:`Homogeneous`
        A homogeneous transform that implements the transformation.
    """
    shift_in = Translation(-obj.centre(), skip_checks=True)
    shift_out = Translation(obj.centre(), skip_checks=True)
    steps = [shift_in, transform, shift_out]
    if isinstance(transform, Homogeneous):
        # Homogeneous fast path: fold everything into a single matrix
        result = steps[0]
        for step in steps[1:]:
            result = result.compose_before(step)
        return result
    # Fallback: leave the three steps as a composed transform chain
    return reduce(lambda a, b: a.compose_before(b), steps)
def scale_about_centre(obj, scale):
    r"""
    Return a Homogeneous Transform scaling ``obj`` about its own centre.

    The given object must be transformable and must implement a method to
    provide the object centre.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    scale : `float` or ``(n_dims,)`` `ndarray`
        The scale factor as defined in the :map:`Scale` documentation.

    Returns
    -------
    transform : :map:`Homogeneous`
        A homogeneous transform that implements the scaling.
    """
    return transform_about_centre(
        obj, UniformScale(scale, obj.n_dims, skip_checks=True))
def rotate_ccw_about_centre(obj, theta, degrees=True):
    r"""
    Return a Homogeneous Transform rotating ``obj`` counter-clockwise
    about its own centre.

    The given object must be transformable and must implement a method to
    provide the object centre. Only 2D objects are supported.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    theta : `float`
        The angle of rotation clockwise about the origin.
    degrees : `bool`, optional
        If ``True`` theta is interpreted as degrees. If ``False``, theta is
        interpreted as radians.

    Returns
    -------
    transform : :map:`Homogeneous`
        A homogeneous transform that implements the rotation.
    """
    if obj.n_dims != 2:
        raise ValueError('CCW rotation is currently only supported for '
                         '2D objects')
    rotation = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees)
    return transform_about_centre(obj, rotation)
def shear_about_centre(obj, phi, psi, degrees=True):
    r"""
    Return an affine transform shearing (distorting) ``obj`` about its
    own centre.

    The given object must be transformable and must implement a method to
    provide the object centre. Only 2D objects are supported.

    Parameters
    ----------
    obj : :map:`Transformable`
        A transformable object that has the ``centre`` method.
    phi : `float`
        The angle of shearing in the X direction.
    psi : `float`
        The angle of shearing in the Y direction.
    degrees : `bool`, optional
        If ``True``, then phi and psi are interpreted as degrees. If ``False``
        they are interpreted as radians.

    Returns
    -------
    transform : :map:`Affine`
        An affine transform that implements the shearing.

    Raises
    ------
    ValueError
        Shearing can only be applied on 2D objects
    """
    if obj.n_dims != 2:
        raise ValueError('Shearing is currently only supported for 2D objects')
    shear = Affine.init_from_2d_shear(phi, psi, degrees=degrees)
    return transform_about_centre(obj, shear)
| {
"repo_name": "yuxiang-zhou/menpo",
"path": "menpo/transform/compositions.py",
"copies": "4",
"size": "4233",
"license": "bsd-3-clause",
"hash": 699174643305522400,
"line_mean": 33.9834710744,
"line_max": 81,
"alpha_frac": 0.6586345382,
"autogenerated": false,
"ratio": 4.323799795709908,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010078613182826043,
"num_lines": 121
} |
from functools import reduce
from lisp.environment import default, Env
NIL = []
def procedure(params, body, env):
    """
    Build a user-defined procedure.

    Uses lexical scoping: names are looked up in the environment where the
    procedure was defined, extended with the call's arguments.
    """
    def _call(*args):
        return evaluate_expression(body, Env(params, args, env))
    return _call
def evaluate_expression(exp, env=default):
    """
    Evaluate expression exp in environment env.
    Expression represented as a list of terms.
    Environment as an instance of Env class.
    Example:
    > evaluate_expression([+, 2, [-, 4, 2]], Env()) # Passing empty env
    >> 4
    """
    def function_call(exp):
        """
        Handle function call.
        In lisp every s-expression is a function-call.
        First element is a function name (or a function itself).
        Other elements are arguments to that function.
        """
        func, *args = exp
        function_to_call = evaluate_expression(func, env)
        # Applicative order: evaluate every argument before applying
        args = [evaluate_expression(x, env) for x in args]
        return function_to_call(*args)
    def let(bindings, body):
        """
        Handle let special form.
        First, extend current environment with bindings.
        Second, evaluate body under extended environment.
        """
        names = [b[0] for b in bindings]
        # Binding values are evaluated in the OUTER env (plain let, not let*)
        values = [evaluate_expression(b[1], env) for b in bindings]
        new_env = Env(names=names, values=values, outer=env)
        return evaluate_expression(body, new_env)
    def define(name, exp):
        """
        Handle define special form.
        First, evaluate exp under current environment to a value V.
        Second, extend current environment with name -> V pair.
        Return V as a result.
        """
        val = evaluate_expression(exp, env)
        env.update({name: val})
        return val
    def if_(predicate, if_true_exp, if_false_exp):
        """
        Handle if special form.
        First, evaluate predicate under current environment to a value V.
        Second, if V is truthy evaluate if_true_exp to a value V'.
        Otherwise, evaluate if_false_exp to a value V'.
        Return V' as a result.
        """
        predicate_value = evaluate_expression(predicate, env)
        return (evaluate_expression(if_true_exp, env) if predicate_value
                else evaluate_expression(if_false_exp, env))
    def set_(name, exp):
        """
        Handle 'set!' special form.
        If name exists in current environment, update its value.
        Else, fail.
        Note: works ONLY for symbols/name/envs. Lists are immutable.
        """
        value = evaluate_expression(exp, env)
        env.set(name, value)
        return value
    def quasiquoute(exp):
        """
        Handle 'quasiquoute' special form.
        Traverse given exp, if subexpression is 'unquote' special form,
        evaluate it. If not -- left as is.
        Return modified exp.
        Note: only substitutes unquotes at the top level of the list;
        nested sublists are left untouched.
        """
        return [evaluate_expression(datum, env) if is_unquote(datum) else datum
                for datum in exp]
    def cond(clauses):
        """
        Handle 'cond' special form.
        Iterates over list of clauses in order and evaluates it.
        If one evaluates to true, evaluate its corresponding expression to result and return it.
        If no clause evaluates to true, return NIL.
        """
        for clause in clauses:
            predicate_exp, value_exp = clause
            if evaluate_expression(predicate_exp, env):
                return evaluate_expression(value_exp, env)
        return NIL
    # --- Predicates used by the dispatch chain below ---
    def match(exp, first_term):
        # A special form is a list whose head is the given keyword
        return isinstance(exp, list) and exp[0] == first_term
    def is_symbol(exp):
        # Symbols are represented as plain Python strings
        return isinstance(exp, str)
    def is_literal(exp):
        # Anything that is not a list (and not a symbol, checked earlier)
        return not isinstance(exp, list)
    def is_let(exp):
        return match(exp, 'let')
    def is_quasiqoute(exp):
        return match(exp, 'quasiquote')
    def is_quote(exp):
        return match(exp, 'quote')
    def is_unquote(exp):
        return match(exp, 'unquote')
    def is_cond(exp):
        return match(exp, 'cond')
    def is_define(exp):
        return match(exp, 'define')
    def is_lambda(exp):
        return match(exp, 'lambda')
    def is_if(exp):
        return match(exp, 'if')
    def is_set(exp):
        return match(exp, 'set!')
    # Kinda of pattern-matching: the first matching case wins, so the
    # ordering matters (symbols/literals before special forms, special
    # forms before the generic function-call fallback).
    if not exp:
        return NIL
    elif is_symbol(exp):
        return env.lookup(exp)
    elif is_literal(exp):
        return exp
    elif is_quote(exp):
        _, datum = exp
        return datum
    elif is_unquote(exp):
        _, datum = exp
        return evaluate_expression(datum, env)
    elif is_quasiqoute(exp):
        _, datum = exp
        return quasiquoute(datum)
    elif is_cond(exp):
        clauses = exp[1:]
        return cond(clauses)
    elif is_if(exp):
        _, predicate, true_branch, false_branch = exp
        return if_(predicate, true_branch, false_branch)
    elif is_let(exp):
        _, bindings, body = exp
        return let(bindings, body)
    elif is_set(exp):
        _, name, value = exp
        return set_(name, value)
    elif is_define(exp):
        _, name, exp = exp
        return define(name, exp)
    elif is_lambda(exp):
        _, args, body = exp
        return procedure(args, body, env)
    else:
        return function_call(exp)
| {
"repo_name": "begor/lisp",
"path": "lisp/evaluator.py",
"copies": "1",
"size": "5360",
"license": "mit",
"hash": 3426213915181929000,
"line_mean": 26.6288659794,
"line_max": 96,
"alpha_frac": 0.5893656716,
"autogenerated": false,
"ratio": 4.088482074752098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5177847746352098,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from lispy.parser import List, Quotation, Number, Symbol
from lispy.evaluator import evaluate, macro, ismacro
def test_number():
    """A bare number evaluates to itself."""
    assert evaluate(Number(123), {}) == Number(123)
def test_number_list():
    """A quoted list of numbers evaluates to the list itself."""
    expected = List([Number(11), Number(22), Number(33)])
    assert evaluate(Quotation(expected), {}) == expected
def test_quote_word():
    """A quoted symbol evaluates to the symbol, not its binding."""
    assert evaluate(Quotation(Symbol('abc')), {}) == Symbol('abc')
def test_sum():
    """Evaluating (+ 1 1 1) with a user-supplied adder yields 3."""
    def add(numbers):
        # Fold the Number wrappers into a running total, starting at 0
        total = Number(0)
        for num in numbers:
            total = Number(total.value + num.value)
        return total
    env = {'+': add}
    expr = List([Symbol('+'), Number(1), Number(1), Number(1)])
    assert evaluate(expr, env) == Number(3)
def test_nested_evaluation():
    """Nested s-expressions are evaluated inside-out: (1+1)*(2+2)*(3+3)+4+5 = 57."""
    def add(numbers):
        total = Number(0)
        for num in numbers:
            total = Number(total.value + num.value)
        return total
    def mult(numbers):
        product = Number(1)
        for num in numbers:
            product = Number(product.value * num.value)
        return product
    env = {'+': add, '*': mult}
    inner_product = List([Symbol('*'),
                          List([Symbol('+'), Number(1), Number(1)]),
                          List([Symbol('+'), Number(2), Number(2)]),
                          List([Symbol('+'), Number(3), Number(3)])])
    expr = List([Symbol('+'), inner_product, Number(4), Number(5)])
    assert evaluate(expr, env) == Number(57)
def test_macro():
    """The @macro decorator marks a function so ismacro() recognises it."""
    @macro
    def noop():
        pass
    assert ismacro(noop)
def test_macro_evaluation():
    """A 'define' macro receives the env and the raw (unevaluated) AST."""
    quoted = Quotation(List([Number(11), Number(22), Number(33)]))
    @macro
    def define(env, form):
        # Bind the symbol to the unevaluated value in the environment
        symbol, value = form
        env[symbol.identifier] = value
    env = {'define': define}
    evaluate(List([Symbol('define'), Symbol('list'), quoted]), env)
    assert env['list'] == quoted
| {
"repo_name": "yhlam/lispy",
"path": "tests/test_evaluator.py",
"copies": "1",
"size": "2147",
"license": "mit",
"hash": -4133747809516157000,
"line_mean": 26.1772151899,
"line_max": 79,
"alpha_frac": 0.4792734048,
"autogenerated": false,
"ratio": 4.193359375,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 79
} |
from functools import reduce
from mission.constants.config import buoys as constants
from mission.framework.combinators import Sequential, Concurrent, MasterConcurrent, Conditional
from mission.framework.helpers import call_if_function, ConsistencyCheck, get_sub_position
from mission.framework.movement import Heading, RelativeToInitialHeading, VelocityX, VelocityY, Depth, RelativeToInitialDepth
from mission.framework.position import GoToPosition, MoveX
from mission.framework.primitive import Log, NoOp, Zero, FunctionTask, Succeed
from mission.framework.targeting import ForwardTarget
from mission.framework.task import Task
from mission.framework.timing import Timer, Timed
from mission.helpers import scaled_speed
import shm
from shm import red_buoy_results
from shm import green_buoy_results
from shm import yellow_buoy_results
# Hit order: Red Green Yellow
BUOY_SURGE_TIMEOUT = 10
def redRight():
    """True iff the red buoy is the last entry in the configured order."""
    red_position = constants.BUOY_ORDER.index('R')
    return red_position == 2
def greenRight():
    """True iff green comes after red in the configured buoy order."""
    order = constants.BUOY_ORDER
    return order.index('G') > order.index('R')
def yellowRight():
    """True iff yellow comes after green in the configured buoy order."""
    order = constants.BUOY_ORDER
    return order.index('Y') > order.index('G')
def secondRedRight():
    """True iff red comes after yellow in the configured buoy order."""
    order = constants.BUOY_ORDER
    return order.index('R') > order.index('Y')
class HeadingRestore(Task):
    """
    Saves the current heading and restores it at a later time.
    """

    def __init__(self, heading=None, *args, **kwargs):
        """
        heading - a heading to use as the original heading; defaults to the
            sub's heading at construction time
        """
        super().__init__(*args, **kwargs)
        # Capture the heading to restore; default to the sub's current one
        self.start_heading = (shm.kalman.heading.get()
                              if heading is None else heading)
        self.heading_task = Heading(self.start_heading, error=4)

    def on_run(self):
        # Keep commanding the stored heading until the controller is done
        if self.heading_task.finished:
            self.finish()
        else:
            self.heading_task()
class DepthRestore(Task):
    """
    Saves a depth and restores the sub to it when run.
    """

    def __init__(self, depth=None, *args, **kwargs):
        """
        depth - a depth to use as the original depth; defaults to the
            configured buoy search depth
        """
        super().__init__(*args, **kwargs)
        # Capture the depth to restore
        self.start_depth = (constants.BUOY_SEARCH_DEPTH
                            if depth is None else depth)
        self.depth_task = Depth(self.start_depth, error=.01)

    def on_run(self):
        # Keep commanding the stored depth until the controller is done
        if self.depth_task.finished:
            self.finish()
        else:
            self.depth_task()
"""
class LocateBuoyBySpinning(Task):
Locates a buoy by spinning.
def __init__(self, validator, *args, **kwargs):
validator - a function that returns True when a buoy is found and
False otherwise.
super().__init__(*args, **kwargs)
#self.logv("Starting {} task".format(self.__class__.__name__))
self.validator = validator
self.start_heading = shm.kalman.heading.get()
#self.subtasks = [Sequential(RelativeToInitialHeading(60, error=0.1), Timer(1)) for i in range(6)]
self.subtasks = [
Sequential(RelativeToInitialHeading(20, error=0.5), Timer(0.5)),
Sequential(RelativeToInitialHeading(25, error=0.5), Timer(0.5)),
Sequential(HeadingRestore(self.start_heading), Timer(1.5)),
Sequential(RelativeToInitialHeading(-20, error=0.5), Timer(0.5)),
Sequential(RelativeToInitialHeading(-25, error=0.5), Timer(0.5)),
Sequential(HeadingRestore(self.start_heading), Timer(1.5))
]
self.spin_task = Sequential(subtasks=self.subtasks)
self.zero_task = Zero()
self.TIMEOUT = 20
def on_run(self):
# Perform a search for the buoy
# If the validator() is True, then finish
if self.this_run_time - self.first_run_time > self.TIMEOUT:
HeadingRestore(self.start_heading)()
self.finish()
self.loge("{} timed out!".format(self.__class__.__name__))
return
self.zero_task()
#self.logv("Running {}".format(self.__class__.__name__))
#self.logv("Spin step: {}/{}".format(
#reduce(lambda acc, x: acc + 1 if x.finished else acc, self.subtasks, 1),
#len(self.subtasks)))
self.spin_task()
if self.validator() or self.spin_task.finished:
self.finish()
def on_finish(self):
#self.logv('{} task finished in {} seconds!'.format(
#self.__class__.__name__,
#self.this_run_time - self.first_run_time))
self.zero_task()
"""
class LocateAdjacentBuoy(Task):
    """
    Locates a buoy by strafing side to side (via LocateBuoyStrafe),
    surging between strafe passes when direction changes are enabled.
    """
    def __init__(self, validator, right=True, changeDir=True, *args, **kwargs):
        """
        validator - a function that returns True when a buoy is found and
            False otherwise.
        right - initial strafe direction (True = right, False = left)
        changeDir - if True, alternate strafe directions with short surges
            in between; if False, strafe only in the initial direction
        """
        super().__init__(*args, **kwargs)
        self.validator = validator
        # Factories (not instances): tasks are (re)built lazily so a
        # finished subtask can be restarted with fresh state.
        self.task_classes = []
        if changeDir:
            self.task_classes.append(
                lambda: LocateBuoyStrafe(validator, right=right))
            self.task_classes.append(lambda: DirectionalSurge(2, .3))
            self.task_classes.append(
                lambda: LocateBuoyStrafe(validator, right=(not right)))
            self.task_classes.append(lambda: DirectionalSurge(3, -.3))
        else:
            self.task_classes.append(
                lambda: LocateBuoyStrafe(validator, right=right))
        self.tasks = []
        self.task_index = 0
        self.TIMEOUT = 60  # seconds before the search gives up
    def on_first_run(self):
        # self.logv("Starting {} task".format(self.__class__.__name__))
        self.tasks = [x() for x in self.task_classes]
    def on_run(self):
        if self.this_run_time - self.first_run_time > self.TIMEOUT:
            self.finish()
            # self.loge("{} timed out!".format(self.__class__.__name__))
            return
        # self.logv("Running {}".format(self.__class__.__name__))
        # Run the current subtask; finish as soon as the buoy is seen
        self.tasks[self.task_index]()
        if self.validator():
            self.finish()
        if self.tasks[self.task_index].finished:
            # Reinstantiate subtask, because validator is not true
            self.tasks[self.task_index] = self.task_classes[self.task_index]()
            self.task_index = (self.task_index + 1) % len(self.tasks)
    def on_finish(self):
        self.logv('{} task finished in {} seconds!'.format(
            self.__class__.__name__, self.this_run_time - self.first_run_time))
class LocateAlignedBuoy(Task):
    """
    Locates a buoy the sub is roughly aligned with by surging
    (via LocateBuoySurge).
    """
    def __init__(self, validator, forward=True, *args, **kwargs):
        """
        validator - a function that returns True when a buoy is found and False
            otherwise.
        forward - determines whether the submarine should move forward or
            backward during its search
        """
        super().__init__(*args, **kwargs)
        self.validator = validator
        # Factory list (single entry) so the subtask can be rebuilt fresh
        self.task_classes = [
            lambda: LocateBuoySurge(validator, forward=forward)
        ]
        self.tasks = []
        self.task_index = 0
        self.TIMEOUT = 60  # seconds before the search gives up
    def on_first_run(self):
        # self.logv("Starting {} task".format(self.__class__.__name__))
        self.tasks = [x() for x in self.task_classes]
    def on_run(self):
        if self.this_run_time - self.first_run_time > self.TIMEOUT:
            self.finish()
            # self.loge("{} timed out!".format(self.__class__.__name__))
            return
        # self.logv("Running {}".format(self.__class__.__name__))
        # Run the current subtask; finish as soon as the buoy is seen
        self.tasks[self.task_index]()
        if self.validator():
            self.finish()
        if self.tasks[self.task_index].finished:
            # Reinstantiate subtask, because validator is not true
            self.tasks[self.task_index] = self.task_classes[self.task_index]()
            self.task_index = (self.task_index + 1) % len(self.tasks)
    def on_finish(self):
        pass
        # self.logv('{} task finished in {} seconds!'.format(
        #     self.__class__.__name__,
        #     self.this_run_time - self.first_run_time))
class LocateBuoySurge(Task):
    """
    Locates a buoy in front of or behind the current position of the submarine
    by surging until the validator fires or BUOY_SURGE_TIMEOUT elapses.
    """
    def __init__(self, validator, forward=True, *args, **kwargs):
        """
        validator - a function that returns True when a buoy is found and False
            otherwise.
        forward - determines whether the submarine should move forward or
            backward during its search
        The surge duration is the module-level BUOY_SURGE_TIMEOUT.
        """
        super().__init__(*args, **kwargs)
        # self.logv("Starting {} task".format(self.__class__.__name__))
        self.validator = validator
        self.forward = forward
        self.surge_task = VelocityX()
        self.zero_task = Zero()
        self.TIMEOUT = BUOY_SURGE_TIMEOUT
    def on_run(self, forward=None):
        """
        forward - optional per-call override of the direction chosen at
            construction time
        """
        # Perform a search for the buoy
        # If the validator() is True, then finish
        if self.this_run_time - self.first_run_time > self.TIMEOUT:
            self.finish()
            # self.loge("{} timed out!".format(self.__class__.__name__))
            return
        # self.logv("Running {}".format(self.__class__.__name__))
        # Asymmetric speeds: creep forward (0.2) but back off fast (-0.7)
        velocity = 0.2 if self.forward else -0.7
        if forward is not None:
            velocity = 0.2 if forward else -0.7
        self.surge_task(velocity)
        if self.validator():
            self.finish()
    def on_finish(self):
        # Stop surging once the search ends
        self.zero_task()
        # self.logv('{} task finished in {} seconds!'.format(
        #     self.__class__.__name__,
        #     self.this_run_time - self.first_run_time))
class LocateBuoyStrafe(Task):
    """
    Locates a buoy by strafing in a defined direction while holding the
    buoy search depth.
    """
    def __init__(self, validator, right=True, timeout=10, *args, **kwargs):
        """
        validator - a function that returns True when a buoy is found and False
            otherwise.
        right - determines whether the submarine should move right or
            left during its search
        timeout - the amount of time to strafe, in seconds
        """
        super().__init__(*args, **kwargs)
        # self.logv("Starting {} task".format(self.__class__.__name__))
        self.validator = validator
        self.right = right
        self.surge_task = VelocityY()
        self.zero_task = Zero()
        self.depth_task = Depth(constants.BUOY_SEARCH_DEPTH)
        self.TIMEOUT = timeout
    def on_run(self, right=True):
        """
        NOTE(review): this `right` parameter is never read -- the direction
        chosen in __init__ (self.right) is used instead. Confirm whether
        the per-call override was intended (compare LocateBuoySurge.on_run).
        """
        # Perform a search for the buoy
        # If the validator() is True, then finish
        if self.this_run_time - self.first_run_time > self.TIMEOUT:
            self.finish()
            self.loge("{} timed out!".format(self.__class__.__name__))
            return
        velocity = 0.2 if self.right else -0.2
        self.surge_task(velocity)
        # Hold the configured search depth while strafing
        self.depth_task()
        if self.validator():
            self.finish()
    def on_finish(self):
        # Stop strafing once the search ends
        self.zero_task()
class LocateFirstBuoy(Task):
    """
    Locates the first buoy using LocateBuoyStrafe and LocateBuoySurge
    Surges until it sees either the desired buoy or the Middle buoy, then
    strafes.
    """
    def __init__(self,
                 validator,
                 forward=True,
                 right=True,
                 middle=yellow_buoy_results,
                 *args,
                 **kwargs):
        """
        validator - a function that returns True when a buoy is found and False
            otherwise.
        forward - determines whether the submarine should move forward or
            backward during its search
        right - strafe direction for the strafing phase
        middle - shm group for the middle buoy (defaults to yellow)
        """
        super().__init__(*args, **kwargs)
        self.validator = validator
        self.middle_buoy = middle
        # NOTE(review): `self.middle_check` here is a bound method object,
        # which is always truthy, so the else-branch below is unreachable.
        # This was probably meant to be `self.middle_check()` -- but note
        # that calling it would raise AttributeError because
        # `self.seen_frames_checker` is never assigned in this class.
        if self.middle_check:
            self.task_classes = [lambda: LocateBuoyStrafe(validator, right)]
        else:
            self.task_classes = [
                lambda: LocateBuoySurge(validator, forward),
                lambda: LocateBuoyStrafe(validator, right)
            ]
        self.tasks = []
        self.task_index = 0
        self.TIMEOUT = 60  # seconds before the search gives up
    def on_first_run(self):
        self.logv("Starting {} task".format(self.__class__.__name__))
        self.tasks = [x() for x in self.task_classes]
    def on_run(self):
        if self.this_run_time - self.first_run_time > self.TIMEOUT:
            self.finish()
            self.loge("{} timed out!".format(self.__class__.__name__))
            return
        self.logv("Running {}".format(self.__class__.__name__))
        self.tasks[self.task_index]()
        if self.validator():
            self.finish()
        # NOTE(review): same always-truthy bound-method test as in __init__
        if self.task_index == 0 and self.middle_check:
            self.tasks[self.task_index].finish()
        if self.tasks[self.task_index].finished:
            # Reinstantiate subtask, because validator is not true
            self.tasks[self.task_index] = self.task_classes[self.task_index]()
            self.task_index = (self.task_index + 1) % len(self.tasks)
    def middle_check(self):
        # Checks if middle buoy is visible and relatively close
        # NOTE(review): relies on self.seen_frames_checker, which is never
        # set in this class -- calling this method would raise.
        return self.seen_frames_checker.check(
            self.middle_buoy.probability.get() !=
            0) and (self.middle_buoy.percent_frame.get() > 1)
class LocateBuoy(Task):
    """
    Locates a buoy using LocateBuoyBySpinning and LocateBuoySurge.

    NOTE(review): LocateBuoyBySpinning is only present in this module
    inside a triple-quoted string (i.e. commented out), so building the
    task list here would raise NameError at runtime -- verify this class
    is unused or restore LocateBuoyBySpinning.
    """
    def __init__(self,
                 validator,
                 forward=True,
                 checkBehind=False,
                 *args,
                 **kwargs):
        """
        validator - a function that returns True when a buoy is found and False
            otherwise.
        forward - determines whether the submarine should move forward or
            backward during its search
        checkBehind - determines whether the submarine should begin by moving
            backwards to see if the buoy is behind it
        """
        super().__init__(*args, **kwargs)
        self.validator = validator
        # Factories so finished subtasks can be rebuilt with fresh state
        self.task_classes = [
            lambda: LocateBuoyBySpinning(validator),
            lambda: LocateBuoySurge(validator, forward)
        ]
        if checkBehind:
            self.task_classes.insert(0,
                                     lambda: LocateBuoySurge(validator, False))
        self.tasks = []
        self.task_index = 0
        self.TIMEOUT = 60  # seconds before the search gives up
    def on_first_run(self):
        # self.logv("Starting {} task".format(self.__class__.__name__))
        self.tasks = [x() for x in self.task_classes]
    def on_run(self):
        if self.this_run_time - self.first_run_time > self.TIMEOUT:
            self.finish()
            # self.loge("{} timed out!".format(self.__class__.__name__))
            return
        # self.logv("Running {}".format(self.__class__.__name__))
        # Run the current subtask; finish as soon as the buoy is seen
        self.tasks[self.task_index]()
        if self.validator():
            self.finish()
        if self.tasks[self.task_index].finished:
            # Reinstantiate subtask, because validator is not true
            self.tasks[self.task_index] = self.task_classes[self.task_index]()
            self.task_index = (self.task_index + 1) % len(self.tasks)
    # def on_finish(self):
    #     self.logv('{} task finished in {} seconds!'.format(
    #         self.__class__.__name__,
    #         self.this_run_time - self.first_run_time))
class PreventSurfacing(Task):
    """Watchdog: fails the mission if the sub drifts too close to the surface."""

    def on_first_run(self, *args, **kwargs):
        pass

    def on_run(self, *args, **kwargs):
        depth = shm.kalman.depth.get()
        if depth < .3:
            self.loge('Tried to surface, killing')
            self.finish(success=False)
class AlignTarget(Task):
    """
    Aligns using ForwardTarget on a target coordinate, while ensuring that the
    target is visible (re-running the locator when the target is lost).
    """
    def __init__(self,
                 validator,
                 locator_task,
                 target_coords,
                 vision_group,
                 heading_task,
                 forward_target_p=0.001,
                 *args,
                 **kwargs):
        """
        validator - a function that returns True when the target is visible and
            False otherwise.
        locator_task - a task that locates the target
        target_coords - the coordinates of the target with which to align
        vision_group - the shm group for the buoy (currently unused here)
        heading_task - heading-restore task (currently only referenced in
            commented-out code)
        forward_target_p - proportional gain for the ForwardTarget controller
        """
        super().__init__(*args, **kwargs)
        self.validator = validator
        self.locator_task = locator_task
        self.heading_task = heading_task
        # Aim point is the centre of the (normalized) frame
        def get_center():
            return (0, 0)
        # TODO use normalized coordinates instead
        self.target_task = ForwardTarget(
            target_coords,
            target=get_center,
            px=forward_target_p,
            dx=forward_target_p / 2,
            py=forward_target_p,
            dy=forward_target_p / 2,
            deadband=(30, 30))
        # Require 5 consecutive "finished" ticks before declaring alignment
        self.target_checker = ConsistencyCheck(5, 5, strict=True)
        self.TIMEOUT = 60  # seconds before alignment gives up
    # def on_first_run(self):
    #     self.logv("Starting {} task".format(self.__class__.__name__))
    def on_run(self):
        if self.this_run_time - self.first_run_time > self.TIMEOUT:
            self.finish()
            # self.loge("{} timed out!".format(self.__class__.__name__))
            return
        # self.logv("Running {}".format(self.__class__.__name__))
        # if shm.kalman.depth.get() < .4:
        #     self.giveup_task()
        #     self.finish()
        if self.validator():
            # if abs(shm.kalman.depth.get() - BUOY_SEARCH_DEPTH) >= BUOY_DEPTH_VARIANCE:
            #     self.depthless_target()
            # else:
            self.target_task()
        else:
            # self.heading_task()
            # HeadingRestore()
            self.logv('lost buoy? searching and restoring depth')
            # NOTE(review): this constructs a Depth task but never calls it,
            # so no depth command is issued -- likely missing a trailing ().
            Depth(constants.BUOY_SEARCH_DEPTH)
            self.locator_task()
        if self.target_checker.check(self.target_task.finished):
            self.finish()
    # def on_finish(self):
    #     self.logv('{} task finished in {} seconds!'.format(
    #         self.__class__.__name__,
    #         self.this_run_time - self.first_run_time))
class RamTarget(Task):
    """
    Moves forward until collision with an object at a given coordinate in the
    yz-plane.
    """
    def __init__(self,
                 target_validator,
                 collision_validator,
                 locator_task,
                 concurrent_task=NoOp(),
                 ram_speed=None,
                 *args,
                 **kwargs):
        """
        target_validator - a function that returns True when a target is
            visible and False otherwise.
        collision_validator - a function that returns True when a collision is
            made and False otherwise.
        locator_task - task run to reacquire the target when it is lost
        concurrent_task - an optional argument for a task to run while moving
            forward to ram the target. It may be used to continually align with
            the target while ramming it.
            NOTE(review): `NoOp()` is a mutable default argument shared by
            every RamTarget that does not pass its own -- confirm NoOp is
            stateless.
        ram_speed - a function that returns a speed at which to ram the target
        """
        super().__init__(*args, **kwargs)
        # self.logv("Starting {} task".format(self.__class__.__name__))
        self.target_validator = target_validator
        self.collision_validator = collision_validator
        self.ram_speed = ram_speed
        self.ram_task = VelocityX()
        self.locator_task = locator_task
        self.concurrent_task = concurrent_task
        self.commit_task = Sequential(VelocityX(1), Timer(1), VelocityX(0))
        self.ram_commit_phase = False
        self.TIMEOUT = 25  # seconds before the ram attempt gives up
    def on_run(self):
        # Move forward for ramming target
        # If the validator function returns True, then finish the task
        if self.this_run_time - self.first_run_time > self.TIMEOUT:
            self.finish()
            # self.loge("{} timed out!".format(self.__class__.__name__))
            return
        # self.logv("Running {}".format(self.__class__.__name__))
        if self.target_validator():
            # Ram at the supplied speed (callable) or a default of 0.3
            if self.ram_speed is not None:
                speed = self.ram_speed()
            else:
                speed = 0.3
            self.ram_task(speed)
        else:
            # Lost the target -- run the locator to reacquire it
            self.locator_task()
        if self.collision_validator():
            self.ram_commit_phase = True
            self.finish()
        # Don't run concurrent task if we're committing the ram!
        # NOTE(review): finish() above does not return, so the concurrent
        # task still runs on the collision tick, contradicting the comment.
        if self.concurrent_task:
            self.concurrent_task()
    def on_finish(self):
        # self.logv('{} task finished in {} seconds!'.format(
        #     self.__class__.__name__,
        #     self.this_run_time - self.first_run_time))
        Zero()()
class DirectionalSurge(Task):
    """
    Surges at a fixed speed for a fixed time (negative speed backs away),
    optionally strafing slightly to compensate for drift.
    """

    def __init__(self, timeout, speed=.3, compensate=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.speed = speed
        self.ram_task = VelocityX()
        self.strafe_task = VelocityY()
        self.commit_task = Sequential(VelocityX(1), Timer(1), VelocityX(0))
        self.ram_commit_phase = False
        self.TIMEOUT = timeout
        self.compensate = compensate

    def on_run(self):
        # Surge until the allotted time elapses, then finish
        elapsed = self.this_run_time - self.first_run_time
        if elapsed > self.TIMEOUT:
            self.finish()
            return
        self.ram_task(self.speed)
        if self.compensate:
            # Small sideways velocity to counter drift while surging
            self.strafe_task(.1)

    def on_finish(self):
        # Stop all motion once the surge is over
        Zero()()
class SeqLog(Task):
    """Log a fixed message once, then finish immediately.

    Handy for announcing progress between the stages of a Sequential.
    """

    def __init__(self, message, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.message = message

    def on_run(self):
        # Emit the message and complete on the very first tick.
        self.logv(self.message)
        self.finish()
class BuoyAlign(Task):
    """Locate a buoy, align to its target coordinates, and approach until
    collision — but stop there: unlike BuoyRam, the built Sequential does not
    include the final ram-commit, backup, or heading-restore phases.
    """
    def __init__(self,
                 location_validator,
                 target_coordinates,
                 vision_group,
                 collision_validator,
                 ram_concurrent_task=NoOp(),
                 first_buoy=False,
                 right=True,
                 yellow=False,
                 *args,
                 **kwargs):
        # location_validator: callable -> True once the buoy is seen.
        # target_coordinates: (x, y) getter pair in the camera frame.
        # vision_group: shm group carrying the buoy's vision results.
        # collision_validator: callable -> True once we have hit the buoy.
        super().__init__(*args, **kwargs)
        self.logv("Starting {} task".format(self.__class__.__name__))
        self.location_validator = location_validator
        self.target_coordinates = target_coordinates
        self.collision_validator = collision_validator
        self.ram_concurrent_task = ram_concurrent_task
        self.heading_task = HeadingRestore()
        # The first buoy is searched for by driving forward; later buoys by
        # strafing toward the expected side.
        if first_buoy:
            self.locator_task_a = LocateAlignedBuoy(
                self.location_validator, forward=True)
        else:
            self.locator_task_a = LocateAdjacentBuoy(
                self.location_validator, right=right)
        self.locator_task_b = LocateAlignedBuoy(
            self.location_validator, forward=False)
        self.align_task = AlignTarget(
            self.location_validator, self.locator_task_a,
            self.target_coordinates, vision_group, self.heading_task)
        self.ram_task = RamTarget(
            self.location_validator,
            self.collision_validator,
            # LocateBuoy(self.location_validator,checkBehind=True),
            self.locator_task_b,
            self.ram_concurrent_task)
        # NOTE(review): forward_task/retreat_task are built but never added to
        # self.tasks below — they only matter in BuoyRam.
        if yellow:
            self.forward_task = DirectionalSurge(6, .5)
            self.retreat_task = DirectionalSurge(4, -.5)
        else:
            self.forward_task = DirectionalSurge(constants.FORWARD_TIME, .2)
            self.retreat_task = DirectionalSurge(constants.BACKUP_TIME, -1)
        self.depth_task = DepthRestore()
        self.tasks = Sequential(
            Zero(),
            SeqLog("Restoring Depth"),
            self.depth_task,
            SeqLog("Locating Buoy"),
            self.locator_task_a,
            SeqLog("Aligning"),
            self.align_task,
            SeqLog("Approaching"),
            self.ram_task,
            Zero(),
            Timer(.5), )
        # self.tasks = Sequential(Zero(), self.depth_task, self.locator_task, self.align_task,
        #                        self.ram_task, self.forward_task, self.retreat_task,
        #                        self.heading_task
        #                        )
        self.TIMEOUT = 90
    def on_run(self):
        # Locate the buoy
        # Align with the buoy
        # Ram the buoy
        # Fulfill postcondition
        if self.this_run_time - self.first_run_time > self.TIMEOUT:
            self.finish()
            self.loge("{} timed out!".format(self.__class__.__name__))
            return
        self.tasks()
        if self.tasks.finished:
            self.finish()
    def on_finish(self):
        """Log total runtime; velocities are handled by the inner tasks."""
        self.logv('{} task finished in {} seconds!'.format(
            self.__class__.__name__, self.this_run_time - self.first_run_time))
class BuoyRam(Task):
    """
    Locates and rams a buoy.
    Precondition: The target buoy is located at a position (x-coordinate) in
    front of the position of the submarine.
    Postcondition: The submarine will have rammed the buoy and will be
    positioned at the same depth as determined by the target coordinates. The
    original heading of the submarine prior to the collision will be maintained
    after the collision is complete.

    NOTE(review): this class is nearly identical to BuoyAlign above; the only
    differences are the extra ram/backup/heading-restore stages in self.tasks
    and the yellow retreat duration. Consider sharing a common base.
    """
    def __init__(self,
                 location_validator,
                 target_coordinates,
                 vision_group,
                 collision_validator,
                 ram_concurrent_task=NoOp(),
                 first_buoy=False,
                 right=True,
                 yellow=False,
                 *args,
                 **kwargs):
        """
        location_validator - a function that returns True when the target has
        been found and False otherwise
        target_coordinates - a tuple representing the coordinates of the target
        in the xz-plane
        vision_group - the shm group for the buoy
        collision_validator - a function that returns True when there has been
        a collision with the target and False otherwise.
        ram_concurrent_task - an optional task to run concurrently when ramming
        the target
        """
        super().__init__(*args, **kwargs)
        self.logv("Starting {} task".format(self.__class__.__name__))
        self.location_validator = location_validator
        self.target_coordinates = target_coordinates
        self.collision_validator = collision_validator
        self.ram_concurrent_task = ram_concurrent_task
        self.heading_task = HeadingRestore()
        # First buoy is found by driving forward; later ones by strafing.
        if first_buoy:
            self.locator_task_a = LocateAlignedBuoy(
                self.location_validator, forward=True)
        else:
            self.locator_task_a = LocateAdjacentBuoy(
                self.location_validator, right=right)
        self.locator_task_b = LocateAlignedBuoy(
            self.location_validator, forward=False)
        self.align_task = AlignTarget(
            self.location_validator, self.locator_task_a,
            self.target_coordinates, vision_group, self.heading_task)
        self.ram_task = RamTarget(
            self.location_validator,
            self.collision_validator,
            # LocateBuoy(self.location_validator,checkBehind=True),
            self.locator_task_b,
            self.ram_concurrent_task)
        # Yellow buoy gets a harder hit and shorter backup.
        if yellow:
            self.forward_task = DirectionalSurge(6, .5)
            self.retreat_task = DirectionalSurge(3, -.5)
        else:
            self.forward_task = DirectionalSurge(constants.FORWARD_TIME, .2)
            self.retreat_task = DirectionalSurge(constants.BACKUP_TIME, -1)
        self.depth_task = DepthRestore()
        self.tasks = Sequential(Zero(),
                                SeqLog("Restoring Depth"), self.depth_task,
                                SeqLog("Locating Buoy"), self.locator_task_a,
                                SeqLog("Aligning"), self.align_task,
                                SeqLog("Approaching"), self.ram_task,
                                Zero(),
                                Timer(.5),
                                SeqLog("Ramming"), self.forward_task,
                                SeqLog("Backing up"), self.retreat_task,
                                SeqLog("Restoring heading"), self.heading_task)
        # self.tasks = Sequential(Zero(), self.depth_task, self.locator_task, self.align_task,
        #                        self.ram_task, self.forward_task, self.retreat_task,
        #                        self.heading_task)
        self.TIMEOUT = 90
    def on_run(self):
        # Locate the buoy
        # Align with the buoy
        # Ram the buoy
        # Fulfill postcondition
        if self.this_run_time - self.first_run_time > self.TIMEOUT:
            self.finish()
            self.loge("{} timed out!".format(self.__class__.__name__))
            return
        self.tasks()
        if self.tasks.finished:
            self.finish()
    def on_finish(self):
        """Log total runtime; the inner Sequential restores heading/velocity."""
        self.logv('{} task finished in {} seconds!'.format(
            self.__class__.__name__, self.this_run_time - self.first_run_time))
class Buoy(Task):
    """
    Wrapper around the BuoyRam class that will specifically ram a red or green
    buoy

    Supplies this class's own location/collision validators (based on the
    buoy's vision shm group) to BuoyRam/BuoyAlign, and picks the target
    point on the buoy (center vs bottom) per buoy color.
    """
    def __init__(self,
                 buoy,
                 right=True,
                 first_buoy=False,
                 yellow=False,
                 align_only=False,
                 *args,
                 **kwargs):
        # buoy: vision shm group for this buoy (probability, r_side_x, ...).
        # align_only: use BuoyAlign (approach without the ram-commit phase).
        super().__init__(*args, **kwargs)
        # Instantiate the BuoyRam task
        self.buoy = buoy
        self.heading_task = HeadingRestore()
        # align_only: approach the buoy's center but do not commit the ram.
        if align_only:
            self.align_task = AlignTarget(self.location_validator,
                                          LocateAdjacentBuoy(
                                              self.location_validator,
                                              right=right),
                                          (self.buoy.r_side_x.get,
                                           self.buoy.center_y.get), self.buoy,
                                          self.heading_task)
            self.ram_task = BuoyAlign(
                self.location_validator, (self.buoy.r_side_x.get,
                                          self.buoy.center_y.get),
                self.buoy,
                self.collision_validator,
                self.align_task,
                right=right,
                first_buoy=first_buoy,
                yellow=yellow)
        # yellow: aim at the bottom of the buoy instead of its center.
        elif yellow:
            self.align_task = AlignTarget(self.location_validator,
                                          LocateAdjacentBuoy(
                                              self.location_validator,
                                              right=right),
                                          (self.buoy.r_side_x.get,
                                           self.buoy.bottom_y.get), self.buoy,
                                          self.heading_task)
            self.ram_task = BuoyRam(
                self.location_validator, (self.buoy.r_side_x.get,
                                          self.buoy.bottom_y.get),
                self.buoy,
                self.collision_validator,
                self.align_task,
                right=right,
                first_buoy=first_buoy,
                yellow=yellow)
        # default (red/green): full ram at the buoy's center.
        else:
            self.align_task = AlignTarget(self.location_validator,
                                          LocateAdjacentBuoy(
                                              self.location_validator,
                                              right=right),
                                          (self.buoy.r_side_x.get,
                                           self.buoy.center_y.get), self.buoy,
                                          self.heading_task)
            self.ram_task = BuoyRam(
                self.location_validator, (self.buoy.r_side_x.get,
                                          self.buoy.center_y.get),
                self.buoy,
                self.collision_validator,
                self.align_task,
                right=right,
                first_buoy=first_buoy,
                yellow=yellow)
        self.seen_frames_checker = ConsistencyCheck(3, 3, strict=True)
        self.collision_checker = ConsistencyCheck(2, 2, strict=True)
        self.last_percent_frame = 0
        self.PERCENT_FRAME_THRESHOLD = 2.5
        self.PERCENT_FRAME_DELTA_THRESHOLD = 10
        self.TIMEOUT = 100
    def on_first_run(self):
        self.logv("Starting {} task".format(self.__class__.__name__))
    def on_run(self):
        # Perform BuoyRam task
        if self.this_run_time - self.first_run_time > self.TIMEOUT:
            self.finish()
            self.loge("Buoy ({}) timed out!".format(self.buoy))
            return
        self.ram_task()
        if self.ram_task.finished:
            self.finish()
    def on_finish(self):
        self.logv("Buoy ({}) task finished in {} seconds!".format(
            self.buoy, self.this_run_time - self.first_run_time))
        Zero()()
    def location_validator(self):
        """True once the buoy has been detected for 3 consecutive frames."""
        # TODO even more robust location validator
        return self.seen_frames_checker.check(self.buoy.probability.get() != 0)
    def collision_validator(self):
        """True once the buoy fills enough of the frame to imply contact."""
        # TODO even more robust collision validator, susceptible to false
        # positives
        # if not shm.gpio.wall_1.get():
        #     self.logi("Detected buoy ram using touch sensor!")
        #     return True
        current = self.buoy.percent_frame.get()
        # self.logv("Buoy Percent Frame : {}".format(current))
        if current >= self.PERCENT_FRAME_THRESHOLD:
            # if self.collision_checker.check(abs(self.last_percent_frame - current) <= self.PERCENT_FRAME_DELTA_THRESHOLD):
            # self.logv("Returned true!")
            return True
        # self.last_percent_frame = current
        return False
class AllBuoys(Task):
    """Full buoy mission: ram red, green, then yellow buoy, re-align on red,
    then rise and drive over the buoy line.

    Builds one Conditional/Sequential task tree on the first run and simply
    steps it on every subsequent tick.
    """

    def desiredModules(self):
        # Vision modules that must be running for this mission.
        return [shm.vision_modules.Buoys]

    def on_first_run(self):
        self.has_made_progress = True
        self.seen_frames_checker = ConsistencyCheck(3, 3, strict=False)

        def location_validator(buoy):
            # A buoy counts as "seen" after 3 consecutive detection frames.
            return self.seen_frames_checker.check(buoy.probability.get() != 0)

        self.depth_task = DepthRestore()
        self.heading_task = HeadingRestore()
        self.up_task = DepthRestore(constants.BUOY_OVER_DEPTH)
        # BUGFIX: yellowRight is a function (it is called as yellowRight()
        # below); testing the function object itself is always truthy, so
        # dodge_vel was unconditionally -.4. Call it instead.
        self.dodge_vel = -.4 if yellowRight() else .4
        self.over_task = Timed(VelocityX(.4), 8)
        # (Removed a duplicate `self.heading_task = HeadingRestore()` that
        # immediately overwrote the one above.)
        self.task = Sequential(
            Depth(constants.BUOY_SEARCH_DEPTH),
            Conditional(
                MasterConcurrent(
                    Sequential(
                        self.heading_task,
                        # Depth(0.9, error=.01)
                        # SeqLog("Looking for red buoy"), LocateFirstBuoy(lambda: location_validator(red_buoy_results), forward=True, right=BUOY_RIGHT_TO_REACH[0], middle=yellow_buoy_results),
                        Buoy(
                            red_buoy_results,
                            first_buoy=True,
                            right=redRight()),
                        # self.depth_task,
                        # SeqLog("Looking for green buoy stage 1"), LocateBuoyStrafe(lambda: location_validator(yellow_buoy_results), right=True, timeout=3),
                        SeqLog("Looking for green buoy"),
                        LocateBuoyStrafe(
                            lambda: location_validator(green_buoy_results),
                            right=greenRight(),
                            timeout=3),
                        Buoy(green_buoy_results, right=greenRight()),
                        # self.depth_task,
                        SeqLog("Looking for yellow buoy"),
                        LocateBuoyStrafe(
                            lambda: location_validator(yellow_buoy_results),
                            right=yellowRight(),
                            timeout=2),
                        Buoy(
                            yellow_buoy_results,
                            right=yellowRight(),
                            yellow=True),
                        Log("re-aligning red buoy"),
                        LocateBuoyStrafe(
                            lambda: location_validator(red_buoy_results),
                            right=secondRedRight(),
                            timeout=2),
                        Buoy(
                            red_buoy_results,
                            right=secondRedRight(),
                            yellow=False,
                            align_only=True), ),
                    PreventSurfacing(), ),
                on_success=Sequential(
                    Zero(),
                    self.heading_task,
                    SeqLog("Rising to Over depth"),
                    self.up_task,
                    SeqLog("Going over buoys"),
                    self.over_task, ),
                on_fail=Sequential(
                    Zero(),
                    self.heading_task,
                    SeqLog("Going to Over depth"),
                    self.up_task,
                    SeqLog("Going over buoys"),
                    self.over_task,
                    Timed(VelocityX(.4), 8), )))

    def on_run(self):
        if self.task.finished:
            self.finish()
        else:
            self.task()
class RetryBuoys(Task):
    """Retry pass over the buoy line with all left/right search directions
    inverted, then dodge sideways and surge past the buoys.
    """

    def desiredModules(self):
        # Vision modules that must be running for this mission.
        return [shm.vision_modules.Buoys]

    def on_first_run(self):
        self.has_made_progress = True
        self.seen_frames_checker = ConsistencyCheck(3, 3, strict=False)

        def location_validator(buoy):
            # A buoy counts as "seen" after 3 consecutive detection frames.
            return self.seen_frames_checker.check(buoy.probability.get() != 0)

        self.depth_task = DepthRestore()
        self.heading_task = HeadingRestore()
        self.up_task = DepthRestore(constants.BUOY_OVER_DEPTH)
        # BUGFIX: yellowRight is a function (called as yellowRight() below);
        # the old `if yellowRight` tested the function object, which is always
        # truthy, so the dodge direction was unconditionally -.4.
        self.dodge_vel = -.4 if yellowRight() else .4
        self.over_task = Sequential(
            Timed(VelocityY(self.dodge_vel), 2),
            DirectionalSurge(6, .4, compensate=True))
        # (Removed a duplicate `self.heading_task = HeadingRestore()` that
        # immediately overwrote the one above.)
        self.task = Sequential(
            Depth(constants.BUOY_SEARCH_DEPTH),
            self.heading_task,
            # Depth(0.9, error=.01)
            # SeqLog("Looking for red buoy"), LocateFirstBuoy(lambda: location_validator(red_buoy_results), forward=True, right=BUOY_RIGHT_TO_REACH[0], middle=yellow_buoy_results),
            Buoy(red_buoy_results, first_buoy=True, right=not redRight()),
            # self.depth_task,
            # SeqLog("Looking for green buoy stage 1"), LocateBuoyStrafe(lambda: location_validator(yellow_buoy_results), right=True, timeout=3),
            SeqLog("Looking for green buoy"),
            LocateBuoyStrafe(
                lambda: location_validator(green_buoy_results),
                right=not greenRight(),
                timeout=3),
            Buoy(green_buoy_results, right=not greenRight()),
            # self.depth_task,
            SeqLog("Looking for yellow buoy"),
            LocateBuoyStrafe(
                lambda: location_validator(yellow_buoy_results),
                right=not yellowRight(),
                timeout=2),
            Buoy(yellow_buoy_results, right=not yellowRight(), yellow=True),
            HeadingRestore(),
            SeqLog("Rising to Over depth"),
            self.up_task,
            SeqLog("Going around buoys"),
            self.over_task)

    def on_run(self):
        if self.task.finished:
            self.finish()
        else:
            self.task()
def red():
    """Factory: ram task for the red buoy (treated as the first buoy)."""
    return Buoy(red_buoy_results, first_buoy=True)


def green():
    """Factory: ram task for the green buoy (treated as the first buoy)."""
    return Buoy(green_buoy_results, first_buoy=True)


def yellow():
    """Factory: ram task for the yellow buoy (treated as the first buoy)."""
    return Buoy(yellow_buoy_results, first_buoy=True)
| {
"repo_name": "cuauv/software",
"path": "mission/missions/old/2017/buoys.py",
"copies": "1",
"size": "41842",
"license": "bsd-3-clause",
"hash": -9004799887464110000,
"line_mean": 36.6276978417,
"line_max": 192,
"alpha_frac": 0.544452942,
"autogenerated": false,
"ratio": 4.022495673908864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5066948615908864,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from netCDF4 import Dataset
from numpy import ndarray
from numpy.ma import masked_array, masked_where
from numpy.ma.core import is_masked
from rasterio.dtypes import is_ndarray
from ncdjango.geoprocessing import params
from ncdjango.geoprocessing.data import Raster
from ncdjango.geoprocessing.evaluation import Lexer, Parser
from ncdjango.geoprocessing.exceptions import ExecutionError
from ncdjango.geoprocessing.workflow import Task
class LoadRasterDataset(Task):
    """Loads a raster dataset from a NetCDF file."""
    name = 'raster:load_dataset'
    inputs = [params.StringParameter('path', required=True)]
    outputs = [params.RasterDatasetParameter('dataset_out')]
    def execute(self, path):
        # Open read-only; the caller owns (and must close) the dataset.
        return Dataset(path, 'r')
class ArrayFromDataset(Task):
    """Reads a variable from a raster dataset into an array."""
    name = 'raster:array_from_dataset'
    inputs = [
        params.RasterDatasetParameter('dataset', required=True), params.StringParameter('variable', required=True)
    ]
    outputs = [params.NdArrayParameter('array_out')]
    def execute(self, dataset, variable):
        # Full-slice read materializes the whole variable into memory.
        return dataset[variable][:]
class ExpressionMixin(object):
    """A mixin class to handle expression parsing and error handling."""

    def get_expression_names(self, expression):
        """Return the variable names referenced by ``expression``.

        Raises ExecutionError when the expression cannot be lexed.
        """
        try:
            return list(Lexer().get_names(expression))
        except SyntaxError as e:
            raise ExecutionError('The expression is invalid ({0}): {1}'.format(str(e), expression), self)

    def execute(self, expression, context=None):
        pass  # placeholder removed; see evaluate_expression below

    def evaluate_expression(self, expression, context=None):
        """Evaluate ``expression`` against ``context`` and return the result.

        If any context value is masked, the result inherits that mask (and,
        when the result was not already masked, is wrapped in a Raster carrying
        the value's georeferencing).

        Raises ExecutionError on invalid expressions or unresolved names.
        """
        # BUGFIX: avoid a mutable default argument.
        if context is None:
            context = {}
        try:
            # Operations against masked arrays are really slow, so take a regular array view, then back to a masked
            # array afterwards. Todo: find a better solution long-term
            expr_context = {k: v.view(ndarray) if is_masked(v) else v for k, v in context.items()}
            result = Parser().evaluate(expression, context=expr_context)
            if is_ndarray(result):
                # Reapply the mask of the first masked input, if any.
                for value in context.values():
                    if is_masked(value):
                        # (Simplified: the inner redundant is_masked(value)
                        # re-checks are gone — the outer guard covers them.)
                        if is_masked(result):
                            result.mask = result.mask | value.mask
                        else:
                            result = masked_array(result, mask=value.mask)
                            result = Raster(result, value.extent, value.x_dim, value.y_dim, value.y_increasing)
                        break
            return result
        except (SyntaxError, NameError) as e:
            raise ExecutionError(
                'The expression is invalid ({0}): {1}\nContext: {2}'.format(str(e), expression, str(context)),
                self
            )
class SingleArrayExpressionBase(ExpressionMixin, Task):
    """Base class for tasks with a single array and expression as inputs."""

    inputs = [params.NdArrayParameter('array_in', required=True), params.StringParameter('expression', required=True)]
    outputs = [params.NdArrayParameter('array_out')]
    allow_extra_args = True

    def get_context(self, arr, expr, context):
        """
        Returns a context dictionary for use in evaluating the expression.

        :param arr: The input array.
        :param expr: The input expression.
        :param context: Evaluation context.
        """
        # Names already supplied by the context (plus the loop index 'i') are
        # not candidates for the expression's single free variable.
        reserved = set(context.keys()) | {'i'}
        free_names = [name for name in self.get_expression_names(expr) if name not in reserved]
        if len(free_names) != 1:
            raise ValueError('The expression must have exactly one variable.')
        return {free_names[0]: arr}
class MaskByExpression(SingleArrayExpressionBase):
    """Applies a mask to an array based on an expression."""
    name = 'raster:mask_by_expression'
    def execute(self, array_in, expression, **kwargs):
        """Creates and returns a masked view of the input array."""
        # The expression's single free variable is bound to array_in; any
        # extra keyword arguments are exposed to the evaluator as well.
        context = self.get_context(array_in, expression, kwargs)
        context.update(kwargs)
        # Cells where the expression evaluates truthy become masked.
        return masked_where(self.evaluate_expression(expression, context), array_in)
class ApplyExpression(SingleArrayExpressionBase):
    """Applies an expression to an array and returns a new array of the results."""

    name = 'raster:apply_expression'

    def execute(self, array_in, expression, **kwargs):
        """Returns a new array, resulting from applying the expression to the input array."""
        # Bind the expression's single free variable to the input array and
        # expose any extra keyword arguments to the evaluator too.
        eval_context = self.get_context(array_in, expression, kwargs)
        eval_context.update(kwargs)
        return self.evaluate_expression(expression, eval_context)
class MapByExpression(SingleArrayExpressionBase):
    """Applies a given expression to a list of arrays, returning a list with new arrays."""
    name = 'raster:map_by_expression'
    inputs = [
        params.ListParameter(params.NdArrayParameter(''), 'arrays_in', required=True),
        params.StringParameter('expression', required=True),
        params.BooleanParameter('generator', required=False)
    ]
    outputs = [params.ListParameter(params.NdArrayParameter(''), 'arrays_out')]
    def execute(self, arrays_in, expression, generator=False, **kwargs):
        # Each array is evaluated with its list index available as 'i';
        # when generator=True the caller gets a lazy generator instead of
        # a materialized list.
        result = (
            self.evaluate_expression(expression, dict(self.get_context(a, expression, kwargs), i=i, **kwargs))
            for i, a in enumerate(arrays_in)
        )
        return result if generator else list(result)
class ReduceByExpression(ExpressionMixin, Task):
    """Iteratively reduces a list of arrays using an expression."""

    name = 'raster:reduce_by_expression'
    inputs = [
        params.ListParameter(params.NdArrayParameter(''), 'arrays_in', required=True),
        params.StringParameter('expression', required=True),
        params.NdArrayParameter('initial_array', required=False)
    ]
    outputs = [params.NdArrayParameter('array_out')]
    allow_extra_args = True

    def execute(self, arrays_in, expression, initial_array=None, **kwargs):
        """Left-fold ``arrays_in`` with the two-variable ``expression``."""
        # The expression's two free variables become the accumulator and the
        # current element, in the order they appear in the expression.
        free_names = [name for name in self.get_expression_names(expression) if name not in kwargs]
        if len(free_names) != 2:
            raise ValueError("The expression must have exactly two variables.")
        acc_name, elem_name = free_names

        def combine(acc, elem):
            # One evaluation context per step; extra kwargs stay visible.
            step_context = {acc_name: acc, elem_name: elem}
            step_context.update(kwargs)
            return self.evaluate_expression(expression, step_context)

        if initial_array is None:
            return reduce(combine, arrays_in)
        return reduce(combine, arrays_in, initial_array)
| {
"repo_name": "consbio/ncdjango",
"path": "ncdjango/geoprocessing/tasks/raster.py",
"copies": "1",
"size": "6556",
"license": "bsd-3-clause",
"hash": -8171174457640008000,
"line_mean": 36.0395480226,
"line_max": 118,
"alpha_frac": 0.6526845638,
"autogenerated": false,
"ratio": 4.310322156476003,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5463006720276002,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from plisp import types
from plisp import environment
# Special forms
class LambdaForm(types.Callable):
    """Special form: (lambda (params...) body) -> anonymous Function."""

    def apply(self, args, call_env):
        if len(args) != 2 or type(args[0]) is not types.List:
            raise SyntaxError("lambda must be of form: lambda args expression")
        params, body = args
        if any(type(p) is not types.Symbol for p in params):
            raise SyntaxError("lambda argument list must be comprised of symbols")
        # The new function closes over the environment it was defined in.
        return types.Function(params, body, call_env)
class DefineForm(types.Callable):
    """Special form: (define name expr) — evaluate expr, bind and return it."""

    def apply(self, args, call_env):
        if len(args) != 2 or type(args[0]) is not types.Symbol:
            raise SyntaxError("define must be in form: define name expression")
        name, expr = args
        value = expr.evaluate(call_env)
        call_env.set_symbol(name, value)
        return value
class QuoteForm(types.Callable):
    """Special form: (quote expr) — return expr unevaluated."""

    def apply(self, args, call_env):
        # Quoting nothing yields the empty list.
        return args[0] if len(args) else types.List()
class UnQuoteForm(types.Callable):
    """Special form: (unquote expr) — force evaluation inside a backquote."""

    def apply(self, args, call_env):
        if len(args) == 0:
            return types.List()
        # Unlike quote, the argument is evaluated in the calling environment.
        return args[0].evaluate(call_env)
class BackquoteForm(types.Callable):
    """Special form: quasi-quotation. Returns its argument unevaluated,
    except for nested (unquote ...) sub-expressions, which are evaluated.
    """
    def backquote_evaluate(self, expr, env):
        # Recursively walk the expression tree: lists headed by a symbol
        # bound to the UnQuoteForm are evaluated; everything else is copied.
        if isinstance(expr, types.List):
            if len(expr) > 0:
                if (isinstance(expr.elements[0], types.Symbol) and
                        isinstance(env.get_form(expr.elements[0]), UnQuoteForm)):
                    return expr.evaluate(env)
            ret = []
            for e in expr.elements:
                ret.append(self.backquote_evaluate(e, env))
            return types.List(*ret)
        # Atoms pass through untouched.
        return expr
    def apply(self, args, call_env):
        if len(args) == 0:
            return types.List()
        return self.backquote_evaluate(args[0], call_env)
class FnForm(types.Callable):
    """Special form: (fn name (params...) body) — define a named function."""

    def apply(self, args, call_env):
        if len(args) != 3 or type(args[0]) is not types.Symbol or type(args[1]) is not types.List:
            raise SyntaxError("fn must be of form: fn name args expression")
        name, params, body = args
        if any(type(p) is not types.Symbol for p in params):
            raise SyntaxError("fn argument list must be comprised only of symbols")
        # Build the closure, then bind it under the given name.
        function = types.Function(params, body, call_env)
        call_env.set_symbol(name, function)
        return function
class IfForm(types.Callable):
    """Special form: (if test then else) — lazy two-way branch."""

    def apply(self, args, call_env):
        if len(args) != 3:
            raise SyntaxError("if must be of form: if test then else")
        test_expr, then_expr, else_expr = args
        # Only the selected branch is ever evaluated.
        chosen = then_expr if types.Boolean(test_expr.evaluate(call_env)) else else_expr
        return chosen.evaluate(call_env)
class DoForm(types.Callable):
    """Special form: (do expr...) — evaluate in order, return the last value."""

    def apply(self, args, call_env):
        # With no expressions, the result is the empty list.
        result = types.List()
        for expression in args:
            result = expression.evaluate(call_env)
        return result
class DotForm(types.Callable):
    """Special form: (. container field) — Python attribute access on the
    evaluated container, converted back to a plisp type.
    """
    def apply(self, args, call_env):
        if len(args) != 2:
            raise SyntaxError(". must be of form: . container field")
        container = args[0].evaluate(call_env)
        # The field expression is evaluated and stringified to an attr name.
        return types.to_lisp_type(getattr(container, str(args[1].evaluate(call_env))))
class BangForm(types.Callable):
    """Special form: (! callable args...) — call a Python callable with the
    arguments converted to native Python values; wrap the result back into
    a plisp type.
    """
    def apply(self, args, call_env):
        if len(args) == 0:
            raise SyntaxError("! must be of form: ! callable args")
        fn = args[0].evaluate(call_env)
        return types.to_lisp_type(fn(*[e.evaluate(call_env).pytype() for e in args[1:]]))
class DefMacroForm(types.Callable):
    """Special form: (defmacro name (params...) body) — register a macro."""

    def apply(self, args, call_env):
        if len(args) != 3 or not isinstance(args[0], types.Symbol) or not isinstance(args[1], types.List):
            raise SyntaxError("defmacro must be of form: defmacro name args expression")
        name, params, body = args
        if any(type(p) is not types.Symbol for p in params):
            raise SyntaxError("defmacro argument list must be comprised of symbols")
        # Macros carry no closure environment — only params and body.
        macro = types.Macro(params, body)
        call_env.set_macro(name, macro)
        return macro
# Built-in functions
class BuiltinFunction(types.Function):
    """Base class for builtins implemented in Python.

    NOTE(review): deliberately does not call types.Function.__init__ — it
    stores only the defining environment; confirm Function has no other
    required state.
    """
    def __init__(self, env):
        self.env = env
class ListReduceBuiltin(BuiltinFunction):
    """Base for builtins that left-fold their evaluated arguments with a
    binary `func` class attribute (overridden by subclasses)."""
    func = lambda x, y: None
    def apply(self, args, call_env):
        # Access func via the class, not the instance, so the lambda is not
        # bound as a method (which would inject self as the first argument).
        return reduce(self.__class__.func, [a.evaluate(call_env) for a in args])
# Variadic arithmetic builtins: each left-folds its arguments with the
# given binary operation, e.g. (+ 1 2 3) => ((1 + 2) + 3).
class AddFunction(ListReduceBuiltin):
    func = lambda x, y: x + y
class SubtractFunction(ListReduceBuiltin):
    func = lambda x, y: x - y
class MultiplyFunction(ListReduceBuiltin):
    func = lambda x, y: x * y
class DivisionFunction(ListReduceBuiltin):
    func = lambda x, y: x / y
class EqualityFunction(BuiltinFunction):
    """Builtin (= a b): equality of the two evaluated arguments."""

    def apply(self, args, call_env):
        if len(args) != 2:
            raise Exception("Arity error")
        left = args[0].evaluate(call_env)
        right = args[1].evaluate(call_env)
        return types.Boolean(left == right)
class ListFunction(BuiltinFunction):
    """Builtin (list args...): build a list from the evaluated arguments."""
    def apply(self, args, call_env):
        return types.List(*[e.evaluate(call_env) for e in args])
class ConsFunction(BuiltinFunction):
    """Builtin (cons x xs): prepend x to the list xs, returning a new list."""

    def apply(self, args, call_env):
        if len(args) != 2:
            raise Exception("Arity error")
        head = args[0].evaluate(call_env)
        tail = args[1].evaluate(call_env)
        if not isinstance(tail, types.List):
            raise SyntaxError("the second argument of cons must be a list")
        # Build a fresh list; the original tail is left untouched.
        return types.List(head, *tail.elements)
class FirstFunction(BuiltinFunction):
    """Builtin (first xs): head of a list, or the empty list when empty."""

    def apply(self, args, call_env):
        if len(args) != 1:
            raise Exception("Arity error")
        seq = args[0].evaluate(call_env)
        if not isinstance(seq, types.List):
            raise SyntaxError("first only accepts a list")
        # An empty list has no head; mirror back the empty list.
        return seq.elements[0] if len(seq) else types.List()
class RestFunction(BuiltinFunction):
    """Builtin (rest xs): a new list of everything after the first element."""

    def apply(self, args, call_env):
        if len(args) != 1:
            raise Exception("Arity error")
        seq = args[0].evaluate(call_env)
        if not isinstance(seq, types.List):
            raise SyntaxError("rest only accepts a list")
        # Slicing an empty or single-element list simply yields ().
        return types.List(*seq.elements[1:])
class TypeFunction(BuiltinFunction):
    """Builtin (type expr): the Python class of the evaluated expression."""
    def apply(self, args, call_env):
        if len(args) != 1:
            raise SyntaxError("type must be in form: type expression")
        # Returns the class object itself, not a wrapped plisp value.
        return args[0].evaluate(call_env).__class__
class PrintFunction(BuiltinFunction):
    """Builtin (print args...): print the evaluated args, space-separated."""

    def apply(self, args, call_env):
        pieces = [str(a.evaluate(call_env)) for a in args]
        print(' '.join(pieces))
        # Printing has no meaningful value; return the empty list.
        return types.List()
class ImportFunction(BuiltinFunction):
    """Builtin (import "name"): import a Python module by name; fall back to
    a builtin of that name when the module does not exist.
    """
    def apply(self, args, call_env):
        if len(args) != 1:
            raise SyntaxError("import must be in form: import name")
        name = args[0].evaluate(call_env)
        if type(name) is not types.String:
            raise SyntaxError("import only accepts a string")
        try:
            mod = __import__(name.value)
        except ImportError as e:
            # NOTE(review): __builtins__ is a dict only in the __main__
            # module; inside an imported module it is the builtins module,
            # where `in` / [] indexing would fail — confirm this code path.
            if name.value in __builtins__:
                return __builtins__[name.value]
            raise
        # The raw module object is returned, not a wrapped plisp type.
        return mod
| {
"repo_name": "s0lder/plisp",
"path": "plisp/builtins.py",
"copies": "1",
"size": "7136",
"license": "mit",
"hash": -2324792269042718700,
"line_mean": 31.2895927602,
"line_max": 106,
"alpha_frac": 0.6091647982,
"autogenerated": false,
"ratio": 3.791710945802338,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9889223195644228,
"avg_score": 0.00233050967162191,
"num_lines": 221
} |
from functools import reduce
from pyglet_gui.constants import HALIGN_CENTER, HALIGN_LEFT, HALIGN_RIGHT, \
VALIGN_TOP, VALIGN_CENTER, ANCHOR_CENTER, GetRelativePoint
from pyglet_gui.core import Viewer, Rectangle
class Spacer(Viewer):
    """An empty, always-expandable Viewer used to soak up free layout space."""

    def __init__(self, min_width=0, min_height=0):
        Viewer.__init__(self)
        self._min_width = min_width
        self._min_height = min_height

    def expand(self, width, height):
        # Accept whatever space the parent layout hands us.
        self.width, self.height = width, height

    def is_expandable(self):
        return True

    def compute_size(self):
        # Natural size is just the configured minimum.
        return self._min_width, self._min_height
class Container(Viewer):
    """Base Viewer that owns a flat list of child Viewers and forwards
    manager assignment, load/unload, and lifecycle events to them.
    None entries in `content` are replaced with Spacers."""
    def __init__(self, content, width=0, height=0):
        assert isinstance(content, list)
        super(Container, self).__init__(width, height)
        self._content = [x or Spacer() for x in content]
    @property
    def content(self):
        return self._content
    def set_manager(self, manager):
        # Propagate the manager and parent link to every child.
        Viewer.set_manager(self, manager)
        for item in self._content:
            item.set_manager(self._manager)
            item.parent = self
    def load_content(self):
        for item in self._content:
            item.load()
    def load(self):
        super(Container, self).load()
        self.load_content()
    def unload_content(self):
        for item in self._content:
            item.unload()
    def unload(self):
        super(Container, self).unload()
        self.unload_content()
    def add(self, item, position=0):
        """Insert a child `position` slots from the end (0 = append)."""
        item = item or Spacer()
        assert isinstance(item, Viewer)
        item.set_manager(self._manager)
        item.parent = self
        item.load()
        item.reset_size()
        self._content.insert(len(self._content) - position, item)
        self.reset_size()
    def remove(self, item):
        """Unload, detach and delete a child, then re-layout."""
        assert isinstance(item, Viewer)
        item.unload()
        self._content.remove(item)
        item.delete()
        self.reset_size()
    def delete(self):
        for item in self._content:
            item.delete()
        self._content = []
        Viewer.delete(self)
    def reset_size(self, reset_parent=True):
        # When not propagating upward, push the reset down to children only.
        if not reset_parent:
            for item in self._content:
                item.reset_size(reset_parent=False)
        super(Container, self).reset_size(reset_parent)
class VerticalContainer(Container):
    """Stacks its children top-to-bottom with `padding` pixels between them,
    horizontally aligned per `align`."""
    def __init__(self, content, align=HALIGN_CENTER, padding=5):
        assert align in (HALIGN_CENTER, HALIGN_LEFT, HALIGN_RIGHT)
        super(VerticalContainer, self).__init__(content)
        self.align = align
        self.padding = padding
        self._expandable = []
    def expand(self, width, height):
        """
        Expands to fill available vertical space. We split available space
        equally between all spacers.
        """
        available = int((height - self.height) / len(self._expandable))
        # Hand out the leftover pixels one per item so totals add up exactly.
        remainder = height - self.height - len(self._expandable) * available
        for item in self._expandable:
            if remainder > 0:
                item.expand(item.width, item.height + available + 1)
                remainder -= 1
            else:
                item.expand(item.width, item.height + available)
        self.height = height
        self.width = width
    def is_expandable(self):
        # True if we contain an expandable content.
        return len(self._expandable) > 0
    def layout(self):
        """Position children downward from the top edge."""
        # Expand any expandable content to our width
        for item in self._content:
            if item.is_expandable() and item.width < self.width:
                item.expand(self.width, item.height)
        top = self.y + self.height
        if self.align == HALIGN_RIGHT:
            for item in self._content:
                item.set_position(self.x + self.width - item.width, top - item.height)
                top -= item.height + self.padding
        elif self.align == HALIGN_CENTER:
            for item in self._content:
                item.set_position(self.x + self.width / 2 - item.width / 2, top - item.height)
                top -= item.height + self.padding
        else:  # HALIGN_LEFT
            for item in self._content:
                item.set_position(self.x, top - item.height)
                top -= item.height + self.padding
    def compute_size(self):
        """Height is the padded sum of children; width is the widest child."""
        if len(self._content) < 2:
            height = 0
        else:
            # Start negative so N items contribute only N-1 paddings.
            height = -self.padding
        width = 0
        for item in self._content:
            height += item.height + self.padding
            width = max(width, item.width)
        self._expandable = [x for x in self._content if x.is_expandable()]
        return width, height
class HorizontalContainer(Container):
    """Lays its children out left-to-right with `padding` pixels between
    them, vertically aligned per `align`."""

    def __init__(self, content, align=VALIGN_CENTER, padding=5):
        # NOTE(review): this assert checks the HALIGN_* constants although the
        # default value and layout() use VALIGN_* values — confirm whether the
        # constants numerically overlap before tightening it.
        assert align in (HALIGN_CENTER, HALIGN_LEFT, HALIGN_RIGHT)
        super(HorizontalContainer, self).__init__(content)
        self.align = align
        self.padding = padding
        self._expandable = []

    def is_expandable(self):
        # True if we contain expandable content.
        return len(self._expandable) > 0

    def expand(self, width, height):
        """
        Expands to fill available horizontal space. We split available space
        equally between all spacers.
        """
        available = int((width - self.width) / len(self._expandable))
        # BUGFIX: the leftover pixels must be computed from the remaining
        # *width*; the old code mirrored VerticalContainer and used height,
        # distributing the wrong number of extra pixels.
        remainder = width - self.width - len(self._expandable) * available
        for item in self._expandable:
            if remainder > 0:
                item.expand(item.width + available + 1, item.height)
                remainder -= 1
            else:
                item.expand(item.width + available, item.height)
        self.width = width

    def layout(self):
        """Position children rightward from the left edge."""
        # Expand any expandable content to our height
        for item in self._content:
            if item.is_expandable() and item.height < self.height:
                item.expand(item.width, self.height)
        left = self.x
        if self.align == VALIGN_TOP:
            for item in self._content:
                item.set_position(left, self.y + self.height - item.height)
                left += item.width + self.padding
        elif self.align == VALIGN_CENTER:
            for item in self._content:
                item.set_position(left, self.y + self.height / 2 - item.height / 2)
                left += item.width + self.padding
        else:  # VALIGN_BOTTOM
            for item in self._content:
                item.set_position(left, self.y)
                left += item.width + self.padding

    def compute_size(self):
        """Width is the padded sum of children; height is the tallest child."""
        height = 0
        if len(self._content) < 2:
            width = 0
        else:
            # Start negative so N items contribute only N-1 paddings.
            width = -self.padding
        for item in self._content:
            item.compute_size()
            height = max(height, item.height)
            width += item.width + self.padding
        self._expandable = [x for x in self._content if x.is_expandable()]
        return width, height
class GridContainer(Container):
"""
Arranges Widgets in a table. Each cell's height and width are set to
the maximum width of any Viewer in its column, or the maximum height of
any Viewer in its row.
"""
    def __init__(self, content, anchor=ANCHOR_CENTER, padding=5,
                 offset=(0, 0)):
        # content: non-empty list of rows (each a list of Viewers or None).
        assert isinstance(content, list) and len(content) != 0
        # todo: transform all "None" in "Spacers".
        # we set _content to be a flatten list of content.
        Container.__init__(self, [item for sub_list in content for item in sub_list])
        # and we set _matrix to be the matrix-like list [[]].
        self._matrix = content
        self.anchor = anchor
        self.padding = padding
        self.offset = offset
        # Per-row max heights and per-column max widths, filled lazily.
        self._max_heights = []
        self._max_widths = []
        self._update_max_vectors()
    @property
    def content(self):
        # Unlike Container.content (flat list), this exposes the 2-D matrix.
        return self._matrix
    def _update_max_vectors(self):
        """
        Updates the sizes of vectors _max_widths and _max_heights.
        Must be called when _matrix changes number of elements.
        """
        # re-compute length of vector _max_widths
        self._max_heights = [0] * len(self._matrix)
        # Column count is the length of the longest row.
        width = 0
        for row in self._matrix:
            width = max(width, len(row))
        self._max_widths = [0] * width
def add_row(self, row):
"""
Adds a new row to the layout.
"""
assert isinstance(row, list)
for item in row:
item = item or Spacer()
item.set_manager(self._manager)
item.parent = self
item.load()
self._content.append(item)
self._matrix.append(row)
self._update_max_vectors()
self.reset_size()
def add_column(self, column):
"""
Adds a new column to the layout.
"""
assert isinstance(column, list)
# assign items parents and managers
for item in column:
if item is not None:
item = item or Spacer()
item.set_manager(self._manager)
item.parent = self
item.load()
self._content.append(item)
# add items to the matrix, extending the grid if needed.
for i in range(len(column)):
try:
self._matrix[i].append(column[i])
except IndexError:
self._matrix.append([]*len(column) + [column[i]])
self._update_max_vectors()
# update sizes
self.reset_size()
def get(self, column, row):
"""
Gets the content of a cell within the grid.
If invalid, it raises an IndexError.
"""
return self._matrix[row][column]
def set(self, column, row, item):
"""
Set the content of a cell within the grid,
substituting existing content.
"""
item = item or Spacer()
assert isinstance(item, Viewer)
self._content.remove(self._matrix[row][column])
self._matrix[row][column].delete()
self._matrix[row][column] = item
self._content.append(item)
item.set_manager(self._manager)
item.parent = self
item.load()
self.reset_size()
def layout(self):
row_index = 0
placement = Rectangle()
placement.y = self.y + self.height
for row in self._matrix:
col_index = 0
placement.x = self.x
placement.height = self._max_heights[row_index]
placement.y -= placement.height
for item in row:
placement.width = self._max_widths[col_index]
if item is not None:
if item.is_expandable():
item.expand(placement.width, placement.height)
item.set_position(*GetRelativePoint(placement, self.anchor, item, self.anchor, self.offset))
placement.x += placement.width
col_index += 1
row_index += 1
def compute_size(self):
# calculates the size and the maximum widths and heights of
# each row and column.
row_index = 0
for row in self._matrix:
max_height = self.padding
col_index = 0
for item in row:
if item is not None:
item.compute_size()
width, height = item.width, item.height
else:
width = height = 0
max_height = max(max_height, height + self.padding)
max_width = self._max_widths[col_index]
max_width = max(max_width, width + self.padding)
self._max_widths[col_index] = max_width
col_index += 1
self._max_heights[row_index] = max_height
row_index += 1
if self._max_widths:
width = reduce(lambda x, y: x + y, self._max_widths) - self.padding
else:
width = 0
if self._max_heights:
height = reduce(lambda x, y: x + y, self._max_heights) - self.padding
else:
height = 0
return width, height
def delete(self):
super(GridContainer, self).delete()
self._matrix = [[]]
class Wrapper(Container):
    """Container holding exactly one child Viewer.

    Places its single child inside its own rectangle according to ``anchor``
    and ``content_offset``, and optionally forwards expansion requests to
    the child.
    """
    def __init__(self, content, is_expandable=False, anchor=ANCHOR_CENTER, offset=(0, 0)):
        assert isinstance(content, Viewer)
        Container.__init__(self, [content])
        self.expandable = is_expandable
        self._anchor = anchor
        self.content_offset = offset

    @property
    def anchor(self):
        """Anchor point used to place the child within this wrapper."""
        return self._anchor

    @anchor.setter
    def anchor(self, anchor):
        self._anchor = anchor

    @property
    def content(self):
        """The wrapped Viewer."""
        return self._content[0]

    @content.setter
    def content(self, content):
        # Swap the old child for the new one: destroy the old, wire the new
        # into the same manager/parent, then recompute our size.
        assert isinstance(content, Viewer)
        old_child = self._content[0]
        old_child.delete()
        self._content[0] = content
        content.set_manager(self._manager)
        content.parent = self
        content.load()
        self.reset_size()

    def expand(self, width, height):
        """Grow to the given size, expanding the child too if it can."""
        if self.content.is_expandable():
            self.content.expand(width, height)
        self.width, self.height = width, height

    def is_expandable(self):
        return self.expandable

    def compute_size(self):
        """Minimal size is exactly the child's current size."""
        child = self.content
        return child.width, child.height

    def layout(self):
        """Anchor the child inside this wrapper's rectangle."""
        position = GetRelativePoint(self, self.anchor, self.content, self.anchor, self.content_offset)
        self.content.set_position(*position)
| {
"repo_name": "jorgecarleitao/pyglet-gui",
"path": "pyglet_gui/containers.py",
"copies": "1",
"size": "13627",
"license": "bsd-3-clause",
"hash": 8314039891102288000,
"line_mean": 30.7645687646,
"line_max": 112,
"alpha_frac": 0.5621927057,
"autogenerated": false,
"ratio": 4.118162586884255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5180355292584254,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from pypokerengine.engine.card import Card
import random
class Deck:
    """A deck of playing cards.

    Supports a "cheat" mode in which the deck is built from a fixed, ordered
    list of card ids (useful for reproducible tests); shuffling is a no-op
    while cheating.
    """

    def __init__(self, deck_ids=None, cheat=False, cheat_card_ids=None):
        """
        :param deck_ids: optional list of card ids to build the deck from;
            a falsy value (None or empty list) triggers a fresh setup.
        :param cheat: when True the deck is built from cheat_card_ids and
            shuffle() does nothing.
        :param cheat_card_ids: card ids dealt in listed order in cheat mode.
        """
        self.cheat = cheat
        # Default to None instead of a mutable [] literal so separate Deck
        # instances never share (and accidentally mutate) one default list.
        self.cheat_card_ids = [] if cheat_card_ids is None else cheat_card_ids
        self.deck = [Card.from_id(cid) for cid in deck_ids] if deck_ids else self.__setup()

    def draw_card(self):
        """Remove and return the top (last) card of the deck."""
        return self.deck.pop()

    def draw_cards(self, num):
        """Remove and return `num` cards, in draw order."""
        return [self.draw_card() for _ in range(num)]

    def size(self):
        """Number of cards remaining in the deck."""
        return len(self.deck)

    def restore(self):
        """Rebuild the deck to its initial composition."""
        self.deck = self.__setup()

    def shuffle(self):
        # cheat decks keep their scripted order
        if not self.cheat:
            random.shuffle(self.deck)

    # serialize format : [cheat_flg, chat_card_ids, deck_card_ids]
    def serialize(self):
        return [self.cheat, self.cheat_card_ids, [card.to_id() for card in self.deck]]

    @classmethod
    def deserialize(cls, serial):
        """Inverse of serialize(): rebuild a Deck from its serialized form."""
        cheat, cheat_card_ids, deck_ids = serial
        return cls(deck_ids=deck_ids, cheat=cheat, cheat_card_ids=cheat_card_ids)

    def __setup(self):
        return self.__setup_cheat_deck() if self.cheat else self.__setup_52_cards()

    def __setup_52_cards(self):
        return [Card.from_id(cid) for cid in range(1, 53)]

    def __setup_cheat_deck(self):
        # Reverse so that draw_card() (which pops from the end) deals the
        # cheat cards in the order they were listed.
        cards = [Card.from_id(cid) for cid in self.cheat_card_ids]
        return cards[::-1]
| {
"repo_name": "sberbank-ai/holdem-challenge",
"path": "PyPokerEngine/pypokerengine/engine/deck.py",
"copies": "2",
"size": "1305",
"license": "mit",
"hash": -9160316534048159000,
"line_mean": 26.7659574468,
"line_max": 87,
"alpha_frac": 0.6636015326,
"autogenerated": false,
"ratio": 2.986270022883295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4649871555483295,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from pypokerengine.engine.table import Table
from pypokerengine.engine.player import Player
from pypokerengine.engine.pay_info import PayInfo
from pypokerengine.engine.poker_constants import PokerConstants as Const
from pypokerengine.engine.action_checker import ActionChecker
from pypokerengine.engine.game_evaluator import GameEvaluator
from pypokerengine.engine.message_builder import MessageBuilder
class RoundManager:
    """Drives a single round (hand) of poker as a functional state machine.

    Every public entry point takes a ``state`` dict and returns a *new*
    (deep-copied) state plus a list of ``(recipient_uuid, message)`` tuples;
    a recipient of ``-1`` means "broadcast to all".
    """
    @classmethod
    def start_new_round(self, round_count, small_blind_amount, ante_amount, table):
        """Begin a round: shuffle, collect ante and blinds, deal, start preflop.

        Returns (state, messages).
        """
        _state = self.__gen_initial_state(round_count, small_blind_amount, table)
        state = self.__deep_copy_state(_state)
        table = state["table"]
        table.deck.shuffle()
        self.__correct_ante(ante_amount, table.seats.players)
        self.__correct_blind(small_blind_amount, table)
        self.__deal_holecard(table.deck, table.seats.players)
        start_msg = self.__round_start_message(round_count, table)
        state, street_msgs = self.__start_street(state)
        return state, start_msg + street_msgs
    @classmethod
    def apply_action(self, original_state, action, bet_amount, bot_info=None):
        """Apply one player's action and advance the round.

        Returns (state, messages): either the next street's messages when
        betting on the current street is settled, or an "ask" message for
        the next player still to act.
        """
        state = self.__deep_copy_state(original_state)
        state = self.__update_state_by_action(state, action, bet_amount, bot_info=bot_info)
        update_msg = self.__update_message(state, action, bet_amount)
        if self.__is_everyone_agreed(state):
            # list comprehension used purely for its side effect: archive
            # each player's actions for the street that just finished
            [player.save_street_action_histories(state["street"]) for player in state["table"].seats.players]
            state["street"] += 1
            state, street_msgs = self.__start_street(state)
            return state, [update_msg] + street_msgs
        else:
            state["next_player"] = state["table"].next_ask_waiting_player_pos(state["next_player"])
            next_player_pos = state["next_player"]
            next_player = state["table"].seats.players[next_player_pos]
            ask_message = (next_player.uuid, MessageBuilder.build_ask_message(next_player_pos, state))
            return state, [update_msg, ask_message]
    @classmethod
    def __correct_ante(self, ante_amount, players):
        """Collect the ante from every still-active player."""
        if ante_amount == 0: return
        active_players = [player for player in players if player.is_active()]
        for player in active_players:
            player.collect_bet(ante_amount)
            player.pay_info.update_by_pay(ante_amount)
            player.add_action_history(Const.Action.ANTE, ante_amount)
    @classmethod
    def __correct_blind(self, sb_amount, table):
        """Collect the small and big blinds from their seats."""
        self.__blind_transaction(table.seats.players[table.sb_pos()], True, sb_amount)
        self.__blind_transaction(table.seats.players[table.bb_pos()], False, sb_amount)
    @classmethod
    def __blind_transaction(self, player, small_blind, sb_amount):
        # the big blind is always twice the small blind
        action = Const.Action.SMALL_BLIND if small_blind else Const.Action.BIG_BLIND
        blind_amount = sb_amount if small_blind else sb_amount*2
        player.collect_bet(blind_amount)
        player.add_action_history(action, sb_amount=sb_amount)
        player.pay_info.update_by_pay(blind_amount)
    @classmethod
    def __deal_holecard(self, deck, players):
        """Deal two hole cards to every seated player."""
        for player in players:
            player.add_holecard(deck.draw_cards(2))
    @classmethod
    def __start_street(self, state):
        """Dispatch to the handler for the street recorded in state."""
        # first to act is the first ask-waiting player from the SB onwards
        next_player_pos = state["table"].next_ask_waiting_player_pos(state["table"].sb_pos()-1)
        state["next_player"] = next_player_pos
        street = state["street"]
        if street == Const.Street.PREFLOP:
            return self.__preflop(state)
        elif street == Const.Street.FLOP:
            return self.__flop(state)
        elif street == Const.Street.TURN:
            return self.__turn(state)
        elif street == Const.Street.RIVER:
            return self.__river(state)
        elif street == Const.Street.SHOWDOWN:
            return self.__showdown(state)
        else:
            raise ValueError("Street is already finished [street = %d]" % street)
    @classmethod
    def __preflop(self, state):
        # preflop action starts after the blinds: skip two positions
        for i in range(2):
            state["next_player"] = state["table"].next_ask_waiting_player_pos(state["next_player"])
        return self.__forward_street(state)
    @classmethod
    def __flop(self, state):
        """Deal the three flop cards onto the board."""
        for card in state["table"].deck.draw_cards(3):
            state["table"].add_community_card(card)
        return self.__forward_street(state)
    @classmethod
    def __turn(self, state):
        """Deal the turn card onto the board."""
        state["table"].add_community_card(state["table"].deck.draw_card())
        return self.__forward_street(state)
    @classmethod
    def __river(self, state):
        """Deal the river card onto the board."""
        state["table"].add_community_card(state["table"].deck.draw_card())
        return self.__forward_street(state)
    @classmethod
    def __showdown(self, state):
        """Judge the winners, pay out the pot and close the round."""
        winners, hand_info, prize_map = GameEvaluator.judge(state["table"])
        self.__prize_to_winners(state["table"].seats.players, prize_map)
        result_message = MessageBuilder.build_round_result_message(state["round_count"], winners, hand_info, state)
        state["table"].reset()
        state["street"] += 1
        return state, [(-1, result_message)]
    @classmethod
    def __prize_to_winners(self, players, prize_map):
        # prize_map maps seat index -> chips won
        for idx, prize in prize_map.items():
            players[idx].append_chip(prize)
    @classmethod
    def __round_start_message(self, round_count, table):
        """Build one private round-start message per seated player."""
        players = table.seats.players
        gen_msg = lambda idx: (players[idx].uuid, MessageBuilder.build_round_start_message(round_count, idx, table.seats))
        return reduce(lambda acc, idx: acc + [gen_msg(idx)], range(len(players)), [])
    @classmethod
    def __forward_street(self, state):
        """Announce the new street, then either ask the next player or skip on."""
        table = state["table"]
        street_start_msg = [(-1, MessageBuilder.build_street_start_message(state))]
        # with a single remaining active player there is nobody to inform
        if table.seats.count_active_players() == 1: street_start_msg = []
        if table.seats.count_ask_wait_players() <= 1:
            # nobody (or only one player) can still act: fast-forward street
            state["street"] += 1
            state, messages = self.__start_street(state)
            return state, street_start_msg + messages
        else:
            next_player_pos = state["next_player"]
            next_player = table.seats.players[next_player_pos]
            ask_message = [(next_player.uuid, MessageBuilder.build_ask_message(next_player_pos, state))]
            return state, street_start_msg + ask_message
    @classmethod
    def __update_state_by_action(self, state, action, bet_amount, bot_info=None):
        """Sanitise the requested action, then apply it to the acting player."""
        table = state["table"]
        action, bet_amount = ActionChecker.correct_action(\
            table.seats.players, state["next_player"], state["small_blind_amount"], action, bet_amount)
        next_player = table.seats.players[state["next_player"]]
        if ActionChecker.is_allin(next_player, action, bet_amount):
            next_player.pay_info.update_to_allin()
        return self.__accept_action(state, action, bet_amount, bot_info=bot_info)
    @classmethod
    def __accept_action(self, state, action, bet_amount, bot_info=None):
        """Record the (already validated) action and move the chips."""
        player = state["table"].seats.players[state["next_player"]]
        if action == 'call':
            self.__chip_transaction(player, bet_amount)
            player.add_action_history(Const.Action.CALL, bet_amount, bot_info=bot_info)
        elif action == 'raise':
            self.__chip_transaction(player, bet_amount)
            # amount added on top of the current agreed bet level
            add_amount = bet_amount - ActionChecker.agree_amount(state["table"].seats.players)
            player.add_action_history(Const.Action.RAISE, bet_amount, add_amount, bot_info=bot_info)
        elif action == 'fold':
            player.add_action_history(Const.Action.FOLD, bot_info=bot_info)
            player.pay_info.update_to_fold()
        else:
            raise ValueError("Unexpected action %s received" % action)
        return state
    @classmethod
    def __chip_transaction(self, player, bet_amount):
        # only charge the difference between chips already in and the bet
        need_amount = ActionChecker.need_amount_for_action(player, bet_amount)
        player.collect_bet(need_amount)
        player.pay_info.update_by_pay(need_amount)
    @classmethod
    def __update_message(self, state, action, bet_amount):
        # broadcast (-1) the accepted action to all observers
        return (-1, MessageBuilder.build_game_update_message(
            state["next_player"], action, bet_amount, state))
    @classmethod
    def __is_everyone_agreed(self, state):
        """Return True when betting on the current street is settled."""
        self.__agree_logic_bug_catch(state)
        players = state["table"].seats.players
        next_player_pos = state["table"].next_ask_waiting_player_pos(state["next_player"])
        next_player = players[next_player_pos] if next_player_pos != "not_found" else None
        max_pay = max([p.paid_sum() for p in players])
        everyone_agreed = len(players) == len([p for p in players if self.__is_agreed(max_pay, p)])
        lonely_player = state["table"].seats.count_active_players() == 1
        no_need_to_ask = state["table"].seats.count_ask_wait_players() == 1 and\
            next_player and next_player.is_waiting_ask() and next_player.paid_sum() == max_pay
        return everyone_agreed or lonely_player or no_need_to_ask
    @classmethod
    def __agree_logic_bug_catch(self, state):
        # internal sanity check; should be unreachable in normal play
        if state["table"].seats.count_active_players() == 0:
            # NOTE(review): raising a plain str is a TypeError on Python 3 —
            # this should probably raise an Exception instance; flagged only.
            raise "[__is_everyone_agreed] no-active-players!!"
    @classmethod
    def __is_agreed(self, max_pay, player):
        # BigBlind should be asked action at least once
        is_preflop = player.round_action_histories[0] == None
        bb_ask_once = len(player.action_histories)==1 \
            and player.action_histories[0]["action"] == Player.ACTION_BIG_BLIND
        bb_ask_check = not is_preflop or not bb_ask_once
        # agreed = matched the max bet with at least one recorded action,
        # or is already folded / all-in
        return (bb_ask_check and player.paid_sum() == max_pay and len(player.action_histories) != 0)\
            or player.pay_info.status in [PayInfo.FOLDED, PayInfo.ALLIN]
    @classmethod
    def __gen_initial_state(self, round_count, small_blind_amount, table):
        """Initial state: preflop, next to act is first ask-waiting after BB."""
        return {
            "round_count": round_count,
            "small_blind_amount": small_blind_amount,
            "street": Const.Street.PREFLOP,
            "next_player": table.next_ask_waiting_player_pos(table.bb_pos()),
            "table": table
        }
    @classmethod
    def __deep_copy_state(self, state):
        # deep copy achieved via a serialize/deserialize round-trip of table
        table_deepcopy = Table.deserialize(state["table"].serialize())
        return {
            "round_count": state["round_count"],
            "small_blind_amount": state["small_blind_amount"],
            "street": state["street"],
            "next_player": state["next_player"],
            "table": table_deepcopy
        }
| {
"repo_name": "sberbank-ai/holdem-challenge",
"path": "PyPokerEngine/pypokerengine/engine/round_manager.py",
"copies": "1",
"size": "9824",
"license": "mit",
"hash": -9215129100342321000,
"line_mean": 41.5281385281,
"line_max": 118,
"alpha_frac": 0.6836319218,
"autogenerated": false,
"ratio": 3.3540457494025264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9464825001667672,
"avg_score": 0.01457053390697107,
"num_lines": 231
} |
from functools import reduce
from pypokerengine.engine.table import Table
from pypokerengine.engine.seats import Seats
from pypokerengine.engine.card import Card
from pypokerengine.engine.deck import Deck
from pypokerengine.engine.player import Player
from pypokerengine.engine.pay_info import PayInfo
from pypokerengine.engine.data_encoder import DataEncoder
from pypokerengine.engine.poker_constants import PokerConstants as Const
def restore_game_state(round_state):
    """Rebuild an engine-internal game_state dict from a public round_state.

    round_state is the serialized representation handed to players; the
    returned dict mirrors the structure RoundManager uses internally.
    """
    return {
        "round_count": round_state["round_count"],
        "small_blind_amount": round_state["small_blind_amount"],
        "street": _street_flg_translator[round_state["street"]],
        "next_player": round_state["next_player"],
        "table": _restore_table(round_state)
    }
def attach_hole_card_from_deck(game_state, uuid):
    """Deal two cards from the (copied) deck and attach them to one player."""
    state_copy = deepcopy_game_state(game_state)
    drawn = state_copy["table"].deck.draw_cards(2)
    return attach_hole_card(state_copy, uuid, drawn)
def replace_community_card_from_deck(game_state):
    """Redraw the board for the current street from the (copied) deck."""
    state_copy = deepcopy_game_state(game_state)
    num_cards = _street_community_card_num[state_copy["street"]]
    new_board = state_copy["table"].deck.draw_cards(num_cards)
    return replace_community_card(state_copy, new_board)
# Number of community cards that should be on the board at each street.
_street_community_card_num = {
    Const.Street.PREFLOP: 0,
    Const.Street.FLOP: 3,
    Const.Street.TURN: 4,
    Const.Street.RIVER: 5
}
def attach_hole_card(game_state, uuid, hole_card):
    """Return a deep copy of game_state with hole_card given to the player
    identified by uuid; raises if the uuid is missing or ambiguous."""
    state_copy = deepcopy_game_state(game_state)
    matches = [p for p in state_copy["table"].seats.players if p.uuid == uuid]
    if len(matches) == 0:
        raise Exception('The player whose uuid is "%s" is not found in passed game_state.' % uuid)
    if len(matches) != 1:
        raise Exception('Multiple players have uuid "%s". So we cannot attach hole card.' % uuid)
    matches[0].hole_card = hole_card
    return state_copy
def replace_community_card(game_state, community_card):
    """Return a deep copy of game_state whose board is community_card."""
    state_copy = deepcopy_game_state(game_state)
    state_copy["table"]._community_card = community_card
    return state_copy
def deepcopy_game_state(game_state):
    """Deep-copy a game_state dict; the table is copied via its own
    serialize/deserialize round-trip, scalar fields are copied directly."""
    copied = {key: game_state[key]
              for key in ("round_count", "small_blind_amount", "street", "next_player")}
    copied["table"] = Table.deserialize(game_state["table"].serialize())
    return copied
# Maps the public street name strings to the engine's street constants.
_street_flg_translator = {
    "preflop": Const.Street.PREFLOP,
    "flop": Const.Street.FLOP,
    "turn": Const.Street.TURN,
    "river": Const.Street.RIVER,
    "showdown": Const.Street.SHOWDOWN
}
def _restore_table(round_state):
    """Rebuild a Table (positions, board, deck and seats) from round_state."""
    table = Table()
    table.dealer_btn = round_state["dealer_btn"]
    table.set_blind_pos(round_state["small_blind_pos"], round_state["big_blind_pos"])
    _restore_community_card_on_table(table, round_state["community_card"])
    # the restored deck excludes the cards already dealt to the board
    table.deck = _restore_deck(round_state["community_card"])
    table.seats = _restore_seats(round_state["seats"], round_state["action_histories"])
    return table
def _restore_community_card_on_table(table, card_data):
    """Put each serialized card string (e.g. "SA") back onto the board."""
    for card_str in card_data:
        table.add_community_card(Card.from_str(card_str))
def _restore_deck(str_exclude_cards):
    """Build a Deck missing the cards already visible on the board."""
    excluded = {Card.to_id(Card.from_str(s)) for s in str_exclude_cards}
    deck = Deck()
    deck.deck = [Card.from_id(cid) for cid in range(1, 53) if cid not in excluded]
    return deck
def _restore_seats(seats_info, action_histories):
    """Rebuild Seats with players, their action histories and pay info."""
    players = [Player(info["uuid"], info["stack"], info["name"]) for info in seats_info]
    players_state = [info["state"] for info in seats_info]
    _restore_action_histories_on_players(players, action_histories)
    _restore_pay_info_on_players(players, players_state, action_histories)
    seats = Seats()
    seats.players = players
    return seats
def _restore_action_histories_on_players(players, round_action_histories):
    """Distribute serialized action histories back onto each player.

    Past streets go into player.round_action_histories (indexed by street
    constant); the latest street's actions go into player.action_histories.
    """
    # streets ordered by their engine constants (preflop < flop < ...)
    ordered_street_names = sorted(round_action_histories.keys(), key=lambda x:_street_flg_translator[x])
    current_street_name = ordered_street_names[-1]
    past_street_names = ordered_street_names[:-1]
    # restore round_action_histories
    for street_name in past_street_names:
        street_flg = _street_flg_translator[street_name]
        action_histories = round_action_histories[street_name]
        for player in players: player.round_action_histories[street_flg] = []
        for action_history in action_histories:
            player = _find_user_by_uuid(players, action_history["uuid"])
            player.round_action_histories[street_flg].append(action_history)
    # restore the current street's action_histories
    for action_history in round_action_histories[current_street_name]:
        player = _find_user_by_uuid(players, action_history["uuid"])
        player.action_histories.append(action_history)
def _restore_pay_info_on_players(players, players_state, round_action_histories):
    """Restore each player's pay_info: the status flag, then the paid amount."""
    _restore_pay_info_status_on_players(players, players_state)
    _restore_pay_info_amount_on_players(players, round_action_histories)
def _restore_pay_info_amount_on_players(players, round_action_histories):
    """Accumulate each player's total paid amount over all streets, in
    street order (preflop first)."""
    street_names = sorted(round_action_histories.keys(),
                          key=lambda name: _street_flg_translator[name])
    for street_name in street_names:
        for history in round_action_histories[street_name]:
            player = _find_user_by_uuid(players, history["uuid"])
            player.pay_info.amount += _fetch_pay_amount(history)
def _find_user_by_uuid(players, uuid):
    """Return the first player whose uuid matches (IndexError if absent)."""
    matches = [player for player in players if player.uuid == uuid]
    return matches[0]
def _fetch_pay_amount(action_history):
    """Return the chips a single history entry cost its player.

    Folds cost nothing; calls/raises record it under "paid"; blinds and
    antes record it under "amount". Unknown actions are an error.
    """
    action = action_history["action"]
    if action == Player.ACTION_FOLD_STR:
        return 0
    if action in (Player.ACTION_CALL_STR, Player.ACTION_RAISE_STR):
        return action_history["paid"]
    if action in (Player.ACTION_SMALL_BLIND, Player.ACTION_BIG_BLIND, Player.ACTION_ANTE):
        return action_history["amount"]
    raise Exception("Unexpected type of action_history is passed => %s" % action_history)
def _restore_pay_info_status_on_players(players, players_state):
    """Translate each serialized status string back onto its player."""
    for player, status_str in zip(players, players_state):
        player.pay_info.status = _pay_info_state_translator[status_str]
# Maps DataEncoder's serialized pay-status strings back to PayInfo constants.
_pay_info_state_translator = {
    DataEncoder.PAY_INFO_PAY_TILL_END_STR: PayInfo.PAY_TILL_END,
    DataEncoder.PAY_INFO_ALLIN_STR: PayInfo.ALLIN,
    DataEncoder.PAY_INFO_FOLDED_STR: PayInfo.FOLDED
}
| {
"repo_name": "ishikota/PyPokerEngine",
"path": "pypokerengine/utils/game_state_utils.py",
"copies": "2",
"size": "6742",
"license": "mit",
"hash": -6288793233862788000,
"line_mean": 43.0653594771,
"line_max": 113,
"alpha_frac": 0.6929694453,
"autogenerated": false,
"ratio": 3.369315342328836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5062284787628836,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from rest_framework.decorators import api_view
from django.conf import settings
from django.db.models import Q
from .helpers import (PrettyJsonResponse, _create_page_token,
_get_paginated_bookings, _parse_datetime,
_return_json_bookings, _serialize_equipment,
_serialize_rooms, _filter_for_free_rooms, _round_date)
from .models import BookingA, BookingB, Equipment, RoomA, RoomB
from timetable.models import Lock
from common.decorators import uclapi_protected_endpoint
@api_view(['GET'])
@uclapi_protected_endpoint(
    last_modified_redis_key="gencache"  # Served from our cached Oracle view
)
def get_rooms(request, *args, **kwargs):
    """Return serialised rooms, optionally filtered by query parameters.

    Responds 400 when `capacity` is not an integer; otherwise returns every
    centrally bookable room (plus ICH sites 238/240) matching the filters,
    or all such rooms when no filter is supplied.
    """
    # Collect every supported filter keyed by its Django ORM lookup so the
    # queryset can be filtered without an if-else per parameter.
    request_params = {
        'roomid': request.GET.get('roomid'),
        'siteid': request.GET.get('siteid'),
        'roomname__icontains': request.GET.get('roomname'),
        'sitename__icontains': request.GET.get('sitename'),
        'category': request.GET.get('category'),
        'roomclass': request.GET.get('classification'),
        'capacity__gte': request.GET.get('capacity'),
        'automated': request.GET.get('automated'),
    }
    # validate capacity early so the ORM never sees a non-numeric value
    try:
        if request_params['capacity__gte']:
            int(request_params['capacity__gte'])
    except ValueError:
        response = PrettyJsonResponse({
            "ok": False,
            "error": "capacity should be an int"
        }, custom_header_data=kwargs)
        response.status_code = 400
        return response
    # Drop unused filters; when no filter was supplied at all, fall back to
    # an empty dict so every room is returned.
    if any(request_params.values()):
        request_params = {k: v for k, v in request_params.items() if v}
    else:
        request_params = {}
    # Get available rooms:
    # - Filtered by this academic year only
    # - Anything centrally bookable
    # - All ICH rooms (Site IDs 238 and 240)
    lock = Lock.objects.all()[0]
    curr = RoomA if lock.a else RoomB
    filtered_rooms = curr.objects.filter(
        Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),
        **request_params
    )
    return PrettyJsonResponse(
        {
            "ok": True,
            "rooms": _serialize_rooms(filtered_rooms)
        }, custom_header_data=kwargs)
@api_view(['GET'])
@uclapi_protected_endpoint(
    last_modified_redis_key='gencache'  # Served from our cached Oracle view
)
def get_bookings(request, *args, **kwargs):
    """Return a paginated list of room bookings.

    Either continues an existing pagination session via `page_token`, or
    builds a new filtered query from the query parameters and returns the
    first page plus the total match count.
    """
    # if page_token exists, dont look for query
    page_token = request.GET.get('page_token')
    if page_token:
        bookings = _get_paginated_bookings(page_token)
        return _return_json_bookings(bookings, custom_header_data=kwargs)
    # query params
    request_params = {}
    # non functional filters
    request_params['roomid'] = request.GET.get('roomid')
    # TODO: building?
    request_params['siteid'] = request.GET.get('siteid')
    request_params['roomname'] = request.GET.get('roomname')
    request_params['descrip'] = request.GET.get('description')
    request_params['condisplayname__contains'] = request.GET.get('contact')
    request_params['startdatetime'] = request.GET.get('date')
    # 1000 is the default (and maximum) number of bookings per page
    results_per_page = request.GET.get('results_per_page') or 1000
    try:
        results_per_page = int(results_per_page)
        # non-positive values fall back to the default page size
        results_per_page = results_per_page if results_per_page > 0 else 1000
    except ValueError:
        response = PrettyJsonResponse({
            "ok": False,
            "error": "results_per_page should be an integer"
        }, custom_header_data=kwargs)
        response.status_code = 400
        return response
    # cap the page size at 1000
    results_per_page = results_per_page if results_per_page < 1000 else 1000
    # functional filters
    request_params['startdatetime__gte'] = request.GET.get('start_datetime')
    request_params['finishdatetime__lte'] = request.GET.get('end_datetime')
    is_parsed = True
    if any([
        request_params['startdatetime__gte'],
        request_params['finishdatetime__lte'],
        request_params['startdatetime']
    ]):
        start, end, is_parsed = _parse_datetime(  # returned
            request_params['startdatetime__gte'],
            request_params['finishdatetime__lte'],
            request_params['startdatetime']
        )
        request_params["startdatetime__gte"] = start
        request_params["finishdatetime__lte"] = end
        # ignore the date since its already parsed
        request_params.pop("startdatetime")
    if not is_parsed:
        # NOTE(review): this returns HTTP 200 with ok=False, unlike the 400
        # used for the results_per_page error above — confirm intended.
        return PrettyJsonResponse({
            "ok": False,
            "error": "date/time isn't formatted as suggested in the docs"
        }, custom_header_data=kwargs)
    # filter the query dict
    request_params = dict((k, v) for k, v in request_params.items() if v)
    # create a database entry for token
    page_token = _create_page_token(request_params, results_per_page)
    # first page
    bookings = _get_paginated_bookings(page_token)
    lock = Lock.objects.all()[0]
    curr = BookingA if lock.a else BookingB
    # total number of matches across all pages
    bookings["count"] = curr.objects.filter(
        Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240'),
        **request_params
    ).count()
    return _return_json_bookings(bookings, custom_header_data=kwargs)
@api_view(['GET'])
@uclapi_protected_endpoint(
    last_modified_redis_key=None  # Served from Oracle directly
)
def get_equipment(request, *args, **kwargs):
    """List the equipment available in a single room.

    Requires both `roomid` and `siteid` query parameters; responds 400
    (roomid checked first) when either is missing.
    """
    roomid = request.GET.get("roomid")
    siteid = request.GET.get("siteid")
    for param_name, value in (("roomid", roomid), ("siteid", siteid)):
        if not value:
            response = PrettyJsonResponse({
                "ok": False,
                "error": f"No {param_name} supplied"
            }, custom_header_data=kwargs)
            response.status_code = 400
            return response
    equipment = Equipment.objects.using("roombookings").filter(
        setid=settings.ROOMBOOKINGS_SETID,
        roomid=roomid,
        siteid=siteid
    )
    return PrettyJsonResponse({
        "ok": True,
        "equipment": _serialize_equipment(equipment)
    }, custom_header_data=kwargs)
@api_view(['GET'])
@uclapi_protected_endpoint(
    last_modified_redis_key='gencache'  # Real time calculation, cached data
)
def get_free_rooms(request, *args, **kwargs):
    """Return every centrally-bookable room free between two datetimes.

    Requires `start_datetime` and `end_datetime` query parameters; the
    window is widened to whole days before bookings are fetched, then
    rooms with no overlapping booking in the original window are returned.
    """
    request_params = {}
    request_params['startdatetime__gte'] = request.GET.get('start_datetime')
    request_params['finishdatetime__lte'] = request.GET.get('end_datetime')
    if (
        not request_params['startdatetime__gte'] or
        not request_params['finishdatetime__lte']
    ):
        return PrettyJsonResponse({
            "ok": False,
            "error": "start_datetime or end_datetime not provided"
        }, custom_header_data=kwargs)
    # (the previous dead `is_parsed = True` assignment was removed; the
    # value is always set by _parse_datetime below)
    start, end, is_parsed = _parse_datetime(
        request_params['startdatetime__gte'],
        request_params['finishdatetime__lte'],
        None
    )
    if not is_parsed:
        # NOTE(review): unlike other error paths this returns HTTP 200 with
        # ok=False — confirm whether a 400 status was intended.
        return PrettyJsonResponse({
            "ok": False,
            "error": "date/time isn't formatted as suggested in the docs"
        }, custom_header_data=kwargs)
    # Rounding down start date to start of day
    request_params["startdatetime__gte"] = _round_date(start)
    # Rounding up end date to start of next day
    request_params["finishdatetime__lte"] = _round_date(end, up=True)
    # Pagination Logic
    # maxing out results_per_page to get all the bookings in one page
    results_per_page = 100000
    request_params = {k: v for k, v in request_params.items() if v}
    page_token = _create_page_token(request_params, results_per_page)
    # All bookings in the given time period
    bookings = _get_paginated_bookings(page_token)["bookings"]
    lock = Lock.objects.all()[0]
    curr = RoomA if lock.a else RoomB
    # Get available rooms:
    # - Filtered by this academic year only
    # - Anything centrally bookable
    # - All ICH rooms (Site IDs 238 and 240)
    all_rooms = curr.objects.filter(
        Q(bookabletype='CB') | Q(siteid='238') | Q(siteid='240')
    )
    all_rooms = _serialize_rooms(all_rooms)
    # keep only rooms with no booking overlapping the requested window
    free_rooms = _filter_for_free_rooms(all_rooms, bookings, start, end)
    return PrettyJsonResponse({
        "ok": True,
        "count": len(free_rooms),
        "free_rooms": free_rooms
    }, custom_header_data=kwargs)
| {
"repo_name": "uclapi/uclapi",
"path": "backend/uclapi/roombookings/views.py",
"copies": "1",
"size": "8695",
"license": "mit",
"hash": -1820212846184739800,
"line_mean": 33.2322834646,
"line_max": 78,
"alpha_frac": 0.6354226567,
"autogenerated": false,
"ratio": 3.7237687366167025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48591913933167025,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from scitbx import matrix
from scitbx.array_family import flex
from dials.algorithms.refinement.parameterisation.model_parameters import (
ModelParameterisation,
Parameter,
)
from dials.algorithms.refinement.refinement_helpers import (
PanelGroupCompose,
dR_from_axis_and_angle,
get_panel_groups_at_depth,
get_panel_ids_at_root,
)
class DetectorMixin:
"""Mix-in class defining some functionality unique to detector
parameterisations that can be shared by static and scan-varying versions"""
    @staticmethod
    def _init_core(detector, parameter_type=Parameter):
        """Calculate initial state and list of parameters, using the parameter_type
        callback to select between versions of the Parameter class.

        Only the first panel of ``detector`` is inspected, so this models the
        detector via a single panel. Returns a dict with the initial state
        vectors ("istate": d1, d2, dn, offset) and the six parameters
        ("p_list": Dist, Shift1, Shift2, Tau1, Tau2, Tau3) in a fixed order.
        """
        # get some vectors we need from the Panel
        panel = detector[0]
        so = matrix.col(panel.get_origin())
        d1 = matrix.col(panel.get_fast_axis())
        d2 = matrix.col(panel.get_slow_axis())
        dn = matrix.col(panel.get_normal())
        # We choose the dorg vector to terminate in the centre of the Panel, and
        # the offset between the end of the dorg vector and the Panel origin is
        # a coordinate matrix with elements in the basis d1, d2, dn
        panel_lim = panel.get_image_size_mm()
        offset = matrix.col((-1.0 * panel_lim[0] / 2.0, -1.0 * panel_lim[1] / 2.0, 0.0))
        dorg = so - offset[0] * d1 - offset[1] * d2
        # Set up the initial state. There are multiple items of interest, so
        # use a dictionary here (note for a single panel model we can do
        # without the first 3 of these, but will need them for multiple panels)
        istate = {"d1": d1, "d2": d2, "dn": dn, "offset": offset}
        # set up the parameters.
        # distance from lab origin to detector model plane along its normal, in
        # initial orientation
        distance = panel.get_directed_distance()
        dist = parameter_type(distance, dn, "length (mm)", "Dist")
        # shift in the detector model plane to locate dorg, in initial
        # orientation
        shift = dorg - dn * distance
        shift1 = parameter_type(shift.dot(d1), d1, "length (mm)", "Shift1")
        shift2 = parameter_type(shift.dot(d2), d2, "length (mm)", "Shift2")
        # rotations of the plane through its origin about:
        # 1) axis normal to initial orientation
        # 2) d1 axis of initial orientation
        # 3) d2 axis of initial orientation
        tau1 = parameter_type(0, dn, "angle (mrad)", "Tau1")
        tau2 = parameter_type(0, d1, "angle (mrad)", "Tau2")
        tau3 = parameter_type(0, d2, "angle (mrad)", "Tau3")
        # build the parameter list in a specific, maintained order
        p_list = [dist, shift1, shift2, tau1, tau2, tau3]
        return {"istate": istate, "p_list": p_list}
    def _compose_core(self, dist, shift1, shift2, tau1, tau2, tau3):
        """Compose the new panel state and its derivatives wrt parameters.

        Given the six parameter objects, returns a tuple (new_state, dd_dval)
        where new_state is a dict with keys "d1", "d2" and "origin" describing
        the updated panel frame, and dd_dval is a list of six 3x3 matrices,
        one per parameter in the order [dist, shift1, shift2, tau1, tau2,
        tau3], each being the derivative of the panel d matrix wrt that
        parameter. The three angle derivatives are divided by 1000 so that
        they are expressed per mrad, matching the parameter units.
        """
        # extract items from the initial state
        id1 = self._initial_state["d1"]
        id2 = self._initial_state["d2"]
        ioffset = self._initial_state["offset"]
        # convert angles to radians (parameters are held in mrad)
        tau1rad = tau1.value / 1000.0
        tau2rad = tau2.value / 1000.0
        tau3rad = tau3.value / 1000.0
        # compose rotation matrices and their first order derivatives
        Tau1 = (tau1.axis).axis_and_angle_as_r3_rotation_matrix(tau1rad, deg=False)
        dTau1_dtau1 = dR_from_axis_and_angle(tau1.axis, tau1rad, deg=False)
        Tau2 = (tau2.axis).axis_and_angle_as_r3_rotation_matrix(tau2rad, deg=False)
        dTau2_dtau2 = dR_from_axis_and_angle(tau2.axis, tau2rad, deg=False)
        Tau3 = (tau3.axis).axis_and_angle_as_r3_rotation_matrix(tau3rad, deg=False)
        dTau3_dtau3 = dR_from_axis_and_angle(tau3.axis, tau3rad, deg=False)
        Tau32 = Tau3 * Tau2
        Tau321 = Tau32 * Tau1
        # Compose new state
        # =================
        # First the frame positioned at a distance from the lab origin
        P0 = dist.value * dist.axis  # distance along initial detector normal
        Px = P0 + id1  # point at the end of d1 in lab frame
        Py = P0 + id2  # point at the end of d2 in lab frame
        # detector shift vector
        dsv = P0 + shift1.value * shift1.axis + shift2.value * shift2.axis
        # compose dorg point
        dorg = Tau321 * dsv - Tau32 * P0 + P0
        # compose d1, d2 and dn and ensure frame remains orthonormal.
        d1 = (Tau321 * (Px - P0)).normalize()
        d2 = (Tau321 * (Py - P0)).normalize()
        dn = d1.cross(d2).normalize()
        # NB dn not actually used in this simple model; calculation left
        # here as a reminder for future extension
        d2 = dn.cross(d1)
        # compose new sensor origin
        o = dorg + ioffset[0] * d1 + ioffset[1] * d2
        # keep the new state for return
        new_state = {"d1": d1, "d2": d2, "origin": o}
        # calculate derivatives of the state wrt parameters
        # =================================================
        # Start with the dorg vector, where
        # dorg = Tau321 * dsv - Tau32 * P0 + P0
        # derivative wrt dist
        dP0_ddist = dist.axis
        ddsv_ddist = dP0_ddist
        ddorg_ddist = Tau321 * ddsv_ddist - Tau32 * dP0_ddist + dP0_ddist
        # derivative wrt shift1
        ddsv_dshift1 = shift1.axis
        ddorg_dshift1 = Tau321 * ddsv_dshift1
        # derivative wrt shift2
        ddsv_dshift2 = shift2.axis
        ddorg_dshift2 = Tau321 * ddsv_dshift2
        # derivative wrt tau1
        dTau321_dtau1 = Tau32 * dTau1_dtau1
        ddorg_dtau1 = dTau321_dtau1 * dsv
        # derivative wrt tau2
        dTau32_dtau2 = Tau3 * dTau2_dtau2
        dTau321_dtau2 = dTau32_dtau2 * Tau1
        ddorg_dtau2 = dTau321_dtau2 * dsv - dTau32_dtau2 * P0
        # derivative wrt tau3
        dTau32_dtau3 = dTau3_dtau3 * Tau2
        dTau321_dtau3 = dTau32_dtau3 * Tau1
        ddorg_dtau3 = dTau321_dtau3 * dsv - dTau32_dtau3 * P0
        # Now derivatives of the direction d1, where
        # d1 = (Tau321 * (Px - P0)).normalize()
        # For calc of derivatives ignore the normalize(), which should
        # be unnecessary anyway as Px - P0 is a unit vector and Tau321 a
        # pure rotation.
        # derivative wrt dist
        # dPx_ddist = dist.axis; dP0_ddist = dist.axis, so these cancel
        dd1_ddist = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift1
        dd1_dshift1 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift2
        dd1_dshift2 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt tau1
        dd1_dtau1 = dTau321_dtau1 * (Px - P0)
        # derivative wrt tau2
        dd1_dtau2 = dTau321_dtau2 * (Px - P0)
        # derivative wrt tau3
        dd1_dtau3 = dTau321_dtau3 * (Px - P0)
        # Derivatives of the direction d2, where
        # d2 = (Tau321 * (Py - P0)).normalize()
        # derivative wrt dist
        dd2_ddist = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift1
        dd2_dshift1 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift2
        dd2_dshift2 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt tau1
        dd2_dtau1 = dTau321_dtau1 * (Py - P0)
        # derivative wrt tau2
        dd2_dtau2 = dTau321_dtau2 * (Py - P0)
        # derivative wrt tau3
        dd2_dtau3 = dTau321_dtau3 * (Py - P0)
        # Derivatives of the direction dn, where dn = d1.cross(d2).normalize()
        # These derivatives are not used, but are left as comments for understanding
        # derivative wrt dist
        # ddn_ddist = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift1
        # ddn_dshift1 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift2
        # ddn_dshift2 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt tau1. Product rule for cross product applies
        # ddn_dtau1 = dd1_dtau1.cross(d2) + d1.cross(dd2_dtau1)
        # derivative wrt tau2
        # ddn_dtau2 = dd1_dtau2.cross(d2) + d1.cross(dd2_dtau2)
        # derivative wrt tau3
        # ddn_dtau3 = dd1_dtau3.cross(d2) + d1.cross(dd2_dtau3)
        # calculate derivatives of the attached sensor matrix
        # ===================================================
        # sensor origin:
        # o = dorg + ioffset[0] * d1 + ioffset[1] * d2
        # derivative wrt dist
        do_ddist = ddorg_ddist + ioffset[0] * dd1_ddist + ioffset[1] * dd2_ddist
        # derivative wrt shift1
        do_dshift1 = ddorg_dshift1 + ioffset[0] * dd1_dshift1 + ioffset[1] * dd2_dshift1
        # derivative wrt shift2
        do_dshift2 = ddorg_dshift2 + ioffset[0] * dd1_dshift2 + ioffset[1] * dd2_dshift2
        # derivative wrt tau1
        do_dtau1 = ddorg_dtau1 + ioffset[0] * dd1_dtau1 + ioffset[1] * dd2_dtau1
        # derivative wrt tau2
        do_dtau2 = ddorg_dtau2 + ioffset[0] * dd1_dtau2 + ioffset[1] * dd2_dtau2
        # derivative wrt tau3
        do_dtau3 = ddorg_dtau3 + ioffset[0] * dd1_dtau3 + ioffset[1] * dd2_dtau3
        # combine these vectors together into derivatives of the sensor
        # matrix d, converting angles back to mrad
        dd_dval = []
        # derivative wrt dist
        dd_dval.append(
            matrix.sqr(dd1_ddist.elems + dd2_ddist.elems + do_ddist.elems).transpose()
        )
        # derivative wrt shift1
        dd_dval.append(
            matrix.sqr(
                dd1_dshift1.elems + dd2_dshift1.elems + do_dshift1.elems
            ).transpose()
        )
        # derivative wrt shift2
        dd_dval.append(
            matrix.sqr(
                dd1_dshift2.elems + dd2_dshift2.elems + do_dshift2.elems
            ).transpose()
        )
        # derivative wrt tau1
        dd_dval.append(
            matrix.sqr(dd1_dtau1.elems + dd2_dtau1.elems + do_dtau1.elems).transpose()
            / 1000.0
        )
        # derivative wrt tau2
        dd_dval.append(
            matrix.sqr(dd1_dtau2.elems + dd2_dtau2.elems + do_dtau2.elems).transpose()
            / 1000.0
        )
        # derivative wrt tau3
        dd_dval.append(
            matrix.sqr(dd1_dtau3.elems + dd2_dtau3.elems + do_dtau3.elems).transpose()
            / 1000.0
        )
        return new_state, dd_dval
class DetectorParameterisationSinglePanel(ModelParameterisation, DetectorMixin):
    """A parameterisation for a Detector model with a single abstract panel plane.

    A single panel has 6 parameters: three translations (detector distance and
    two in-plane shifts) and three rotations (around the initial normal, fast and
    slow axes) with angles expressed in mrad.
    """
    def __init__(self, detector, experiment_ids=None):
        """Initialise the DetectorParameterisationSinglePanel object

        Args:
            detector: A dxtbx Detector object to be parameterised.
            experiment_ids (list): The experiment IDs affected by this
                parameterisation. Defaults to None, which is replaced by [0].
        """
        # The state of a single Panel is its detector matrix d = (d1|d2|d0).
        # However, for the purposes of parameterisation we choose a different
        # vector than d0 to locate the Panel. That's because we want to perform
        # rotations around a point on the detector surface, and d0 points to the
        # corner of the Panel. To avoid excess correlations between 'tilt' and
        # 'twist' angles with the detector distance, we prefer to perform
        # rotations around a point located at the centre of the Panel. This is
        # usually close to point of intersection with the plane normal drawn
        # from the origin of the laboratory frame.
        #
        # Therefore we define:
        #
        # * a vector 'dorg' locating the centre of the single Panel
        # * a pair of orthogonal unit directions 'd1' and 'd2' forming a plane
        #   with its origin at the end of the vector dorg
        # * a third unit direction 'dn', orthogonal to both 'd1' & 'd2'.
        # * offsets to locate the origin d0 of the Panel frame from the
        #   tip of the dorg vector, in terms of the coordinate system
        #   formed by d1, d2 and dn.
        #
        # Held separately in attribute 'models' are:
        # * references to the detector objects contained in this model
        #
        # For this simplified class there is only a single Panel frame
        # and the vector dn is not actually required, because the plane formed
        # by d1 and d2 is coplanar with the sensor plane. Therefore the
        # offset is fully in terms of d1 and d2
        # set up the initial state of the detector parameterisation from the
        # orientation of the single Panel it contains, in terms of the vectors
        # dorg, d1 and d2.
        if experiment_ids is None:
            experiment_ids = [0]
        # initial state and the six parameters come from the shared mixin
        dat = self._init_core(detector)
        # set up the base class
        ModelParameterisation.__init__(
            self, detector, dat["istate"], dat["p_list"], experiment_ids=experiment_ids
        )
        # call compose to calculate all the derivatives
        self.compose()
    def compose(self):
        """Compose the new panel state from the current parameter values,
        update the panel frame and cache the state derivatives."""
        # extract parameters from the internal list
        dist, shift1, shift2, tau1, tau2, tau3 = self._param
        new_state, self._dstate_dp = self._compose_core(
            dist, shift1, shift2, tau1, tau2, tau3
        )
        # now update the panel with its new position and orientation.
        # The detector is self._model, the panel is the first in the
        # detector
        (self._model)[0].set_frame(
            new_state["d1"], new_state["d2"], new_state["origin"]
        )
    def get_state(self):
        """Return the d matrix of the single panel as a 3x3 matrix."""
        # only a single panel exists, so no multi_state_elt argument is allowed
        panel = (self._model)[0]
        return matrix.sqr(panel.get_d_matrix())
class DetectorParameterisationMultiPanel(ModelParameterisation):
    """A parameterisation for a Detector model with multiple panels.

    The whole detector is treated as a single rigid block with 6 degrees of
    freedom (see DetectorParameterisationSinglePanel). The axes of the
    translations and rotations are taken from the 'middle panel' of the detector
    (the one closest to the direct beam).
    """
    def __init__(self, detector, beam, experiment_ids=None):
        """Initialise the DetectorParameterisationMultiPanel object

        Args:
            detector: A dxtbx Detector object to be parameterised.
            beam: A dxtbx beam object used to calculate the closest panel.
            experiment_ids (list): The experiment IDs affected by this
                parameterisation. Defaults to None, which is replaced by [0].
        """
        # The state of each Panel in the detector model is its matrix
        # d = (d1|d2|d0). We need to define a new coordinate system rigidly
        # attached to the detector model in which to express the
        # parameterisation and compose each of the Panel states.
        #
        # We define:
        #
        # * a vector 'dorg' locating a point in laboratory space that moves with
        #   the rigid body of the detector and thus is fixed wrt each of the
        #   Panels.
        # * A pair of orthogonal unit directions 'd1' and 'd2' forming a plane
        #   with its origin at the end of the vector dorg.
        # * a third unit direction 'dn', orthogonal to both 'd1' & 'd2'.
        # * offsets to locate the origin of each panel frame from the
        #   tip of the dorg vector, in terms of the coordinate system
        #   formed by d1, d2 and dn.
        #
        # Held separately in attribute 'models' are:
        # * references to detector objects contained in this model
        # set up the initial state of the detector model from the
        # orientation of whichever Panel has its centre most closely
        # located to the direct beam intersection. Call this 'mid_panel'
        if experiment_ids is None:
            experiment_ids = [0]
        # find the panel whose centre is closest to the direct beam
        beam_centres = [
            matrix.col(p.get_beam_centre(beam.get_unit_s0())) for p in detector
        ]
        panel_centres = [0.5 * matrix.col(p.get_image_size_mm()) for p in detector]
        beam_to_centres = [
            (a - b).length() for a, b in zip(beam_centres, panel_centres)
        ]
        mid_panel_id = beam_to_centres.index(min(beam_to_centres))
        mid_panel = detector[mid_panel_id]
        # get some vectors we need from the mid_panel
        so = matrix.col(mid_panel.get_origin())
        d1 = matrix.col(mid_panel.get_fast_axis())
        d2 = matrix.col(mid_panel.get_slow_axis())
        dn = matrix.col(mid_panel.get_normal())
        # we choose the dorg vector to terminate in the centre of the mid_panel,
        # and the offset between the end of the dorg vector and each Panel
        # origin is a coordinate matrix with elements in the basis d1, d2, dn.
        # We need also each Panel's plane directions dir1 and dir2 in terms of
        # d1, d2 and dn.
        mid_panel_centre = panel_centres[mid_panel_id]
        dorg = so + mid_panel_centre[0] * d1 + mid_panel_centre[1] * d2
        offsets, dir1s, dir2s = [], [], []
        for p in detector:
            offset = matrix.col(p.get_origin()) - dorg
            offsets.append(matrix.col((offset.dot(d1), offset.dot(d2), offset.dot(dn))))
            dir1 = matrix.col(p.get_fast_axis())
            dir1_new_basis = matrix.col((dir1.dot(d1), dir1.dot(d2), dir1.dot(dn)))
            dir1s.append(dir1_new_basis)
            dir2 = matrix.col(p.get_slow_axis())
            dir2_new_basis = matrix.col((dir2.dot(d1), dir2.dot(d2), dir2.dot(dn)))
            dir2s.append(dir2_new_basis)
        # The offsets and directions in the d1, d2, dn basis are fixed
        # quantities, not dependent on parameter values.
        self._offsets = offsets
        self._dir1s = dir1s
        self._dir2s = dir2s
        # Set up the initial state. This is the basis d1, d2, dn.
        istate = {"d1": d1, "d2": d2, "dn": dn}
        # set up the parameters.
        # distance from lab origin to mid_panel plane along its normal,
        # in initial orientation
        distance = mid_panel.get_directed_distance()
        dist = Parameter(distance, dn, "length (mm)", "Dist")
        # shift in the detector model plane to locate dorg, in initial
        # orientation
        shift = dorg - dn * distance
        shift1 = Parameter(shift.dot(d1), d1, "length (mm)", "Shift1")
        shift2 = Parameter(shift.dot(d2), d2, "length (mm)", "Shift2")
        # rotations of the plane through its origin about:
        # 1) axis normal to initial orientation
        # 2) d1 axis of initial orientation
        # 3) d2 axis of initial orientation
        tau1 = Parameter(0, dn, "angle (mrad)", "Tau1")
        tau2 = Parameter(0, d1, "angle (mrad)", "Tau2")
        tau3 = Parameter(0, d2, "angle (mrad)", "Tau3")
        # build the parameter list in a specific, maintained order
        p_list = [dist, shift1, shift2, tau1, tau2, tau3]
        # set up the base class
        ModelParameterisation.__init__(
            self,
            detector,
            istate,
            p_list,
            experiment_ids=experiment_ids,
            is_multi_state=True,
        )
        # call compose to calculate all the derivatives
        self.compose()
    def compose(self):
        """Compose new states for all panels from the current parameter
        values, apply them to the detector model and store the per-panel
        derivatives of each d matrix in self._multi_state_derivatives."""
        # extract parameters from the internal list
        dist, shift1, shift2, tau1, tau2, tau3 = self._param
        # convert angles to radians (parameters are held in mrad)
        tau1rad = tau1.value / 1000.0
        tau2rad = tau2.value / 1000.0
        tau3rad = tau3.value / 1000.0
        # compose rotation matrices and their first order derivatives
        Tau1 = (tau1.axis).axis_and_angle_as_r3_rotation_matrix(tau1rad, deg=False)
        dTau1_dtau1 = dR_from_axis_and_angle(tau1.axis, tau1rad, deg=False)
        Tau2 = (tau2.axis).axis_and_angle_as_r3_rotation_matrix(tau2rad, deg=False)
        dTau2_dtau2 = dR_from_axis_and_angle(tau2.axis, tau2rad, deg=False)
        Tau3 = (tau3.axis).axis_and_angle_as_r3_rotation_matrix(tau3rad, deg=False)
        dTau3_dtau3 = dR_from_axis_and_angle(tau3.axis, tau3rad, deg=False)
        # Compose the new state using the C++ extension for speed
        from scitbx.array_family import flex
        from dials_refinement_helpers_ext import multi_panel_compose
        ret = multi_panel_compose(
            flex.vec3_double([self._initial_state[tag] for tag in ("d1", "d2", "dn")]),
            flex.double([p.value for p in self._param]),
            flex.vec3_double([p.axis for p in self._param]),
            self._model,
            flex.vec3_double(self._offsets),
            flex.vec3_double(self._dir1s),
            flex.vec3_double(self._dir2s),
            Tau1,
            dTau1_dtau1,
            Tau2,
            dTau2_dtau2,
            Tau3,
            dTau3_dtau3,
        )
        # Store the results. The results come back as a single array, convert it to a 2D array
        self._multi_state_derivatives = [
            [
                matrix.sqr(ret[j * len(self._offsets) + i])
                for j in range(len(self._param))
            ]
            for i in range(len(self._offsets))
        ]
    def get_state(self, multi_state_elt=0):
        """Return the d matrix of the panel indexed by multi_state_elt."""
        # There is only one detector, but the req. panel must be specified
        panel = (self._model)[multi_state_elt]
        return matrix.sqr(panel.get_d_matrix())
class PyDetectorParameterisationMultiPanel(DetectorParameterisationMultiPanel):
    """A python only version of DetectorParameterisationMultiPanel

    This version uses an older python-only compose method for comparison. See
    the base class for more details"""
    def compose(self):
        """Compose new states for all panels and their derivatives in pure
        Python.

        Kept for comparison against the accelerated compose of the base
        class. Updates the frame of every panel in the detector and fills
        self._multi_state_derivatives with the derivatives of each panel's
        d matrix wrt the six parameters, with the angle derivatives
        converted to be per mrad.
        """
        # extract items from the initial state
        id1 = self._initial_state["d1"]
        id2 = self._initial_state["d2"]
        # extract parameters from the internal list
        dist, shift1, shift2, tau1, tau2, tau3 = self._param
        # Extract the detector model
        detector = self._model
        # convert angles to radians (parameters are held in mrad)
        tau1rad = tau1.value / 1000.0
        tau2rad = tau2.value / 1000.0
        tau3rad = tau3.value / 1000.0
        # compose rotation matrices and their first order derivatives
        Tau1 = (tau1.axis).axis_and_angle_as_r3_rotation_matrix(tau1rad, deg=False)
        dTau1_dtau1 = dR_from_axis_and_angle(tau1.axis, tau1rad, deg=False)
        Tau2 = (tau2.axis).axis_and_angle_as_r3_rotation_matrix(tau2rad, deg=False)
        dTau2_dtau2 = dR_from_axis_and_angle(tau2.axis, tau2rad, deg=False)
        Tau3 = (tau3.axis).axis_and_angle_as_r3_rotation_matrix(tau3rad, deg=False)
        dTau3_dtau3 = dR_from_axis_and_angle(tau3.axis, tau3rad, deg=False)
        Tau32 = Tau3 * Tau2
        Tau321 = Tau32 * Tau1
        # Compose new state
        # =================
        # First the frame positioned at a distance from the lab origin
        P0 = dist.value * dist.axis  # distance along initial detector normal
        Px = P0 + id1  # point at the end of d1 in lab frame
        Py = P0 + id2  # point at the end of d2 in lab frame
        # detector shift vector
        dsv = P0 + shift1.value * shift1.axis + shift2.value * shift2.axis
        # compose dorg point
        dorg = Tau321 * dsv - Tau32 * P0 + P0
        # compose new d1, d2 and dn and ensure frame remains orthonormal.
        d1 = (Tau321 * (Px - P0)).normalize()
        d2 = (Tau321 * (Py - P0)).normalize()
        dn = d1.cross(d2).normalize()
        d2 = dn.cross(d1)
        # compose new Panel origins
        origins = [
            dorg + offset[0] * d1 + offset[1] * d2 + offset[2] * dn
            for offset in self._offsets
        ]
        # compose new Panel directions
        dir1s = [vec[0] * d1 + vec[1] * d2 + vec[2] * dn for vec in self._dir1s]
        dir2s = [vec[0] * d1 + vec[1] * d2 + vec[2] * dn for vec in self._dir2s]
        # now update the panels with their new position and orientation.
        for p, dir1, dir2, org in zip(detector, dir1s, dir2s, origins):
            p.set_frame(dir1, dir2, org)
        # calculate derivatives of the state wrt parameters
        # =================================================
        # Start with the dorg vector, where
        # dorg = Tau321 * dsv - Tau32 * P0 + P0
        # derivative wrt dist
        dP0_ddist = dist.axis
        ddsv_ddist = dP0_ddist
        ddorg_ddist = Tau321 * ddsv_ddist - Tau32 * dP0_ddist + dP0_ddist
        # derivative wrt shift1
        ddsv_dshift1 = shift1.axis
        ddorg_dshift1 = Tau321 * ddsv_dshift1
        # derivative wrt shift2
        ddsv_dshift2 = shift2.axis
        ddorg_dshift2 = Tau321 * ddsv_dshift2
        # derivative wrt tau1
        dTau321_dtau1 = Tau32 * dTau1_dtau1
        ddorg_dtau1 = dTau321_dtau1 * dsv
        # derivative wrt tau2
        dTau32_dtau2 = Tau3 * dTau2_dtau2
        dTau321_dtau2 = dTau32_dtau2 * Tau1
        ddorg_dtau2 = dTau321_dtau2 * dsv - dTau32_dtau2 * P0
        # derivative wrt tau3
        dTau32_dtau3 = dTau3_dtau3 * Tau2
        dTau321_dtau3 = dTau32_dtau3 * Tau1
        ddorg_dtau3 = dTau321_dtau3 * dsv - dTau32_dtau3 * P0
        # Now derivatives of the direction d1, where
        # d1 = (Tau321 * (Px - P0)).normalize()
        # For calc of derivatives ignore the normalize(), which should
        # be unnecessary anyway as Px - P0 is a unit vector and Tau321 a
        # pure rotation.
        # derivative wrt dist
        # dPx_ddist = dist.axis; dP0_ddist = dist.axis, so these cancel
        dd1_ddist = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift1
        dd1_dshift1 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift2
        dd1_dshift2 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt tau1
        dd1_dtau1 = dTau321_dtau1 * (Px - P0)
        # derivative wrt tau2
        dd1_dtau2 = dTau321_dtau2 * (Px - P0)
        # derivative wrt tau3
        dd1_dtau3 = dTau321_dtau3 * (Px - P0)
        # Derivatives of the direction d2, where
        # d2 = (Tau321 * (Py - P0)).normalize()
        # derivative wrt dist
        dd2_ddist = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift1
        dd2_dshift1 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift2
        dd2_dshift2 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt tau1
        dd2_dtau1 = dTau321_dtau1 * (Py - P0)
        # derivative wrt tau2
        dd2_dtau2 = dTau321_dtau2 * (Py - P0)
        # derivative wrt tau3
        dd2_dtau3 = dTau321_dtau3 * (Py - P0)
        # Derivatives of the direction dn, where
        # dn = d1.cross(d2).normalize()
        # derivative wrt dist
        ddn_ddist = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift1
        ddn_dshift1 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt shift2
        ddn_dshift2 = matrix.col((0.0, 0.0, 0.0))
        # derivative wrt tau1. Product rule for cross product applies
        ddn_dtau1 = dd1_dtau1.cross(d2) + d1.cross(dd2_dtau1)
        # derivative wrt tau2
        ddn_dtau2 = dd1_dtau2.cross(d2) + d1.cross(dd2_dtau2)
        # derivative wrt tau3
        ddn_dtau3 = dd1_dtau3.cross(d2) + d1.cross(dd2_dtau3)
        # reset stored derivatives
        for i in range(len(detector)):
            self._multi_state_derivatives[i] = [None] * len(self._dstate_dp)
        # calculate derivatives of the attached Panel matrices
        # ====================================================
        for panel_id, (offset, dir1_new_basis, dir2_new_basis) in enumerate(
            zip(self._offsets, self._dir1s, self._dir2s)
        ):
            # Panel origin:
            # o = dorg + offset[0] * d1 + offset[1] * d2 + offset[2] * dn
            # derivative wrt dist. NB only ddorg_ddist is not null! The other
            # elements are left here to aid understanding, but should be removed
            # when this class is ported to C++ for speed.
            do_ddist = (
                ddorg_ddist
                + offset[0] * dd1_ddist
                + offset[1] * dd2_ddist
                + offset[2] * ddn_ddist
            )
            # derivative wrt shift1. NB only ddorg_dshift1 is non-null.
            do_dshift1 = (
                ddorg_dshift1
                + offset[0] * dd1_dshift1
                + offset[1] * dd2_dshift1
                + offset[2] * ddn_dshift1
            )
            # derivative wrt shift2. NB only ddorg_dshift2 is non-null.
            do_dshift2 = (
                ddorg_dshift2
                + offset[0] * dd1_dshift2
                + offset[1] * dd2_dshift2
                + offset[2] * ddn_dshift2
            )
            # derivative wrt tau1
            do_dtau1 = (
                ddorg_dtau1
                + offset[0] * dd1_dtau1
                + offset[1] * dd2_dtau1
                + offset[2] * ddn_dtau1
            )
            # derivative wrt tau2
            do_dtau2 = (
                ddorg_dtau2
                + offset[0] * dd1_dtau2
                + offset[1] * dd2_dtau2
                + offset[2] * ddn_dtau2
            )
            # derivative wrt tau3
            do_dtau3 = (
                ddorg_dtau3
                + offset[0] * dd1_dtau3
                + offset[1] * dd2_dtau3
                + offset[2] * ddn_dtau3
            )
            # Panel dir1:
            # dir1 = dir1_new_basis[0] * d1 + dir1_new_basis[1] * d2 +
            #        dir1_new_basis[2] * dn
            # derivative wrt dist. NB These are all null.
            ddir1_ddist = (
                dir1_new_basis[0] * dd1_ddist
                + dir1_new_basis[1] * dd2_ddist
                + dir1_new_basis[2] * ddn_ddist
            )
            # derivative wrt shift1. NB These are all null.
            ddir1_dshift1 = (
                dir1_new_basis[0] * dd1_dshift1
                + dir1_new_basis[1] * dd2_dshift1
                + dir1_new_basis[2] * ddn_dshift1
            )
            # derivative wrt shift2. NB These are all null.
            ddir1_dshift2 = (
                dir1_new_basis[0] * dd1_dshift2
                + dir1_new_basis[1] * dd2_dshift2
                + dir1_new_basis[2] * ddn_dshift2
            )
            # derivative wrt tau1
            ddir1_dtau1 = (
                dir1_new_basis[0] * dd1_dtau1
                + dir1_new_basis[1] * dd2_dtau1
                + dir1_new_basis[2] * ddn_dtau1
            )
            # derivative wrt tau2
            ddir1_dtau2 = (
                dir1_new_basis[0] * dd1_dtau2
                + dir1_new_basis[1] * dd2_dtau2
                + dir1_new_basis[2] * ddn_dtau2
            )
            # derivative wrt tau3
            ddir1_dtau3 = (
                dir1_new_basis[0] * dd1_dtau3
                + dir1_new_basis[1] * dd2_dtau3
                + dir1_new_basis[2] * ddn_dtau3
            )
            # Panel dir2:
            # dir2 = dir2_new_basis[0] * d1 + dir2_new_basis[1] * d2 +
            #        dir2_new_basis[2] * dn
            # derivative wrt dist. NB These are all null.
            ddir2_ddist = (
                dir2_new_basis[0] * dd1_ddist
                + dir2_new_basis[1] * dd2_ddist
                + dir2_new_basis[2] * ddn_ddist
            )
            # derivative wrt shift1. NB These are all null.
            ddir2_dshift1 = (
                dir2_new_basis[0] * dd1_dshift1
                + dir2_new_basis[1] * dd2_dshift1
                + dir2_new_basis[2] * ddn_dshift1
            )
            # derivative wrt shift2. NB These are all null.
            ddir2_dshift2 = (
                dir2_new_basis[0] * dd1_dshift2
                + dir2_new_basis[1] * dd2_dshift2
                + dir2_new_basis[2] * ddn_dshift2
            )
            # derivative wrt tau1
            ddir2_dtau1 = (
                dir2_new_basis[0] * dd1_dtau1
                + dir2_new_basis[1] * dd2_dtau1
                + dir2_new_basis[2] * ddn_dtau1
            )
            # derivative wrt tau2
            ddir2_dtau2 = (
                dir2_new_basis[0] * dd1_dtau2
                + dir2_new_basis[1] * dd2_dtau2
                + dir2_new_basis[2] * ddn_dtau2
            )
            # derivative wrt tau3
            ddir2_dtau3 = (
                dir2_new_basis[0] * dd1_dtau3
                + dir2_new_basis[1] * dd2_dtau3
                + dir2_new_basis[2] * ddn_dtau3
            )
            # combine these vectors together into derivatives of the panel
            # matrix d and store them, converting angles back to mrad
            self._multi_state_derivatives[panel_id] = [
                matrix.sqr(
                    ddir1_ddist.elems + ddir2_ddist.elems + do_ddist.elems
                ).transpose(),
                matrix.sqr(
                    ddir1_dshift1.elems + ddir2_dshift1.elems + do_dshift1.elems
                ).transpose(),
                matrix.sqr(
                    ddir1_dshift2.elems + ddir2_dshift2.elems + do_dshift2.elems
                ).transpose(),
                matrix.sqr(
                    ddir1_dtau1.elems + ddir2_dtau1.elems + do_dtau1.elems
                ).transpose()
                / 1000.0,
                matrix.sqr(
                    ddir1_dtau2.elems + ddir2_dtau2.elems + do_dtau2.elems
                ).transpose()
                / 1000.0,
                matrix.sqr(
                    ddir1_dtau3.elems + ddir2_dtau3.elems + do_dtau3.elems
                ).transpose()
                / 1000.0,
            ]
class DetectorParameterisationHierarchical(DetectorParameterisationMultiPanel):
    """A parameterisation for a hierarchical Detector model with multiple panels.

    The detector hierarchy is used to determine panel groups, each of which will
    be treated as a single rigid block with 6 degrees of freedom (see
    DetectorParameterisationSinglePanel).
    """
    def __init__(self, detector, experiment_ids=None, level=0):
        """Initialise the DetectorParameterisationHierarchical object

        Args:
            detector: A dxtbx Detector object to be parameterised.
            experiment_ids (list): The experiment IDs affected by this
                parameterisation. Defaults to None, which is replaced by [0].
            level (int): Select level of the detector hierarchy to determine panel
                groupings that are treated as separate rigid blocks.
        """
        if experiment_ids is None:
            experiment_ids = [0]
        try:
            h = detector.hierarchy()
        except AttributeError:
            print("This detector does not have a hierarchy")
            raise
        # list the panel groups at the chosen level
        try:
            self._groups = get_panel_groups_at_depth(h, level)
        except AttributeError:
            print(f"Cannot access the hierarchy at the depth level={level}")
            raise
        # collect the panel ids for each Panel within the groups
        panels = list(detector)
        self._panel_ids_by_group = [
            get_panel_ids_at_root(panels, g) for g in self._groups
        ]
        p_list = []
        self._group_ids_by_parameter = []
        istate = []
        self._offsets = []
        self._dir1s = []
        self._dir2s = []
        # loop over the groups, collecting initial parameters and states
        for igp, pnl_ids in enumerate(self._panel_ids_by_group):
            panel_centres_in_lab_frame = []
            for i in pnl_ids:
                pnl = detector[i]
                im_size = pnl.get_image_size_mm()
                cntr = (
                    matrix.col(pnl.get_origin())
                    + 0.5 * matrix.col(pnl.get_fast_axis()) * im_size[0]
                    + 0.5 * matrix.col(pnl.get_slow_axis()) * im_size[1]
                )
                panel_centres_in_lab_frame.append(cntr)
            # get some vectors we need from the group
            go = matrix.col(self._groups[igp].get_origin())
            d1 = matrix.col(self._groups[igp].get_fast_axis())
            d2 = matrix.col(self._groups[igp].get_slow_axis())
            dn = matrix.col(self._groups[igp].get_normal())
            # we choose the dorg vector for this group to terminate on the group's
            # frame, at a point that we consider close to the centre of the group of
            # panels. This point is defined by taking the 3D centroid of the panel
            # centres then projecting that point onto the group frame.
            centroid = reduce(lambda a, b: a + b, panel_centres_in_lab_frame) / len(
                panel_centres_in_lab_frame
            )
            try:
                gp_centroid = matrix.col(
                    self._groups[igp].get_bidirectional_ray_intersection(centroid)
                )
                dorg = go + gp_centroid[0] * d1 + gp_centroid[1] * d2
            except RuntimeError:  # workaround for a group frame that passes through
                # the origin
                dorg = matrix.col((0.0, 0.0, 0.0))
            # The offset between the end of the dorg vector and
            # each Panel origin is a coordinate matrix with elements in the basis d1,
            # d2, dn. We need also each Panel's plane directions dir1 and dir2 in
            # terms of d1, d2 and dn.
            offsets, dir1s, dir2s = [], [], []
            for p in [detector[i] for i in pnl_ids]:
                offset = matrix.col(p.get_origin()) - dorg
                offsets.append(
                    matrix.col((offset.dot(d1), offset.dot(d2), offset.dot(dn)))
                )
                dir1 = matrix.col(p.get_fast_axis())
                dir1_new_basis = matrix.col((dir1.dot(d1), dir1.dot(d2), dir1.dot(dn)))
                dir1s.append(dir1_new_basis)
                dir2 = matrix.col(p.get_slow_axis())
                dir2_new_basis = matrix.col((dir2.dot(d1), dir2.dot(d2), dir2.dot(dn)))
                dir2s.append(dir2_new_basis)
            # The offsets and directions in the d1, d2, dn basis are fixed
            # quantities, not dependent on parameter values. Keep these as separate
            # sub-lists for each group
            self._offsets.append(offsets)
            self._dir1s.append(dir1s)
            self._dir2s.append(dir2s)
            # Set up the initial state for this group. This is the basis d1, d2, dn,
            # plus the offset locating the origin of the initial group frame
            gp_offset = go - dorg  # lab frame basis
            gp_offset = matrix.col(
                (gp_offset.dot(d1), gp_offset.dot(d2), gp_offset.dot(dn))
            )  # d1,d2,dn basis
            istate.append({"d1": d1, "d2": d2, "dn": dn, "gp_offset": gp_offset})
            # set up the parameters.
            # distance from lab origin to ref_panel plane along its normal,
            # in initial orientation
            distance = self._groups[igp].get_directed_distance()
            dist = Parameter(distance, dn, "length (mm)", f"Group{igp + 1}Dist")
            # shift in the detector model plane to locate dorg, in initial
            # orientation
            shift = dorg - dn * distance
            shift1 = Parameter(
                shift.dot(d1), d1, "length (mm)", f"Group{igp + 1}Shift1"
            )
            shift2 = Parameter(
                shift.dot(d2), d2, "length (mm)", f"Group{igp + 1}Shift2"
            )
            # rotations of the plane through its origin about:
            # 1) axis normal to initial orientation
            # 2) d1 axis of initial orientation
            # 3) d2 axis of initial orientation
            tau1 = Parameter(0, dn, "angle (mrad)", f"Group{igp + 1}Tau1")
            tau2 = Parameter(0, d1, "angle (mrad)", f"Group{igp + 1}Tau2")
            tau3 = Parameter(0, d2, "angle (mrad)", f"Group{igp + 1}Tau3")
            # extend the parameter list with those pertaining to this group
            p_list.extend([dist, shift1, shift2, tau1, tau2, tau3])
            self._group_ids_by_parameter.extend([igp] * 6)
        # set up the base class
        ModelParameterisation.__init__(
            self,
            detector,
            istate,
            p_list,
            experiment_ids=experiment_ids,
            is_multi_state=True,
        )
        # call compose to calculate all the derivatives
        self.compose()
    def get_panel_ids_by_group(self):
        """Return the panel IDs for each panel group of the detector.

        Provides access to a result calculated once during initialisation of the
        class, for use during parameterisation auto reduction tasks.

        Returns:
            A list over the panel groups. Each element of the list is itself a list,
            containing the panel IDs for that group.
        """
        return self._panel_ids_by_group
    def get_param_panel_groups(self):
        """Return the panel group ID for each parameter of the parameterisation.

        Provides access to a result calculated once during initialisation of the
        class, for use during parameterisation auto reduction tasks.

        Returns:
            A list over the parameters. Each element of the list contains the panel
            group ID of the panel group affected by that parameter.
        """
        return self._group_ids_by_parameter
    def compose(self):
        """Compose new states for each panel group and their derivatives.

        For every group, the six associated parameters are passed to the
        PanelGroupCompose helper, which supplies the new group frame and the
        derivatives of each member panel's d matrix wrt those parameters.
        The group frames are reset on the detector hierarchy and the
        derivatives stored in self._multi_state_derivatives.
        """
        # reset the list that holds derivatives
        for i in range(len(self._model)):
            self._multi_state_derivatives[i] = [None] * len(self._dstate_dp)
        # loop over groups of panels collecting derivatives of the state wrt
        # parameters
        param = iter(self._param)
        for igp, pnl_ids in enumerate(self._panel_ids_by_group):
            # extract parameters from the internal list
            dist = next(param)
            shift1 = next(param)
            shift2 = next(param)
            tau1 = next(param)
            tau2 = next(param)
            tau3 = next(param)
            param_vals = flex.double(
                (
                    dist.value,
                    shift1.value,
                    shift2.value,
                    tau1.value,
                    tau2.value,
                    tau3.value,
                )
            )
            param_axes = flex.vec3_double(
                (dist.axis, shift1.axis, shift2.axis, tau1.axis, tau2.axis, tau3.axis)
            )
            offsets = self._offsets[igp]
            dir1s = self._dir1s[igp]
            dir2s = self._dir2s[igp]
            # Get items from the initial state for the group of interest
            initial_state = self._initial_state[igp]
            id1 = initial_state["d1"]
            id2 = initial_state["d2"]
            idn = initial_state["dn"]
            igp_offset = initial_state["gp_offset"]
            # Compose the new state using the helper class for calculations
            pgc = PanelGroupCompose(id1, id2, idn, igp_offset, param_vals, param_axes)
            # assign back to the group frame
            self._groups[igp].set_frame(pgc.d1(), pgc.d2(), pgc.origin())
            # Loop over attached Panel matrices, using the helper class to calculate
            # derivatives of the d matrix in each case and store them.
            i = igp * 6
            for (panel_id, offset, dir1_new_basis, dir2_new_basis) in zip(
                pnl_ids, offsets, dir1s, dir2s
            ):
                self._multi_state_derivatives[panel_id][
                    i : (i + 6)
                ] = pgc.derivatives_for_panel(offset, dir1_new_basis, dir2_new_basis)
| {
"repo_name": "dials/dials",
"path": "algorithms/refinement/parameterisation/detector_parameters.py",
"copies": "1",
"size": "43681",
"license": "bsd-3-clause",
"hash": -6781433291710114000,
"line_mean": 37.1160558464,
"line_max": 95,
"alpha_frac": 0.5652343124,
"autogenerated": false,
"ratio": 3.4883405206835967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9549851048423001,
"avg_score": 0.000744756932118898,
"num_lines": 1146
} |
from functools import reduce
from skyline_path.core import SkylineStatus
from skyline_path.core.dominate import dominate_check
from skyline_path.core.edge_helper import aggregate
class SkyPath:
    """Skyline-path search over a MultiAttributeGraph.

    Two caches are maintained while exploring:

    * ``partial_sp`` maps a (start, end) node pair to the best known
      aggregated attributes of a partial path between them, and
    * ``full_sp`` maps complete source-to-destination paths (as tuples)
      to their aggregated attributes.
    """

    def __init__(self, mag):
        self.mag = mag
        self.partial_sp = {}
        self.full_sp = {}

    def skypath_query(self, src, dst):
        """Run the SkyPath algorithm between two nodes of this graph.

        Results are accumulated into ``self.full_sp``.
        """
        return self._path_recursive(src, dst)

    def _path_recursive(self, cur, dst, path=None):
        # Depth-first enumeration of simple paths, pruned by dominance.
        trail = (path if path is not None else []) + [cur]
        if cur == dst:
            self.__record_candidate(trail)
            return
        unvisited = self.mag.neighbors[cur] - set(trail)
        for nxt in unvisited:
            if self.__worth_extending(nxt, trail):
                self._path_recursive(nxt, dst, trail)

    def __worth_extending(self, neighbor, path):
        # A hop is worth taking unless the extended path is dominated by
        # a known partial path or by an already-found full skyline path.
        candidate = path + [neighbor]
        attrs = self.__attrs_in(candidate)
        if self.__dominated_by_partial(candidate, attrs):
            return False
        return not self.__dominated_by_full(candidate, attrs)

    def __dominated_by_partial(self, path, target_attrs):
        key = (path[0], path[-1])
        known = self.partial_sp.get(key)
        if known is not None:
            status = dominate_check(known, target_attrs)
            if status is SkylineStatus.DOMINATE:
                return True
            if status is SkylineStatus.NON_DOMINATE:
                return False
        # Either no record yet, or the old partial lost: store the new one.
        self.partial_sp[key] = target_attrs
        return False

    def __dominated_by_full(self, path, target_attrs):
        return any(
            dominate_check(known_attrs, target_attrs) is SkylineStatus.DOMINATE
            for known_attrs in self.full_sp.values()
        )

    def __record_candidate(self, path):
        attrs = self.__attrs_in(path)
        # Iterate over a snapshot: entries may be evicted mid-loop.
        for known_path, known_attrs in list(self.full_sp.items()):
            status = dominate_check(known_attrs, attrs)
            if status is SkylineStatus.DOMINATE:
                return
            elif status is SkylineStatus.BE_DOMINATED:
                self.full_sp.pop(known_path)
        # Check finished; admit the new skyline path.
        self.full_sp[tuple(path)] = attrs

    def __attrs_in(self, path):
        per_edge = (self.mag.attrs_between(a, b) for a, b in self.__edge_pairs(path))
        return reduce(aggregate, per_edge)

    def __edge_pairs(self, path):
        return list(zip(path, path[1:]))
| {
"repo_name": "shadow3x3x3/renew-skyline-path-query",
"path": "skyline_path/algorithms/sky_path.py",
"copies": "1",
"size": "3131",
"license": "mit",
"hash": 4500573737302058500,
"line_mean": 35.8352941176,
"line_max": 72,
"alpha_frac": 0.5934206324,
"autogenerated": false,
"ratio": 3.740740740740741,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4834161373140741,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from Slices import Slice
from theReader import TheReader
class Brain:
    """Pizza-slicing solver.

    Reads the pizza matrix through ``TheReader`` and greedily grows
    slices starting from cells of a chosen ingredient.
    """

    def __init__(self, configFile):
        self.reader = TheReader(configFile)
        self.reader.readConfiguration()
        self.slices = []

    def searchMinOccurencesOfIngredient(self):
        """Return the ingredient type to start slices from.

        Scores +1 per mushroom ('M') cell and -1 per tomato ('T') cell,
        returning 'M' when the total is positive and 'T' otherwise.

        BUGFIX: the original built ``map(lambda cell: ...)`` with no
        iterable (TypeError) and then summed map objects; it could never
        run.  The comparison direction is kept exactly as written.

        NOTE(review): despite the name ("min occurrences") this returns
        the MAJORITY ingredient as written — confirm the intended
        direction.
        """
        numResult = sum(
            1 if cell.type == 'M' else -1
            for pizzaRow in self.reader.theMatrix.matrix
            for cell in pizzaRow
        )
        return 'M' if numResult > 0 else 'T'

    def search(self):
        """Create slices until every cell of the pizza is explored."""
        minIngredient = self.searchMinOccurencesOfIngredient()
        pizzaMatrix = self.reader.theMatrix.matrix
        while not self.reader.theMatrix.allExplored():
            startingPositions = self.getStartingPositions(pizzaMatrix, minIngredient)
            x, y = startingPositions[0]
            currSlice = Slice(self.reader.theMatrix.matrix, x, y)
            self.reader.theMatrix.matrix[x][y].explore()
            self.slices.append(currSlice)
            # BUGFIX: the original indexed ``theMatrix[x][y]`` directly;
            # the cell grid lives in ``theMatrix.matrix`` (see above).
            self.exploreForMinimum(currSlice, self.reader.theMatrix.matrix[x][y])

    def exploreForMinimum(self, slice, startingCell):
        """Grow *slice* until it holds enough of both ingredients.

        NOTE(review): the loop scores the four borders but never applies
        the chosen expansion nor updates ``counter``, so as written this
        does not terminate — the expansion step appears unfinished.
        """
        counter = {'M': 0, 'T': 0}
        counter[startingCell.type] += 1
        while counter['M'] + counter['T'] < 2 * self.reader.minNumberOfIngredients:
            # Placeholders for the unfinished expansion step.
            lenght = slice.lenght()
            height = slice.height()
            results = (self.watchLeft(slice), self.watchTop(slice),
                       self.watchRight(slice), self.watchDown(slice))
            maxRes = max(results)
            rightIndex = results.index(maxRes)

    def __scoreBorderStrip(self, cells, numM, numT):
        """Score a strip of border cells.

        A cell whose ingredient is currently the minority inside the
        slice is worth 10 points, any other cell 5.  (Shared by the four
        ``watch*`` methods, which previously duplicated this loop.)
        """
        points = 0
        for cell in cells:
            if cell.type == 'M' and numM < numT:
                points += 10
            elif cell.type == 'M' and numM >= numT:
                points += 5
            elif cell.type == 'T' and numT < numM:
                points += 10
            else:
                points += 5
        return points

    def watchLeft(self, slice):
        """Score the column immediately left of *slice* (0 at the border)."""
        numM, numT = slice.countIngredient('M'), slice.countIngredient('T')
        if slice.y1 > 0:
            leftY = slice.y1 - 1
            cells = [self.reader.theMatrix.matrix[x][leftY]
                     for x in range(slice.x1, slice.x2 + 1)]
            return self.__scoreBorderStrip(cells, numM, numT)
        return 0

    def watchRight(self, slice):
        """Score the column immediately right of *slice* (0 at the border).

        NOTE(review): the bound compares y2 against ``numberOfRows``;
        this probably should be the column count — confirm in TheReader.
        """
        numM, numT = slice.countIngredient('M'), slice.countIngredient('T')
        if slice.y2 < self.reader.numberOfRows:
            rightY = slice.y2 + 1
            cells = [self.reader.theMatrix.matrix[x][rightY]
                     for x in range(slice.x1, slice.x2 + 1)]
            return self.__scoreBorderStrip(cells, numM, numT)
        return 0

    def watchDown(self, slice):
        """Score the row immediately below *slice* (0 at the border)."""
        numM, numT = slice.countIngredient('M'), slice.countIngredient('T')
        if slice.x2 < self.reader.numberOfRows:
            downX = slice.x2 + 1
            cells = [self.reader.theMatrix.matrix[downX][y]
                     for y in range(slice.y1, slice.y2 + 1)]
            return self.__scoreBorderStrip(cells, numM, numT)
        return 0

    def watchTop(self, slice):
        """Score the row immediately above *slice* (0 at the border)."""
        numM, numT = slice.countIngredient('M'), slice.countIngredient('T')
        if slice.x1 > 0:
            topX = slice.x1 - 1
            cells = [self.reader.theMatrix.matrix[topX][y]
                     for y in range(slice.y1, slice.y2 + 1)]
            return self.__scoreBorderStrip(cells, numM, numT)
        return 0

    def getStartingPositions(self, pizzaMatrix, minIngredient):
        """Return (row, col) of every unexplored cell holding *minIngredient*.

        BUGFIX: the original iterated ``range(pizzaMatrix)`` over the
        matrix (a list), which raises TypeError; it must iterate over
        ``range(len(...))`` for both dimensions.
        """
        startingPositions = []
        for i in range(len(pizzaMatrix)):
            for j in range(len(pizzaMatrix[i])):
                cell = pizzaMatrix[i][j]
                if not cell.explored and cell.type == minIngredient:
                    startingPositions.append((i, j))
        return startingPositions
| {
"repo_name": "ziliquas/PizzaHashCode",
"path": "Logic.py",
"copies": "1",
"size": "4492",
"license": "apache-2.0",
"hash": 3854685415951473700,
"line_mean": 37.724137931,
"line_max": 146,
"alpha_frac": 0.5256010686,
"autogenerated": false,
"ratio": 3.8360375747224595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48616386433224595,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from sqlalchemy import and_
def rowdict(row):
    """Convert a single result row to a plain dict (None passes through)."""
    return None if row is None else dict(row.items())
def rowdicts(rows):
    """Convert a sequence of result rows to plain dicts (None passes through)."""
    if rows is None:
        return None
    return list(map(dict, rows))
def exec_fetchall(expr):
    """Execute a SQLAlchemy expression and return all result rows."""
    result = expr.execute()
    return result.fetchall()
def exec_fetchone(expr):
    """Execute a SQLAlchemy expression and return the first result row."""
    result = expr.execute()
    return result.fetchone()
def fetchall(table):
    """Select every row from *table*."""
    query = table.select()
    return exec_fetchall(query)
def fetchone(table, where_expr):
    """Select one row from a table filtered by a `where` expression."""
    row = exec_fetchone(table.select().where(where_expr))
    return rowdict(row)
def where(table, operator=and_, **values):
    """Return a `where` expression to combine column=value criteria.

    Each keyword argument becomes a column == value test; multiple tests
    are combined with *operator* (AND by default) via reduce_filters.
    """
    # Create a list of (column == value) filters and combine them
    return reduce_filters(table, eq_column, operator=operator, **values)
def iwhere(table, operator=and_, **values):
    """Return a `where` expression to combine column ILIKE value criteria.

    Case-insensitive variant of :func:`where`; each keyword argument
    becomes a column ILIKE value test combined with *operator*.
    """
    # Create a list of (column ILIKE value) filters and combine them
    return reduce_filters(table, ilike_column, operator=operator, **values)
def eq_column(table, column, value):
    """Build an equality criterion: table.c.<column> == value."""
    col = getattr(table.c, column)
    return col == value
def ilike_column(table, column, value):
    """Build a case-insensitive pattern criterion: table.c.<column> ILIKE value."""
    col = getattr(table.c, column)
    return col.ilike(value)
def reduce_filters(table, fn, operator=and_, **values):
    """Combine per-column criteria built by *fn* into one `where` expression.

    Every (column, value) pair in *values* is turned into a criterion by
    calling ``fn(table, column, value)``.  Multiple criteria are folded
    together with *operator* (AND by default); a single criterion is
    returned as-is.
    """
    criteria = [fn(table, col, val) for col, val in values.items()]
    if len(criteria) == 1:
        return criteria[0]
    return reduce(operator, criteria)
def select_one(table, where=where, operator=and_, **values):
    """Select one row filtered by `values` column=value criteria."""
    criteria = where(table, operator=operator, **values)
    return fetchone(table, criteria)
def select_all(table, where=where, operator=and_, **values):
    """Select all rows filtered by `values` column=value criteria."""
    criteria = where(table, operator=operator, **values)
    return exec_fetchall(table.select().where(criteria))
def update_values(table, where_expr, **values):
    """Return an update().values(...) expression for the given table."""
    expr = table.update().values(**values)
    return expr.where(where_expr)
def update(table, table_id, **values):
    """Update a specific row's values by ID."""
    expr = update_values(table, table.c.id == table_id, **values)
    return expr.execute()
def delete(table, operator=and_, where=where, **values):
    """Delete rows from a table based on the filter values."""
    criteria = where(table, operator=operator, **values)
    return table.delete(criteria).execute()
| {
"repo_name": "davvid/skeletor",
"path": "skeletor/db/sql.py",
"copies": "1",
"size": "2973",
"license": "bsd-3-clause",
"hash": -2865598310043325400,
"line_mean": 29.3367346939,
"line_max": 76,
"alpha_frac": 0.6781029263,
"autogenerated": false,
"ratio": 3.7776365946632784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9955739520963278,
"avg_score": 0,
"num_lines": 98
} |
from functools import reduce
from syntax import *
from collections import namedtuple
class NoRuleApplies(RuntimeError):
    """Raised when no evaluation rule matches the current term (normal form)."""
    pass
# ------------------------ EVALUATION ------------------------
def isnumericval(term):
    """True for numeric values: zero, or succ applied to a numeric value."""
    while type(term) is TmSucc:
        term = term.term
    return type(term) is TmZero
def isval(term):
    """True for fully evaluated terms: booleans, numerals and abstractions."""
    if type(term) in (TmTrue, TmFalse, TmAbs):
        return True
    return isnumericval(term)
class Evaluate(Visitor):
    """Single-step (small-step) evaluator.

    Each ``visit_<Term>`` method implements the evaluation rules of one
    term constructor; dispatch on the term's runtime type is supplied by
    the external ``Visitor`` base (note the methods take the term itself,
    not ``self``, as first argument).  ``NoRuleApplies`` signals that the
    term is already in normal form.
    """
    def visit_TmApp(term, ctx):
        # E-AppAbs / E-App2 / E-App1: beta-reduce once both sides are
        # values, otherwise evaluate left-to-right (call-by-value).
        if isval(term.left) and isval(term.right):
            return termSubstTop(term.right, term.left.term)
        elif isval(term.left):
            right = evaluate1(term.right, ctx)
            return term._replace(right=right)
        else:
            left = evaluate1(term.left, ctx)
            return term._replace(left=left)
    def visit_TmIf(term, ctx):
        # E-IfTrue / E-IfFalse: pick a branch when the guard is a
        # boolean value; otherwise (E-If) step the guard.
        t_cond = type(term.term_condition)
        if t_cond is TmTrue:
            return term.term_then
        elif t_cond is TmFalse:
            return term.term_else
        t = evaluate1(term.term_condition, ctx)
        return term._replace(term_condition=t)
    def visit_TmSucc(term, ctx):
        # E-Succ: step the argument.
        t = evaluate1(term.term, ctx)
        return term._replace(term=t)
    def visit_TmPred(term, ctx):
        t_term = type(term.term)
        if t_term is TmZero:
            # E-PredZero: pred 0 -> 0
            return TmZero(None)
        elif t_term is TmSucc and isnumericval(term.term):
            # E-PredSucc: pred (succ nv) -> nv.
            # BUGFIX: the original returned term.term (succ nv itself),
            # which made pred a no-op on positive numerals.
            return term.term.term
        t = evaluate1(term.term, ctx)
        return TmPred(term.info, t)
    def visit_TmIsZero(term, ctx):
        t_term = type(term.term)
        if t_term is TmZero:
            # E-IszeroZero: iszero 0 -> true
            return TmTrue(None)
        elif t_term is TmSucc and isnumericval(term.term):
            # E-IszeroSucc: iszero (succ nv) -> false
            return TmFalse(None)
        t = evaluate1(term.term, ctx)
        return TmIsZero(term.info, t)
    def visit_ANY(term, ctx):
        # No rule matched: the term is a normal form.
        raise NoRuleApplies
# Single-step evaluation entry point exposed by the visitor class.
evaluate1 = Evaluate.visit

def evaluate(ctx, term):
    """Repeatedly apply single-step evaluation until a normal form is reached."""
    while True:
        try:
            term = evaluate1(term, ctx)
        except NoRuleApplies:
            return term
# ------------------------ TYPING ------------------------
# combineconstr(xs, ys) extends the constraint list xs with ys in place.
combineconstr = list.extend

def uvargen():
    """Yield an endless stream of fresh unification-variable names ?X0, ?X1, ..."""
    counter = 0
    while True:
        yield "?X%s" % counter
        counter += 1
class Reconstruction(Visitor):
    """Constraint-based type reconstruction (TAPL, ch. 22).

    Every ``visit_<Term>`` method returns a pair ``(type, constraints)``
    where *constraints* is a list of type-equation pairs to be solved
    later by ``unify``.  Dispatch on the term's runtime type is provided
    by the external ``Visitor`` base class (methods take the term, not
    ``self``, as first argument); *nextuvar* yields fresh type-variable
    names.
    """
    def visit_TmVar(term, ctx, nextuvar):
        # A variable's type is looked up in the context; no constraints.
        tyT = getTypeFromContext(ctx, term.index)
        return (tyT, [])
    def visit_TmAbs(term, ctx, nextuvar):
        "lambda <name>:<type>. <term>"
        typeLeft = term.type
        addbinding(ctx, term.name, VarBind(typeLeft))
        (typeRight, contsr) = recon(term.term, ctx, nextuvar)
        # Undo the binding pushed above so the context is restored.
        ctx.pop()
        return (TyArr(typeLeft, typeRight), contsr)
    def visit_TmApp(term, ctx, nextuvar):
        """
        (t1 t2) with t1: T1, t2: T2
        return: type X and constraint T1 = T2 -> X
        see 22.3 Constraint-Based Typing
        """
        (typeLeft, constrLeft) = recon(term.left, ctx, nextuvar)
        (typeRight, constrRight) = recon(term.right, ctx, nextuvar)
        tyX = nextuvar()
        # typeLeft should be is 'arrow' from typeRight to X
        newconstr = [(typeLeft, TyArr(typeRight, TyId(tyX)))]
        constr = newconstr + constrLeft + constrRight
        return (TyId(tyX), constr)
    def visit_TmZero(term, ctx, nextuvar):
        return (TyNat(), [])
    def visit_TmSucc(term, ctx, nextuvar):
        # CT-Succ: succ t : Nat, constraining t : Nat.
        (tyT, constr) = recon(term.term, ctx, nextuvar)
        return (TyNat(), [(tyT, TyNat())] + constr)
    def visit_TmPred(term, ctx, nextuvar):
        # CT-Pred: pred t : Nat, constraining t : Nat.
        (tyT, constr) = recon(term.term, ctx, nextuvar)
        return (TyNat(), [(tyT, TyNat())] + constr)
    def visit_TmIsZero(term, ctx, nextuvar):
        # CT-IsZero: iszero t : Bool, constraining t : Nat.
        # BUGFIX: the original returned TyNat() as the result type, which
        # made e.g. "if iszero x ..." unsatisfiable (visit_TmIf requires
        # the guard to equal TyBool).
        (tyT, constr) = recon(term.term, ctx, nextuvar)
        return (TyBool(), [(tyT, TyNat())] + constr)
    def visit_TmTrue(term, ctx, nextuvar):
        return (TyBool(), [])
    def visit_TmFalse(term, ctx, nextuvar):
        return (TyBool(), [])
    def visit_TmIf(term, ctx, nextuvar):
        # CT-If: the guard must be Bool and both branches must agree.
        (tyT1, constr1) = recon(term.term_condition, ctx, nextuvar)
        (tyT2, constr2) = recon(term.term_then, ctx, nextuvar)
        (tyT3, constr3) = recon(term.term_else, ctx, nextuvar)
        newconstr = [(tyT1,TyBool()), (tyT2,tyT3)]
        constr = newconstr + constr1 + constr2 + constr3
        return (tyT3, constr)

recon = Reconstruction.visit
class SubstituteInTy(Visitor):
    """Substitution [tyX -> tyT] over type expressions.

    Dispatch on the type constructor is provided by the external
    ``Visitor`` base (methods take the type node, not ``self``, first).
    """
    def visit_TyArr(term, tyX, tyT):
        left = substinty(term.left, tyX, tyT)
        right = substinty(term.right, tyX, tyT)
        return TyArr(left, right)
    def visit_TyNat(term, tyX, tyT):
        return term
    def visit_TyBool(term, tyX, tyT):
        return term
    def visit_TyId(term, tyX, tyT):
        return tyT if term.name == tyX else term

substinty = SubstituteInTy.visit
def applysubst(constr, tyT):
    """Apply a solved constraint list as a substitution to *tyT*.

    Entries are applied in reverse order; each pair's left side is a
    TyId whose name gets replaced by the right-hand type.
    """
    result = tyT
    for lhs, rhs in reversed(constr):
        result = substinty(result, lhs.name, rhs)
    return result
def substinconstr(tyT, tyX, constr):
    """Substitute [tyX -> tyT] on both sides of every constraint pair."""
    return [
        (substinty(lhs, tyX, tyT), substinty(rhs, tyX, tyT))
        for (lhs, rhs) in constr
    ]
class OccursIn(Visitor):
    """Occurs check: does type variable *tyX* appear inside a type?

    Dispatch on the type constructor is provided by the external
    ``Visitor`` base (methods take the type node, not ``self``, first).
    """
    def visit_TyArr(term, tyX):
        if occursin(term.left, tyX):
            return True
        return occursin(term.right, tyX)
    def visit_TyNat(term, tyX):
        return False
    def visit_TyBool(term, tyX):
        return False
    def visit_TyId(term, tyX):
        return term.name == tyX

occursin = OccursIn.visit
def unify(ctx, constr_in):
    """Solve a list of type constraints by unification.

    Returns a substitution represented as a list of (TyId, type) pairs
    (suitable for ``applysubst``).  Raises RuntimeError on circular
    (occurs-check failure) or unsatisfiable constraints.
    """
    if not constr_in:
        return constr_in
    # Work on the first equation; 'rest' is deferred.
    constr = list(constr_in)
    (tyS, tyT) = constr[0]
    rest = constr[1:]
    t_tyS = type(tyS)
    t_tyT = type(tyT)
    if t_tyT is TyId:
        tyX = tyT.name
        if tyS == tyT:
            # Trivial equation X = X: drop it.
            return unify(ctx, rest)
        elif occursin(tyS, tyX):
            # Occurs check failed: X appears inside tyS.
            raise RuntimeError("Circular constraints")
        else:
            # Eliminate X from the remaining constraints, solve them,
            # then record X := tyS in the substitution.
            upd = unify(ctx, substinconstr(tyS, tyX, rest))
            upd.append((TyId(tyX),tyS))
            return upd
    elif t_tyS is TyId:
        # Symmetric case: the variable is on the left.
        tyX = tyS.name
        if tyT == tyS:
            return unify(ctx, rest)
        elif occursin(tyT, tyX):
            raise RuntimeError("Circular constraints")
        else:
            upd = unify(ctx, substinconstr(tyT, tyX, rest))
            upd.append((TyId(tyX),tyT))
            return upd
    elif t_tyS is TyNat and t_tyT is TyNat:
        return unify(ctx, rest)
    elif t_tyS is TyBool and t_tyT is TyBool:
        return unify(ctx, rest)
    elif t_tyS is TyArr and t_tyT is TyArr:
        # Arrow vs arrow: equate domains and codomains.
        upd = [(tyS.left, tyT.left), (tyS.right, tyT.right)]
        upd.extend(rest)
        return unify(ctx, upd)
    raise RuntimeError("Unsolvable constraints")
| {
"repo_name": "habibutsu/tapl-py",
"path": "recon/core.py",
"copies": "1",
"size": "6891",
"license": "mit",
"hash": -2543317977205793300,
"line_mean": 26.2371541502,
"line_max": 67,
"alpha_frac": 0.5735016688,
"autogenerated": false,
"ratio": 3.145139205842081,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9202088763534622,
"avg_score": 0.0033104222214919636,
"num_lines": 253
} |
from functools import reduce
from tetre.rule_applier import *
from tree_utils import find_in_spacynode, merge_nodes
class Growth(RuleApplier):
    """Growth rules: restructure the dependency tree so that more of the
    searched word's relations appear as its direct children."""

    def __init__(self):
        """Implements all growth rules. Growth rules are the ones that increases the number of children that
        are right below the node of the word being searched for, representing its relations.
        """
        RuleApplier.__init__(self)

        # Dependency labels treated as subjects / objects throughout.
        self.subs = ['nsubj', 'csubj', 'nsubjpass', 'csubjpass']
        self.objs = ['dobj', 'iobj', 'pobj']

        # (source label, target suffix) pairs consumed by
        # transform_xcomp_to_dobj_or_sub_if_doesnt_exists.
        self.move_if = [("xcomp", "obj"), ("ccomp", "obj"), ("xcomp", "subj"), ("ccomp", "subj")]

        # Labels assigned to nodes pulled below the searched word.
        self.downwards_subj = "nsubj"
        self.downwards_obj = "dobj"

    @RuleApplier.register_function
    def replace_subj_if_dep_is_relcl_or_ccomp(self, root, node_set, spacy_tree):
        """
        1) Consider the following sentence:
        "2 Related work Learning to rank has been a promising research area which continuously improves web
        search relevance (Burges et al."
        In this case, the dependency parser puts not the action the improves something as a parent of the word
        "improves" in the in the tree, and adds to it the relcl relation. This method adjusts the tree, bringing
        the node above under "improves".
        2) Now consider the following sentence:
        "The best known recovery algorithm for dirty mapping entries, proposed in LazyFTL, exhibits
        two shortcomings that GeckoFTL improves upon."
        In this case, GeckoFTL is is a proper noun, so it shouldn't be replaced.
        Args:
            root: The head of the NLTK tree.
            node_set: The nodes of the NLTK tree.
            spacy_tree: The TreeNode object, rooted at the word being searched for.
        Returns:
            root: The modified head of the NLTK tree.
            node_set: The modified nodes of the NLTK tree.
            spacy_tree: The modified TreeNode object.
            is_applied: A boolean marking if the rule was applied or not.
        """
        is_applied = False
        upwards = ["relcl", "ccomp"]
        # adjust tree
        token = spacy_tree
        token_head = spacy_tree
        if token_head.dep_ in upwards and token_head.head != token:
            token_head = token_head.head
            is_changing_possibilities = []
            is_changing = False
            has_subj = False
            children_list = token.children[:]
            for i in range(0, len(children_list)):
                if children_list[i].dep_ in self.subs:
                    has_subj = True
                    # NOTE(review): token.children[i] is read (and popped)
                    # while indexing with i taken from the snapshot
                    # children_list; an earlier pop shifts the live list's
                    # indices — verify this misalignment is intended.
                    if not (token.children[i].pos_ in ["NOUN", "PROPN", "VERB", "NUM", "PRON", "X"]):
                        token.children.pop(i)
                        is_changing_possibilities.append(True)
                    else:
                        is_changing_possibilities.append(False)
            if True in is_changing_possibilities:
                is_changing = True
            if not has_subj:
                is_changing = True
            if is_changing:
                is_applied = True
                # Detach 'token' from its former head ...
                children_list = token_head.children[:]
                for i in range(0, len(children_list)):
                    if children_list[i].idx == token.idx:
                        token_head.children.pop(i)
                # adjust representation
                node_set.append(self.downwards_subj)
                # ... and re-attach the former head below 'token' as its
                # subject.
                token_head.dep_ = self.downwards_subj
                token.children.append(token_head)
                token_head.head = token
        return root, node_set, spacy_tree, is_applied

    @RuleApplier.register_function
    def recurse_on_dep_conj_if_no_subj(self, root, node_set, spacy_tree):
        """
        1) Consider the following sentence:
        "Using many ASR hypotheses helps recover the ASR errors of NE words in 1-best ASR results and
        improves NER accuracy."
        In this case, the dependency parser puts not the action the improves something as a parent of the word
        "improves" in the in the tree, and adds to it the conj relation. This method adjusts the tree, bringing
        the node above under "improves".
        2) Now consider the following sentence:
        "Both identify product features from reviews, but OPINE significantly improves on both."
        In this case, note how although improves is a conj, "Both" is the subj up the tree. However, there is a
        "but" as the "cc", and beucase of this we need to pick the "conj" below instead of the "subj".
        3) Now consider the following sentence:
        "SFS [6] (sort-filter-skyline) is based on the same rationale as BNL , but improves performance by
        first sorting the data according to a monotone function."
        In this case, this has a "but", however no other conj, in this case we should use the nsubj again.
        4) Now consider the following sentence:
        "[16] studies the usage of grammars and LZ77 parsing for compression of similar sequence collections and
        improves complexity bounds with respect to space as well as time."
        In this case, the subj is actually the dobj of the head.
        Args:
            root: The head of the NLTK tree.
            node_set: The nodes of the NLTK tree.
            spacy_tree: The TreeNode object, rooted at the word being searched for.
        Returns:
            root: The modified head of the NLTK tree.
            node_set: The modified nodes of the NLTK tree.
            spacy_tree: The modified TreeNode object.
            is_applied: A boolean marking if the rule was applied or not.
        """
        is_applied = False
        upwards = ["conj"]
        token = spacy_tree
        token_head = spacy_tree
        # Walk up through chained conj heads until a subject is found or
        # the chain ends.
        while True:
            if token_head.dep_ in upwards \
                    and token_head.head != token_head \
                    and len([child for child in token.children if child.dep_ in self.subs]) == 0:
                token_head = token_head.head
                children_list = token_head.children[:]
                # Flags describing the head's children (see docstring
                # cases 1-4 for why each matters).
                is_but = False
                other_conj_exists = False
                has_subj = False
                # has_obj = False
                for j in range(0, len(children_list)):
                    if token_head.children[j].dep_ in "cc" \
                            and token_head.children[j].orth_ == "but":
                        is_but = True
                    if token_head.children[j].dep_ in "conj" \
                            and token_head.children[j] != token:
                        other_conj_exists = True
                    if "subj" in token_head.children[j].dep_:
                        has_subj = True
                    # if "obj" in token_head.children[j].dep_:
                    #     has_obj = True
                for i in range(0, len(children_list)):
                    is_other_conj = token_head.children[i].dep_ == "conj" and token_head.children[i] != token
                    is_subj = token_head.children[i].dep_ in self.subs
                    is_obj = token_head.children[i].dep_ in self.objs
                    node_result = find_in_spacynode(token_head.children[i], token.dep_, token.orth_)
                    if node_result:
                        is_sub_child = True
                    else:
                        is_sub_child = False
                    # One condition per docstring case:
                    cond_subj = not is_but and is_subj
                    cond_dobj = not is_but and not has_subj and is_obj
                    cond_conj_other = is_but and not is_subj and other_conj_exists and \
                        is_other_conj and not is_sub_child
                    cond_conj_same = is_but and not other_conj_exists and is_subj
                    if cond_subj or \
                            cond_conj_other or \
                            cond_dobj or \
                            cond_conj_same:
                        is_applied = True
                        if cond_dobj or cond_conj_other:
                            token_head.children[i].dep_ = self.downwards_subj
                        # adjust representation
                        node_set.append(token_head.children[i].dep_)
                        # adjust actual tree
                        token.children.append(token_head.children[i])
                        token_head.children[i].head = token
                        token_head.children.pop(i)
                        break
            else:
                break
        return root, node_set, spacy_tree, is_applied

    @RuleApplier.register_function
    def transform_xcomp_to_dobj_or_sub_if_doesnt_exists(self, root, node_set, spacy_tree):
        """
        1) Consider the sentences:
        -- xcomp > "Recent work has showed that structured retrieval improves answer ranking for factoid
        questions: Bilotti et al."
        -- ccomp > "The Fat-Btree structure presented in [19] vastly reduces the index-modification cost
        and improves the dynamic data skew handling method."
        In this case, although it is possible to understand that "structured retrieval" "improves"
        "answer ranking..." the "answer ranking..." part is not presented as a dobj dependency, but a xcomp
        dependency instead. This rule transforms xcomp into "obj" as both contain the same purpose for information
        extraction.
        2) Now consider the following sentence:
        -- ccomp > "2 Related Work Caching frequently accessed data at the client side not only improves the
        user’s experience of the distributed system, but also alleviates the server’s workload and
        enhances its scalability."
        In this case, although in this sentence the dobj was detected, the ccomp is the nsubj. Thus, after
        replacing the items for dobj, if there is no nsubj in the sentence we try to tranform then in nsubj.
        Args:
            root: The head of the NLTK tree.
            node_set: The nodes of the NLTK tree.
            spacy_tree: The TreeNode object, rooted at the word being searched for.
        Returns:
            root: The modified head of the NLTK tree.
            node_set: The modified nodes of the NLTK tree.
            spacy_tree: The modified TreeNode object.
            is_applied: A boolean marking if the rule was applied or not.
        """
        is_applied = False
        for replace, target in self.move_if:
            # Skip this mapping if the target relation already exists.
            is_obj = False
            for child in spacy_tree.children:
                if target in child.dep_:
                    is_obj = True
                    break
            if is_obj:
                continue
            # Relabel the first matching child and mirror the change in
            # the NLTK representation.
            for child in spacy_tree.children:
                if replace in child.dep_:
                    is_applied = True
                    child.dep_ = target
                    node_set = [target if node == replace else node for node in node_set]
                    break
        node_set = set([self.rewrite_dp_tag(node) for node in node_set])
        return root, node_set, spacy_tree, is_applied

    @RuleApplier.register_function
    def transform_prep_in_to_dobj(self, root, node_set, spacy_tree):
        """
        1) Consider the following sentence:
        "While matrix factorization is widely used in recommender systems, matrix co-factorization helps to handle
        multiple aspects of the data and improves in predicting individual decisions (Hong et al. "
        In this case, one can see that "matrix co-factorization" and improves "predicting individual decisions". It
        could be rewriting as "improves prediction of individual decisions". Thus anything after a "prep in"
        could be considered an "obj".
        Args:
            root: The head of the NLTK tree.
            node_set: The nodes of the NLTK tree.
            spacy_tree: The TreeNode object, rooted at the word being searched for.
        Returns:
            root: The modified head of the NLTK tree.
            node_set: The modified nodes of the NLTK tree.
            spacy_tree: The modified TreeNode object.
            is_applied: A boolean marking if the rule was applied or not.
        """
        is_applied = False
        target = "obj"
        replace = "prep"
        is_obj = False
        for child in spacy_tree.children:
            if target in child.dep_:
                is_obj = True
        for child in spacy_tree.children:
            if replace in child.dep_ and child.orth_ == "in":
                if not is_obj:
                    is_applied = True
                    child.dep_ = target
                # NOTE(review): the representation is rewritten even when
                # an obj already exists (is_obj True) — confirm intended.
                node_set = [target if node == replace else node for node in node_set]
        node_set = list(set([self.rewrite_dp_tag(node) for node in node_set]))
        return root, node_set, spacy_tree, is_applied

    @RuleApplier.register_function
    def add_dobj_if_dep_is_subj(self, root, node_set, spacy_tree):
        """
        1) Consider the following sentence:
        "Turney (2005) extends the above approach by introducing the latent relational analysis (LRA), which uses
        automatically generated synonyms, learns suitable patterns, and performs singular value decomposition
        in order to smooth the frequencies."
        "This work uses materialized views to further benefit from commonalities across queries."
        In the first sentence, the relation uses(which; automatically generated synonyms) could have been extracted
        by getting the nsubj dependency and transforming it to be the child's dobj. The same is valid for the
        second example.
        Args:
            root: The head of the NLTK tree.
            node_set: The nodes of the NLTK tree.
            spacy_tree: The TreeNode object, rooted at the word being searched for.
        Returns:
            root: The modified head of the NLTK tree.
            node_set: The modified nodes of the NLTK tree.
            spacy_tree: The modified TreeNode object.
            is_applied: A boolean marking if the rule was applied or not.
        """
        token = spacy_tree
        token_head = token.head
        is_applied = False
        has_subj = False
        has_obj = False
        for j in range(0, len(token.children)):
            if "subj" in token.children[j].dep_:
                has_subj = True
            if "obj" in token.children[j].dep_:
                has_obj = True
        # Only fires when the token is itself a subject, already has a
        # subject child, and lacks an object child.
        if "subj" in token.dep_ and has_subj and not has_obj:
            is_applied = True
            # Detach 'token' from its head, then re-attach the head
            # below 'token' as its direct object.
            children_list = token_head.children[:]
            for i in range(0, len(children_list)):
                if children_list[i].idx == token.idx:
                    token_head.children.pop(i)
            # adjust representation
            node_set.append(self.downwards_obj)
            token_head.dep_ = self.downwards_obj
            token.children.append(token_head)
            token_head.head = token
        return root, node_set, spacy_tree, is_applied
class Reduction(RuleApplier):
    """Reduction rules: prune or merge children of the searched word so
    fewer, cleaner relations remain."""

    def __init__(self):
        """Implements all reduction rules. Reduction rules are the ones that decreases the number of children that
        are right below the node of the word being searched for, representing its relations.
        """
        RuleApplier.__init__(self)
        # Dependency labels (and blank artifacts) considered noise.
        self.tags_to_be_removed = {'punct', 'mark', ' ', '', 'meta'}

    @RuleApplier.register_function
    def remove_duplicates(self, root, node_set, spacy_tree):
        """This groups sentence with e.g.: multiple "punct" into the same group for easier analysis.
        Args:
            root: The head of the NLTK tree.
            node_set: The nodes of the NLTK tree.
            spacy_tree: The TreeNode object, rooted at the word being searched for.
        Returns:
            root: The modified head of the NLTK tree.
            node_set: The modified nodes of the NLTK tree.
            spacy_tree: The modified TreeNode object.
            is_applied: A boolean marking if the rule was applied or not.
        """
        # Deduplication happens by converting the node list to a set.
        return root, set(node_set), spacy_tree, False

    @RuleApplier.register_function
    def remove_tags(self, root, node_set, spacy_tree):
        """This removes dependency paths of the types contained in self.tags_to_be_removed as they are not considered
        relevant. This reduces the number of different groups.
        Args:
            root: The head of the NLTK tree.
            node_set: The nodes of the NLTK tree.
            spacy_tree: The TreeNode object, rooted at the word being searched for.
        Returns:
            root: The modified head of the NLTK tree.
            node_set: The modified nodes of the NLTK tree.
            spacy_tree: The modified TreeNode object.
            is_applied: A boolean marking if the rule was applied or not.
        """
        is_applied = False
        node_set = set(node_set) - self.tags_to_be_removed
        # Matching children are not deleted, only flagged so later
        # traversals skip them.
        for child in spacy_tree.children:
            if child.dep_ in self.tags_to_be_removed:
                is_applied = True
                child.no_follow = True
        return root, node_set, spacy_tree, is_applied

    @RuleApplier.register_function
    def transform_tags(self, root, node_set, spacy_tree):
        """This transform tags from several variations into a more general version. The mappings are contained
        in the self.translation_rules variables.
        Args:
            root: The head of the NLTK tree.
            node_set: The nodes of the NLTK tree.
            spacy_tree: The TreeNode object, rooted at the word being searched for.
        Returns:
            root: The modified head of the NLTK tree.
            node_set: The modified nodes of the NLTK tree.
            spacy_tree: The modified TreeNode object.
            is_applied: A boolean marking if the rule was applied or not.
        """
        node_set = set([self.rewrite_dp_tag(node) for node in node_set])
        return root, node_set, spacy_tree, False

    @RuleApplier.register_function
    def merge_multiple_subj_or_dobj(self, root, node_set, spacy_tree):
        """This intends to unify multiple subj and fix representation. Consider the following sentence:
        "Another partitional method ORCLUS [2] improves PROCLUS by selecting principal components so that clusters
        not parallel to the original dimensions can also be detected."
        In this example, the sentence has 2 nsubj: "Another partitional method" and "ORCLUS [2]". They should
        be in the same sentence. Because it has 2 subj, the representation ends up being the one from the
        last nsubj.
        Args:
            root: The head of the NLTK tree.
            node_set: The nodes of the NLTK tree.
            spacy_tree: The TreeNode object, rooted at the word being searched for.
        Returns:
            root: The modified head of the NLTK tree.
            node_set: The modified nodes of the NLTK tree.
            spacy_tree: The modified TreeNode object.
            is_applied: A boolean marking if the rule was applied or not.
        """
        is_applied = False
        groups = ["subj", "obj"]
        for group in groups:
            this_group = []
            # Count how many children belong to this group; merging only
            # makes sense for 2 or more.
            count = reduce(lambda x, y: x + 1 if group in y.dep_ else x, spacy_tree.children, 0)
            if count < 2:
                continue
            # Repeatedly remove the first matching child (restarting the
            # scan each time since the list is mutated) until none remain.
            changed = True
            while changed:
                changed = False
                children_list = spacy_tree.children[:]
                for i in range(0, len(children_list)):
                    if group in children_list[i].dep_:
                        this_group.append(children_list[i])
                        spacy_tree.children.pop(i)
                        is_applied = True
                        changed = True
                        break
            # Re-attach all collected nodes as a single merged child.
            child = merge_nodes(this_group)
            spacy_tree.children.append(child)
            child.head = spacy_tree
        return root, node_set, spacy_tree, is_applied
class Process(object):
    """Entry point chaining the Growth and Reduction rule sets."""

    def __init__(self):
        """Instantiate both rule sets."""
        self.growth = Growth()
        self.reduction = Reduction()

    def apply_all(self, nltk_tree, spacy_tree):
        """Apply every growth rule, then every reduction rule.

        Args:
            nltk_tree: The tree in the NLTK structure that represents the grouping.
            spacy_tree: The actual TreeNode the rules are extracted from,
                rooted at the word being searched for.
        Returns:
            The final NLTK representation tree and a combined list of the
            method signatures of all rules applied.
        """
        tree, grown = self.growth.apply(nltk_tree, spacy_tree)
        tree, reduced = self.reduction.apply(tree, spacy_tree)
        return tree, (grown + reduced)
| {
"repo_name": "aoldoni/tetre",
"path": "lib/tetre/graph_processing.py",
"copies": "1",
"size": "21385",
"license": "mit",
"hash": -8382806804492293,
"line_mean": 39.9367816092,
"line_max": 119,
"alpha_frac": 0.5767232907,
"autogenerated": false,
"ratio": 4.269530469530469,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5346253760230469,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from typing import Tuple, Callable, Any, TypeVar, overload # noqa
# Generic placeholders for the ``compose`` overloads below: each letter is one
# link in the composition chain (A -> B -> ... -> G).
A = TypeVar("A")
B = TypeVar("B")
C = TypeVar("C")
D = TypeVar("D")
E = TypeVar("E")
F = TypeVar("F")
G = TypeVar("G")
# Typed overloads for ``compose``.  Composition is right to left, so the FIRST
# positional argument is the OUTERMOST function: compose(opN, ..., op1)(x)
# threads x through op1 first and opN last.
@overload
def compose() -> Callable[[A], A]:  # pylint: disable=function-redefined
    ...  # pylint: disable=pointless-statement


@overload
def compose(op1: Callable[[A], B]) -> Callable[[A], B]:  # pylint: disable=function-redefined
    ...  # pylint: disable=pointless-statement


@overload
def compose(op2: Callable[[B], C], op1: Callable[[A], B]) -> Callable[[A], C]:  # pylint: disable=function-redefined
    ...  # pylint: disable=pointless-statement


@overload
def compose(
    op3: Callable[[C], D], op2: Callable[[B], C], op1: Callable[[A], B]  # pylint: disable=function-redefined
) -> Callable[[A], D]:
    ...  # pylint: disable=pointless-statement


@overload
def compose(
    op4: Callable[[D], E],  # pylint: disable=function-redefined
    op3: Callable[[C], D],
    op2: Callable[[B], C],
    op1: Callable[[A], B],
) -> Callable[[A], E]:
    ...  # pylint: disable=pointless-statement


@overload
def compose(
    op5: Callable[[E], F],  # pylint: disable=function-redefined
    op4: Callable[[D], E],
    op3: Callable[[C], D],
    op2: Callable[[B], C],
    op1: Callable[[A], B],
) -> Callable[[A], F]:
    ...  # pylint: disable=pointless-statement


# BUG FIX: this overload previously listed op1..op6 left-to-right (op1 first)
# while still claiming Callable[[A], G] — inconsistent with the right-to-left
# semantics and with every other overload.  The outermost function (F -> G)
# must come first, descending to op1 (A -> B) last.
@overload
def compose(
    op6: Callable[[F], G],  # pylint: disable=function-redefined,too-many-arguments
    op5: Callable[[E], F],
    op4: Callable[[D], E],
    op3: Callable[[C], D],
    op2: Callable[[B], C],
    op1: Callable[[A], B],
) -> Callable[[A], G]:
    ...  # pylint: disable=pointless-statement
def compose(*funcs: Callable) -> Callable:  # type: ignore
    """Compose multiple functions right to left.

    Composes zero or more functions into a functional composition.  The
    functions are composed right to left; composing zero functions gives back
    the identity function.

        compose()(x) == x
        compose(f)(x) == f(x)
        compose(g, f)(x) == g(f(x))
        compose(h, g, f)(x) == h(g(f(x)))
        ...

    Returns the composed function.
    """

    def _composed(source: Any) -> Any:
        result = source
        # Apply from the rightmost (innermost) function outwards.
        for func in reversed(funcs):
            result = func(result)
        return result

    return _composed
# ``fmap`` is a two-argument alias for ``compose`` kept as a lambda so it can
# be partially applied; ``identity`` is the zero-function composition, i.e.
# the function that returns its argument unchanged.
fmap = lambda f, g: compose(f, g)  # To force partial application
identity = compose()  # type: Callable
| {
"repo_name": "dbrattli/OSlash",
"path": "oslash/util/fn.py",
"copies": "1",
"size": "2405",
"license": "apache-2.0",
"hash": -4851590875184598000,
"line_mean": 24.8602150538,
"line_max": 116,
"alpha_frac": 0.6191268191,
"autogenerated": false,
"ratio": 3.202396804260985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4321523623360985,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from typing import TypeVar, Callable, List, Tuple, Any
# Generic type placeholder shared by the helpers in this module.
T = TypeVar('T')
def curry(x, args_count=None):
    """Turn ``x`` into a curried function.

    Currying translates a function taking multiple arguments into a chain of
    calls, each of which may supply one or more of those arguments; ``x`` runs
    once all of its arguments (``args_count``, defaulting to ``x``'s declared
    argument count) have been provided.
    """
    if args_count is None:
        args_count = x.__code__.co_argcount

    def curried(*supplied):
        if len(supplied) == args_count:
            return x(*supplied)
        # Not saturated yet: bind what we have and curry the remainder.
        return curry(lambda *rest: x(*(supplied + rest)), args_count - len(supplied))

    return curried
def identity(value: T) -> T:
    """Return the argument unchanged.

    :param value: any object
    :type value: Any
    :returns: the very same object
    :rtype: Any
    """
    return value
def increase(value: int) -> int:
    """Return the argument incremented by one.

    :param value: integer to increment
    :type value: Int
    :returns: ``value + 1``
    :rtype: Int
    """
    return value + 1
@curry
def eq(value, value1) -> bool:
    """Curried equality test: ``eq(a)(b)`` (or ``eq(a, b)``) is ``a == b``."""
    return value == value1
@curry
def curried_map(mapper, collection):
    """Curried ``map`` returning a list: ``curried_map(f)(xs) == [f(x) for x in xs]``."""
    return [mapper(item) for item in collection]
@curry
def curried_filter(filterer, collection):
    """Curried ``filter`` returning a list of the items satisfying ``filterer``."""
    return [item for item in collection if filterer(item)]
@curry
def find(collection: List[T], key: Callable[[T], bool]):
    """
    Return the first element of the list which matches the key, or None if no
    element matches.

    :param collection: collection to search
    :type collection: List[A]
    :param key: function deciding which element should be found
    :type key: Function(A) -> Boolean
    :returns: element of collection or None
    :rtype: A | None
    """
    for item in collection:
        if key(item):
            return item
    # Implicitly returns None when nothing matches.
def compose(value, *functions):
    """
    Perform right-to-left function composition.

    The last function listed is applied to ``value`` first, the first one last.

    :param value: argument of the first applied (rightmost) function
    :type value: Any
    :param functions: functions applied from right to left
    :type functions: List[Function]
    :returns: result of threading ``value`` through all functions
    :rtype: Any
    """
    result = value
    for function in reversed(functions):
        result = function(result)
    return result
def pipe(value, *functions):
    """
    Perform left-to-right function composition.

    The first function listed is applied to ``value`` first, the last one last.

    :param value: argument of the first applied (leftmost) function
    :type value: Any
    :param functions: functions applied from left to right
    :type functions: List[Function]
    :returns: result of threading ``value`` through all functions
    :rtype: Any
    """
    result = value
    for function in functions:
        result = function(result)
    return result
def cond(condition_list: List[Tuple[
    Callable[[T], bool],
    Callable,
]]):
    """
    Build a dispatching function from (predicate, handler) pairs.

    The returned function walks ``condition_list`` in order and, for the first
    pair whose predicate returns a truthy value for its arguments, calls the
    matching handler and returns its result.  With no match it returns None.

    :param condition_list: list of two-item tuples (condition_function, execute_function)
    :type condition_list: List[(Function, Function)]
    :returns: dispatcher selecting the execute_function of the first truthy condition_function
    :rtype: Function
    """
    def dispatch(*args):
        for predicate, handler in condition_list:
            if predicate(*args):
                return handler(*args)
    return dispatch
def memoize(fn: Callable, key=eq) -> Callable:
    """
    Create a new function that, when invoked, caches the result of calling
    ``fn`` for a given argument and returns the cached result on subsequent
    matching calls (as decided by ``key``), without calling ``fn`` again.

    NOTE: the cache is a list scanned linearly and never evicted, so lookup is
    O(number of distinct arguments) and memory grows without bound.

    :param fn: function to invoke
    :type fn: Function(A) -> B
    :param key: function to decide if the result should be taken from cache
    :type key: Function(A, A) -> Boolean
    :returns: new function invoking the old one
    :rtype: Function(A) -> B
    """
    cache: List[Any] = []

    def memoized_fn(argument):
        # Each cache entry is an (argument, result) pair; ``find`` returns the
        # first entry whose stored argument matches per ``key``.
        cached_result = find(cache, lambda cacheItem: key(cacheItem[0], argument))
        if cached_result is not None:
            return cached_result[1]
        fn_result = fn(argument)
        cache.append((argument, fn_result))
        return fn_result
    return memoized_fn
| {
"repo_name": "przemyslawjanpietrzak/pyMonet",
"path": "pymonet/utils.py",
"copies": "1",
"size": "4461",
"license": "mit",
"hash": -5447081044837870000,
"line_mean": 26.0363636364,
"line_max": 111,
"alpha_frac": 0.6565792423,
"autogenerated": false,
"ratio": 4.062841530054645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005770679252026417,
"num_lines": 165
} |
from functools import reduce
from videocore.vinstr import *
from videocore.encoding import Register
# helper functions
def print_with_indent(instr):
    # Print an instruction indented; used for context lines around a warning.
    print (' {}'.format(instr))
def print_with_attension(instr):
    # Print the offending instruction highlighted with a '>>>' marker.
    # (Name keeps its historical spelling of "attention".)
    print ('>>> {}'.format(instr))
# TODO: print labels
# labels are not stored in asm._instructions, but asm._labels (tuple label with its position)
def print_instructions(instrs, indexes, labels):
    """Print the instructions at ``indexes`` (out-of-range indexes are skipped),
    emitting an ``L.<name>`` line where a label points at that position.

    NOTE(review): ``enc`` is not among this module's visible imports —
    presumably re-exported via ``videocore.vinstr``'s star import; confirm.
    """
    labels_rev = enc.rev (labels)
    for index in indexes:
        if 0 <= index <= len(instrs) - 1:
            l = labels_rev.get(index)
            if l:
                print_with_indent ('L.{}'.format(l))
            print_with_indent(instrs[index])
def print_around(target, instrs, labels):
    """Print ``target`` highlighted, with two instructions of context before
    and after (and its label line, when one points at it)."""
    index = instrs.index(target)
    labels_rev = enc.rev (labels)
    print_instructions(instrs, range (index-2, index), labels)
    l = labels_rev.get(index)
    if l:
        print_with_indent('L.{}'.format(l))
    print_with_attension(target)
    print_instructions(instrs, range (index+1, index + 3), labels)
#================ utility functions ===================================
def is_tmu(reg):
    """True when ``reg``'s address falls in the TMU register range (56..63)."""
    assert (isinstance (reg, Register))
    return 56 <= reg.addr <= 63
def is_sfu(reg):
    """True when ``reg``'s address falls in the SFU register range (52..55)."""
    assert (isinstance (reg, Register))
    return 52 <= reg.addr <= 55
def is_sfu_instruction(instr):
    """Return True when any destination written by ``instr`` is an SFU register."""
    assert (isinstance (instr, InstrBase))
    return any(is_sfu(dst) for dst in get_outputs(instr))
def get_outputs(instr):
    """Collect the destination register(s) written by ``instr``.

    A composed (add+mul) instruction contributes both halves, skipping nop
    halves; a plain instruction contributes its single destination.  None
    destinations are filtered out of the result.
    """
    outputs = []
    if is_composed (instr):
        if not instr.add_instr.is_nop():
            outputs.append (instr.add_instr.get_dst())
        if not instr.mul_instr.is_nop():
            outputs.append (instr.mul_instr.get_dst())
    elif instr.get_dst():
        if not instr.is_nop():
            outputs.append (instr.get_dst())
    return list (filter (lambda x: x != None, outputs))
def get_inputs(instr):
    """Collect the source operand(s) read by ``instr``.

    A composed (add+mul) instruction contributes both argument pairs; a plain
    instruction contributes its two arguments.  None operands are filtered out.
    """
    inputs = []
    if is_composed (instr):
        inputs.append(instr.add_instr.get_arg1())
        inputs.append(instr.add_instr.get_arg2())
        inputs.append(instr.mul_instr.get_arg1())
        inputs.append(instr.mul_instr.get_arg2())
    else:
        inputs.append(instr.get_arg1())
        inputs.append(instr.get_arg2())
    return list(filter (lambda x: x != None, inputs))
# Return the branch-target instruction when ``instr`` sits in the last of the
# three delay slots following a branch; otherwise return None.
def is_in_last_delayslot (instr, instrs, labels):
    index = instrs.index(instr)
    if index - 3 < 0:
        return None
    prev = instrs[index - 3]
    if is_branch(prev) and prev.target: # destination may not be a label
        # ``labels`` maps label name -> byte position; //8 presumably converts
        # a byte offset to an instruction index — confirm instruction size.
        return instrs[labels[prev.target.name]//8]
    else:
        return None
def is_register(reg):
    """True when ``reg`` is a Register instance."""
    return isinstance(reg, Register)
def is_r4(reg):
    """True when ``reg`` is the r4 register."""
    assert (isinstance (reg, Register))
    return enc.REGISTERS['r4'] == reg
def is_read_from_r4(instr):
    """True when any source operand of ``instr`` is r4."""
    inputs = get_inputs(instr)
    return not list (filter(is_r4, inputs)) == []
def is_write_to_r4(instr):
    """True when any destination of ``instr`` is r4."""
    outputs = get_outputs(instr)
    return not list (filter(is_r4, outputs)) == []
def is_use_r4(instr):
    """True when ``instr`` reads or writes r4."""
    return is_read_from_r4(instr) or is_write_to_r4(instr)
def is_rotate(instr):
    """True when ``instr`` (or the mul half of a composed instruction) carries
    a vector-rotate flag."""
    return is_mul(instr) and instr.rotate or is_composed(instr) and is_rotate(instr.mul_instr)
#================ check functions ======================================
def check_branch_delay_slot(instr, instrs, labels):
    """Warn when a branch lacks its delay-slot instructions, or when another
    branch sits inside a delay slot.  Returns False on any warning."""
    f = True
    if (is_branch (instr)):
        index = instrs.index(instr)
        # NOTE(review): the slice below reads three slots (index+1..index+3)
        # but this guard only requires len(instrs) >= index + 3, which allows
        # a short two-element slice through — looks off-by-one; confirm.
        if len(instrs) < index + 3:
            print ('warning: instructions of delay_slot is short?')
            print_around(instr, instrs, labels)
            f = False
        else:
            delay_slot = instrs[index+1:index+4]
            for item in delay_slot:
                if (is_branch (item)):
                    print ('warning: branch is located in the position of delay_slot')
                    print_around(item, instrs, labels)
                    f = False
    return f
def check_composed(instr, instrs, labels):
    """Warn when both halves of a composed instruction write the same
    destination (unless the add half signals 'thread end').  Returns False on
    a warning, True otherwise."""
    if (is_composed(instr)):
        v = instr
        if v.add_instr.dst == v.mul_instr.dst and v.add_instr.sig != 'thread end':
            print ('warning: dst is the same register in the following composed-instruction')
            print_around(instr, instrs, labels)
            return False
    return True
def check_signal(instr, instrs, labels):
    """Warn when an instruction both signals a TMU load and writes a TMU
    register.  Returns False on any warning."""
    f = True
    if not (is_composed (instr) or is_add (instr) or is_mul (instr)):
        return True
    outputs = get_outputs (instr)
    sig = instr.get_sig()
    if sig and (sig == 'load tmu0' or sig == 'load tmu1'):
        for out in outputs:
            if is_tmu(out):
                print ('warning: signal to tmu and setting tmu register are together')
                print_around(instr, instrs, labels)
                f = False
    return f
def check_regfile(instr, instrs, labels):
    """Warn when a general-purpose regfile register written by ``instr`` is
    read by the instruction executed next — either the following instruction
    or, when ``instr`` fills the last delay slot, the branch target.  Returns
    False on any warning."""
    f = True
    index = instrs.index(instr)
    if len(instrs) == index + 1:
        return True
    # prev -> current
    prev = instr
    current = is_in_last_delayslot(instr, instrs, labels)
    show_current = True
    if current:
        pass
    else:
        # Fall back to straight-line flow; don't print the (adjacent) current.
        show_current = False
        current = instrs[index + 1]
    outputs = get_outputs(prev)
    inputs = get_inputs(current)
    for out in list (filter(is_register, outputs)):
        for read in list (filter(is_register, inputs)):
            # Only general-purpose regfile registers are affected.
            if enc.GENERAL_PURPOSE_REGISTERS.get(out.name, None) and out.name == read.name:
                print ('warning: regfile is read next to writing instruction')
                print_around(prev, instrs, labels)
                if show_current:
                    print('-----------------')
                    print_around(current, instrs, labels)
                f = False
    return f
def check_rotate(instr, instrs, labels):
    """Warn when a vector-rotate instruction immediately follows an
    instruction that writes the register being rotated, or writes r5
    ('broadcast') before a rotate-by-r5.  Returns False on any warning."""
    prev = instr
    index = instrs.index(prev)
    # All instructions that can execute immediately after ``prev``.
    currents = get_nexts(prev, instrs, labels, 1)
    show_current = True  # NOTE(review): assigned but never used.
    f = True
    for current in currents:
        if not is_rotate(current):
            continue
        if is_composed(current):
            mul = current.mul_instr
        else:
            mul = current
        outputs = get_outputs(prev)
        inputs = get_inputs(mul)
        for out in list (set(filter(is_register, outputs))):
            for inp in list (set(filter(is_register, inputs))):
                if out.name == inp.name:
                    print('warning: An instruction that does a vector rotate must not immediately follow an instruction that writes to the accumulator that is being rotated.')
                    print_around(prev, instrs, labels)
                    if len(instrs) == index+1 or current != instrs[index+1]:
                        print('-----------------')
                        print_around(current, instrs, labels)
                    f = False
        if mul.get_rotate() == enc.REGISTERS['r5']:
            for out in outputs:
                if out == enc.REGISTERS['broadcast']:
                    print('warning: An instruction that does a vector rotate by r5 must not immediately follow an instruction that writes to r5.')
                    print_around(prev, instrs, labels)
                    if len(instrs) == index+1 or current != instrs[index+1]:
                        print('-----------------')
                        print_around(current, instrs, labels)
                    f = False
    return f
def get_nexts(instr, instrs, labels, n):
    """Return the instructions reachable in exactly ``n`` execution steps from
    ``instr``, following both straight-line flow and — when ``instr`` fills
    the last delay slot — the branch target."""
    index = instrs.index(instr)
    if n == 0:
        return [instr]
    l = []
    n1 = is_in_last_delayslot(instr, instrs, labels)
    if n1:
        l.append(n1)
    if index + 1 < len(instrs):
        n2 = instrs[index+1]
        l.append(n2)
    if l:
        # Recurse one step deeper from every successor and flatten the lists.
        l = map(lambda insn: get_nexts (insn, instrs, labels, n-1), l)
        return list(reduce(lambda x, y: x + y, l))
    else:
        return []
# See Summary of Instruction Restrictions (page 37)
def check_sfu(instr, instrs, labels):
    """Warn when, within the two instructions following an SFU-started
    computation, r4 is used or another r4 writer (SFU or TMU-load signal) is
    issued.  Returns False on any warning."""
    f = True
    if is_sfu_instruction(instr):
        n1 = get_nexts(instr, instrs, labels, 1)
        n2 = get_nexts(instr, instrs, labels, 2)
        for e in n1 + n2:
            if is_use_r4(e):
                print("warning: reading from r4 is forbidden in the following two instruction")
                print_around(e, instrs, labels)
                f = False
            if is_sfu_instruction(e) or (e.get_sig() and (e.get_sig() == 'load tmu0' or e.get_sig() == 'load tmu1')):
                print("warning: writing to r4 is forbidden in the following two instruction")
                print_around(e, instrs, labels)
                f = False
    return f
# Per-instruction checks run by ``single_step``.
single_steps = [check_regfile, check_composed, check_branch_delay_slot, check_signal, check_sfu, check_rotate]
def single_step(instrs, labels):
    """Run every per-instruction check over ``instrs``.

    Returns True only when all checks pass.  Each check is evaluated
    unconditionally so that its warnings are always printed; the previous
    ``f = f and check(...)`` short-circuited and silently skipped every
    remaining check once one had failed.
    """
    ok = True
    for instr in instrs:
        for check in single_steps:
            ok = check(instr, instrs, labels) and ok
    return ok
# Whole-program checks run by ``check_main``.
all_checks = [single_step]
def extract_verbose(instr):
    """Return the ``verbose`` attribute of ``instr``."""
    return instr.verbose
def prepare(instrs, labels):
    """Normalise assembler data for checking: unwrap each instruction's
    ``verbose`` form and keep only pinned labels as a name -> position dict."""
    verbose_instrs = [instr.verbose for instr in instrs]
    pinned_labels = {label.name: position for label, position in labels if label.pinned}
    return (verbose_instrs, pinned_labels)
def check_main(instrs, labels):
    """Entry point: prepare instruction/label data, then run every check.

    Returns True only when all checks pass.  Each check runs unconditionally
    (``check(...) and ok``) so later checks still print their warnings after
    an earlier failure; the previous ``f = f and check(...)`` short-circuited
    past them.
    """
    instrs, labels = prepare(instrs, labels)
    ok = True
    # print_instructions (instrs, range (0, len (instrs)), labels)
    for check in all_checks:
        # Pass a fresh copy so a check cannot mutate the caller's labels.
        ok = check(instrs, dict (labels)) and ok
    return ok
| {
"repo_name": "nineties/py-videocore",
"path": "videocore/checker.py",
"copies": "1",
"size": "8613",
"license": "mit",
"hash": 1364999910554287900,
"line_mean": 28.4965753425,
"line_max": 165,
"alpha_frac": 0.6353187043,
"autogenerated": false,
"ratio": 3.32035466461064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.445567336891064,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from werkzeug.exceptions import NotFound
from werkzeug.utils import redirect
from .application import Page
from .utils import unicodecmp
class ServerList(Page):
    """Overview page: sortable list of servers plus the aggregated player list."""

    url_rule = "/"

    def order_link(self, name, title):
        """Render a column-header link for sorting by ``name``.

        Clicking the active column toggles the direction; the active column
        carries an ``up``/``down`` CSS class.
        """
        cls = ""
        link = f"?order_by={name}"
        desc = False
        if name == self.order_by:
            desc = not self.order_desc
            cls = f' class="{"down" if desc else "up"}"'
        if desc:
            link += "&dir=desc"
        return f'<a href="{link}"{cls}>{title}</a>'

    def process(self):
        """Populate ``self.servers`` (sorted) and ``self.players`` for rendering."""
        from functools import cmp_to_key  # stdlib; local so this fix is self-contained

        self.order_by = self.request.args.get("order_by") or "name"
        sort_func = {
            "name": lambda x: x,
            "map": lambda x: x.map,
            "gametype": lambda x: x.gametype,
            "players": lambda x: x.player_count,
            "progression": lambda x: x.progression,
        }.get(self.order_by)
        if sort_func is None:
            # Unknown sort key: bounce back to the default view.
            return redirect(self.url_for("serverlist"))

        # BUG FIX: dict.values() is a view in Python 3 and has no .sort();
        # build a sorted list instead.
        self.servers = sorted(self.cup.server_browser.servers.values(), key=sort_func)
        if self.request.args.get("dir") == "desc":
            self.servers.reverse()
            self.order_desc = True
        else:
            self.order_desc = False

        self.players = reduce(lambda a, b: a + b.players, self.servers, [])
        # BUG FIX: sorted()'s key function takes a single element; the old
        # two-argument lambda was a Python 2 cmp function and raised TypeError.
        # Wrap the comparator with cmp_to_key.
        self.players = sorted(
            self.players, key=cmp_to_key(lambda a, b: unicodecmp(a.name, b.name))
        )
class Server(Page):
    """Detail page for a single game server, looked up by its browser id."""

    url_rule = "/server/<id>"

    def process(self, id):
        try:
            server = self.cup.server_browser.servers[id]
        except KeyError:
            # Unknown id -> HTTP 404.
            raise NotFound()
        self.server = server
class Search(Page):
    """Search page: list the servers a given player name is currently on."""

    url_rule = "/search"

    def process(self):
        self.user = self.request.args.get("user")
        if not self.user:
            # No query given: leave self.results unset, as before.
            return
        # One entry per matching player, so a server can appear multiple
        # times if several of its players share the searched name.
        self.results = [
            server
            for server in self.cup.server_browser.servers.values()
            for player in server.players
            if player.name == self.user
        ]
class MissingPage(Page):
    """Fallback page rendered with an HTTP 404 status code."""

    def get_response(self):
        resp = super().get_response()
        resp.status_code = 404
        return resp
| {
"repo_name": "pallets/werkzeug",
"path": "examples/cupoftee/pages.py",
"copies": "1",
"size": "2202",
"license": "bsd-3-clause",
"hash": -3179701102525839400,
"line_mean": 28.36,
"line_max": 88,
"alpha_frac": 0.5613079019,
"autogenerated": false,
"ratio": 3.8229166666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9883475504896254,
"avg_score": 0.000149812734082397,
"num_lines": 75
} |
from functools import reduce
import asyncio_mongo
from asyncio_mongo import _pymongo
from asyncio_mongo import exceptions
from asyncio_mongo import filter as qf
from wdim.orm import sort
from wdim.orm import query
from wdim.orm import exceptions
from wdim.orm.database.base import DatabaseLayer
from wdim.orm.database.translation import Translator
class MongoTranslator(Translator):
    """Translates ORM query and sort objects into pymongo filter structures."""

    @classmethod
    def translate_query(cls, q: query.BaseQuery):
        """Recursively convert a BaseQuery tree into a MongoDB filter dict.

        Raises UnsupportedOperation for query node types without a mapping.
        """
        assert isinstance(q, query.BaseQuery), 'q must be an instance of BaseQuery'
        try:
            return {
                query.Equals: lambda: {q.name: q.value},
                query.Or: lambda: {'$or': [cls.translate_query(qu) for qu in q.queries]},
                # And: merge sub-filters into one dict (later keys win on collision).
                query.And: lambda: reduce(lambda x, y: {**x, **y}, (cls.translate_query(qu) for qu in q.queries), {}),
            }[q.__class__]()
        except KeyError:
            raise exceptions.UnsupportedOperation(q)

    @classmethod
    def translate_sorting(self, sorting):
        """Convert an ORM sort object into an asyncio_mongo sort filter.

        NOTE(review): Ascending maps to qf.DESCENDING and vice versa — this
        looks inverted; confirm whether it compensates for a library quirk.
        (Also note this classmethod names its first parameter ``self``.)
        """
        try:
            return qf.sort({
                sort.Ascending: qf.DESCENDING,
                sort.Descending: qf.ASCENDING,
            }[sorting.__class__](sorting.field._name))
        except KeyError:
            raise exceptions.UnsupportedOperation(sorting)
class MongoLayer(DatabaseLayer):
    """DatabaseLayer implementation backed by MongoDB via asyncio_mongo."""

    @classmethod
    async def connect(cls, host='127.0.0.1', port=27017, name='wdim20150921'):
        """Open a connection and return a layer bound to database ``name``."""
        connection = await asyncio_mongo.Connection.create(host, port)
        return cls(connection[name])

    def __init__(self, connection):
        # ``connection`` is the asyncio_mongo database handle.
        self.connection = connection

    async def insert(self, inst):
        """Insert ``inst``'s document; raises UniqueViolation on duplicate key."""
        try:
            return await self.connection[inst._collection_name].insert(inst.to_document(), safe=True)
        except _pymongo.errors.DuplicateKeyError:
            raise exceptions.UniqueViolation

    async def upsert(self, inst):
        """Insert or replace the document keyed by ``inst._id``; returns the id."""
        await self.connection[inst._collection_name].update({'_id': inst._id}, inst.to_document(), safe=True, upsert=True)
        return inst._id

    async def load(self, cls, _id):
        """Load a single ``cls`` document by primary id (raises NotFound if absent)."""
        return await self.find_one(cls, cls._id == _id)

    async def drop(self, cls):
        """Drop the whole collection backing ``cls``."""
        return await self.connection[cls._collection_name].drop(safe=True)

    async def find_one(self, cls, query):
        """Return the first document matching ``query`` or raise NotFound."""
        doc = await self.connection[cls._collection_name].find_one(
            MongoTranslator.translate_query(query)
        )
        if not doc:
            raise exceptions.NotFound(query)
        return doc

    async def find(self, cls, query=None, limit=0, skip=0, sort=None):
        """Query the collection; ``query``/``sort`` are ORM objects translated here."""
        filter = sort and MongoTranslator.translate_sorting(sort)
        # ``(query or {}) and ...`` yields an empty filter ({}) when no query is given.
        query = (query or {}) and MongoTranslator.translate_query(query)
        return await self.connection[cls._collection_name].find(
            query,
            limit=limit,
            skip=skip,
            filter=filter
        )

    async def ensure_index(self, name, indices):
        """Create each (keys, options) index on collection ``name`` if missing."""
        for keys, opts in indices:
            await self.connection[name].ensure_index(
                qf.sort([(key, opts.get('order', 1)) for key in keys]),
                unique=opts.get('unique', False)
            )
| {
"repo_name": "chrisseto/Still",
"path": "wdim/orm/database/mongo.py",
"copies": "1",
"size": "3132",
"license": "mit",
"hash": -5461633035654963000,
"line_mean": 33.8,
"line_max": 122,
"alpha_frac": 0.6270753512,
"autogenerated": false,
"ratio": 4.051746442432083,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5178821793632082,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import django_filters
from django.db.models import Q
from . import models
class BuildingFilter(django_filters.FilterSet):
    """Filter buildings by their CAMPUSonline reference."""

    class Meta:
        model = models.Building
        fields = [
            'campusonline',
        ]
class FloorFilter(django_filters.FilterSet):
    """Filter floors by level and CAMPUSonline reference."""

    class Meta:
        model = models.Floor
        fields = [
            'level',
            'campusonline',
        ]
class NodeFilter(django_filters.FilterSet):
    """Base filter for graph nodes; subclasses extend its field list."""

    class Meta:
        model = models.Node
        fields = [
            'level',
        ]
class RoomFilter(NodeFilter):
    """Node filter for rooms, adding CAMPUSonline reference and category."""

    class Meta(NodeFilter.Meta):
        model = models.Room
        fields = [
            'campusonline',
            'category',
        ] + NodeFilter.Meta.fields
class DoorFilter(NodeFilter):
    """Node filter for doors (inherits the level field only)."""

    class Meta(NodeFilter.Meta):
        model = models.Door
class PointOfInterestInstanceFilter(NodeFilter):
    """Node filter for point-of-interest instances, adding a name lookup."""

    class Meta(NodeFilter.Meta):
        model = models.PointOfInterestInstance
        fields = [
            'name',
        ] + NodeFilter.Meta.fields
class EdgeFilter(django_filters.FilterSet):
    """Filter edges by floor level — on either endpoint (``level``) or per
    endpoint (``source_level`` / ``destination_level``)."""

    # Matches edges whose source OR destination lies on the given level.
    level = django_filters.NumberFilter(
        method=lambda q, n, v: EdgeFilter.filter_level(q, v)
    )
    source_level = django_filters.NumberFilter(
        name='source__level'
    )
    destination_level = django_filters.NumberFilter(
        name='destination__level'
    )

    class Meta:
        model = models.Edge
        fields = (
            'level',
            'source_level',
            'destination_level',
        )

    @staticmethod
    def filter_level(queryset, value):
        """Restrict ``queryset`` to edges with either endpoint on level ``value``."""
        fields = [
            'source__level__exact',
            'destination__level__exact',
        ]
        # OR the per-endpoint conditions together.
        levels = reduce(
            lambda x, y: x | y,
            [Q(**{field: value}) for field in fields]
        )
        return queryset.filter(levels)
| {
"repo_name": "medunigraz/outpost",
"path": "src/outpost/django/geo/filters.py",
"copies": "2",
"size": "1883",
"license": "bsd-2-clause",
"hash": -387461130281135360,
"line_mean": 19.6923076923,
"line_max": 60,
"alpha_frac": 0.5677110993,
"autogenerated": false,
"ratio": 4.175166297117516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5742877396417516,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import ee
import math
class ImageOperation(object):
    """Helper wrapping an Earth Engine image with band-manipulation utilities.

    Band arguments may be a band name, an expression string (containing '.',
    ' ' or '{', evaluated with ``i`` bound to the wrapped image), or an image.
    """

    def __init__(self, image):
        super(ImageOperation, self).__init__()
        self.image = image
        self.input_band_names = image.bandNames()

    def select(self, name):
        """Select band ``name`` from the wrapped image."""
        return self.image.select(name)

    def set(self, name, toAdd, args={}):
        """Add or replace band ``name`` with the evaluated ``toAdd``."""
        toAdd = self.toImage(toAdd, args)
        self.image = self.image.addBands(toAdd.rename([name]), None, True)

    def setIf(self, name, condition, trueValue, args={}):
        """Set band ``name`` to ``trueValue`` where ``condition`` holds, keep it elsewhere."""
        self.setIfElse(name, condition, trueValue, name, args)

    def setIfElse(self, name, condition, trueValue, falseValue, args={}):
        """Set band ``name`` to ``trueValue`` where ``condition`` holds, else ``falseValue``."""
        self.set(name,
                 self.toImage(falseValue, args)
                 .where(self.toImage(condition, args), self.toImage(trueValue, args)))

    def setAll(self, image):
        # Replace bands in source image, to ensure all image properties are preserved
        self.image = self.image.addBands(image, None, True)

    def invertMask(self, mask):
        """Flip a 0/1 mask image."""
        return mask.multiply(-1).add(1)

    def toImage(self, band, args={}):
        """Coerce ``band`` (name, expression string, or image) into an ee.Image."""
        if isinstance(band, str):
            if band.find('.') > -1 or band.find(' ') > -1 or band.find('{') > -1:
                band = self.image.expression(self.format(band, args), {'i': self.image})
            else:
                band = self.image.select(band)
        return ee.Image(band)

    def format(self, s, args={}):
        """Expand ``{...}`` placeholders in ``s`` (``pi`` is always available),
        repeating until no placeholders remain."""
        if not args:
            args = {}
        allArgs = self.merge({'pi': math.pi}, args)
        result = str(s).format(**allArgs)
        if result.find('{') > -1:
            # BUG FIX: recurse through self.format; the previous call to the
            # bare builtin ``format(result, args)`` raised TypeError (a dict
            # is not a format spec) and never re-expanded nested placeholders.
            return self.format(result, args)
        return result

    def isMasked(self, band):
        """1 where ``band`` is fully masked, 0 elsewhere."""
        return self.toImage(band).mask().reduce('min').eq(0)

    def updateMask(self, condition):
        """Mask the wrapped image by the evaluated ``condition``."""
        self.image = self.image.updateMask(self.toImage(condition))

    def merge(self, o1, o2):
        """Shallow-merge two dicts; ``o2`` wins on key collisions."""
        return dict(list(o1.items()) + list(o2.items()))

    def rescale(self, value, min, max):
        """Linearly map ``value`` from [min, max] onto [0, 1], clamped."""
        return self.toImage(value) \
            .subtract(min) \
            .divide(max - min) \
            .clamp(0, 1)
def combine_probability(probabilities):
    """Fold a sequence of probability images into their product.

    Starts from the scalar 1 and multiplies each probability in, via each
    element's ``multiply`` method; an empty sequence yields 1.
    """
    combined = 1
    for probability in probabilities:
        combined = probability.multiply(combined)
    return combined
| {
"repo_name": "openforis/sepal",
"path": "modules/google-earth-engine/docker/src/sepalinternal/image_operation.py",
"copies": "1",
"size": "2259",
"license": "mit",
"hash": 4669413418577869000,
"line_mean": 30.8169014085,
"line_max": 93,
"alpha_frac": 0.5945108455,
"autogenerated": false,
"ratio": 3.6731707317073172,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9761187992012708,
"avg_score": 0.0012987170389219809,
"num_lines": 71
} |
from functools import reduce
import hail as hl
from hail.expr.functions import _ndarray
from hail.expr.functions import array as aarray
from hail.expr.types import HailType, tfloat64, ttuple, tndarray
from hail.typecheck import typecheck, nullable, oneof, tupleof, sequenceof
from hail.expr.expressions import (
expr_int32, expr_int64, expr_tuple, expr_any, expr_array, expr_ndarray,
expr_numeric, Int64Expression, cast_expr, construct_expr)
from hail.expr.expressions.typed_expressions import NDArrayNumericExpression
from hail.ir import NDArrayQR, NDArrayInv, NDArrayConcat, NDArraySVD, Apply
# Typecheck helpers: a sequence of ndarrays, and anything accepted as a shape
# (a single int64, a tuple of int64s, or a tuple expression).
tsequenceof_nd = oneof(sequenceof(expr_ndarray()), expr_array(expr_ndarray()))
shape_type = oneof(expr_int64, tupleof(expr_int64), expr_tuple())
def array(input_array, dtype=None):
    """Construct an :class:`.NDArrayExpression`

    Examples
    --------

    >>> hl.eval(hl.nd.array([1, 2, 3, 4]))
    array([1, 2, 3, 4], dtype=int32)

    >>> hl.eval(hl.nd.array([[1, 2, 3], [4, 5, 6]]))
    array([[1, 2, 3],
       [4, 5, 6]], dtype=int32)

    >>> hl.eval(hl.nd.array(np.identity(3)))
    array([[1., 0., 0.],
       [0., 1., 0.],
       [0., 0., 1.]])

    >>> hl.eval(hl.nd.array(hl.range(10, 20)))
    array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], dtype=int32)

    Parameters
    ----------
    input_array : :class:`.ArrayExpression`, numpy ndarray, or nested python lists/tuples
    dtype : :class:`.HailType`
        Desired hail type. Default: `float64`.

    Returns
    -------
    :class:`.NDArrayExpression`
        An ndarray based on the input array.
    """
    return _ndarray(input_array, dtype=dtype)
@typecheck(a=expr_array(), shape=shape_type)
def from_column_major(a, shape):
    """Build a 2-D ndarray from flat array ``a`` interpreted in column-major order.

    ``shape`` must be (n_rows, n_cols): reshaping with the dimensions reversed
    and then transposing yields the column-major (Fortran-order) layout.
    """
    assert len(shape) == 2
    return array(a).reshape(tuple(reversed(shape))).T
@typecheck(start=expr_int32, stop=nullable(expr_int32), step=expr_int32)
def arange(start, stop=None, step=1) -> NDArrayNumericExpression:
    """Returns a 1-dimensional ndarray of integers from `start` to `stop` by `step`.

    Examples
    --------

    >>> hl.eval(hl.nd.arange(10))
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32)

    >>> hl.eval(hl.nd.arange(3, 10))
    array([3, 4, 5, 6, 7, 8, 9], dtype=int32)

    >>> hl.eval(hl.nd.arange(0, 10, step=3))
    array([0, 3, 6, 9], dtype=int32)

    Notes
    -----
    The range includes `start`, but excludes `stop`.

    If provided exactly one argument, the argument is interpreted as `stop` and
    `start` is set to zero. This matches the behavior of Python's ``range``.

    Parameters
    ----------
    start : int or :class:`.Expression` of type :py:data:`.tint32`
        Start of range.
    stop : int or :class:`.Expression` of type :py:data:`.tint32`
        End of range.
    step : int or :class:`.Expression` of type :py:data:`.tint32`
        Step of range.

    Returns
    -------
    :class:`.NDArrayNumericExpression`
        A 1-dimensional ndarray from `start` to `stop` by `step`.
    """
    return array(hl.range(start, stop, step))
@typecheck(shape=shape_type, value=expr_any, dtype=nullable(HailType))
def full(shape, value, dtype=None):
    """Creates a hail :class:`.NDArrayNumericExpression` full of the specified value.

    Examples
    --------
    Create a 5 by 7 NDArray of type :py:data:`.tfloat64` 9s.

    >>> hl.nd.full((5, 7), 9)

    It is possible to specify a type other than :py:data:`.tfloat64` with the
    `dtype` argument.

    >>> hl.nd.full((5, 7), 9, dtype=hl.tint32)

    Parameters
    ----------
    shape : `tuple` or :class:`.TupleExpression`
        Desired shape.
    value : :class:`.Expression` or python value
        Value to fill ndarray with.
    dtype : :class:`.HailType`
        Desired hail type.

    Returns
    -------
    :class:`.NDArrayNumericExpression`
        An ndarray of the specified shape filled with the specified value.
    """
    # A scalar shape means a 1-D ndarray of that length; otherwise the total
    # element count is the product of the dimensions.
    if isinstance(shape, Int64Expression):
        shape_product = shape
    else:
        shape_product = reduce(lambda a, b: a * b, shape)
    return arange(hl.int32(shape_product)).map(lambda x: cast_expr(value, dtype)).reshape(shape)
@typecheck(shape=shape_type, dtype=HailType)
def zeros(shape, dtype=tfloat64):
    """Creates a hail :class:`.NDArrayNumericExpression` full of zeros.

    Examples
    --------
    Create a 5 by 7 NDArray of type :py:data:`.tfloat64` zeros.

    >>> hl.nd.zeros((5, 7))

    It is possible to specify a type other than :py:data:`.tfloat64` with the
    `dtype` argument.

    >>> hl.nd.zeros((5, 7), dtype=hl.tfloat32)

    Parameters
    ----------
    shape : `tuple` or :class:`.TupleExpression`
        Desired shape.
    dtype : :class:`.HailType`
        Desired hail type. Default: `float64`.

    See Also
    --------
    :func:`.full`

    Returns
    -------
    :class:`.NDArrayNumericExpression`
        ndarray of the specified size full of zeros.
    """
    return full(shape, 0, dtype)
@typecheck(shape=shape_type, dtype=HailType)
def ones(shape, dtype=tfloat64):
    """Creates a hail :class:`.NDArrayNumericExpression` full of ones.

    Examples
    --------
    Create a 5 by 7 NDArray of type :py:data:`.tfloat64` ones.

    >>> hl.nd.ones((5, 7))

    It is possible to specify a type other than :py:data:`.tfloat64` with the
    `dtype` argument.

    >>> hl.nd.ones((5, 7), dtype=hl.tfloat32)

    Parameters
    ----------
    shape : `tuple` or :class:`.TupleExpression`
        Desired shape.
    dtype : :class:`.HailType`
        Desired hail type. Default: `float64`.

    See Also
    --------
    :func:`.full`

    Returns
    -------
    :class:`.NDArrayNumericExpression`
        ndarray of the specified size full of ones.
    """
    return full(shape, 1, dtype)
@typecheck(nd=expr_ndarray())
def diagonal(nd):
    """Gets the diagonal of a 2 dimensional NDArray.

    Examples
    --------

    >>> hl.eval(hl.nd.diagonal(hl.nd.array([[1, 2], [3, 4]])))
    array([1, 4], dtype=int32)

    :param nd: A 2 dimensional NDArray, shape(M, N).
    :return: A 1 dimension NDArray of length min(M, N), containing the diagonal of `nd`.
    """
    assert nd.ndim == 2, "diagonal requires 2 dimensional ndarray"
    # The diagonal of an M x N matrix has min(M, N) entries.
    shape_min = hl.min(nd.shape[0], nd.shape[1])
    return hl.nd.array(hl.range(hl.int32(shape_min)).map(lambda i: nd[i, i]))
@typecheck(a=expr_ndarray(), b=expr_ndarray(), no_crash=bool)
def solve(a, b, no_crash=False):
    """Solve a linear system.

    Parameters
    ----------
    a : :class:`.NDArrayNumericExpression`, (N, N)
        Coefficient matrix.
    b : :class:`.NDArrayNumericExpression`, (N,) or (N, K)
        Dependent variables.
    no_crash : :obj:`bool`
        If ``True``, use the non-raising native solver and return a struct
        with fields ``solution`` and ``failed`` instead of just the solution.

    Returns
    -------
    :class:`.NDArrayNumericExpression`, (N,) or (N, K)
        Solution to the system Ax = B. Shape is same as shape of B.
    """
    assert a.ndim == 2
    assert b.ndim == 1 or b.ndim == 2

    b_ndim_orig = b.ndim

    # Promote a vector b to a single-column matrix; undone before returning.
    if b_ndim_orig == 1:
        b = b.reshape((-1, 1))

    # The native solver operates on float64 only.
    if a.dtype.element_type != hl.tfloat64:
        a = a.map(lambda e: hl.float64(e))
    if b.dtype.element_type != hl.tfloat64:
        b = b.map(lambda e: hl.float64(e))

    if no_crash:
        name = "linear_solve_no_crash"
        return_type = hl.tstruct(solution=hl.tndarray(hl.tfloat64, 2), failed=hl.tbool)
    else:
        name = "linear_solve"
        return_type = hl.tndarray(hl.tfloat64, 2)

    ir = Apply(name, return_type, a._ir, b._ir)
    result = construct_expr(ir, return_type, a._indices, a._aggregations)

    # Restore the 1-D shape when b came in as a vector.
    if b_ndim_orig == 1:
        if no_crash:
            result = hl.struct(solution=result.solution.reshape((-1)), failed=result.failed)
        else:
            result = result.reshape((-1))

    return result
@typecheck(nd=expr_ndarray(), mode=str)
def qr(nd, mode="reduced"):
    """Performs a QR decomposition.

    :param nd: A 2 dimensional ndarray, shape(M, N)
    :param mode: One of "reduced", "complete", "r", or "raw".

        If K = min(M, N), then:

        - `reduced`: returns q and r with dimensions (M, K), (K, N)
        - `complete`: returns q and r with dimensions (M, M), (M, N)
        - `r`: returns only r with dimensions (K, N)
        - `raw`: returns h, tau with dimensions (N, M), (K,)

    Returns
    -------
    - q: ndarray of float64
        A matrix with orthonormal columns.
    - r: ndarray of float64
        The upper-triangular matrix R.
    - (h, tau): ndarrays of float64
        The array h contains the Householder reflectors that generate q along with r.
        The tau array contains scaling factors for the reflectors
    """
    assert nd.ndim == 2, "QR decomposition requires 2 dimensional ndarray"

    if mode not in ["reduced", "r", "raw", "complete"]:
        raise ValueError(f"Unrecognized mode '{mode}' for QR decomposition")

    # The decomposition is computed in double precision.
    float_nd = nd.map(lambda x: hl.float64(x))
    qr_ir = NDArrayQR(float_nd._ir, mode)

    # Pick the static result type matching the requested mode; the modes were
    # validated above, so the else branch covers exactly "complete"/"reduced".
    if mode == "raw":
        result_type = ttuple(tndarray(tfloat64, 2), tndarray(tfloat64, 1))
    elif mode == "r":
        result_type = tndarray(tfloat64, 2)
    else:
        result_type = ttuple(tndarray(tfloat64, 2), tndarray(tfloat64, 2))
    return construct_expr(qr_ir, result_type, nd._indices, nd._aggregations)
@typecheck(nd=expr_ndarray(), full_matrices=bool, compute_uv=bool)
def svd(nd, full_matrices=True, compute_uv=True):
    """Performs a singular value decomposition.

    :param nd: :class:`.NDArrayExpression`
        A 2 dimensional ndarray, shape(M, N).
    :param full_matrices: `bool`
        If True (default), u and vt have dimensions (M, M) and (N, N) respectively. Otherwise, they have dimensions
        (M, K) and (K, N), where K = min(M, N)
    :param compute_uv: `bool`
        If True (default), compute the singular vectors u and v. Otherwise, only return a single ndarray, s.

    Returns
    -------
    - u: :class:`.NDArrayExpression`
        The left singular vectors.
    - s: :class:`.NDArrayExpression`
        The singular values.
    - vt: :class:`.NDArrayExpression`
        The right singular vectors.
    """
    # Consistency with `qr`, `inv`, and `diagonal`: fail fast on non-matrices.
    assert nd.ndim == 2, "SVD requires 2 dimensional ndarray"
    # The decomposition is computed in double precision.
    float_nd = nd.map(lambda x: hl.float64(x))
    ir = NDArraySVD(float_nd._ir, full_matrices, compute_uv)

    return_type = ttuple(tndarray(tfloat64, 2), tndarray(tfloat64, 1), tndarray(tfloat64, 2)) if compute_uv else tndarray(tfloat64, 1)
    # Propagate the argument's indices and aggregations, as `qr` and `solve`
    # do; the original dropped them, losing the expression's source context.
    return construct_expr(ir, return_type, nd._indices, nd._aggregations)
@typecheck(nd=expr_ndarray())
def inv(nd):
    """Performs a matrix inversion.

    :param nd: A 2 dimensional ndarray, shape(M, N)

    Returns
    -------
    - a: ndarray of float64
        The inverted matrix
    """
    assert nd.ndim == 2, "Matrix inversion requires 2 dimensional ndarray"
    # Inversion is computed in double precision.
    float_nd = nd.map(lambda x: hl.float64(x))
    ir = NDArrayInv(float_nd._ir)
    # Propagate the argument's indices and aggregations, as `qr` and `solve`
    # do; the original dropped them, losing the expression's source context.
    return construct_expr(ir, tndarray(tfloat64, 2), nd._indices, nd._aggregations)
@typecheck(nds=tsequenceof_nd, axis=int)
def concatenate(nds, axis=0):
    """Join a sequence of arrays along an existing axis.

    Examples
    --------
    >>> x = hl.nd.array([[1., 2.], [3., 4.]])
    >>> y = hl.nd.array([[5.], [6.]])
    >>> hl.eval(hl.nd.concatenate([x, y], axis=1))
    array([[1., 2., 5.],
           [3., 4., 6.]])
    >>> x = hl.nd.array([1., 2.])
    >>> y = hl.nd.array([3., 4.])
    >>> hl.eval(hl.nd.concatenate((x, y), axis=0))
    array([1., 2., 3., 4.])

    Parameters
    ----------
    :param nds: a1, a2, … sequence of array_like
        The arrays must have the same shape, except in the dimension corresponding to axis (the first, by default).
        Note: unlike Numpy, the numerical element type of each array_like must match.
    :param axis: int, optional
        The axis along which the arrays will be joined. Default is 0.
        Note: unlike Numpy, if provided, axis cannot be None.

    Returns
    -------
    - res: ndarray
        The concatenated array
    """
    # The first array fixes the expected rank and element type of the result.
    head_nd = nds[0]
    head_ndim = head_nd.ndim
    # NOTE(review): this builds a check expression but discards its result, so
    # the ndim-mismatch error appears to never fire at runtime — confirm
    # whether it should be threaded into the returned expression instead.
    hl.case().when(hl.all(lambda a: a.ndim == head_ndim, nds), True).or_error("Mismatched ndim")
    makearr = aarray(nds)
    concat_ir = NDArrayConcat(makearr._ir, axis)
    return construct_expr(concat_ir, tndarray(head_nd._type.element_type, head_ndim))
@typecheck(N=expr_numeric, M=nullable(expr_numeric), dtype=HailType)
def eye(N, M=None, dtype=hl.tfloat64):
    """
    Construct a 2-D :class:`.NDArrayExpression` with ones on the *main* diagonal
    and zeros elsewhere.

    Parameters
    ----------
    N : :class:`.NumericExpression` or Python number
        Number of rows in the output.
    M : :class:`.NumericExpression` or Python number, optional
        Number of columns in the output. If None, defaults to `N`.
    dtype : numeric :class:`.HailType`, optional
        Element type of the returned array. Defaults to :py:data:`.tfloat64`

    Returns
    -------
    I : :class:`.NDArrayExpression` representing a Hail ndarray of shape (N,M)
        An ndarray whose elements are equal to one on the main diagonal, zeroes elsewhere.

    See Also
    --------
    :func:`.identity`
    :func:`.diagonal`

    Examples
    --------
    >>> hl.eval(hl.nd.eye(3))
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])
    >>> hl.eval(hl.nd.eye(2, 5, dtype=hl.tint32))
    array([[1, 0, 0, 0, 0],
           [0, 1, 0, 0, 0]], dtype=int32)
    """
    n_row = hl.int32(N)
    n_col = n_row if M is None else hl.int32(M)

    # Enumerate the n_row * n_col cells in row-major order; cell i sits at
    # (row, col) = (i // n_col, i % n_col) and holds 1 exactly on the diagonal.
    def entry(i):
        return hl.if_else((i // n_col) == (i % n_col),
                          hl.literal(1, dtype),
                          hl.literal(0, dtype))

    return hl.nd.array(hl.range(0, n_row * n_col).map(entry)).reshape((n_row, n_col))
@typecheck(N=expr_numeric, dtype=HailType)
def identity(N, dtype=hl.tfloat64):
    """
    Constructs a 2-D :class:`.NDArrayExpression` representing the identity array.
    The identity array is a square array with ones on the main diagonal.

    Parameters
    ----------
    n : :class:`.NumericExpression` or Python number
        Number of rows and columns in the output.
    dtype : numeric :class:`.HailType`, optional
        Element type of the returned array. Defaults to :py:data:`.tfloat64`

    Returns
    -------
    out : :class:`.NDArrayExpression`
        `n` x `n` ndarray with its main diagonal set to one, and all other elements 0.

    See Also
    --------
    :func:`.eye`

    Examples
    --------
    >>> hl.eval(hl.nd.identity(3))
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])
    """
    # The identity matrix is simply a square `eye`.
    return eye(N, dtype=dtype)
@typecheck(arrs=tsequenceof_nd)
def vstack(arrs):
    """
    Stack arrays in sequence vertically (row wise).
    1-D arrays of shape `(N,)`, will reshaped to `(1,N)` before concatenation.
    For all other arrays, equivalent to :func:`.concatenate` with axis=0.

    Parameters
    ----------
    arrs : sequence of :class:`.NDArrayExpression`
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length.

    Returns
    -------
    stacked : :class:`.NDArrayExpression`
        The array formed by stacking the given arrays, will be at least 2-D.

    See Also
    --------
    :func:`.concatenate` : Join a sequence of arrays along an existing axis.

    Examples
    --------
    >>> a = hl.nd.array([1, 2, 3])
    >>> b = hl.nd.array([2, 3, 4])
    >>> hl.eval(hl.nd.vstack((a,b)))
    array([[1, 2, 3],
           [2, 3, 4]], dtype=int32)
    >>> a = hl.nd.array([[1], [2], [3]])
    >>> b = hl.nd.array([[2], [3], [4]])
    >>> hl.eval(hl.nd.vstack((a,b)))
    array([[1],
           [2],
           [3],
           [2],
           [3],
           [4]], dtype=int32)
    """
    if arrs[0].ndim == 1:
        # Promote 1-D inputs of shape (N,) to (1, N) rows before concatenating.
        return concatenate(hl.map(lambda a: a._broadcast(2), arrs), 0)
    return concatenate(arrs, 0)
@typecheck(arrs=tsequenceof_nd)
def hstack(arrs):
    """
    Stack arrays in sequence horizontally (column wise).
    Equivalent to concatenation along the second axis, except for 1-D
    arrays where it concatenates along the first axis.

    This function makes most sense for arrays with up to 3 dimensions.
    :func:`.concatenate` provides more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of :class:`.NDArrayExpression`
        The arrays must have the same shape along all but the second axis,
        except 1-D arrays which can be any length.

    Returns
    -------
    stacked : :class:`.NDArrayExpression`
        The array formed by stacking the given arrays.

    See Also
    --------
    :func:`.concatenate`
    :func:`.vstack`

    Examples
    --------
    >>> a = hl.nd.array([1,2,3])
    >>> b = hl.nd.array([2,3,4])
    >>> hl.eval(hl.nd.hstack((a,b)))
    array([1, 2, 3, 2, 3, 4], dtype=int32)
    >>> a = hl.nd.array([[1],[2],[3]])
    >>> b = hl.nd.array([[2],[3],[4]])
    >>> hl.eval(hl.nd.hstack((a,b)))
    array([[1, 2],
           [2, 3],
           [3, 4]], dtype=int32)
    """
    # 1-D inputs are joined end to end; anything of higher rank is
    # concatenated along its second axis.
    join_axis = 0 if arrs[0].ndim == 1 else 1
    return concatenate(arrs, join_axis)
| {
"repo_name": "danking/hail",
"path": "hail/python/hail/nd/nd.py",
"copies": "1",
"size": "17209",
"license": "mit",
"hash": -2458475302368590000,
"line_mean": 28.8732638889,
"line_max": 134,
"alpha_frac": 0.5916196897,
"autogenerated": false,
"ratio": 3.4304226475279105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9519732166478068,
"avg_score": 0.00046203414996861717,
"num_lines": 576
} |
from functools import reduce
import numpy as np
import numpy.core.umath as umath
import numpy.core.fromnumeric as fromnumeric
from numpy.testing import (
assert_, assert_raises, assert_equal,
)
from numpy.ma import (
MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,
arange, arccos, arcsin, arctan, arctan2, array, average, choose,
concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled,
getmask, greater, greater_equal, inner, isMaskedArray, less,
less_equal, log, log10, make_mask, masked, masked_array, masked_equal,
masked_greater, masked_greater_equal, masked_inside, masked_less,
masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, maximum, minimum,
multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel,
repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum,
take, tan, tanh, transpose, where, zeros,
)
from numpy.compat import pickle
pi = np.pi
def eq(v, w, msg=''):
    """Return True when *v* and *w* are allclose; print a diagnostic otherwise."""
    ok = allclose(v, w)
    if not ok:
        print(f'Not eq:{msg}\n{v}\n----{w}')
    return ok
class TestMa:
    """Port of the classic pre-NumPy ``ma`` test suite: exercises MaskedArray
    creation, arithmetic, ufuncs, indexing, copying, put/take, averaging,
    pickling, and conversions against their plain-ndarray equivalents."""

    def setup(self):
        # Shared fixtures: x/y are plain arrays, xm/ym their masked twins
        # (masks m1/m2), xf is x with masked slots replaced by the fill value.
        x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
        y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
        a10 = 10.
        m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
        xm = array(x, mask=m1)
        ym = array(y, mask=m2)
        z = np.array([-.5, 0., .5, .8])
        zm = array(z, mask=[0, 1, 0, 0])
        xf = np.where(m1, 1e+20, x)
        s = x.shape
        xm.set_fill_value(1e+20)
        self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)

    def test_testBasic1d(self):
        # Test of basic array creation and properties in 1 dimension.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        assert_(not isMaskedArray(x))
        assert_(isMaskedArray(xm))
        assert_equal(shape(xm), s)
        assert_equal(xm.shape, s)
        assert_equal(xm.dtype, x.dtype)
        assert_equal(xm.size, reduce(lambda x, y:x * y, s))
        assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
        assert_(eq(xm, xf))
        assert_(eq(filled(xm, 1.e20), xf))
        assert_(eq(x, xm))

    def test_testBasic2d(self):
        # Test of basic array creation and properties in 2 dimensions.
        for s in [(4, 3), (6, 2)]:
            (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
            x.shape = s
            y.shape = s
            xm.shape = s
            ym.shape = s
            xf.shape = s
            assert_(not isMaskedArray(x))
            assert_(isMaskedArray(xm))
            assert_equal(shape(xm), s)
            assert_equal(xm.shape, s)
            assert_equal(xm.size, reduce(lambda x, y:x * y, s))
            assert_equal(count(xm),
                         len(m1) - reduce(lambda x, y:x + y, m1))
            assert_(eq(xm, xf))
            assert_(eq(filled(xm, 1.e20), xf))
            assert_(eq(x, xm))
            # Reshaping mutated the shared fixtures; rebuild them.
            self.setup()

    def test_testArithmetic(self):
        # Test of basic arithmetic.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        a2d = array([[1, 2], [0, 4]])
        a2dm = masked_array(a2d, [[0, 0], [1, 0]])
        assert_(eq(a2d * a2d, a2d * a2dm))
        assert_(eq(a2d + a2d, a2d + a2dm))
        assert_(eq(a2d - a2d, a2d - a2dm))
        for s in [(12,), (4, 3), (2, 6)]:
            x = x.reshape(s)
            y = y.reshape(s)
            xm = xm.reshape(s)
            ym = ym.reshape(s)
            xf = xf.reshape(s)
            assert_(eq(-x, -xm))
            assert_(eq(x + y, xm + ym))
            assert_(eq(x - y, xm - ym))
            assert_(eq(x * y, xm * ym))
            with np.errstate(divide='ignore', invalid='ignore'):
                assert_(eq(x / y, xm / ym))
            assert_(eq(a10 + y, a10 + ym))
            assert_(eq(a10 - y, a10 - ym))
            assert_(eq(a10 * y, a10 * ym))
            with np.errstate(divide='ignore', invalid='ignore'):
                assert_(eq(a10 / y, a10 / ym))
            assert_(eq(x + a10, xm + a10))
            assert_(eq(x - a10, xm - a10))
            assert_(eq(x * a10, xm * a10))
            assert_(eq(x / a10, xm / a10))
            assert_(eq(x ** 2, xm ** 2))
            assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
            assert_(eq(x ** y, xm ** ym))
            assert_(eq(np.add(x, y), add(xm, ym)))
            assert_(eq(np.subtract(x, y), subtract(xm, ym)))
            assert_(eq(np.multiply(x, y), multiply(xm, ym)))
            with np.errstate(divide='ignore', invalid='ignore'):
                assert_(eq(np.divide(x, y), divide(xm, ym)))

    def test_testMixedArithmetic(self):
        # Mixing a plain ndarray with a MaskedArray must yield a MaskedArray.
        na = np.array([1])
        ma = array([1])
        assert_(isinstance(na + ma, MaskedArray))
        assert_(isinstance(ma + na, MaskedArray))

    def test_testUfuncs1(self):
        # Test various functions such as sin, cos.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        assert_(eq(np.cos(x), cos(xm)))
        assert_(eq(np.cosh(x), cosh(xm)))
        assert_(eq(np.sin(x), sin(xm)))
        assert_(eq(np.sinh(x), sinh(xm)))
        assert_(eq(np.tan(x), tan(xm)))
        assert_(eq(np.tanh(x), tanh(xm)))
        with np.errstate(divide='ignore', invalid='ignore'):
            assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
            assert_(eq(np.log(abs(x)), log(xm)))
            assert_(eq(np.log10(abs(x)), log10(xm)))
        assert_(eq(np.exp(x), exp(xm)))
        assert_(eq(np.arcsin(z), arcsin(zm)))
        assert_(eq(np.arccos(z), arccos(zm)))
        assert_(eq(np.arctan(z), arctan(zm)))
        assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
        assert_(eq(np.absolute(x), absolute(xm)))
        assert_(eq(np.equal(x, y), equal(xm, ym)))
        assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
        assert_(eq(np.less(x, y), less(xm, ym)))
        assert_(eq(np.greater(x, y), greater(xm, ym)))
        assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
        assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
        assert_(eq(np.conjugate(x), conjugate(xm)))
        assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
        assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
        assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
        assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))

    def test_xtestCount(self):
        # Test count
        ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
        assert_(count(ott).dtype.type is np.intp)
        assert_equal(3, count(ott))
        assert_equal(1, count(1))
        assert_(eq(0, array(1, mask=[1])))
        ott = ott.reshape((2, 2))
        assert_(count(ott).dtype.type is np.intp)
        assert_(isinstance(count(ott, 0), np.ndarray))
        assert_(count(ott).dtype.type is np.intp)
        assert_(eq(3, count(ott)))
        assert_(getmask(count(ott, 0)) is nomask)
        assert_(eq([1, 2], count(ott, 0)))

    def test_testMinMax(self):
        # Test minimum and maximum.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        xr = np.ravel(x)  # max doesn't work if shaped
        xmr = ravel(xm)
        # true because of careful selection of data
        assert_(eq(max(xr), maximum.reduce(xmr)))
        assert_(eq(min(xr), minimum.reduce(xmr)))

    def test_testAddSumProd(self):
        # Test add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        assert_(eq(np.add.reduce(x), add.reduce(x)))
        assert_(eq(np.add.accumulate(x), add.accumulate(x)))
        assert_(eq(4, sum(array(4), axis=0)))
        assert_(eq(4, sum(array(4), axis=0)))
        assert_(eq(np.sum(x, axis=0), sum(x, axis=0)))
        assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
        assert_(eq(np.sum(x, 0), sum(x, 0)))
        assert_(eq(np.product(x, axis=0), product(x, axis=0)))
        assert_(eq(np.product(x, 0), product(x, 0)))
        assert_(eq(np.product(filled(xm, 1), axis=0),
                   product(xm, axis=0)))
        if len(s) > 1:
            assert_(eq(np.concatenate((x, y), 1),
                       concatenate((xm, ym), 1)))
            assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
            assert_(eq(np.sum(x, 1), sum(x, 1)))
            assert_(eq(np.product(x, 1), product(x, 1)))

    def test_testCI(self):
        # Test of conversions and indexing
        x1 = np.array([1, 2, 4, 3])
        x2 = array(x1, mask=[1, 0, 0, 0])
        x3 = array(x1, mask=[0, 1, 0, 1])
        x4 = array(x1)
        # test conversion to strings
        str(x2)  # raises?
        repr(x2)  # raises?
        assert_(eq(np.sort(x1), sort(x2, fill_value=0)))
        # tests of indexing
        assert_(type(x2[1]) is type(x1[1]))
        assert_(x1[1] == x2[1])
        assert_(x2[0] is masked)
        assert_(eq(x1[2], x2[2]))
        assert_(eq(x1[2:5], x2[2:5]))
        assert_(eq(x1[:], x2[:]))
        assert_(eq(x1[1:], x3[1:]))
        x1[2] = 9
        x2[2] = 9
        assert_(eq(x1, x2))
        x1[1:3] = 99
        x2[1:3] = 99
        assert_(eq(x1, x2))
        x2[1] = masked
        assert_(eq(x1, x2))
        x2[1:3] = masked
        assert_(eq(x1, x2))
        x2[:] = x1
        x2[1] = masked
        assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
        x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
        assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
        x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
        assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
        assert_(allequal(x4, array([1, 2, 3, 4])))
        x1 = np.arange(5) * 1.0
        x2 = masked_values(x1, 3.0)
        assert_(eq(x1, x2))
        assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
        assert_(eq(3.0, x2.fill_value))
        x1 = array([1, 'hello', 2, 3], object)
        x2 = np.array([1, 'hello', 2, 3], object)
        s1 = x1[1]
        s2 = x2[1]
        assert_equal(type(s2), str)
        assert_equal(type(s1), str)
        assert_equal(s1, s2)
        assert_(x1[1:1].shape == (0,))

    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        n = [0, 0, 1, 0, 0]
        m = make_mask(n)
        m2 = make_mask(m)
        assert_(m is m2)
        m3 = make_mask(m, copy=True)
        assert_(m is not m3)
        x1 = np.arange(5)
        y1 = array(x1, mask=m)
        assert_(y1._data is not x1)
        assert_(allequal(x1, y1._data))
        assert_(y1._mask is m)
        y1a = array(y1, copy=0)
        # For copy=False, one might expect that the array would just
        # passed on, i.e., that it would be "is" instead of "==".
        # See gh-4043 for discussion.
        assert_(y1a._mask.__array_interface__ ==
                y1._mask.__array_interface__)
        y2 = array(x1, mask=m3, copy=0)
        assert_(y2._mask is m3)
        assert_(y2[2] is masked)
        y2[2] = 9
        assert_(y2[2] is not masked)
        assert_(y2._mask is m3)
        assert_(allequal(y2.mask, 0))
        y2a = array(x1, mask=m, copy=1)
        assert_(y2a._mask is not m)
        assert_(y2a[2] is masked)
        y2a[2] = 9
        assert_(y2a[2] is not masked)
        assert_(y2a._mask is not m)
        assert_(allequal(y2a.mask, 0))
        y3 = array(x1 * 1.0, mask=m)
        assert_(filled(y3).dtype is (x1 * 1.0).dtype)
        x4 = arange(4)
        x4[2] = masked
        y4 = resize(x4, (8,))
        assert_(eq(concatenate([x4, x4]), y4))
        assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
        y5 = repeat(x4, (2, 2, 2, 2), axis=0)
        assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
        y6 = repeat(x4, 2, axis=0)
        assert_(eq(y5, y6))

    def test_testPut(self):
        # Test of put
        d = arange(5)
        n = [0, 0, 0, 1, 1]
        m = make_mask(n)
        m2 = m.copy()
        x = array(d, mask=m)
        assert_(x[3] is masked)
        assert_(x[4] is masked)
        x[[1, 4]] = [10, 40]
        assert_(x._mask is m)
        assert_(x[3] is masked)
        assert_(x[4] is not masked)
        assert_(eq(x, [0, 10, 2, -1, 40]))
        x = array(d, mask=m2, copy=True)
        x.put([0, 1, 2], [-1, 100, 200])
        assert_(x._mask is not m2)
        assert_(x[3] is masked)
        assert_(x[4] is masked)
        assert_(eq(x, [-1, 100, 200, 0, 0]))

    def test_testPut2(self):
        # Test of put
        d = arange(5)
        x = array(d, mask=[0, 0, 0, 0, 0])
        z = array([10, 40], mask=[1, 0])
        assert_(x[2] is not masked)
        assert_(x[3] is not masked)
        x[2:4] = z
        assert_(x[2] is masked)
        assert_(x[3] is not masked)
        assert_(eq(x, [0, 1, 10, 40, 4]))
        d = arange(5)
        x = array(d, mask=[0, 0, 0, 0, 0])
        y = x[2:4]
        z = array([10, 40], mask=[1, 0])
        assert_(x[2] is not masked)
        assert_(x[3] is not masked)
        y[:] = z
        assert_(y[0] is masked)
        assert_(y[1] is not masked)
        assert_(eq(y, [10, 40]))
        assert_(x[2] is masked)
        assert_(x[3] is not masked)
        assert_(eq(x, [0, 1, 10, 40, 4]))

    def test_testMaPut(self):
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]
        i = np.nonzero(m)[0]
        put(ym, i, zm)
        assert_(all(take(ym, i, axis=0) == zm))

    def test_testOddFeatures(self):
        # Test of other odd features
        x = arange(20)
        x = x.reshape(4, 5)
        x.flat[5] = 12
        assert_(x[1, 0] == 12)
        z = x + 10j * x
        assert_(eq(z.real, x))
        assert_(eq(z.imag, 10 * x))
        assert_(eq((z * conjugate(z)).real, 101 * x * x))
        z.imag[...] = 0.0
        x = arange(10)
        x[3] = masked
        assert_(str(x[3]) == str(masked))
        c = x >= 8
        assert_(count(where(c, masked, masked)) == 0)
        assert_(shape(where(c, masked, masked)) == c.shape)
        z = where(c, x, masked)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is masked)
        assert_(z[7] is masked)
        assert_(z[8] is not masked)
        assert_(z[9] is not masked)
        assert_(eq(x, z))
        z = where(c, masked, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        z = masked_where(c, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        assert_(eq(x, z))
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        c[0] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
        assert_(eq(masked_where(greater_equal(x, 2), x),
                   masked_greater_equal(x, 2)))
        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
        assert_(eq(masked_inside(array(list(range(5)),
                                       mask=[1, 0, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 1, 1, 0]))
        assert_(eq(masked_outside(array(list(range(5)),
                                        mask=[0, 1, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 0, 0, 1]))
        assert_(eq(masked_equal(array(list(range(5)),
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 0]))
        assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                          mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 1]))
        assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
                   [99, 99, 3, 4, 5]))
        atest = ones((10, 10, 10), dtype=np.float32)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_(eq(atest, ctest))
        z = choose(c, (-x, x))
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        x = arange(6)
        x[5] = masked
        y = arange(6) * 10
        y[2] = masked
        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
        cm = c.filled(1)
        z = where(c, x, y)
        zm = where(cm, x, y)
        assert_(eq(z, zm))
        assert_(getmask(zm) is nomask)
        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
        z = where(c, masked, 1)
        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
        z = where(c, 1, masked)
        assert_(eq(z, [99, 1, 1, 99, 99, 99]))

    def test_testMinMax2(self):
        # Test of minimum, maximum.
        assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))
        assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))
        x = arange(5)
        y = arange(5) - 2
        x[3] = masked
        y[0] = masked
        assert_(eq(minimum(x, y), where(less(x, y), x, y)))
        assert_(eq(maximum(x, y), where(greater(x, y), x, y)))
        assert_(minimum.reduce(x) == 0)
        assert_(maximum.reduce(x) == 4)

    def test_testTakeTransposeInnerOuter(self):
        # Test of take, transpose, inner, outer products
        x = arange(24)
        y = np.arange(24)
        x[5:6] = masked
        x = x.reshape(2, 3, 4)
        y = y.reshape(2, 3, 4)
        assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
        assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
        assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
                   inner(x, y)))
        assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
                   outer(x, y)))
        y = array(['abc', 1, 'def', 2, 3], object)
        y[2] = masked
        t = take(y, [0, 3, 4])
        assert_(t[0] == 'abc')
        assert_(t[1] == 2)
        assert_(t[2] == 3)

    def test_testInplace(self):
        # Test of inplace operations and rich comparisons
        y = arange(10)
        x = arange(10)
        xm = arange(10)
        xm[2] = masked
        x += 1
        assert_(eq(x, y + 1))
        xm += 1
        assert_(eq(x, y + 1))
        x = arange(10)
        xm = arange(10)
        xm[2] = masked
        x -= 1
        assert_(eq(x, y - 1))
        xm -= 1
        assert_(eq(xm, y - 1))
        x = arange(10) * 1.0
        xm = arange(10) * 1.0
        xm[2] = masked
        x *= 2.0
        assert_(eq(x, y * 2))
        xm *= 2.0
        assert_(eq(xm, y * 2))
        x = arange(10) * 2
        xm = arange(10)
        xm[2] = masked
        x //= 2
        assert_(eq(x, y))
        xm //= 2
        assert_(eq(x, y))
        x = arange(10) * 1.0
        xm = arange(10) * 1.0
        xm[2] = masked
        x /= 2.0
        assert_(eq(x, y / 2.0))
        xm /= arange(10)
        assert_(eq(xm, ones((10,))))
        x = arange(10).astype(np.float32)
        xm = arange(10)
        xm[2] = masked
        x += 1.
        assert_(eq(x, y + 1.))

    def test_testPickle(self):
        # Test of pickling
        x = arange(12)
        x[4:10:2] = masked
        x = x.reshape(4, 3)
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            s = pickle.dumps(x, protocol=proto)
            y = pickle.loads(s)
            assert_(eq(x, y))

    def test_testMasked(self):
        # Test of masked element
        xx = arange(6)
        xx[1] = masked
        assert_(str(masked) == '--')
        assert_(xx[1] is masked)
        assert_equal(filled(xx[1], 0), 0)

    def test_testAverage1(self):
        # Test of average.
        ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
        assert_(eq(2.0, average(ott, axis=0)))
        assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
        result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
        assert_(eq(2.0, result))
        assert_(wts == 4.0)
        ott[:] = masked
        assert_(average(ott, axis=0) is masked)
        ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
        ott = ott.reshape(2, 2)
        ott[:, 1] = masked
        assert_(eq(average(ott, axis=0), [2.0, 0.0]))
        assert_(average(ott, axis=1)[0] is masked)
        assert_(eq([2., 0.], average(ott, axis=0)))
        result, wts = average(ott, axis=0, returned=True)
        assert_(eq(wts, [1., 0.]))

    def test_testAverage2(self):
        # More tests of average.
        w1 = [0, 1, 1, 1, 1, 0]
        w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
        x = arange(6)
        assert_(allclose(average(x, axis=0), 2.5))
        assert_(allclose(average(x, axis=0, weights=w1), 2.5))
        y = array([arange(6), 2.0 * arange(6)])
        assert_(allclose(average(y, None),
                         np.add.reduce(np.arange(6)) * 3. / 12.))
        assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
        assert_(allclose(average(y, axis=1),
                         [average(x, axis=0), average(x, axis=0)*2.0]))
        assert_(allclose(average(y, None, weights=w2), 20. / 6.))
        assert_(allclose(average(y, axis=0, weights=w2),
                         [0., 1., 2., 3., 4., 10.]))
        assert_(allclose(average(y, axis=1),
                         [average(x, axis=0), average(x, axis=0)*2.0]))
        m1 = zeros(6)
        m2 = [0, 0, 1, 1, 0, 0]
        m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
        m4 = ones(6)
        m5 = [0, 1, 1, 1, 1, 1]
        assert_(allclose(average(masked_array(x, m1), axis=0), 2.5))
        assert_(allclose(average(masked_array(x, m2), axis=0), 2.5))
        assert_(average(masked_array(x, m4), axis=0) is masked)
        assert_equal(average(masked_array(x, m5), axis=0), 0.0)
        assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
        z = masked_array(y, m3)
        assert_(allclose(average(z, None), 20. / 6.))
        assert_(allclose(average(z, axis=0),
                         [0., 1., 99., 99., 4.0, 7.5]))
        assert_(allclose(average(z, axis=1), [2.5, 5.0]))
        assert_(allclose(average(z, axis=0, weights=w2),
                         [0., 1., 99., 99., 4.0, 10.0]))
        a = arange(6)
        b = arange(6) * 3
        r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
        assert_equal(shape(r1), shape(w1))
        assert_equal(r1.shape, w1.shape)
        r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)
        assert_equal(shape(w2), shape(r2))
        r2, w2 = average(ones((2, 2, 3)), returned=True)
        assert_equal(shape(w2), shape(r2))
        r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)
        assert_(shape(w2) == shape(r2))
        a2d = array([[1, 2], [0, 4]], float)
        a2dm = masked_array(a2d, [[0, 0], [1, 0]])
        a2da = average(a2d, axis=0)
        assert_(eq(a2da, [0.5, 3.0]))
        a2dma = average(a2dm, axis=0)
        assert_(eq(a2dma, [1.0, 3.0]))
        a2dma = average(a2dm, axis=None)
        assert_(eq(a2dma, 7. / 3.))
        a2dma = average(a2dm, axis=1)
        assert_(eq(a2dma, [1.5, 4.0]))

    def test_testToPython(self):
        # Conversions of 1-element masked arrays to Python scalars.
        assert_equal(1, int(array(1)))
        assert_equal(1.0, float(array(1)))
        assert_equal(1, int(array([[[1]]])))
        assert_equal(1.0, float(array([[1]])))
        assert_raises(TypeError, float, array([1, 1]))
        assert_raises(ValueError, bool, array([0, 1]))
        assert_raises(ValueError, bool, array([0, 0], mask=[0, 1]))

    def test_testScalarArithmetic(self):
        xm = array(0, mask=1)
        # TODO FIXME: find out why the following raises a warning in r8247
        with np.errstate(divide='ignore'):
            assert_((1 / array(0)).mask)
        assert_((1 + xm).mask)
        assert_((-xm).mask)
        assert_((-xm).mask)
        assert_(maximum(xm, xm).mask)
        assert_(minimum(xm, xm).mask)
        assert_(xm.filled().dtype is xm._data.dtype)
        x = array(0, mask=0)
        assert_(x.filled() == x._data)
        assert_equal(str(xm), str(masked_print_option))

    def test_testArrayMethods(self):
        # MaskedArray methods must agree with the underlying data's methods
        # when nothing is masked.
        a = array([1, 3, 2])
        assert_(eq(a.any(), a._data.any()))
        assert_(eq(a.all(), a._data.all()))
        assert_(eq(a.argmax(), a._data.argmax()))
        assert_(eq(a.argmin(), a._data.argmin()))
        assert_(eq(a.choose(0, 1, 2, 3, 4),
                   a._data.choose(0, 1, 2, 3, 4)))
        assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
        assert_(eq(a.conj(), a._data.conj()))
        assert_(eq(a.conjugate(), a._data.conjugate()))
        m = array([[1, 2], [3, 4]])
        assert_(eq(m.diagonal(), m._data.diagonal()))
        assert_(eq(a.sum(), a._data.sum()))
        assert_(eq(a.take([1, 2]), a._data.take([1, 2])))
        assert_(eq(m.transpose(), m._data.transpose()))

    def test_testArrayAttributes(self):
        a = array([1, 3, 2])
        assert_equal(a.ndim, 1)

    def test_testAPI(self):
        # Every public ndarray method must also exist on MaskedArray.
        assert_(not [m for m in dir(np.ndarray)
                     if m not in dir(MaskedArray) and
                     not m.startswith('_')])

    def test_testSingleElementSubscript(self):
        a = array([1, 3, 2])
        b = array([1, 3, 2], mask=[1, 0, 1])
        assert_equal(a[0].shape, ())
        assert_equal(b[0].shape, ())
        assert_equal(b[1].shape, ())
class TestUfuncs:
    """Checks that every numpy.ma ufunc wrapper agrees with the corresponding
    plain-numpy ufunc on filled data and produces matching masks (compared via
    the module-level ``eqmask`` helper)."""

    def setup(self):
        # Two masked fixtures with complementary masks in the first two slots.
        self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
                  array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)

    def test_testUfuncRegression(self):
        # Ufuncs whose invalid-value warnings are expected and suppressed.
        f_invalid_ignore = [
            'sqrt', 'arctanh', 'arcsin', 'arccos',
            'arccosh', 'arctanh', 'log', 'log10', 'divide',
            'true_divide', 'floor_divide', 'remainder', 'fmod']
        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
                  'sin', 'cos', 'tan',
                  'arcsin', 'arccos', 'arctan',
                  'sinh', 'cosh', 'tanh',
                  'arcsinh',
                  'arccosh',
                  'arctanh',
                  'absolute', 'fabs', 'negative',
                  'floor', 'ceil',
                  'logical_not',
                  'add', 'subtract', 'multiply',
                  'divide', 'true_divide', 'floor_divide',
                  'remainder', 'fmod', 'hypot', 'arctan2',
                  'equal', 'not_equal', 'less_equal', 'greater_equal',
                  'less', 'greater',
                  'logical_and', 'logical_or', 'logical_xor']:
            # Resolve the plain ufunc from umath, falling back to fromnumeric.
            try:
                uf = getattr(umath, f)
            except AttributeError:
                uf = getattr(fromnumeric, f)
            mf = getattr(np.ma, f)
            args = self.d[:uf.nin]
            with np.errstate():
                if f in f_invalid_ignore:
                    np.seterr(invalid='ignore')
                if f in ['arctanh', 'log', 'log10']:
                    np.seterr(divide='ignore')
                ur = uf(*args)
                mr = mf(*args)
            assert_(eq(ur.filled(0), mr.filled(0), f))
            assert_(eqmask(ur.mask, mr.mask))

    def test_reduce(self):
        a = self.d[0]
        assert_(not alltrue(a, axis=0))
        assert_(sometrue(a, axis=0))
        assert_equal(sum(a[:3], axis=0), 0)
        assert_equal(product(a, axis=0), 0)

    def test_minmax(self):
        a = arange(1, 13).reshape(3, 4)
        amask = masked_where(a < 5, a)
        assert_equal(amask.max(), a.max())
        assert_equal(amask.min(), 5)
        assert_((amask.max(0) == a.max(0)).all())
        assert_((amask.min(0) == [5, 6, 7, 8]).all())
        assert_(amask.max(1)[0].mask)
        assert_(amask.min(1)[0].mask)

    def test_nonzero(self):
        # nonzero must ignore masked entries for every numeric dtype code.
        for t in "?bhilqpBHILQPfdgFDGO":
            x = array([1, 0, 2, 0], mask=[0, 0, 1, 1])
            assert_(eq(nonzero(x), [0]))
class TestArrayMethods:
    """Checks MaskedArray methods (trace, clip, ptp, swapaxes, cumprod,
    cumsum, var/std) against equivalents computed from compressed or filled
    data."""

    def setup(self):
        # x: flat data; X: 6x6 view; XX: 3x2x2x3 view; m: shared flat mask.
        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        X = x.reshape(6, 6)
        XX = x.reshape(3, 2, 2, 3)
        m = np.array([0, 1, 0, 1, 0, 0,
                      1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0,
                      0, 0, 1, 0, 1, 0])
        mx = array(data=x, mask=m)
        mX = array(data=X, mask=m.reshape(X.shape))
        mXX = array(data=XX, mask=m.reshape(XX.shape))
        self.d = (x, X, XX, m, mx, mX, mXX)

    def test_trace(self):
        (x, X, XX, m, mx, mX, mXX,) = self.d
        mXdiag = mX.diagonal()
        assert_equal(mX.trace(), mX.diagonal().compressed().sum())
        assert_(eq(mX.trace(),
                   X.trace() - sum(mXdiag.mask * X.diagonal(),
                                   axis=0)))

    def test_clip(self):
        (x, X, XX, m, mx, mX, mXX,) = self.d
        clipped = mx.clip(2, 8)
        assert_(eq(clipped.mask, mx.mask))
        assert_(eq(clipped._data, x.clip(2, 8)))
        assert_(eq(clipped._data, mx._data.clip(2, 8)))

    def test_ptp(self):
        (x, X, XX, m, mx, mX, mXX,) = self.d
        (n, m) = X.shape
        assert_equal(mx.ptp(), mx.compressed().ptp())
        # NOTE(review): np.float_ was removed in NumPy 2.0; this file targets
        # older NumPy — confirm before running under 2.x.
        rows = np.zeros(n, np.float_)
        cols = np.zeros(m, np.float_)
        for k in range(m):
            cols[k] = mX[:, k].compressed().ptp()
        for k in range(n):
            rows[k] = mX[k].compressed().ptp()
        assert_(eq(mX.ptp(0), cols))
        assert_(eq(mX.ptp(1), rows))

    def test_swapaxes(self):
        (x, X, XX, m, mx, mX, mXX,) = self.d
        mXswapped = mX.swapaxes(0, 1)
        assert_(eq(mXswapped[-1], mX[:, -1]))
        mXXswapped = mXX.swapaxes(0, 2)
        assert_equal(mXXswapped.shape, (2, 2, 3, 3))

    def test_cumprod(self):
        (x, X, XX, m, mx, mX, mXX,) = self.d
        mXcp = mX.cumprod(0)
        assert_(eq(mXcp._data, mX.filled(1).cumprod(0)))
        mXcp = mX.cumprod(1)
        assert_(eq(mXcp._data, mX.filled(1).cumprod(1)))

    def test_cumsum(self):
        (x, X, XX, m, mx, mX, mXX,) = self.d
        mXcp = mX.cumsum(0)
        assert_(eq(mXcp._data, mX.filled(0).cumsum(0)))
        mXcp = mX.cumsum(1)
        assert_(eq(mXcp._data, mX.filled(0).cumsum(1)))

    def test_varstd(self):
        (x, X, XX, m, mx, mX, mXX,) = self.d
        assert_(eq(mX.var(axis=None), mX.compressed().var()))
        assert_(eq(mX.std(axis=None), mX.compressed().std()))
        assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))
        assert_(eq(mX.var().shape, X.var().shape))
        (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
        for k in range(6):
            assert_(eq(mXvar1[k], mX[k].compressed().var()))
            assert_(eq(mXvar0[k], mX[:, k].compressed().var()))
            assert_(eq(np.sqrt(mXvar0[k]),
                       mX[:, k].compressed().std()))
def eqmask(m1, m2):
    """Return True when the two masks are equal.

    ``nomask`` is a singleton, so when either argument is ``nomask`` the
    masks are equal exactly when they are the same object.
    """
    if m1 is nomask or m2 is nomask:
        return m1 is m2
    return (m1 == m2).all()
| {
"repo_name": "rgommers/numpy",
"path": "numpy/ma/tests/test_old_ma.py",
"copies": "10",
"size": "32265",
"license": "bsd-3-clause",
"hash": -820715414340764300,
"line_mean": 36.6048951049,
"line_max": 81,
"alpha_frac": 0.4760576476,
"autogenerated": false,
"ratio": 2.9023117747593776,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8378369422359377,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import numpy as np
import random
import numpy as np
from common.utils import binary, index_bits
_AND = lambda a, b: a & b
_XOR = lambda a, b: a ^ b
def _gamma(cipher_query, cipher_index, cipher_one):
    """
    Calculates the value of the gamma function, as described in PDF
    (paragraph 3.1.2): a bitwise AND folded over the element-wise XOR of
    the query and the index, starting from the encrypted one.

    :param cipher_query: cipher query
    :param cipher_index: cipher index
    :param cipher_one: cipher one
    :return: the value of the gamma function
    """
    # Iterate the XOR result directly; materialising it into a throwaway
    # list first (as the original code did) buys nothing.
    return reduce(_AND, cipher_query ^ cipher_index, cipher_one)
def _R(gammas, column, cipher_zero):
    """
    Calculates the value of R() function, as described in PDF (paragraph 3.1.3):
    XORs together the gammas selected by the 1-entries of ``column``.

    :param gammas: gammas
    :param column: column (array of 0/1 selector values)
    :param cipher_zero: encrypted zero used as the fold's start value
    :return: the value of the R function
    """
    return reduce(_XOR, gammas[np.where(column == 1)], cipher_zero)
class Store:
    """A private store."""

    def __init__(self, record_size=3, record_count=5, database=None, fill=0):
        """
        Creates a new private store.

        :param record_size: the size of each record, in bits.
        :param record_count: the number of records.
        :param database: numpy matrix of database values.
        :param fill: value to fill the database with; the string 'random'
            fills each bit with a random 0/1 instead of a constant.
        """
        if database is None:
            if fill == 'random':
                array = [[random.randint(0, 1) for _ in range(record_size)]
                         for _ in range(record_count)]
            else:
                array = [[fill] * record_size for _ in range(record_count)]
            database = np.array(array)

        # The database shape is authoritative, even when one was passed in.
        self.record_count, self.record_size = database.shape
        self.database = database
        self.index_bits = index_bits(self.record_count)
        # precompute binary representation for index
        self.binary_index = [binary(x, size=self.index_bits)
                             for x in range(self.record_count)]

    def retrieve(self, cipher_query, public_key):
        """
        Retrieves an encrypted record from the store, given a ciphered query.

        :param cipher_query: the encrypted index of the record to retrieve, as
            an :class:`~EncryptedArray`
        :param public_key: the :class:`~PublicKey` to use.
        :raises ValueError: if the length of cipher_query does not equal the
            Store's index_blength.
        """
        cipher_one = public_key.encrypt(1)
        cipher_zero = public_key.encrypt(0)

        def gamma(bits):
            # Encrypt the plaintext index bits, then fold them against the
            # encrypted query (see _gamma).
            bits = public_key.encrypt(bits)
            bits = _gamma(cipher_query, bits, cipher_one)
            return bits

        # TODO: make this parallel
        gammas = np.array([gamma(bits) for bits in self.binary_index])
        assert (len(gammas) == self.record_count)

        # One encrypted output bit per column of the database.
        # TODO: make this parallel
        return [_R(gammas, self.database[:, x], cipher_zero)
                for x in range(self.record_size)]

    def set(self, i, value):
        """
        Set a value in the array.

        :param i: the unencrypted index to set.
        :param value: the unencrypted value; left-padded with zero bits when
            shorter than ``record_size``.
        """
        if len(value) < self.record_size:
            # np.int was removed in NumPy 1.24; the builtin int is the
            # equivalent dtype.
            padded_value = np.zeros(self.record_size, dtype=int)
            padded_value[padded_value.size - len(value):] = value
        else:
            padded_value = value
        self.database[i] = padded_value
"repo_name": "blindstore/blindstore-old-scarab",
"path": "server/store.py",
"copies": "1",
"size": "3439",
"license": "mit",
"hash": 8082321268612189000,
"line_mean": 32.3980582524,
"line_max": 79,
"alpha_frac": 0.5958127363,
"autogenerated": false,
"ratio": 3.916856492027335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5012669228327336,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import numpy
import gem
from finat.finiteelementbase import FiniteElementBase
class TensorFiniteElement(FiniteElementBase):

    def __init__(self, element, shape, transpose=False):
        # TODO: Update docstring for arbitrary rank!
        r"""A Finite element whose basis functions have the form:

        .. math::

            \boldsymbol\phi_{i \alpha \beta} = \mathbf{e}_{\alpha} \mathbf{e}_{\beta}^{\mathrm{T}}\phi_i

        Where :math:`\{\mathbf{e}_\alpha,\, \alpha=0\ldots\mathrm{shape[0]}\}` and
        :math:`\{\mathbf{e}_\beta,\, \beta=0\ldots\mathrm{shape[1]}\}` are
        the bases for :math:`\mathbb{R}^{\mathrm{shape[0]}}` and
        :math:`\mathbb{R}^{\mathrm{shape[1]}}` respectively; and
        :math:`\{\phi_i\}` is the basis for the corresponding scalar
        finite element space.

        :param element: The scalar finite element.
        :param shape: The geometric shape of the tensor element.
        :param transpose: Changes the DoF ordering from the
                          Firedrake-style XYZ XYZ XYZ XYZ to the
                          FEniCS-style XXXX YYYY ZZZZ.  That is,
                          tensor shape indices come before the scalar
                          basis function indices when transpose=True.

        :math:`\boldsymbol\phi_{i\alpha\beta}` is, of course, tensor-valued. If
        we subscript the vector-value with :math:`\gamma\epsilon` then we can write:

        .. math::

            \boldsymbol\phi_{\gamma\epsilon(i\alpha\beta)} = \delta_{\gamma\alpha}\delta_{\epsilon\beta}\phi_i

        This form enables the simplification of the loop nests which
        will eventually be created, so it is the form we employ here."""
        super(TensorFiniteElement, self).__init__()
        self._base_element = element
        self._shape = shape
        self._transpose = transpose

    @property
    def base_element(self):
        """The base element of this tensor element."""
        return self._base_element

    @property
    def cell(self):
        # Geometry is entirely inherited from the scalar base element.
        return self._base_element.cell

    @property
    def degree(self):
        return self._base_element.degree

    @property
    def formdegree(self):
        return self._base_element.formdegree

    def entity_dofs(self):
        raise NotImplementedError("No one uses this!")

    def space_dimension(self):
        return int(numpy.prod(self.index_shape))

    @property
    def index_shape(self):
        # ``transpose`` controls whether the tensor shape precedes or follows
        # the scalar element's basis-function indices (see __init__).
        if self._transpose:
            return self._shape + self._base_element.index_shape
        else:
            return self._base_element.index_shape + self._shape

    @property
    def value_shape(self):
        return self._shape + self._base_element.value_shape

    def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None):
        r"""Produce the recipe for basis function evaluation at a set of points :math:`q`:

        .. math::

            \boldsymbol\phi_{(\gamma \epsilon) (i \alpha \beta) q} = \delta_{\alpha \gamma}\delta_{\beta \epsilon}\phi_{i q}

            \nabla\boldsymbol\phi_{(\epsilon \gamma \zeta) (i \alpha \beta) q} = \delta_{\alpha \epsilon} \delta_{\beta \gamma}\nabla\phi_{\zeta i q}
        """
        scalar_evaluation = self._base_element.basis_evaluation
        return self._tensorise(scalar_evaluation(order, ps, entity, coordinate_mapping=coordinate_mapping))

    def point_evaluation(self, order, point, entity=None):
        # Same tensorisation as basis_evaluation, driven by the base
        # element's point evaluation.
        scalar_evaluation = self._base_element.point_evaluation
        return self._tensorise(scalar_evaluation(order, point, entity))

    def _tensorise(self, scalar_evaluation):
        # Old basis function and value indices
        scalar_i = self._base_element.get_indices()
        scalar_vi = self._base_element.get_value_indices()

        # New basis function and value indices
        tensor_i = tuple(gem.Index(extent=d) for d in self._shape)
        tensor_vi = tuple(gem.Index(extent=d) for d in self._shape)

        # Couple new basis function and value indices
        deltas = reduce(gem.Product, (gem.Delta(j, k)
                                      for j, k in zip(tensor_i, tensor_vi)))

        if self._transpose:
            index_ordering = tensor_i + scalar_i + tensor_vi + scalar_vi
        else:
            index_ordering = scalar_i + tensor_i + tensor_vi + scalar_vi

        result = {}
        for alpha, expr in scalar_evaluation.items():
            result[alpha] = gem.ComponentTensor(
                gem.Product(deltas, gem.Indexed(expr, scalar_i + scalar_vi)),
                index_ordering
            )
        return result

    @property
    def mapping(self):
        return self._base_element.mapping
| {
"repo_name": "FInAT/FInAT",
"path": "finat/tensorfiniteelement.py",
"copies": "1",
"size": "4689",
"license": "mit",
"hash": 6153960399085693000,
"line_mean": 36.512,
"line_max": 147,
"alpha_frac": 0.6142034549,
"autogenerated": false,
"ratio": 3.84344262295082,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.495764607785082,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import os
import magic
from django.core.exceptions import ImproperlyConfigured
from .settings import (
IMAGE_SETS,
QUAL,
VERSATILEIMAGEFIELD_POST_PROCESSOR,
VERSATILEIMAGEFIELD_SIZED_DIRNAME,
VERSATILEIMAGEFIELD_FILTERED_DIRNAME
)
# PIL-supported file formats as found here:
# https://infohost.nmt.edu/tcc/help/pubs/pil/formats.html
# {mime type: PIL Identifier}
# Used by get_image_metadata_from_file() to map a sniffed MIME type to the
# PIL format identifier.
# NOTE(review): 'image/eps' maps to lowercase 'eps' while every other value
# is uppercase -- looks inconsistent; confirm against PIL's identifiers.
MIME_TYPE_TO_PIL_IDENTIFIER = {
    'image/bmp': 'BMP',
    'image/dcx': 'DCX',
    'image/eps': 'eps',
    'image/gif': 'GIF',
    'image/jpeg': 'JPEG',
    'image/pcd': 'PCD',
    'image/pcx': 'PCX',
    'application/pdf': 'PDF',
    'image/png': 'PNG',
    'image/x-ppm': 'PPM',
    'image/psd': 'PSD',
    'image/tiff': 'TIFF',
    'image/x-xbitmap': 'XBM',
    'image/x-xpm': 'XPM',
    'image/webp': 'WEBP',
}
class InvalidSizeKeySet(Exception):
    """Raised when a size-key set is not an iterable of 2-tuples."""

    pass
class InvalidSizeKey(Exception):
    """Raised when a single size key is neither 'url' nor '...__WxH'-shaped."""

    pass
def post_process_image_key(image_key):
    """
    Apply the processor function associated with
    settings.VERSATILEIMAGEFIELD_POST_PROCESSOR to `image_key`.

    Returns `image_key` unchanged when no post processor is configured.
    """
    # The original docstring was truncated mid-word ("settings.VER").
    if VERSATILEIMAGEFIELD_POST_PROCESSOR is None:
        return image_key
    return VERSATILEIMAGEFIELD_POST_PROCESSOR(image_key)
def get_resized_filename(filename, width, height, filename_key):
    """
    Return the 'resized filename' (according to `width`, `height` and
    `filename_key`) in the following format:
    `filename`-`filename_key`-`width`x`height`.ext
    """
    if '.' in filename:
        image_name, ext = filename.rsplit('.', 1)
    else:
        # No extension present; default to jpg.
        image_name, ext = filename, 'jpg'

    resized_key = '%s-%dx%d' % (filename_key, width, height)
    if ext.lower() in ('jpg', 'jpeg'):
        # JPEGs additionally encode the quality setting in the key.
        resized_key = '%s-%d' % (resized_key, QUAL)

    return '%s-%s.%s' % (image_name, post_process_image_key(resized_key), ext)
def get_resized_path(path_to_image, width, height,
                     filename_key, storage):
    """
    Return a `path_to_image` location on `storage` as dictated by `width`,
    `height` and `filename_key`.

    `storage` is not used here; it is kept for interface compatibility.
    """
    containing_folder, filename = os.path.split(path_to_image)
    sized_filename = get_resized_filename(
        filename, width, height, filename_key
    )
    joined_path = os.path.join(
        VERSATILEIMAGEFIELD_SIZED_DIRNAME,
        containing_folder,
        sized_filename,
    )
    # Removing spaces so this path is memcached friendly
    return joined_path.replace(' ', '')
def get_filtered_filename(filename, filename_key):
    """
    Return the 'filtered filename' (according to `filename_key`)
    in the following format:
    `filename`__`filename_key`__.ext
    """
    if '.' in filename:
        image_name, ext = filename.rsplit('.', 1)
    else:
        # No extension present; default to jpg.
        image_name, ext = filename, 'jpg'
    return '{}__{}__.{}'.format(image_name, filename_key, ext)
def get_filtered_path(path_to_image, filename_key, storage):
    """
    Return the 'filtered path'.

    `storage` is not used here; it is kept for interface compatibility.
    """
    containing_folder, filename = os.path.split(path_to_image)
    filtered_filename = get_filtered_filename(filename, filename_key)
    path_to_return = os.path.join(
        containing_folder,
        VERSATILEIMAGEFIELD_FILTERED_DIRNAME,
        filtered_filename,
    )
    # Removing spaces so this path is memcached key friendly
    return path_to_return.replace(' ', '')
def get_image_metadata_from_file(file_like):
    """
    Receive a valid image file and return a 2-tuple of two strings:
    [0]: Image format (i.e. 'jpg', 'gif' or 'png')
    [1]: InMemoryUploadedFile-friendly save format (i.e. 'image/jpeg')
    """
    # Only the first 1024 bytes are needed for magic-number sniffing; rewind
    # afterwards so callers can re-read the file from the start.
    mime_type = magic.from_buffer(file_like.read(1024), mime=True)
    file_like.seek(0)
    return MIME_TYPE_TO_PIL_IDENTIFIER[mime_type], mime_type
def validate_versatileimagefield_sizekey_list(sizes):
    """
    Validate a list of size keys.

    `sizes`: An iterable of 2-tuples, both strings. Example:
        [
            ('large', 'url'),
            ('medium', 'crop__400x400'),
            ('small', 'thumbnail__100x100')
        ]
    """
    try:
        for _, size_key in sizes:
            last_segment = size_key.split('__')[-1]
            if last_segment != 'url' and 'x' not in last_segment:
                raise InvalidSizeKey(
                    "{0} is an invalid size. All sizes must be either "
                    "'url' or made up of at least two segments separated "
                    "by double underscores. Examples: 'crop__400x400', "
                    "filters__invert__url".format(size_key)
                )
    except ValueError:
        # Tuple unpacking failed, so `sizes` is not an iterable of 2-tuples.
        raise InvalidSizeKeySet(
            '{} is an invalid size key set. Size key sets must be an '
            'iterable of 2-tuples'.format(str(sizes))
        )
    # Deduplicate; ordering of the returned list is not significant.
    return list(set(sizes))
def get_url_from_image_key(image_instance, image_key):
    """Build a URL from `image_key`.

    The key is a '__'-separated attribute chain; a trailing 'WxH' segment
    selects a sized rendition whose ``.url`` is returned.
    """
    segments = image_key.split('__')
    size_key = segments.pop() if 'x' in segments[-1] else None

    target = image_instance
    for attr in segments:
        target = getattr(target, attr)

    if size_key:
        target = target[size_key].url
    return target
def build_versatileimagefield_url_set(image_instance, size_set, request=None):
    """
    Return a dictionary of urls corresponding to size_set

    - `image_instance`: A VersatileImageFieldFile
    - `size_set`: An iterable of 2-tuples, both strings. Example:
        [
            ('large', 'url'),
            ('medium', 'crop__400x400'),
            ('small', 'thumbnail__100x100')
        ]

        The above would lead to the following response:
        {
            'large': 'http://some.url/image.jpg',
            'medium': 'http://some.url/__sized__/image-crop-400x400.jpg',
            'small': 'http://some.url/__sized__/image-thumbnail-100x100.jpg',
        }
    - `request`: when given, each url is made absolute with
      request.build_absolute_uri
    """
    size_set = validate_versatileimagefield_sizekey_list(size_set)
    to_return = {}
    # NOTE(review): when `image_instance` is falsy the second operand still
    # dereferences `image_instance.field`, which raises AttributeError if it
    # is None -- presumably callers always pass a FieldFile; confirm.
    if image_instance or image_instance.field.placeholder_image:
        for key, image_key in size_set:
            img_url = get_url_from_image_key(image_instance, image_key)
            if request is not None:
                img_url = request.build_absolute_uri(img_url)
            to_return[key] = img_url
    return to_return
def get_rendition_key_set(key):
    """
    Retrieve a validated and prepped Rendition Key Set from
    settings.VERSATILEIMAGEFIELD_RENDITION_KEY_SETS
    """
    try:
        rendition_key_set = IMAGE_SETS[key]
    except KeyError:
        raise ImproperlyConfigured(
            "No Rendition Key Set exists at "
            "settings.VERSATILEIMAGEFIELD_RENDITION_KEY_SETS['{}']".format(key)
        )
    # Only reached when the lookup above succeeded.
    return validate_versatileimagefield_sizekey_list(rendition_key_set)
| {
"repo_name": "respondcreate/django-versatileimagefield",
"path": "versatileimagefield/utils.py",
"copies": "2",
"size": "7238",
"license": "mit",
"hash": 8795258097111337000,
"line_mean": 28.7860082305,
"line_max": 83,
"alpha_frac": 0.6007184305,
"autogenerated": false,
"ratio": 3.4966183574879226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5097336787987923,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import pytest
import requests
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from sqlalchemy import text
import xml.etree.ElementTree as ET
from tests.conftest import TEST_PROXY_URL, TEST_RADIO_DNS_URL
from tests.test_channel_creation import CHANNELS_MSQL_QUERY
from tests.utilities.utilities import compare_lists, sql_alchemy_result_to_list, accept_alert
from selenium.webdriver.support import expected_conditions as EC
# Expected rows of the channels HTML table after the deletion performed by
# the test below (header row first).
CHANNELS_HTML_TR = [
    'Station Client Type Name RadioDNS entry / Url DNS Authoritative FQDN Services',
    'Classical Station default amss CS_AMSS 4001.amss.radiodns.org. classicalstation.standalone.radio.ebu.io\nEPG standalone.ebu.io\nSPI standalone.ebu.io\nEdit Delete',
    'Classical Station default dab CS_DAB_NEW 2.4002.43e2.fe1.dab.radiodns.org. classicalstation.standalone.radio.ebu.io\nEPG standalone.ebu.io\nSPI standalone.ebu.io\nEdit Delete',
    'Classical Station default drm CS_DRM 4001.drm.radiodns.org. classicalstation.standalone.radio.ebu.io\nEPG standalone.ebu.io\nSPI standalone.ebu.io\nEdit Delete',
    'Classical Station default hd CS_HD_RADIO 0eaff.031.hd.radiodns.org. classicalstation.standalone.radio.ebu.io\nEPG standalone.ebu.io\nSPI standalone.ebu.io\nEdit Delete',
    'Classical Station default id CS_IP http://server/stream classicalstation.standalone.radio.ebu.io Edit Delete',
    'Classical Station CNN id CS_IP_2 http://server/stream/ouiiiiii classicalstation.standalone.radio.ebu.io Edit Delete'
]
# Expected MySQL rows (columns space-joined) after the deletion.
CHANNELS_MYSQL_TR = [
    ' '.join(['CS_DAB_NEW', 'None', '81', '43e2', 'None', 'None', 'None', '2', 'None', '4002', 'None', 'dab', 'None', 'audio/aac', 'None', 'None']),
    ' '.join(['CS_DRM', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', '4001', 'None', 'drm', 'None', 'None', 'None', 'None']),
    ' '.join(['CS_AMSS', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', '4001', 'None', 'amss', 'None', 'None', 'None', 'None']),
    ' '.join(['CS_HD_RADIO', '031', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', '0EAFF', 'hd', 'None', 'None', 'None', 'None']),
    ' '.join(['CS_IP', 'None', 'None', 'None', 'classicalstation.standalone.radio.ebu.io', 'None', 'None', 'None', 'ebu1standalone', 'None', 'None', 'id', '200', 'audio/mpeg', 'http://server/stream', 'None']),
    ' '.join(['CS_IP_2', 'None', 'None', 'None', 'classicalstation.standalone.radio.ebu.io', 'None', 'None', 'None', 'ebu1standalone', 'None', 'None', 'id', '200', 'audio/mpeg', 'http://server/stream/ouiiiiii', '2']),
]
@pytest.mark.run(order=11)
def test_delete_channel(stack_setup, browser_setup):
    """Delete channel id 2 through the UI, then verify the HTML table, the
    MySQL rows and the published SI.xml all reflect the removal."""
    db = stack_setup
    driver = browser_setup

    driver.get(TEST_PROXY_URL + "channels/")
    # Trigger the delete link and confirm the javascript alert.
    driver.find_element_by_css_selector("[href='/channels/delete/2']").send_keys(Keys.RETURN)
    accept_alert(driver)
    # The delete link disappearing signals that the row is gone.
    WebDriverWait(driver, 5).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "[href='/channels/delete/2']")))
    channel_tr = list(map(lambda x: x.text, driver
                          .find_element_by_id("radiodns-channel-table")
                          .find_elements_by_css_selector("tr")))
    assert compare_lists(channel_tr, CHANNELS_HTML_TR)

    # Check DB
    result = db.engine.execute(text(CHANNELS_MSQL_QUERY))
    assert result.rowcount == 6
    station_mysql_tr = []
    for row in sql_alchemy_result_to_list(result):
        # Space-join the columns so rows compare against CHANNELS_MYSQL_TR.
        station_mysql_tr.append(reduce(lambda x, y: str(x) + " " + str(y), row))
    assert compare_lists(station_mysql_tr, CHANNELS_MYSQL_TR, True)

    # Check XML
    res = requests.get(TEST_RADIO_DNS_URL + "radiodns/spi/3.1/SI.xml")
    assert res.status_code == 200
    bearers = ET.fromstring(res.text).findall(".//{http://www.worlddab.org/schemas/spi/31}bearer")
    assert len(bearers) == 5
    assert bearers[0].attrib["id"] == "amss:4001"
    assert bearers[0].attrib["cost"] == "100"
    assert bearers[1].attrib["id"] == "dab:fe1.43e2.4002.2"
    assert bearers[1].attrib["cost"] == "50"
    assert bearers[1].attrib["mimeValue"] == "audio/aac"
    assert bearers[2].attrib["id"] == "drm:4001"
    assert bearers[2].attrib["cost"] == "100"
    assert bearers[3].attrib["id"] == "hd:031.0eaff"
    assert bearers[3].attrib["cost"] == "100"
    assert bearers[4].attrib["id"] == "http://server/stream"
    assert bearers[4].attrib["cost"] == "100"
    assert bearers[4].attrib["offset"] == "2000"
    assert bearers[4].attrib["mimeValue"] == "audio/mpeg"
    assert bearers[4].attrib["bitrate"] == "200"
| {
"repo_name": "ebu/radiodns-plugit",
"path": "tests/tests/test_channel_deletion.py",
"copies": "1",
"size": "4596",
"license": "bsd-3-clause",
"hash": -3467562439157245400,
"line_mean": 58.6883116883,
"line_max": 217,
"alpha_frac": 0.6703655352,
"autogenerated": false,
"ratio": 2.9575289575289574,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9118795779420596,
"avg_score": 0.0018197426616720178,
"num_lines": 77
} |
from functools import reduce
import pytest
import requests
import xml.etree.ElementTree as ET
from selenium.webdriver.support.select import Select
from sqlalchemy import text
from tests.conftest import TEST_PROXY_URL, TEST_RADIO_DNS_URL
from tests.test_channel_creation import CHANNELS_MSQL_QUERY
from tests.utilities.utilities import clear_input, compare_lists, sql_alchemy_result_to_list
# Expected rows of the channels HTML table after the edition performed by
# the test below (CS_VHF_FM renamed/retyped to CS_DAB_NEW).
CHANNELS_HTML_TR = [
    'Station Client Type Name RadioDNS entry / Url DNS Authoritative FQDN Services',
    'Classical Station default amss CS_AMSS 4001.amss.radiodns.org. classicalstation.standalone.radio.ebu.io\nEPG standalone.ebu.io\nSPI standalone.ebu.io\nEdit Delete',
    'Classical Station default dab CS_DAB 0.4001.43e1.fe1.dab.radiodns.org. classicalstation.standalone.radio.ebu.io\nEPG standalone.ebu.io\nSPI standalone.ebu.io\nEdit Delete',
    'Classical Station default drm CS_DRM 4001.drm.radiodns.org. classicalstation.standalone.radio.ebu.io\nEPG standalone.ebu.io\nSPI standalone.ebu.io\nEdit Delete',
    'Classical Station default dab CS_DAB_NEW 2.4002.43e2.fe1.dab.radiodns.org. classicalstation.standalone.radio.ebu.ioEPG standalone.ebu.ioSPI standalone.ebu.ioEdit Delete',
    'Classical Station default hd CS_HD_RADIO 0eaff.031.hd.radiodns.org. classicalstation.standalone.radio.ebu.io\nEPG standalone.ebu.io\nSPI standalone.ebu.io\nEdit Delete',
    'Classical Station default id CS_IP http://server/stream classicalstation.standalone.radio.ebu.io Edit Delete',
    'Classical Station CNN id CS_IP_2 http://server/stream/ouiiiiii classicalstation.standalone.radio.ebu.io Edit Delete'
]
# Expected MySQL rows (columns space-joined) after the edition.
CHANNELS_MYSQL_TR = [
    ' '.join(['CS_DAB_NEW', 'None', '81', '43e2', 'None', 'None', 'None', '2', 'None', '4002', 'None', 'dab', 'None', 'audio/aac', 'None', 'None']),
    ' '.join(['CS_DAB', 'None', '81', '43e1', 'None', 'None', 'None', '0', 'None', '4001', 'None', 'dab', 'None', 'audio/mpeg', 'None', 'None']),
    ' '.join(['CS_DRM', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', '4001', 'None', 'drm', 'None', 'None', 'None', 'None']),
    ' '.join(['CS_AMSS', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', '4001', 'None', 'amss', 'None', 'None', 'None', 'None']),
    ' '.join(['CS_HD_RADIO', '031', 'None', 'None', 'None', 'None', 'None', 'None', 'None', 'None', '0EAFF', 'hd', 'None', 'None', 'None', 'None']),
    ' '.join(['CS_IP', 'None', 'None', 'None', 'classicalstation.standalone.radio.ebu.io', 'None', 'None', 'None', 'ebu1standalone', 'None', 'None', 'id', '200', 'audio/mpeg', 'http://server/stream', 'None']),
    ' '.join(['CS_IP_2', 'None', 'None', 'None', 'classicalstation.standalone.radio.ebu.io', 'None', 'None', 'None', 'ebu1standalone', 'None', 'None', 'id', '200', 'audio/mpeg', 'http://server/stream/ouiiiiii', '2']),
]
@pytest.mark.run(order=10)
def test_channel_edition(stack_setup, browser_setup):
    """Edit channel id 1 (CS_VHF_FM -> CS_DAB_NEW, type vhf -> dab) through
    the UI, then verify the HTML table, the MySQL rows and SI.xml."""
    db = stack_setup
    driver = browser_setup

    driver.get(TEST_PROXY_URL + "channels/edit/1")
    # The form must be pre-filled with the channel's current values.
    assert Select(driver.find_element_by_name("station")).first_selected_option.get_attribute('value') == "1"
    assert driver.find_element_by_name("name").get_attribute("value") == "CS_VHF_FM"
    assert Select(driver.find_element_by_name("fk_client")).first_selected_option.text.strip() == "default"
    assert Select(driver.find_element_by_name("type_id")).first_selected_option.text.strip() == "VHF/FM"
    assert Select(driver.find_element_by_name("ecc_id")).first_selected_option.text.strip() == "France (FR) [FE1]"  # CH 4EI
    assert driver.find_element_by_name("pi").get_attribute('value') == "C00F"
    assert driver.find_element_by_name("frequency").get_attribute('value') == "00917"

    # Rename the channel and switch it to a DAB channel.
    clear_input(driver, "[name=name]")
    driver.find_element_by_name("name").send_keys("CS_DAB_NEW")
    driver.find_element_by_id("type").find_element_by_css_selector("option[value=dab]").click()
    driver.find_element_by_name("eid").send_keys("43e2")
    driver.find_element_by_name("sid").send_keys("4002")
    driver.find_element_by_name("scids").send_keys("2")
    driver.find_element_by_name("mime_type").send_keys("audio/aac")
    driver.find_element_by_css_selector("input[type=submit][value=Save]").click()

    # Check entered data
    channel_tr = list(map(lambda x: x.text, driver
                          .find_element_by_id("radiodns-channel-table")
                          .find_elements_by_css_selector("tr")))
    assert compare_lists(channel_tr, CHANNELS_HTML_TR)

    # Check DB
    result = db.engine.execute(text(CHANNELS_MSQL_QUERY))
    assert result.rowcount == 7
    station_mysql_tr = []
    for row in sql_alchemy_result_to_list(result):
        # Space-join the columns so rows compare against CHANNELS_MYSQL_TR.
        station_mysql_tr.append(reduce(lambda x, y: str(x) + " " + str(y), row))
    assert compare_lists(station_mysql_tr, CHANNELS_MYSQL_TR, True)

    # Check XML
    res = requests.get(TEST_RADIO_DNS_URL + "radiodns/spi/3.1/SI.xml")
    assert res.status_code == 200
    bearers = ET.fromstring(res.text).findall(".//{http://www.worlddab.org/schemas/spi/31}bearer")
    assert len(bearers) == 6
    assert bearers[0].attrib["id"] == "amss:4001"
    assert bearers[0].attrib["cost"] == "100"
    assert bearers[1].attrib["id"] == "dab:fe1.43e1.4001.0"
    assert bearers[1].attrib["cost"] == "20"
    assert bearers[1].attrib["mimeValue"] == "audio/mpeg"
    assert bearers[2].attrib["id"] == "dab:fe1.43e2.4002.2"
    assert bearers[2].attrib["cost"] == "50"
    assert bearers[2].attrib["mimeValue"] == "audio/aac"
    assert bearers[3].attrib["id"] == "drm:4001"
    assert bearers[3].attrib["cost"] == "100"
    assert bearers[4].attrib["id"] == "hd:031.0eaff"
    assert bearers[4].attrib["cost"] == "100"
    assert bearers[5].attrib["id"] == "http://server/stream"
    assert bearers[5].attrib["cost"] == "100"
    assert bearers[5].attrib["offset"] == "2000"
    assert bearers[5].attrib["mimeValue"] == "audio/mpeg"
    assert bearers[5].attrib["bitrate"] == "200"
| {
"repo_name": "ebu/radiodns-plugit",
"path": "tests/tests/test_channel_edition.py",
"copies": "1",
"size": "5928",
"license": "bsd-3-clause",
"hash": -3110348555499969000,
"line_mean": 62.0638297872,
"line_max": 217,
"alpha_frac": 0.6643049933,
"autogenerated": false,
"ratio": 2.89453125,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40588362432999997,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import pytest
from robot_server.service.json_api.errors import ErrorResponse
def errors_wrapper(d):
    """Wrap a single error object in a JSON-API style errors envelope."""
    return dict(errors=[d])
# One minimal error object per optional JSON-API error field.
valid_error_objects = [
    {'id': 'abc123'},
    {'status': '404'},
    {'title': 'Something went wrong'},
    {'detail': "oh wow, there's a few things we messed up there"},
    {'meta': {'num_errors_today': 10000}},
    {'links': {'self': {'href': '/my/error-info?code=1005'}}},
    {'source': {'pointer': '/data/attributes/price'}},
]
# Each object wrapped into a full response payload; consumed once by the
# parametrised test below (map returns a one-shot iterator).
valid_error_responses = map(errors_wrapper, valid_error_objects)
@pytest.mark.parametrize('error_response', valid_error_responses)
def test_valid_error_response_fields(error_response):
    """Each single-field error payload round-trips through ErrorResponse."""
    validated = ErrorResponse(**error_response)
    assert validated.dict(exclude_unset=True) == error_response
# Merge every single-field object into one error carrying all fields at once.
error_with_all_fields = reduce(
    lambda acc, d: {**acc, **d}, valid_error_objects, {}
)
def test_error_response_with_all_fields():
    """An error object with every field populated round-trips unchanged."""
    error_response = errors_wrapper(error_with_all_fields)
    validated = ErrorResponse(**error_response)
    assert validated.dict(exclude_unset=True) == error_response
def test_empty_error_response_valid():
    """An empty errors list is a valid response."""
    error_response = {'errors': []}
    validated = ErrorResponse(**error_response)
    assert validated.dict(exclude_unset=True) == error_response
| {
"repo_name": "OpenTrons/opentrons-api",
"path": "robot-server/tests/service/json_api/test_errors.py",
"copies": "2",
"size": "1298",
"license": "apache-2.0",
"hash": 4259104373867266000,
"line_mean": 27.8444444444,
"line_max": 66,
"alpha_frac": 0.6825885978,
"autogenerated": false,
"ratio": 3.5176151761517613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5200203773951761,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import tensorflow as tf
from tensorflow.contrib.layers import batch_norm
from enum import Enum
class Phase(Enum):
    """Model phase: training, validation or prediction."""

    train = 1
    validation = 2
    predict = 3
class Layer(Enum):
    """Kinds of input layers consumed by the parser model."""

    token = 1
    tag = 2
    deprel = 3
    feature = 4
class ParseModel:
def __init__(
self,
config,
shapes):
batch_size = None
# Are we training or not?
self._is_training = tf.placeholder(tf.bool, [], "is_training")
# Labels for training and validation.
self._targets = tf.placeholder(tf.int32, batch_size, "targets")
self._embeds = tf.placeholder(tf.float32, [batch_size, shapes['embed_size']], "embeds")
n_tokens = int(shapes['tokens'])
self._tokens = tf.placeholder(tf.int32, [batch_size, n_tokens],
"tokens")
n_tags = int(shapes['tags'])
self._tags = tf.placeholder(tf.int32, [batch_size, n_tags], "tags")
n_deprels = int(shapes['deprels'])
self._deprels = tf.placeholder(
tf.int32, [batch_size, n_deprels], "deprels")
n_features = int(shapes['features'])
self._features = tf.placeholder(
tf.int32, [batch_size, n_features], "features")
# For dependency relations, we train a separate layer, which could be seen as an
# embeddings layer.
n_deprel_embeds = int(shapes["deprel_embeds"])
with tf.device("/cpu:0"):
deprel_embeds = tf.get_variable(
"deprel_embed", [
n_deprel_embeds, config.deprel_embed_size])
deprel_input = tf.nn.embedding_lookup(deprel_embeds, self._deprels)
deprel_input = tf.reshape(deprel_input, [tf.shape(self._deprels)[
0], self._deprels.shape[1] * deprel_embeds.shape[1]])
# Features are converted to a one-hot representation.
n_features = int(shapes["n_features"])
features = tf.one_hot(self._features, n_features, axis=-1)
features = tf.contrib.layers.flatten(features)
inputs = tf.concat([self.embeds,
deprel_input,
features],
1,
name="concat_inputs")
with tf.variable_scope("input_norm"):
inputs = tf.layers.batch_normalization(
inputs, scale=True, momentum=0.98, training=self.is_training, fused=True)
if config.keep_prob_input < 1:
inputs = tf.contrib.layers.dropout(
inputs,
keep_prob=config.keep_prob_input,
is_training=self.is_training)
hidden_w = tf.get_variable(
"hidden_w", [
inputs.get_shape()[1], config.hidden_size])
hidden_b = tf.get_variable("hidden_b", [config.hidden_size])
hidden = tf.matmul(inputs, hidden_w) + hidden_b
hidden = tf.nn.relu(hidden)
with tf.variable_scope("hidden_norm"):
hidden = tf.layers.batch_normalization(
hidden, scale=True, momentum=0.97, training=self.is_training, fused=True)
if config.keep_prob < 1:
hidden = tf.contrib.layers.dropout(
hidden, keep_prob=config.keep_prob, is_training=self.is_training)
n_labels = int(shapes["n_labels"])
output_w = tf.get_variable("output_w", [config.hidden_size, n_labels])
output_b = tf.get_variable("output_b", [n_labels])
logits = tf.add(tf.matmul(hidden, output_w), output_b, name="logits")
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=self._targets)
self._loss = loss = tf.reduce_sum(losses, name="loss")
_, labels = tf.nn.top_k(logits)
labels = tf.reshape(labels, [-1])
correct = tf.equal(self._targets, labels)
self._accuracy = tf.divide(
tf.reduce_sum(
tf.cast(
correct, tf.float32)), tf.cast(
tf.shape(
self._targets)[0], tf.float32), name="accuracy")
self._lr = tf.placeholder(tf.float32, [], "lr")
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self._train_op = tf.train.AdagradOptimizer(
self.lr).minimize(loss, name="train")
@property
def accuracy(self):
return self._accuracy
    @property
    def correct(self):
        # NOTE(review): the visible graph construction binds a local `correct`
        # tensor but never assigns self._correct; accessing this property may
        # raise AttributeError — confirm against the full __init__.
        return self._correct
    @property
    def deprels(self):
        """Dependency-relation input tensor (assigned during graph construction, not visible here)."""
        return self._deprels
    @property
    def embeds(self):
        """Input embedding tensor concatenated into the network input (see constructor)."""
        return self._embeds
    @property
    def is_training(self):
        """Boolean flag controlling batch-norm and dropout behaviour."""
        return self._is_training
    @property
    def features(self):
        """Integer feature ids; one-hot encoded in the constructor."""
        return self._features
    @property
    def lr(self):
        """Scalar float32 learning-rate placeholder, fed per training step."""
        return self._lr
    @property
    def tags(self):
        """Tag input tensor (assigned during graph construction, not visible here)."""
        return self._tags
    @property
    def tokens(self):
        """Token input tensor (assigned during graph construction, not visible here)."""
        return self._tokens
    @property
    def tag_embeds(self):
        """Tag embedding matrix (assigned during graph construction, not visible here)."""
        return self._tag_embeds
    @property
    def token_embeds(self):
        """Token embedding matrix (assigned during graph construction, not visible here)."""
        return self._token_embeds
    @property
    def loss(self):
        """Summed sparse softmax cross-entropy loss over the batch."""
        return self._loss
    @property
    def train_op(self):
        """Adagrad minimisation op (runs after batch-norm update ops)."""
        return self._train_op
    @property
    def targets(self):
        """Integer target labels used by the loss and accuracy ops."""
        return self._targets
| {
"repo_name": "danieldk/dpar",
"path": "dpar-utils/tensorflow/model.py",
"copies": "1",
"size": "5346",
"license": "apache-2.0",
"hash": -7279705318816222000,
"line_mean": 29.375,
"line_max": 95,
"alpha_frac": 0.5658436214,
"autogenerated": false,
"ratio": 3.810406272273699,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4876249893673699,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import time
from pebble import ProcessPool
from multiprocessing import cpu_count
from os.path import isdir, join, abspath
from glob import glob
import h5py
from datatypes.objects import Molecule, RISM
from operators.charges.ambercharges import AmberCharges
from operators.converters.select_top_conformer import SelectTopConformer
from operators.geometry.rdkitGeometry import RdKitGeometry
from operators.rism3d.rism3d import RISM3DCalculator
from processing.hdf5manage import lock_file, unlock_file, set_prefix, set_failed
def merge_dicts(*dict_args):
    """Shallow-merge any number of dicts into one new dict.

    Later arguments take precedence on key collisions; the inputs
    themselves are left untouched.
    """
    return {key: value
            for mapping in dict_args
            for key, value in mapping.items()}
class SelectTopAndCalcAmberCharges():
    """Pipeline step: pick the top conformer, then compute Amber charges on it."""

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        # Build the charge calculator first (matches original construction order),
        # then feed it the kwargs produced by the conformer selection step.
        charges = AmberCharges(**kwargs)
        selected = SelectTopConformer()(**kwargs)
        return charges(**selected)
class RISMProcessor():
    """Pipeline step: run a 3D-RISM calculation per PDB conformer and merge the results."""

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        calculator = RISM3DCalculator(kwargs['xvvfile'])
        prmtop = kwargs['prmtop']
        # Fold each per-conformer RISM result into a single merged object
        # (left fold starting from None, like the original reduce).
        merged = None
        for pdb in kwargs['pdbs']:
            result = RISM(**calculator(**{"pdb": pdb, "prmtop": prmtop}))
            merged = result.merge(merged)
        kwargs.update({"rism": merged})
        return kwargs
# Glob suffix patterns selecting which HDF files each stage consumes.
extensions_dict = {"geometry": "???", "charges": "g??", "rism": "gc?"}
# Prefixes written once a stage completes (see set_prefix in main_entrance_point).
tmp_dict = {"geometry": "gzz", "charges": "gcz", "rism": "gcr"} #DelMe
# Single-character lock markers used when renaming files per stage.
locks_dict = {"geometry": "g", "charges": "c", "rism": "r"}
# Per-task timeout in seconds for each pooled stage.
timeout_dict = {"geometry": 1200, "charges": 1200, "rism": 1200}
# Maps a stage name to the callable that performs it.
operators_dict = {"geometry":RdKitGeometry,
                  "charges":SelectTopAndCalcAmberCharges,
                  "rism":RISMProcessor}
def set_geometry_parser(root):
    """Register the 'geometry' sub-command on an argparse sub-parsers object."""
    parser = root.add_parser("geometry")
    parser.add_argument('--number_of_conformers', type=int, required=True)
    parser.add_argument('--database', type=str, required=True)
    parser.set_defaults(action='geometry')
def set_charges_parser(root):
    """Register the 'charges' sub-command on an argparse sub-parsers object."""
    parser = root.add_parser("charges")
    parser.add_argument('--database', type=str, required=True)
    parser.set_defaults(action='charges')
def set_rism_parser(root):
    """Register the 'rism' sub-command on an argparse sub-parsers object."""
    parser = root.add_parser("rism")
    parser.add_argument('--database', type=str, required=True)
    parser.add_argument('--xvvfile', type=str, required=True)
    # rism_parser.add_argument('--number', type=int)  # Not implemented yet
    parser.set_defaults(action='rism')
def main_entrance_point(kwargs):
    """Worker entry point: apply one pipeline stage to a single HDF molecule file.

    kwargs must contain 'file' and 'action'; the remaining keys are forwarded
    to the stage operator. Runs inside a pooled worker process.
    """
    #print(kwargs)
    file = kwargs['file']
    # Instantiate the stage operator (geometry/charges/rism) for this task.
    operator = operators_dict[kwargs['action']](**kwargs)
    # Rename the file with the stage's lock marker so concurrent runs skip it.
    file = lock_file(file,locks_dict[kwargs['action']])
    args = {}
    args.update(kwargs)
    #try:
    with h5py.File(file,'r+') as f:
        molecule = Molecule.load_from_hdf(f)
        args.update(molecule.data)
        # Run the operator and persist the updated molecule in place.
        molecule = Molecule(**operator(**args))
        molecule.dump_to_hdf(f)
    file = unlock_file ( file )
    # Mark the stage as done by rewriting the filename prefix.
    set_prefix ( file, tmp_dict[kwargs['action']] )
    return None
    # NOTE(review): the except path below is disabled, so any worker error
    # propagates and the file stays locked — presumably intentional while
    # debugging; confirm before re-enabling.
    #except Exception as e:
    #    print(e)
    #    file = unlock_file (file)
    #    set_failed(file,locks_dict[kwargs['action']])
def process(**kwargs):
    """Run one pipeline stage over every eligible HDF file in the database dir.

    kwargs must contain 'action' and 'database'; stage-specific keys
    (e.g. 'xvvfile') are passed through to each worker via merge_dicts.
    """
    # Pebble's future raises concurrent.futures.TimeoutError, which is only an
    # alias of the builtin TimeoutError from Python 3.11 on; on older versions
    # the original `except TimeoutError` would never catch it and the loop
    # would crash instead of skipping the timed-out task.
    from concurrent.futures import TimeoutError as FuturesTimeoutError

    assert isdir(kwargs['database'])
    pattern = join(kwargs['database'], "*{}.hdf".format(extensions_dict[kwargs['action']]))
    files = [abspath(file) for file in glob(pattern)]
    assert len(files) > 0
    # One kwargs dict per file; 'file' tells the worker which HDF to process.
    args = [merge_dicts({'file': file}, kwargs) for file in files]
    with ProcessPool(max_workers=cpu_count()) as pool:
        future = pool.map(main_entrance_point, args, timeout=timeout_dict[kwargs['action']])
        iterator = future.result()
        while True:
            try:
                next(iterator)
            except StopIteration:
                break
            except (FuturesTimeoutError, TimeoutError):
                # Worker exceeded the per-stage timeout; skip it and continue.
                #print("function took longer than %d seconds" % error.args[1])
                pass
            time.sleep(0.5)  # throttle result consumption (original behaviour kept)
| {
"repo_name": "sergsb/clever",
"path": "processing/core.py",
"copies": "1",
"size": "4255",
"license": "apache-2.0",
"hash": -1789431450703733800,
"line_mean": 35.0593220339,
"line_max": 98,
"alpha_frac": 0.644653349,
"autogenerated": false,
"ratio": 3.4791496320523305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46238029810523307,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
import wx
from vistas.core.observers.camera import CameraObservable
from vistas.core.preferences import Preferences
from vistas.ui.controllers.project import ProjectChangedEvent
from vistas.ui.controls.viewer_panel import ViewerPanel
from vistas.ui.events import EVT_CAMERA_MODE_CHANGED, EVT_CAMERA_SYNC, CameraSyncEvent
class ViewerContainerPanel(wx.Panel):
    """
    A container panel that provides access to all active viewer panels and handles adding, removing and resizing
    window rows and columns. Also provides access for synchronizing ViewerPanels when mouse events occur.
    """

    class Row:
        # One horizontal strip of viewers. `viewers` is a fixed-size slot list
        # (length == num_columns) where unused slots are None.
        def __init__(self):
            self.viewers = []
            self.num_viewers = 0
            self.prev_row = None

    def __init__(self, parent, id):
        super().__init__(parent, id)
        self.num_viewers = 0
        self.wireframe = False
        self.selection_view = False
        self.rows = []
        # Viewers per row comes from user preferences (default 2).
        self.num_columns = Preferences.app().get('viewer_itemsperrow', 2)
        self.AddViewer()

        # Events
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(EVT_CAMERA_MODE_CHANGED, self.OnCameraModeChanged)
        self.Bind(EVT_CAMERA_SYNC, self.OnCameraSyncEvent)
        self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)

    def OnDestroy(self, event):
        self.Unbind(EVT_CAMERA_MODE_CHANGED)

    def AddViewer(self, new_viewer=None):
        """Append a viewer (creating one if None) and rebalance sizes and neighbors."""
        # Add new row if necessary
        if self.num_viewers % self.num_columns == 0:
            self.AddRow()
        last_row = self.rows[-1]

        # Create new viewer
        if new_viewer is None:
            new_viewer = ViewerPanel(self, wx.ID_ANY)
        new_viewer.HideResizeAreas()
        new_viewer.ResetNeighbors()
        index = last_row.num_viewers
        last_row.viewers[index] = new_viewer
        last_row.num_viewers += 1
        self.num_viewers += 1

        # Size proportions for the new viewer: each viewer stores its width and
        # height as fractions of the container.
        new_viewer.width = 1 / last_row.num_viewers
        new_viewer.height = 1 / len(self.rows)
        for viewer in last_row.viewers[:index]:
            viewer.width *= index * (1 / last_row.num_viewers)

        # Set neighbors (west/east within the row, north/south to the row above)
        if last_row.num_viewers > 1:
            new_viewer.SetNeighbor(last_row.viewers[index - 1], ViewerPanel.WEST)
            last_row.viewers[index - 1].SetNeighbor(new_viewer, ViewerPanel.EAST)
        if last_row.prev_row is not None and last_row.prev_row.num_viewers >= last_row.num_viewers:
            for viewer in last_row.prev_row.viewers:
                new_viewer.SetNeighbor(viewer, ViewerPanel.NORTH)
                viewer.SetNeighbor(new_viewer, ViewerPanel.SOUTH)
        self.UpdateViewerSizes()

        observable = CameraObservable.get()
        if observable.is_sync:
            # Re-sync so the new viewer's camera joins the global interactor.
            self.SyncAllCameras(False, False)
            self.SyncAllCameras(True, True)
        new_viewer.ResetCameraInteractor()

    def RemoveViewer(self, viewer=None):
        """Destroy a viewer (the last one when None) and rebuild the layout."""
        # Can't remove the last viewer
        if self.num_viewers < 2:
            return
        if viewer is None:
            row = self.rows[-1]
            viewer = row.viewers[row.num_viewers - 1]
        for row in self.rows:
            if viewer in row.viewers:
                index = row.viewers.index(viewer)
                viewer = row.viewers[index]
                row.viewers[index] = None
                viewer.legend_window.Destroy()
                viewer.Destroy()
                self.num_viewers -= 1
                self.Rebuild()
                return

    def RefreshAllViewers(self):
        for row in self.rows:
            for viewer in row.viewers[:row.num_viewers]:
                viewer.gl_canvas.Refresh()

    def UpdateViewerSizes(self):
        # Position each viewer just after its west/north neighbors, then scale
        # by its fractional width/height of the container size.
        for row in self.rows:
            for viewer in row.viewers[:row.num_viewers]:
                x = 0
                y = 0
                neighbor = viewer.GetNeighbor(ViewerPanel.WEST)
                if neighbor:
                    x = neighbor.GetPosition().x + neighbor.GetSize().GetWidth()
                neighbor = viewer.GetNeighbor(ViewerPanel.NORTH)
                if neighbor:
                    y = neighbor.GetPosition().y + neighbor.GetSize().GetHeight()
                viewer.SetSize(
                    x, y, self.GetSize().GetWidth() * viewer.width,
                    self.GetSize().GetHeight() * viewer.height
                )
                viewer.gl_canvas.camera_controls.reposition()

    def OnSize(self, event):
        self.UpdateViewerSizes()

    def Rebuild(self):
        # Re-add every surviving viewer so rows, sizes and neighbors are
        # recomputed from scratch.
        rows = self.rows
        self.rows = []
        self.num_viewers = 0
        for row in rows:
            for viewer in (x for x in row.viewers if x is not None):
                self.AddViewer(viewer)

    def AddRow(self):
        """Append an empty Row, shrinking existing viewers' heights proportionally."""
        new_row = self.Row()
        new_row.viewers = list(None for _ in range(self.num_columns))
        if self.rows:
            new_row.prev_row = self.rows[-1]
        for row in self.rows:
            for viewer in row.viewers[:row.num_viewers]:
                viewer.height *= len(self.rows) * (1 / (len(self.rows) + 1))
        self.rows.append(new_row)

    def ProjectChanged(self, event):
        """On project reset collapse to a single refreshed viewer; otherwise forward."""
        if event.change == ProjectChangedEvent.PROJECT_RESET:
            while self.num_viewers > 1:
                self.RemoveViewer()
            self.GetMainViewerPanel().RefreshScenes()
            self.GetMainViewerPanel().UpdateLegend()
            self.GetMainViewerPanel().UpdateOverlay()
        else:
            for row in self.rows:
                for i in range(row.num_viewers):
                    row.viewers[i].ProjectChanged(event)

    def GetMainViewerPanel(self):
        return self.rows[0].viewers[0]

    def GetAllViewerPanels(self):
        # Flatten the occupied slots of every row into one list.
        return reduce(lambda x, y: x + y, (row.viewers[:row.num_viewers] for row in self.rows))

    def ToggleWireframe(self):
        self.wireframe = not self.wireframe
        for viewer in self.GetAllViewerPanels():
            viewer.camera.wireframe = self.wireframe
            viewer.camera.scene.render_bounding_boxes = self.wireframe
            viewer.Refresh()

    def ToggleSelectionView(self):
        self.selection_view = not self.selection_view
        for viewer in self.GetAllViewerPanels():
            viewer.camera.selection_view = self.selection_view
            viewer.Refresh()

    def OnCameraModeChanged(self, event):
        if CameraObservable.get().is_sync:
            self.SyncAllCameras(True, False)

    def OnCameraSyncEvent(self, event: CameraSyncEvent):
        # Propagate one canvas's camera interaction to every other canvas.
        if CameraObservable.get().is_sync:
            canvas = event.GetEventObject()
            for panel in self.GetAllViewerPanels():
                if canvas is not panel.gl_canvas:
                    interactor = panel.gl_canvas.camera_interactor
                    interactor.sync(event.interactor)

    def SyncAllCameras(self, do_sync, save_state):
        """Enable/disable global camera sync, hiding per-viewer controls while synced."""
        observable = CameraObservable.get()
        if do_sync:
            interactor = self.GetMainViewerPanel().gl_canvas.camera_interactor
            observable.sync_camera(interactor, save_state)
            for panel in self.GetAllViewerPanels():
                if panel is not self.GetMainViewerPanel():
                    panel.gl_canvas.camera_controls.hide()
        else:
            main_panel_interactor = observable.global_interactor
            observable.unsync_camera()
            if main_panel_interactor is not None:
                self.GetMainViewerPanel().gl_canvas.camera_interactor = main_panel_interactor
            for panel in self.GetAllViewerPanels():
                if panel is not None and panel is not self.GetMainViewerPanel():
                    panel.gl_canvas.camera_controls.show()
| {
"repo_name": "VISTAS-IVES/pyvistas",
"path": "source/vistas/ui/controls/viewer_container_panel.py",
"copies": "1",
"size": "7749",
"license": "bsd-3-clause",
"hash": -8581602621986710000,
"line_mean": 34.7096774194,
"line_max": 112,
"alpha_frac": 0.5972383533,
"autogenerated": false,
"ratio": 3.8590637450199203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.495630209831992,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
class ConfigurationGenerator:
    """Generates per-device configuration blocks in dependency order, plus a topology JSON."""

    def __init__(self):
        pass

    def _check_circle(self, function_list, device):
        # If no function has an empty dependency list, the dependency graph has
        # no possible starting point, i.e. it must contain a cycle.
        in_degrees = []
        for func in function_list:
            in_degrees.append(len(getattr(device, func).dependencies))
        if 0 in in_degrees:
            return False
        else:
            return True

    def generate(self, devices):
        output = []  # Configuration for all devices.
        for device in devices:
            loc_output = []  # Configuration for this single device.
            function_list = getattr(device, "support_functions", None)
            if function_list is not None:
                temp_list = function_list.copy()  # Used as a work queue.
                if self._check_circle(function_list, device):
                    raise ValueError(f"exit circle in function dependencies of model <{device.model_type}>'")
                while temp_list:
                    func = temp_list.pop(0)
                    func_instance = getattr(device, func)
                    dependencies = func_instance.dependencies
                    # Check dependency constraints.
                    if dependencies:
                        if not loc_output:  # Nothing configured yet, so dependencies cannot be satisfied.
                            temp_list.append(func)
                        else:  # Something is configured; verify every dependency is satisfied.
                            tag = True  # Flag: are all dependencies satisfied?
                            for dependency in dependencies:
                                # Dependency not among the configured entries: clear the flag.
                                if not list(filter(lambda key: dependency == key, reduce(lambda a, b: list(a) + list(b),
                                                                                        map(lambda item: item.keys(),
                                                                                            loc_output)))):
                                    tag = False
                            if not tag:
                                temp_list.append(func)
                            else:
                                loc_output.append({func: func_instance.generate_conf()})
                    else:
                        loc_output.append({func: func_instance.generate_conf()})
            output.append({device.name: loc_output})
        return output

    def genarate_topo(self, devices):
        # NOTE(review): method name is misspelled ("genarate"); kept for compatibility.
        json = {"nodes": [], "links": []}
        for device in devices:
            loc_json = json
            # todo: still needs topological sorting and tree search
            if getattr(device, "parent_id", None):
                print("----------------")
                print(device.parent_id)
                loc_json = self._find_node_in_json(device.parent_id, json["nodes"])
                print(loc_json)
            if loc_json.get("nodes", None) is None:
                loc_json.update({"nodes": []})
            loc_json["nodes"].append({
                "id": device.name,
                "label": device.model_type,
                "group": "#ccc",
                "attrs": device.get_attrs_json(),
            })
            for link in device.links.values():
                if loc_json.get("links", None) is None:
                    loc_json.update({"links": []})
                loc_json["links"].append({
                    "source": device.name,
                    "target": link["to"].name,
                    "label": link["link_type"],
                    "attrs": {
                        "name": link["name"],
                        "id": link["id"],
                        "usage": link["usage"]
                    }
                })
        return json

    def _find_node_in_json(self, node_id, json):
        # Recursive search for the node whose attrs.id matches node_id; returns
        # (lazily creating) that node's "children" container.
        for item in json:
            print(item)
            if item["attrs"]["id"] == node_id:
                if item.get("children", None) is None:
                    item.update({"children": {}})
                return item["children"]
            elif item.get("children", None) is not None:
                return self._find_node_in_json(node_id, item["children"]["nodes"])
        return None
| {
"repo_name": "ttaanngg/petal",
"path": "app/generator.py",
"copies": "1",
"size": "4208",
"license": "bsd-3-clause",
"hash": 3498471126078858000,
"line_mean": 42.0430107527,
"line_max": 120,
"alpha_frac": 0.4340329835,
"autogenerated": false,
"ratio": 4.130030959752322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001452679708211934,
"num_lines": 93
} |
from functools import reduce
class ConfigurationGenerator:
    """Generates per-device configuration (or revocation) blocks plus a topology JSON."""

    def __init__(self):
        pass

    def _check_circle(self, functions_list, device):
        # If no function has an empty dependency list, the dependency graph has
        # no possible starting point, i.e. it must contain a cycle.
        in_degrees = []
        for func in functions_list:
            in_degrees.append(len(getattr(device, func).dependencies))
        if 0 in in_degrees:
            return False
        else:
            return True

    def generate(self, devices):
        output = []  # Configuration for all devices.
        for device in devices:
            print(device.id)
            loc_output = []  # Configuration for this single device.
            functions_list = getattr(device, "functions_list", None)
            if functions_list is not None:
                print(device.id, functions_list)
                for item in device.functions_list:
                    # print(item._entities)
                    print(id(item))
                    print("---------generate-----------------")
                    print(item.tag)
                    # item.tag selects between revoke and normal configuration.
                    if item.tag:
                        test = item.generate_revoke_conf()
                    else:
                        test = item.generate_conf()
                    # print("222222222")
                    # print(item)
                    # print(dir(item))
                    # print(item._entities)
                    # print("222222222")
                    loc_output.append(test)
                # --- Former dependency-ordered implementation, disabled: ---
                # temp_list = functions_list.copy()  # used as a work queue
                # if self._check_circle(functions_list, device):
                #     raise ValueError(f"exit circle in function dependencies of model <{device.model_type}>'")
                # while temp_list:
                #     func = temp_list.pop(0)
                #     func_instance = getattr(device, func)
                #     dependencies = func_instance.dependencies
                #     # check dependency constraints
                #     if dependencies:
                #         if not loc_output:  # nothing configured yet, dependencies cannot be satisfied
                #             temp_list.append(func)
                #         else:  # something configured; check all dependencies are satisfied
                #             tag = True  # flag: are all dependencies satisfied?
                #             for dependency in dependencies:
                #                 # dependency not among the configured entries: clear the flag
                #                 if not list(filter(lambda key: dependency == key, reduce(lambda a, b: list(a) + list(b),
                #                                                                         map(lambda item: item.keys(),
                #                                                                             loc_output)))):
                #                     tag = False
                #             if not tag:
                #                 temp_list.append(func)
                #             else:
                #                 loc_output.append({func: func_instance.generate_conf()})
                #     else:
                #         loc_output.append({func: func_instance.generate_conf()})
            output.append({device.name: loc_output})
            # print("1111111111111111111111")
            # print(output)
        return output

    def genarate_topo(self, devices):
        # NOTE(review): method name is misspelled ("genarate"); kept for compatibility.
        json = {"nodes": [], "links": []}
        for device in devices:
            loc_json = json
            # todo: still needs topological sorting and tree search
            if getattr(device, "parent_id", None):
                print("----------------")
                print(device.parent_id)
                loc_json = self._find_node_in_json(device.parent_id, json["nodes"])
                print(loc_json)
            if loc_json.get("nodes", None) is None:
                loc_json.update({"nodes": []})
            loc_json["nodes"].append({
                "id": device.name,
                "label": device.model_type,
                "group": "#ccc",
                "attrs": device.get_attrs_json(),
            })
            for link in device.links.values():
                if loc_json.get("links", None) is None:
                    loc_json.update({"links": []})
                loc_json["links"].append({
                    "source": device.name,
                    "target": link["to"].name,
                    "label": link["link_type"],
                    "attrs": {
                        "name": link["name"],
                        "id": link["id"],
                        "usage": link["usage"]
                    }
                })
        return json

    def _find_node_in_json(self, node_id, json):
        # Recursive search for the node whose attrs.id matches node_id; returns
        # (lazily creating) that node's "children" container.
        for item in json:
            print(item)
            if item["attrs"]["id"] == node_id:
                if item.get("children", None) is None:
                    item.update({"children": {}})
                return item["children"]
            elif item.get("children", None) is not None:
                return self._find_node_in_json(node_id, item["children"]["nodes"])
        return None
| {
"repo_name": "wenkaiqiu/petal",
"path": "app/generator.py",
"copies": "1",
"size": "5049",
"license": "bsd-3-clause",
"hash": 7921616959501786000,
"line_mean": 42.25,
"line_max": 122,
"alpha_frac": 0.4216394797,
"autogenerated": false,
"ratio": 4.153516295025729,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011960166243894714,
"num_lines": 112
} |
from functools import reduce
class Counter:
    """Tallies successes, failures and skips, remembering the first
    failure and first skip message for later reporting."""

    def __init__(self):
        self.success_count = 0
        self.skip_count = 0
        self.fail_count = 0
        self.first_fail = None
        self.first_skip = None

    def success(self):
        """Record one successful case."""
        self.success_count += 1

    def skip(self, message):
        """Record one skipped case, keeping the earliest skip message."""
        self.skip_count += 1
        self.first_skip = message if self.first_skip is None else self.first_skip

    def fail(self, message):
        """Record one failed case, keeping the earliest failure message."""
        self.fail_count += 1
        self.first_fail = message if self.first_fail is None else self.first_fail

    def results(self):
        """Print a summary line, plus the first failure/skip if any occurred."""
        print("{} success; {} fail; {} skipped".format(
            self.success_count, self.fail_count, self.skip_count))
        if self.first_fail:
            print("first fail: {}".format(self.first_fail))
        if self.first_skip:
            print("first skip: {}".format(self.first_skip))
def remove_duplicates(l):
    "removes duplicates in a list while maintaining insertion order"
    if not l:
        return []
    deduped = []
    for item in l:
        # Linear membership test keeps this working for unhashable items too.
        if item not in deduped:
            deduped.append(item)
    return deduped
| {
"repo_name": "jtauber/greek-inflection",
"path": "utils.py",
"copies": "1",
"size": "1055",
"license": "mit",
"hash": -521518600355458600,
"line_mean": 27.5135135135,
"line_max": 109,
"alpha_frac": 0.5800947867,
"autogenerated": false,
"ratio": 3.7279151943462896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9801583974620283,
"avg_score": 0.0012852012852012853,
"num_lines": 37
} |
from functools import reduce
class IPv4Address(object):
    """A 32-bit IPv4 address, constructible from a dotted-quad string,
    an int, or another IPv4Address (copy)."""

    def __init__(self, addr):
        if isinstance(addr, IPv4Address):
            self._raw = addr._raw
        elif isinstance(addr, str):
            self._raw = ip_to_int(addr)
        elif isinstance(addr, int):
            self._raw = addr
        else:
            raise ValueError('Can\'t convert "%s" to IPv4Address' % repr(addr))

    @property
    def int(self):
        """The address as an unsigned 32-bit integer."""
        return self._raw

    def __str__(self):
        return int_to_ip(self._raw)

    def __repr__(self):
        return '<IPv4Address %s>' % int_to_ip(self._raw)

    def __lt__(self, other):
        # Bug fix: comparisons with foreign types used to raise
        # AttributeError; return NotImplemented so Python can try the
        # reflected operation (and `== other_type` becomes False).
        if not isinstance(other, IPv4Address):
            return NotImplemented
        return self._raw < other._raw

    def __eq__(self, other):
        if not isinstance(other, IPv4Address):
            return NotImplemented
        return self._raw == other._raw

    def __hash__(self):
        # Hash the raw integer directly instead of building the repr string;
        # equal addresses still hash equal.
        return hash(self._raw)
class IPv4Subnet(object):
    """A CIDR subnet such as '10.0.0.0/24'; the base address is masked to the
    network address on construction."""

    def __init__(self, cidr):
        if isinstance(cidr, self.__class__):
            # Copy constructor.
            self._ip = cidr._ip
            self._size = cidr._size
            return
        if '/' in cidr:
            base, size = cidr.split('/', 1)
        else:
            # A bare address is treated as a /32 host route.
            base, size = cidr, 32
        self._size = int(size)
        network = self.__mask(IPv4Address(base).int, self._size)
        self._ip = IPv4Address(network)

    @staticmethod
    def __mask(addr, size):
        """Zero the host bits of *addr* for a prefix of *size* bits."""
        assert isinstance(addr, int)
        return addr & (0xFFFFFFFF << (32 - size))

    def __contains__(self, other):
        prefix = self._size
        network = self.__mask(self._ip.int, prefix)
        if isinstance(other, self.__class__):
            # A larger (shorter-prefix) subnet can never fit inside this one.
            if other._size < prefix:
                return False
            return self.__mask(other._ip.int, prefix) == network
        return self.__mask(IPv4Address(other).int, prefix) == network

    def __getitem__(self, item):
        offset = int(item)
        if offset >= 0:
            return IPv4Address(self._ip.int + offset)
        # Negative indices count back from the broadcast address, so [-1]
        # yields the highest address in the subnet.
        host_mask = 0xFFFFFFFF >> (self._size)
        return IPv4Address((self._ip.int | host_mask) + offset + 1)

    def __str__(self):
        return '%s/%s' % (self._ip, self._size)

    def __repr__(self):
        return '<IPv4Subnet %s>' % self.__str__()

    def __eq__(self, other):
        return self._ip.int == other._ip.int and self._size == other._size

    def __hash__(self):
        return hash(self.__repr__())
def ip_to_int(addr):
    """Convert a dotted-quad string 'a.b.c.d' to its 32-bit integer value.

    Bug fix: validation previously used `assert`, which is stripped under
    `python -O`; malformed input now raises ValueError unconditionally
    (int() already raises ValueError for non-numeric fields).
    """
    fields = addr.split('.')
    if len(fields) != 4:
        raise ValueError('Expected 4 dot-separated fields: %r' % (addr,))
    value = 0
    for field in fields:
        octet = int(field)
        if not 0 <= octet <= 255:
            raise ValueError('Octet out of range in %r' % (addr,))
        value = value * 0x100 + octet
    return value
def int_to_ip(raw):
    """Render a 32-bit integer as a dotted-quad string."""
    octets = []
    for _ in range(4):
        raw, remainder = divmod(raw, 0x100)
        octets.append(str(remainder))
    # Anything left over means the input did not fit in 32 bits.
    assert raw == 0
    octets.reverse()
    return '.'.join(octets)
| {
"repo_name": "sourcesimian/vpn-porthole",
"path": "vpnporthole/ip.py",
"copies": "1",
"size": "2728",
"license": "mit",
"hash": 6528199585641708000,
"line_mean": 26.5555555556,
"line_max": 98,
"alpha_frac": 0.5201612903,
"autogenerated": false,
"ratio": 3.627659574468085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4647820864768085,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
class Solution:
    def nextGreaterElement(self, n: int) -> int:
        """Return the smallest integer greater than n that uses exactly the
        same digits, or -1 if none exists within signed 32-bit range."""
        digits = [int(ch) for ch in str(n)]
        length = len(digits)
        # Walk right-to-left to find the pivot: the last position whose left
        # neighbour is strictly smaller ("534976" -> position of 9).
        pivot = length - 1
        while pivot > 1 and digits[pivot - 1] >= digits[pivot]:
            pivot -= 1
        if pivot == 0 or (pivot == 1 and digits[0] >= digits[1]):
            return -1
        # Among digits[pivot:], find the smallest digit strictly greater than
        # the pivot's left neighbour ("534976" -> 6).
        swap_at = pivot
        for j in range(pivot, length):
            if digits[pivot - 1] < digits[j] < digits[swap_at]:
                swap_at = j
        # Swap them, then sort the suffix ascending ("534976" -> "536479").
        digits[pivot - 1], digits[swap_at] = digits[swap_at], digits[pivot - 1]
        rearranged = digits[:pivot] + sorted(digits[pivot:])
        candidate = int(''.join(str(d) for d in rearranged))
        return candidate if candidate <= 2 ** 31 - 1 else -1
# TESTS
# Each pair is (input n, expected next-greater permutation, or -1 when no
# same-digit integer larger than n fits in signed 32-bit range).
for n, expected in [
    (1, -1),
    (12, 21),
    (21, -1),
    (1234, 1243),
    (54321, -1),
    (111, -1),
    (230241, 230412),
    (534976, 536479),
    (1999999999, -1),
]:
    sol = Solution()
    actual = sol.nextGreaterElement(n)
    print("Next smallest integer of", n, "->", actual)
    assert actual == expected
| {
"repo_name": "l33tdaima/l33tdaima",
"path": "p556m/next_greater_element.py",
"copies": "1",
"size": "1272",
"license": "mit",
"hash": -3162866208163780600,
"line_mean": 28.2093023256,
"line_max": 71,
"alpha_frac": 0.4912420382,
"autogenerated": false,
"ratio": 3.0411622276029058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9032404265802906,
"avg_score": 0,
"num_lines": 43
} |
from functools import reduce
def _list_errors(e):
"""
Returns a list of violated schema fragments and related error messages
:param e: ``jsonschema.exceptions.ValidationError`` instance
"""
error_list = []
for value, error in zip(e.validator_value, e.context):
error_list.append((value, error.message))
if error.context:
error_list += _list_errors(error)
return error_list
class NetJsonConfigException(Exception):
    """
    Root netjsonconfig exception
    """
    def __str__(self):
        # Header line, then one "Against schema ..." section per violated
        # schema fragment collected from the jsonschema error tree.
        header = "%s %s\n" % (self.__class__.__name__, self.details,)
        separator = '\nAgainst schema %s\n%s\n'
        body = ''.join(separator % pair for pair in _list_errors(self.details))
        return header + body
class ValidationError(NetJsonConfigException):
    """
    Error while validating schema
    """
    def __init__(self, e):
        """
        preserve jsonschema exception attributes
        in self.details
        """
        # Keep the short message handy and the full jsonschema error for __str__.
        self.message = e.message
        self.details = e
class ParseError(NetJsonConfigException):
    """
    Error while parsing native configuration
    """
    # No extra state: the class exists so callers can catch parse failures
    # separately from validation failures.
    pass
| {
"repo_name": "openwisp/netconfig-gen",
"path": "netjsonconfig/exceptions.py",
"copies": "2",
"size": "1209",
"license": "mit",
"hash": -5680948303670708000,
"line_mean": 23.6734693878,
"line_max": 74,
"alpha_frac": 0.6112489661,
"autogenerated": false,
"ratio": 4.1833910034602075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5794639969560207,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
def normalize(aL):
    """Capitalise each name in aL: first letter upper-case, the rest lower-case."""
    return map(lambda name: name[0].upper() + name[1:].lower(), aL)
# Demo: normalise a list of mixed-case names.
L = ['adam', 'LISA', 'barT']
print(list(normalize(L)))
def prod(L):
    """Return the product of all numbers in L (L must be non-empty)."""
    return reduce(lambda x, y: x * y, L)
# Demo: product of several factors.
print('3 * 5 * 7 * 9 =', prod([3, 5, 7, 9]))
def str2float(s):
    """Convert a decimal string such as '123.456' to a float without float().

    Raises ValueError if there is no '.' and KeyError for non-digit chars.
    """
    def digit(ch):
        # Raises KeyError for anything that is not a decimal digit.
        return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
                '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}[ch]
    dot = s.index('.')
    stripped = s[:dot] + s[dot + 1:]
    value = 0
    for ch in stripped:
        value = value * 10 + digit(ch)
    # Shift the decimal point back by the number of fractional digits.
    return value / 10 ** (len(stripped) - dot)
# filter builtin demos below; quick power-operator check first.
print(10 ** 5)
def is_odd(n):
    """Return True when n is odd.

    Bug fix: the original returned ``n % 2 == 0`` — i.e. it tested for EVEN
    numbers despite its name; it now matches its name (works for negative n
    too, since Python's % yields a non-negative remainder for modulus 2).
    """
    return n % 2 == 1
# Demo: filter the list with the is_odd predicate.
print(list(filter(is_odd, [1,2,3,4,5,6,7])))
def not_empty(s):
    """Filter predicate: truthy only for strings containing non-whitespace.

    Returns the stripped string (truthy) or a falsy value for None/''/'   '.
    """
    return s and s.strip()
# Demo: drop empty strings from a list.
list(filter(not_empty, ['a','','b']))
# Use filter() to find primes (lazy sieve of Eratosthenes below).
def _odd_iter():
n = 1
while True:
n = n + 2
yield n
def _not_divisible(n):
return lambda x: x % n > 0
def primes():
    """Lazily yield the primes: 2 first, then sieve the odd candidates."""
    yield 2
    candidates = _odd_iter()
    while True:
        prime = next(candidates)
        yield prime
        # Drop every multiple of the prime we just produced.
        candidates = filter(_not_divisible(prime), candidates)
# Print all primes below 1000; primes() is infinite, so break explicitly.
for n in primes():
    if n < 1000:
        print(n)
    else:
        break
# The sorted() builtin: plain sort, key function, and reverse order.
sorted([45, 46,22, 7])
sorted([-14, -13, 65, -6] , key=abs)
sorted([-14, -13, 65, -6] , key=abs, reverse=True)
L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]
def by_name(t):
    """Sort key: the name (first element) of a (name, score) tuple."""
    return t[0]
# Sort the (name, score) pairs alphabetically by name.
L2 = sorted(L, key=by_name)
print(L2)
# Returning functions (closures)
def calc_sum(*args):
    """Eagerly sum all positional arguments."""
    total = 0
    for value in args:
        total += value
    return total
def lazy_sum(*args):
    """Return a zero-argument closure that sums *args* only when called."""
    def sum():
        # Note: the inner name deliberately shadows the builtin sum, as in
        # the original tutorial; the closure captures args.
        total = 0
        for value in args:
            total += value
        return total
    return sum
def count():
    """Late-binding demo: every returned closure reads the shared loop
    variable i, so all three return 9 (3 * 3) when called afterwards.
    The 'bug' is intentional and preserved — it is the lesson."""
    closures = []
    for i in range(1, 4):
        def f():
            return i * i
        closures.append(f)
    return closures
f1, f2, f3 = count()
# All three print 9: each closure reads the shared loop variable i,
# which is already 3 by the time any of them is called.
print(f1())
print(f2())
print(f3())
for i in range(1, 4):
    print(i)
# Anonymous (lambda) functions
list(map(lambda x:x * x, [1, 2, 3, 4, 5, 6, 7, 8, 9]))
# Decorators: a way to add behaviour to functions dynamically at run time.
def now():
    print('2017-04-23')
# A function object carries its name in __name__; assigning it to another
# variable does not change that.
now.__name__
f = now
f.__name__
# Define a decorator that logs each call.
def log(func):
    def wrapper(*args, **kw):
        # Print the callee's name, then delegate with all arguments intact.
        print('call %s():' % func.__name__)
        return func(*args, **kw)
    return wrapper
# Decorated: calling now() first logs the call, then prints the date.
@log
def now():
    print('2017-04-23')
now()
# Decorator that takes an argument: needs one more level of nesting.
def log(text):
    def decorator(func):
        def wrapper(*args, **kw):
            print('%s %s():' % (text, func.__name__))
            return func(*args, **kw)
        return wrapper
    return decorator
# Decorated with an argument: prints "executor now():" before the body.
@log('executor')
def now():
    print('2017')
now()
# The complete decorator: functools.wraps preserves func.__name__ and co.
import functools
def log(func):
    @functools.wraps(func)
    def wrapper(*args, **kw):
        print('call %s():' % func.__name__)
        return func(*args, **kw)
    return wrapper
# Partial functions: pre-fill arguments of an existing callable.
int2 = functools.partial(int, base=2)
print(int2('10000'))
print(int2('10000', base=10))
| {
"repo_name": "zhayangtao/HelloPython",
"path": "python01/PythonFunction.py",
"copies": "1",
"size": "2986",
"license": "apache-2.0",
"hash": 427637658049744450,
"line_mean": 16.1616766467,
"line_max": 98,
"alpha_frac": 0.5132588974,
"autogenerated": false,
"ratio": 2.5796579657965797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.35929168631965797,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
from numpy import uint16
from numpy import bool_
from query_result_list import QueryResultList
from data_record import DataRecord
from has_actions import HasActions
from session import Session
from filterable import Filterable
from has_documents import HasDocuments
from action import Action
from attr_utils import _memoize_attr
class Query(DataRecord, HasActions, Filterable, HasDocuments):
def __init__(self, query_id, topic = None, user = None, condition = None, autocomplete = None, query_text = None, session = None, precision = None):
DataRecord.__init__( self, str(query_id) )
HasDocuments.__init__( self )
self.topic = topic
self.user = user
self.condition = condition
self.autocomplete = bool_(autocomplete)
self.query_text = query_text
self.session = session
self.precision = precision
self.result_list = QueryResultList(self)
def _rank_sanity_check(self, rank):
if int(rank) < 1 or int(rank) > self.result_list.length():
raise RuntimeError("Attempted to fetch results up to rank %s for query %s (%s), which is impossible." % (rank, self.record_id, self.query_text))
def add_to_result_list( self, rank, document ):
self.result_list.add( rank, document )
def results_up_to_rank(self, rank, relevance_level_match=lambda r: True):
self._rank_sanity_check(rank)
return self.result_list.results_up_to_rank(rank, relevance_level_match=relevance_level_match)
def non_relevant_results_up_to_rank(self, rank):
self._rank_sanity_check(rank)
return self.result_list.non_relevant_results_up_to_rank(rank)
def results_of_relevance_level(self, relevance_level_match):
last_rank_reached = self.last_rank_reached()
if last_rank_reached is None:
return []
return self.results_up_to_rank(last_rank_reached, relevance_level_match=relevance_level_match)
def non_relevant_results(self):
return self.non_relevant_results_up_to_rank(self.last_rank_reached())
def moderately_relevant_results_up_to_rank(self, rank):
self._rank_sanity_check(rank)
return self.result_list.moderately_relevant_results_up_to_rank(rank)
def moderately_relevant_results(self):
return self.moderately_relevant_results_up_to_rank(self.last_rank_reached())
def highly_relevant_results_up_to_rank(self, rank):
self._rank_sanity_check(rank)
return self.result_list.highly_relevant_results_up_to_rank(rank)
def highly_relevant_results(self):
return self.highly_relevant_results_up_to_rank(self.last_rank_reached())
def result_at(self, rank):
return self.result_list.result_documents[rank - 1]
def results_between(self, rank_start, rank_end):
result_length = self.result_list.length()
if int(rank_start) < 1 or int(rank_start) > result_length or int(rank_end) < 1 or int(rank_end) > result_length or int(rank_start) > int(rank_end):
raise RuntimeError("Attempted to fetch results between rank %s and %s for query %s (%s), which is impossible." % (rank_start, rank_end, self.record_id, self.query_text))
return self.result_list.results_between(rank_start, rank_end)
def focus_action(self):
focus_actions = self.actions_by_type('QUERY_FOCUS')
if len(focus_actions) == 0:
return (None, None)
return self.actions_by_type('QUERY_FOCUS')[0]
def formulation_time_in_seconds(self):
(idx, query_start_action) = self.focus_action()
return self.action_duration_in_seconds_for(idx, query_start_action, 'QUERY_ISSUED') if query_start_action is not None else None
def formulation_event(self):
(idx, query_start_action) = self.focus_action()
action_duration = self.action_duration_in_seconds_for(idx, query_start_action, 'QUERY_ISSUED') if query_start_action is not None else None
return {
'query_formulation_duration': action_duration,
'query_formulation_start_at': self.session.seconds_elapsed_at(query_start_action.timestamp) if query_start_action is not None else None,
'autocomplete': self.autocomplete,
'query_text': self.query_text,
'precision': self.precision,
'average_snippet_scan_duration': self.average_snippet_scanning_time_in_seconds(),
'query_order_number': self.order_number(),
'duration_in_seconds': self.duration_in_seconds()
}
def last_rank_reached(self):
return _memoize_attr(
self,
'_last_rank_reached',
lambda: self._calculate_last_rank_reached()
)
def _calculate_last_rank_reached(self):
seen_docs_count = len(self.seen_documents)
return None if seen_docs_count == 0 else seen_docs_count
def amount_of_non_relevant_documents_seen_at_last_rank(self):
return len(self.non_relevant_documents_seen_at_last_rank())
def non_relevant_documents_seen_at_last_rank(self):
    """Documents seen up to the last rank that are not relevant for this query's topic."""
    seen = self.results_up_to_rank(self.last_rank_reached())
    return [entry.document for entry in seen
            if entry.is_not_relevant_for_topic(self.topic)]
def amount_of_contiguous_non_relevant_documents_seen_at_last_rank(self):
    """Length of the trailing run of consecutive non-relevant documents seen."""
    return len(self.last_contiguous_non_relevant_documents_seen())
def last_contiguous_non_relevant_documents_seen(self):
    """Trailing run of non-relevant documents, walking backwards from the last rank.

    Returned in reverse viewing order (last seen first), matching the original.
    """
    results_seen = self.results_up_to_rank(self.last_rank_reached())
    trailing = []
    for result in reversed(results_seen):
        # Stop at the first relevant result from the end.
        if not result.is_not_relevant_for_topic(self.topic):
            break
        trailing.append(result.document)
    return trailing
def total_snippet_scanning_time_in_seconds(self):
    """Query duration minus formulation time and document reading time.

    Returns None when the query has no recorded actions.
    """
    # No actions -> No scanning time.
    if not self.actions:
        return None
    formulation = self.formulation_time_in_seconds()
    if formulation is None:
        formulation = 0  # Act as if formulation was instant
    reading_total = sum(self.document_read_times().values())
    return self.duration_in_seconds() - formulation - reading_total
def continuous_rank_at(self, rank):
    """Session-wide rank: *rank* offset by the last ranks of all prior queries.

    Prior queries that reached no rank (None) contribute 0.
    """
    prior_queries = self.session.queries_prior_to(self)
    offset = sum((query.last_rank_reached() or 0) for query in prior_queries)
    return offset + rank
def continuous_rank_at_end(self):
    """Session-wide rank at this query's deepest point (0 if no rank was reached)."""
    last = self.last_rank_reached()
    return self.continuous_rank_at(last or 0)
def order_number(self):
    """1-based position of this query within the session's sorted query sequence."""
    return self.session.sorted_queries().index(self) + 1
def rank_of(self, document):
    """Rank of *document* in this query's result list (delegated to the result list)."""
    return self.result_list.rank_of(document)
def never_switched_from_first_serp(self):
    """True when the user never viewed a result page beyond the first."""
    return not self.serp_views_after_first()
def serp_views_after_first(self):
    """Plain SERP-switch actions that landed on a result page after page 1."""
    def switched_past_first(action):
        return action.action_type == Action.SERP_SWITCH_ACTION_NAME and int(action.result_page) > 1
    return self.actions_by_filter(switched_past_first, plain_actions=True)
def serp_views(self):
    """All plain SERP-switch actions recorded for this query."""
    def is_serp_switch(action):
        return action.action_type == Action.SERP_SWITCH_ACTION_NAME
    return self.actions_by_filter(is_serp_switch, plain_actions=True)
def last_serp_number(self):
    """Highest result-page number among this query's SERP views."""
    # Generator instead of an intermediate list; still raises ValueError when empty.
    return max(int(view.result_page) for view in self.serp_views())
def last_serp_actions(self, plain_actions=True):
    """Actions that occurred on the last SERP page the user viewed.

    *plain_actions* is forwarded to actions_by_filter unchanged.
    """
    # Hoisted: the original recomputed last_serp_number() once per action
    # examined by the filter predicate.
    last_serp = self.last_serp_number()
    # NOTE(review): this predicate reads a.serp_page_num while the SERP-view
    # filters above read a.result_page — confirm both attributes exist on Action.
    return self.actions_by_filter(lambda a: a.serp_page_num == last_serp, plain_actions=plain_actions)
def no_document_actions_on_last_serp(self):
    """True when no read or mark event happened on the last SERP viewed."""
    return not any(action.is_read_event() or action.is_mark_event()
                   for action in self.last_serp_actions())
@classmethod
def average_formulation_time_in_seconds(cls, filter_func=lambda query: True):
    """Mean formulation time across stored queries accepted by *filter_func*."""
    queries = [query for query in cls.get_store().values() if filter_func(query)]
    # NOTE(review): ZeroDivisionError when no query matches, and TypeError if any
    # query's formulation time is None — confirm callers guard against both.
    return sum(query.formulation_time_in_seconds() for query in queries) / len(queries)
@classmethod
def average_last_rank_reached(cls, filter_func=lambda query: True):
    """Mean last rank reached across stored queries accepted by *filter_func*."""
    queries = [query for query in cls.get_store().values() if filter_func(query)]
    # NOTE(review): float(None) raises TypeError for a query that reached no rank;
    # ZeroDivisionError when no query matches — confirm callers guard.
    return sum(float(query.last_rank_reached()) for query in queries) / float(len(queries))
@classmethod
def average_amount_of_non_relevant_documents_seen_at_last_rank(cls, filter_func=lambda query: True):
    """Mean count of non-relevant documents seen at the last rank, over matching queries."""
    queries = [query for query in cls.get_store().values() if filter_func(query)]
    # NOTE(review): ZeroDivisionError when no query matches — confirm callers guard.
    return sum(float(query.amount_of_non_relevant_documents_seen_at_last_rank()) for query in queries) / float(len(queries))
@classmethod
def average_amount_of_contiguous_non_relevant_documents_seen_at_last_rank(cls, filter_func=lambda query: True):
    """Mean length of the trailing non-relevant run, over matching queries."""
    queries = [query for query in cls.get_store().values() if filter_func(query)]
    # NOTE(review): ZeroDivisionError when no query matches — confirm callers guard.
    return sum(float(query.amount_of_contiguous_non_relevant_documents_seen_at_last_rank()) for query in queries) / float(len(queries))
| {
"repo_name": "fire-uta/iiix-data-parser",
"path": "query.py",
"copies": "1",
"size": "8645",
"license": "mit",
"hash": 3045347485768151600,
"line_mean": 43.1071428571,
"line_max": 177,
"alpha_frac": 0.7101214575,
"autogenerated": false,
"ratio": 3.3533747090768036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45634961665768037,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
# "func" shows that a function object can be passed around like any other value.
def my_add(ax, bx, func):
    """Apply *func* to each operand, then add the two results."""
    left = func(ax)
    right = func(bx)
    return left + right
# Demo: pass the built-in abs as the mapper.
a, b = 5, -6
print(a, b, my_add(a, b, abs))
# Sample list reused by the map/reduce demos below.
l1 = [1, 2, 3, 4, 5]
def add(ax, bx):
    """Return the sum of two values (used as a reduce step below)."""
    total = ax + bx
    return total
def sq(iax):
    """Square the input (used as a map callable below)."""
    return pow(iax, 2)
# map() takes a function and an Iterable; it applies the function to every
# element in turn and returns the results as a new Iterator.
r1 = map(sq, l1)
# An Iterator can be materialized directly with list / set / tuple.
print(r1, next(r1), tuple(r1))
# reduce() (from functools) takes a function, a sequence and an initial value.
# The line below computes add(initial, add(add(l1[0], l1[1]), l1[2]) ...).
r2 = reduce(add, l1, 1)
print(r2)
# Combined usage: convert a digit string to an int with map + reduce.
def fn(ax, ay):
    """Reduce step: shift the accumulator one decimal place and add the next digit."""
    return ay + ax * 10
def char2num(sx):
    """Map a single digit character to its integer value (KeyError otherwise)."""
    digits = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
    return digits[sx]
i_str = '12345'
print(i_str, reduce(fn, map(char2num, i_str)))
# A lambda expression can be seen as an anonymous function.
print(i_str, reduce(lambda x, y: x*10+y, map(char2num, i_str)))
# Exercise: normalize names to title case.
def crt_name(name):
    """Normalize a name: first letter upper-case, remaining letters lower-case."""
    head, tail = name[0], name[1:]
    return head.upper() + tail.lower()
print(list(map(crt_name, ['adam', 'LISA', 'barT'])))
# More advanced: str.capitalize does the same in a single call.
print(list(map(lambda x: x.capitalize(), ['adam', 'LISA', 'barT'])))
# Exercise: compute a product.
def prod(factors):
    """Return the product of all items in *factors* (reduce exercise)."""
    return reduce(lambda acc, item: acc * item, factors)
print('2*3*5 =', prod([2, 3, 5]))
# Exercise: str2float:
def str2float(f_str):
    """Convert a decimal string such as '123.456' to a float using map/reduce.

    Handles inputs without a dot ('12') and with a trailing dot ('34.').
    Fix: the original returned an int (not a float) when the input had no '.'.
    """
    digit_map = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
    parts = f_str.split('.')
    # Fold every digit (dot removed) into a single integer.
    as_int = reduce(lambda x, y: 10 * x + y,
                    map(lambda z: digit_map[z], f_str.replace('.', '')))
    # Number of digits after the dot determines the scaling factor.
    fraction_digits = len(parts[1]) if len(parts) == 2 else 0
    if fraction_digits:
        return as_int / pow(10, fraction_digits)
    # Always return a float, even for integral inputs.
    return float(as_int)
print(str2float('123.456'), str2float('12'), str2float('34.'))
| {
"repo_name": "lucd1990/self-learn-python",
"path": "middle/higher_order_function.py",
"copies": "1",
"size": "1938",
"license": "unlicense",
"hash": 1176498263243161900,
"line_mean": 19.3855421687,
"line_max": 117,
"alpha_frac": 0.5360520095,
"autogenerated": false,
"ratio": 1.9789473684210526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.30149993779210527,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
"""
Noções básicas de programação funcional em Python
Leitura complementar: http://nasemanadaprova.blogspot.com.br/2015/01/programacao-funcional-python.html
List Comprehension: http://www.python-course.eu/list_comprehension.php
Expressão lambda: http://blog.alienretro.com/entendendo-python-lambda/
Map, Filter and Reduce: http://book.pythontips.com/en/latest/map_filter.html
Map: aplicar uma função em todos os itens de uma ou mais sequências
Reduce: aplica uma função sobre uma sequencia e vai acumulando o valor de retorno da função a partir de um valor inicial (reduce foi removida do python3 (core) e está disponível no pacote functools, importado acima)
Filter: serve para filtrar/selecionar elementos de uma lista que correspondem a uma determinada condição
"""
# criar uma lista com os 100 primeiros números pares (100 inclusive) usando List Comprehension
pares = [n for n in range(101) if n % 2 == 0]
print(pares)
# expressão lambda para um função de potência
potencia = lambda x: x ** 2
print(potencia(2))
# função fatorial com lambda
fatorial = lambda n: n * fatorial(n - 1) if n > 1 else 1
print(fatorial(5))
# utilizando lambda com map (elevar a potência todos os itens da lista)
lista = [1, 2, 3, 44, 556, 78, 4, 67, 9, 2]
m = map(lambda x: x ** 2, lista)
for i in m: print(i, end=' ')
print()
# ou utilizando a função lambda "potencia" previamente definida
m2 = map(potencia, lista)
for i in m2: print(i, end=' ')
print()
# utilizando a função reduce para fazer o somatório da lista (1+2+3+44+556+78...)
print(reduce(lambda x, y: x + y, lista))
# neste exemplo, seria mais legível e fácil usar a função sum()
print(sum(lista))
print()
# usando filter para selecionar elementos de uma lista sob uma determinada condição
# selecionando na lista somente os números pares
filtro = filter(lambda x: x % 2 == 0, lista)
for i in filtro: print(i, end=' ')
| {
"repo_name": "dev-lord/estruturas-dados-com-python3",
"path": "aula03/basico-programacao-funcional.py",
"copies": "1",
"size": "1944",
"license": "mit",
"hash": 2990369443586176000,
"line_mean": 34.9811320755,
"line_max": 215,
"alpha_frac": 0.7425275302,
"autogenerated": false,
"ratio": 2.6486111111111112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3891138641311111,
"avg_score": null,
"num_lines": null
} |
from functools import reduce
# Ref: MapReduce: Simplified Data Processing on Large Clusters (by Google)
# map() takes two arguments, a function and an Iterable; it applies the function
# to each element of the sequence in turn and returns the results as a new Iterator.
# As a higher-order function, map() embodies the idea of abstracting over computation rules.
def func(x):
    """Square *x* (demo callable for map)."""
    return pow(x, 2)
print(list(map(func, [1, 2, 3, 4, 5, 6])))
# reduce() takes two arguments, a function and an Iterable.
# reduce() folds over the sequence, feeding each intermediate result into the next step:
# reduce(f, [x1, x2, x3, x4]) = f(f(f(x1, x2), x3), x4)
def fn(x, y):
    """Reduce step: append digit *y* to accumulator *x* in base 10."""
    return y + x * 10
# NOTE(review): the reduce result (13579) is computed but discarded — presumably
# it was meant to be printed; confirm intent.
reduce(fn, [1, 3, 5, 7, 9])
# Exercise 1: normalize a list of inconsistently-cased names to title case.
# Sample input: full names with inconsistent capitalization.
names_list1 = ['sTevEn JOBs',
'coCo lee',
'JAck zhaNG',
'LiSa ChEn',
'georgE w bUsH',
'PETER cHeN',
'brUce Ho',
'biLL W clinTON'
,'ciRAlI Clinton'
,'Yang SHEN'
,'elSA Y Shen'
,'robin zhAng'
,'Bruce LEE']
# A function should do only one job — that keeps unit-test design simple.
def normalize_name(abnormal_name):
    """Title-case every word of *abnormal_name* ('sTevEn JOBs' -> 'Steven Jobs')."""
    words = abnormal_name.split(' ')
    fixed = [word[0].upper() + word[1:].lower() for word in words]
    return ' '.join(fixed)
# For design reasons, in Python 3 a plain comprehension is usually preferred over map / reduce.
def normalize_name_via_loop(abnormal_names):
    """Normalize a whole list of names using comprehensions instead of map/reduce."""
    normalized = []
    for name in abnormal_names:
        words = [word[0].upper() + word[1:].lower() for word in name.split(' ')]
        normalized.append(' '.join(words))
    return normalized
# Normalize the sample names both ways and show the results agree.
names_list2 = list(map(normalize_name, names_list1))
print(names_list2)
names_list3 = normalize_name_via_loop(names_list1)
print(names_list3)
# Exercise 2: group the normalized names from exercise 1 by surname (the last
# word of each name), using the surname as the group key, e.g.
# {'Jobs': ['Steven Jobs'], 'Zhang': ['Robin Zhang', 'Jack Zhang'], ...}
# ref to play_collection.py
# reduce has some limitations, which is why it was removed from the python3
# builtins: http://www.artima.com/weblogs/viewpost.jsp?thread=98196
# In python3, reduce was moved to functools.
# It remains very convenient for + or * folds.
"repo_name": "biztudio/JustPython",
"path": "syntaxlab/src/play_map_reduce.py",
"copies": "1",
"size": "2294",
"license": "mit",
"hash": -6748798976663107000,
"line_mean": 26.203125,
"line_max": 118,
"alpha_frac": 0.6201149425,
"autogenerated": false,
"ratio": 1.8471337579617835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7933335118359164,
"avg_score": 0.00678271642052382,
"num_lines": 64
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.