max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
fpiweb/migrations/0016_remove_box_print_box_number_label.py
|
snelzing/Food-Pantry-Inventory
| 22
|
6629451
|
<reponame>snelzing/Food-Pantry-Inventory<gh_stars>10-100
# Generated by Django 2.2.1 on 2019-06-12 02:20
from django.db import migrations
class Migration(migrations.Migration):
    """Generated migration: remove ``print_box_number_label`` from ``Box``."""

    # Must run after migration 0015, which introduced the field being removed.
    dependencies = [
        ('fpiweb', '0015_box_print_box_number_label'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='box',
            name='print_box_number_label',
        ),
    ]
|
# Generated by Django 2.2.1 on 2019-06-12 02:20
from django.db import migrations
class Migration(migrations.Migration):
    """Generated migration: remove ``print_box_number_label`` from ``Box``."""

    # Must run after migration 0015, which introduced the field being removed.
    dependencies = [
        ('fpiweb', '0015_box_print_box_number_label'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='box',
            name='print_box_number_label',
        ),
    ]
|
en
| 0.75276
|
# Generated by Django 2.2.1 on 2019-06-12 02:20
| 1.272205
| 1
|
binding.gyp
|
robertleeplummerjr/node-gles
| 0
|
6629452
|
<reponame>robertleeplummerjr/node-gles<filename>binding.gyp
##
# @license
# Copyright 2018 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Node.js TensorFlow Binding config:
{
  'variables' : {
    # Directory containing prebuilt ANGLE libraries. NOTE(review): this points
    # at the Debug build output — confirm this is switched for release builds.
    'angle_lib_dir': '<(module_root_dir)/../angle/out/Debug'
  },
  'targets' : [{
    'target_name' : 'nodejs_gl_binding',
    # Native sources for the Node.js GL binding addon.
    'sources' : [
      'binding/binding.cc',
      'binding/egl_context_wrapper.cc',
      'binding/webgl_extensions.cc',
      'binding/webgl_rendering_context.cc'
    ],
    'include_dirs' : [
      '..',
      '<(module_root_dir)/../angle/include'
    ],
    # Per-OS link settings: Linux and macOS both link ANGLE's GLESv2/EGL and
    # bake the ANGLE library directory into the runtime search path (rpath).
    'conditions' : [
      [
        'OS=="linux"', {
          'libraries' : [
            '-Wl,-rpath,<@(angle_lib_dir)',
            '-lGLESv2',
            '-lEGL',
          ],
          'library_dirs' : ['<(angle_lib_dir)'],
        }
      ],
      [
        'OS=="mac"', {
          'libraries' : [
            '-Wl,-rpath,<@(angle_lib_dir)',
            '-lGLESv2',
            '-lEGL',
          ],
          'library_dirs' : ['<(angle_lib_dir)'],
        }
      ]
    ]
  }]
}
|
##
# @license
# Copyright 2018 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Node.js TensorFlow Binding config:
{
  'variables' : {
    # Directory containing prebuilt ANGLE libraries. NOTE(review): this points
    # at the Debug build output — confirm this is switched for release builds.
    'angle_lib_dir': '<(module_root_dir)/../angle/out/Debug'
  },
  'targets' : [{
    'target_name' : 'nodejs_gl_binding',
    # Native sources for the Node.js GL binding addon.
    'sources' : [
      'binding/binding.cc',
      'binding/egl_context_wrapper.cc',
      'binding/webgl_extensions.cc',
      'binding/webgl_rendering_context.cc'
    ],
    'include_dirs' : [
      '..',
      '<(module_root_dir)/../angle/include'
    ],
    # Per-OS link settings: Linux and macOS both link ANGLE's GLESv2/EGL and
    # bake the ANGLE library directory into the runtime search path (rpath).
    'conditions' : [
      [
        'OS=="linux"', {
          'libraries' : [
            '-Wl,-rpath,<@(angle_lib_dir)',
            '-lGLESv2',
            '-lEGL',
          ],
          'library_dirs' : ['<(angle_lib_dir)'],
        }
      ],
      [
        'OS=="mac"', {
          'libraries' : [
            '-Wl,-rpath,<@(angle_lib_dir)',
            '-lGLESv2',
            '-lEGL',
          ],
          'library_dirs' : ['<(angle_lib_dir)'],
        }
      ]
    ]
  }]
}
|
en
| 0.774568
|
## # @license # Copyright 2018 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # Node.js TensorFlow Binding config:
| 1.543014
| 2
|
lifetimes/fitters/beta_geo_fitter.py
|
isabella232/lifetimes
| 2
|
6629453
|
"""Beta Geo Fitter, also known as BG/NBD model."""
from __future__ import print_function
from collections import OrderedDict
import numpy as np
from numpy import log, asarray, any as npany, c_ as vconcat, isinf, isnan, \
where, exp
from numpy import ones_like
from pandas import DataFrame
from scipy.special import gammaln, hyp2f1, beta, gamma
from scipy import misc
from . import BaseFitter
from ..utils import _fit, _scale_time, _check_inputs
from ..generate_data import beta_geometric_nbd_model
class BetaGeoFitter(BaseFitter):
    """
    Also known as the BG/NBD model.

    Based on [1], this model has the following assumptions:
    1) Each individual, i, has a hidden lambda_i and p_i parameter
    2) These come from a population wide Gamma and a Beta distribution
       respectively.
    3) Individuals' purchases follow a Poisson process with rate lambda_i*t.
    4) After each purchase, an individual has a p_i probability of dying
       (never buying again).

    [1] Fader, <NAME>., <NAME>, and <NAME> (2005a),
        "Counting Your Customers the Easy Way: An Alternative to the
        Pareto/NBD Model," Marketing Science, 24 (2), 275-84.
    """

    def __init__(self, penalizer_coef=0.0):
        """Initialization, set penalizer_coef."""
        self.penalizer_coef = penalizer_coef

    def fit(self, frequency, recency, T, iterative_fitting=1,
            initial_params=None, verbose=False, tol=1e-4, index=None,
            fit_method='Nelder-Mead', maxiter=2000, **kwargs):
        """
        Fit the data to the BG/NBD model.

        Parameters:
            frequency: the frequency vector of customers' purchases (denoted x
                in literature).
            recency: the recency vector of customers' purchases (denoted t_x
                in literature).
            T: the vector of customers' age (time since first purchase)
            iterative_fitting: perform iterative_fitting fits over
                random/warm-started initial params
            initial_params: set the initial parameters for the fitter.
            verbose: set to true to print out convergence diagnostics.
            tol: tolerance for termination of the function minimization
                process.
            index: index for the resulting DataFrame which is accessible via
                self.data
            fit_method: fit_method to pass to scipy.optimize.minimize
            maxiter: max iterations for optimizer in scipy.optimize.minimize;
                will be overwritten if set in kwargs.
            kwargs: keyword arguments to pass to the scipy.optimize.minimize
                function as options dict

        Returns:
            self, with additional properties and methods like params_ and
            predict
        """
        frequency = asarray(frequency)
        recency = asarray(recency)
        T = asarray(T)
        _check_inputs(frequency, recency, T)

        # Rescale time so the optimizer works on well-conditioned numbers;
        # alpha (the only time-scaled parameter) is scaled back afterwards.
        self._scale = _scale_time(T)
        scaled_recency = recency * self._scale
        scaled_T = T * self._scale

        params, self._negative_log_likelihood_ = _fit(
            self._negative_log_likelihood,
            [frequency, scaled_recency, scaled_T, self.penalizer_coef],
            iterative_fitting,
            initial_params,
            4,
            verbose,
            tol,
            fit_method,
            maxiter,
            **kwargs)

        self.params_ = OrderedDict(zip(['r', 'alpha', 'a', 'b'], params))
        self.params_['alpha'] /= self._scale

        self.data = DataFrame(vconcat[frequency, recency, T],
                              columns=['frequency', 'recency', 'T'])
        if index is not None:
            self.data.index = index

        self.generate_new_data = lambda size=1: beta_geometric_nbd_model(
            T, *self._unload_params('r', 'alpha', 'a', 'b'), size=size)

        self.predict = self.conditional_expected_number_of_purchases_up_to_time
        return self

    @staticmethod
    def _negative_log_likelihood(params, freq, rec, T, penalizer_coef):
        """Penalized negative log-likelihood of the BG/NBD model.

        Returns np.inf for any non-positive parameter, which steers the
        optimizer back into the valid region.
        """
        if npany(asarray(params) <= 0):
            return np.inf

        # FIX: scipy.misc.logsumexp was deprecated in SciPy 1.0 and removed
        # in 1.3; use scipy.special.logsumexp and fall back for very old
        # SciPy versions that only had the scipy.misc location.
        try:
            from scipy.special import logsumexp
        except ImportError:  # pragma: no cover - SciPy < 0.19
            logsumexp = misc.logsumexp

        r, alpha, a, b = params

        A_1 = gammaln(r + freq) - gammaln(r) + r * log(alpha)
        A_2 = (gammaln(a + b) + gammaln(b + freq) - gammaln(b) -
               gammaln(a + b + freq))
        A_3 = -(r + freq) * log(alpha + T)

        # d weights the second summand so it only applies to repeat
        # purchasers (freq > 0); the where() guards against log(0) for
        # zero-frequency customers, and any resulting nan/inf is zeroed.
        d = vconcat[ones_like(freq), (freq > 0)]
        A_4 = log(a) - log(b + where(freq == 0, 1, freq) - 1) - \
            (r + freq) * log(rec + alpha)
        A_4[isnan(A_4) | isinf(A_4)] = 0

        penalizer_term = penalizer_coef * sum(np.asarray(params) ** 2)
        return -(A_1 + A_2 + logsumexp(
            vconcat[A_3, A_4], axis=1, b=d)).mean() + penalizer_term

    def expected_number_of_purchases_up_to_time(self, t):
        """
        Calculate the expected number of repeat purchases up to time t.

        Calculate repeat purchases for a randomly chosen individual from the
        population.

        Parameters:
            t: a scalar or array of times.

        Returns: a scalar or array
        """
        r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')
        hyp = hyp2f1(r, b, a + b - 1, t / (alpha + t))
        return (a + b - 1) / (a - 1) * (1 - hyp * (alpha / (alpha + t)) ** r)

    def conditional_expected_number_of_purchases_up_to_time(self, t, frequency,
                                                            recency, T):
        """
        Conditional expected number of purchases up to time.

        Calculate the expected number of repeat purchases up to time t for a
        randomly chosen individual from the population, given they have
        purchase history (frequency, recency, T)

        Parameters:
            t: a scalar or array of times.
            frequency: a scalar: historical frequency of customer.
            recency: a scalar: historical recency of customer.
            T: a scalar: age of the customer.

        Returns: a scalar or array
        """
        x = frequency
        r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')

        _a = r + x
        _b = b + x
        _c = a + b + x - 1
        _z = t / (alpha + T + t)
        ln_hyp_term = np.log(hyp2f1(_a, _b, _c, _z))

        # if the value is inf, we are using a different but equivalent
        # formula to compute the function evaluation.
        ln_hyp_term_alt = np.log(hyp2f1(_c - _a, _c - _b, _c, _z)) + \
            (_c - _a - _b) * np.log(1 - _z)
        ln_hyp_term = where(np.isinf(ln_hyp_term), ln_hyp_term_alt, ln_hyp_term)
        first_term = (a + b + x - 1) / (a - 1)
        second_term = (1 - exp(ln_hyp_term + (r + x) *
                               np.log((alpha + T) / (alpha + t + T))))
        numerator = first_term * second_term

        denominator = 1 + (x > 0) * (a / (b + x - 1)) * \
            ((alpha + T) / (alpha + recency)) ** (r + x)

        return numerator / denominator

    def conditional_probability_alive(self, frequency, recency, T,
                                      ln_exp_max=300):
        """
        Compute conditional probability alive.

        Compute the probability that a customer with history
        (frequency, recency, T) is currently alive.
        From http://www.brucehardie.com/notes/021/palive_for_BGNBD.pdf

        Parameters:
            frequency: a scalar: historical frequency of customer.
            recency: a scalar: historical recency of customer.
            T: a scalar: age of the customer.
            ln_exp_max: to what value clip log_div equation

        Returns: a scalar
        """
        r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')

        # where() avoids log(0) when frequency == 0; that branch is
        # overridden to probability 1 below anyway.
        log_div = (r + frequency) * log(
            (alpha + T) / (alpha + recency)) + log(
            a / (b + where(frequency == 0, 1, frequency) - 1))

        return where(frequency == 0, 1.,
                     where(log_div > ln_exp_max, 0.,
                           1. / (1 + exp(np.clip(log_div, None, ln_exp_max)))))

    def conditional_probability_alive_matrix(self, max_frequency=None,
                                             max_recency=None):
        """
        Compute the probability alive matrix.

        Parameters:
            max_frequency: the maximum frequency to plot. Default is max
                observed frequency.
            max_recency: the maximum recency to plot. This also determines
                the age of the customer. Default to max observed age.

        Returns:
            A matrix of the form [t_x: historical recency,
                                  x: historical frequency]
        """
        # FIX: use `is None` rather than truthiness so an explicit 0 passed
        # by the caller is respected instead of silently replaced.
        if max_frequency is None:
            max_frequency = int(self.data['frequency'].max())
        if max_recency is None:
            max_recency = int(self.data['T'].max())

        return np.fromfunction(self.conditional_probability_alive,
                               (max_frequency + 1, max_recency + 1),
                               T=max_recency).T

    def probability_of_n_purchases_up_to_time(self, t, n):
        """
        Compute the probability of n purchases.

        P( N(t) = n | model )

        where N(t) is the number of repeat purchases a customer makes in t
        units of time.
        """
        r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')

        first_term = (beta(a, b + n) / beta(a, b) *
                      gamma(r + n) / gamma(r) /
                      gamma(n + 1) * (alpha / (alpha + t)) ** r *
                      (t / (alpha + t)) ** n)

        if n > 0:
            j = np.arange(0, n)
            finite_sum = (gamma(r + j) / gamma(r) / gamma(j + 1) *
                          (t / (alpha + t)) ** j).sum()
            second_term = (beta(a + 1, b + n - 1) /
                           beta(a, b) * (1 - (alpha / (alpha + t)) ** r *
                                         finite_sum))
        else:
            second_term = 0

        return first_term + second_term
|
"""Beta Geo Fitter, also known as BG/NBD model."""
from __future__ import print_function
from collections import OrderedDict
import numpy as np
from numpy import log, asarray, any as npany, c_ as vconcat, isinf, isnan, \
where, exp
from numpy import ones_like
from pandas import DataFrame
from scipy.special import gammaln, hyp2f1, beta, gamma
from scipy import misc
from . import BaseFitter
from ..utils import _fit, _scale_time, _check_inputs
from ..generate_data import beta_geometric_nbd_model
class BetaGeoFitter(BaseFitter):
    """
    Also known as the BG/NBD model.
    Based on [1], this model has the following assumptions:
    1) Each individual, i, has a hidden lambda_i and p_i parameter
    2) These come from a population wide Gamma and a Beta distribution
    respectively.
    3) Individuals purchases follow a Poisson process with rate lambda_i*t .
    4) After each purchase, an individual has a p_i probability of dieing
    (never buying again).
    [1] Fader, <NAME>., <NAME>, and <NAME> (2005a),
    "Counting Your Customers the Easy Way: An Alternative to the
    Pareto/NBD Model," Marketing Science, 24 (2), 275-84.
    """

    def __init__(self, penalizer_coef=0.0):
        """Initialization, set penalizer_coef."""
        self.penalizer_coef = penalizer_coef

    def fit(self, frequency, recency, T, iterative_fitting=1,
            initial_params=None, verbose=False, tol=1e-4, index=None,
            fit_method='Nelder-Mead', maxiter=2000, **kwargs):
        """
        Fit the data to the BG/NBD model.
        Parameters:
            frequency: the frequency vector of customers' purchases (denoted x
                in literature).
            recency: the recency vector of customers' purchases (denoted t_x in
                literature).
            T: the vector of customers' age (time since first purchase)
            iterative_fitting: perform iterative_fitting fits over
                random/warm-started initial params
            initial_params: set the initial parameters for the fitter.
            verbose: set to true to print out convergence diagnostics.
            tol: tolerance for termination of the function minimization
                process.
            index: index for resulted DataFrame which is accessible via
                self.data
            fit_method: fit_method to passing to scipy.optimize.minimize
            maxiter: max iterations for optimizer in scipy.optimize.minimize
                will be overwritten if setted in kwargs.
            kwargs: key word arguments to pass to the scipy.optimize.minimize
                function as options dict
        Returns:
            self, with additional properties and methods like params_ and
            predict
        """
        frequency = asarray(frequency)
        recency = asarray(recency)
        T = asarray(T)
        _check_inputs(frequency, recency, T)

        # Rescale time so the optimizer works on well-conditioned numbers;
        # alpha is scaled back after fitting.
        self._scale = _scale_time(T)
        scaled_recency = recency * self._scale
        scaled_T = T * self._scale

        params, self._negative_log_likelihood_ = _fit(
            self._negative_log_likelihood,
            [frequency, scaled_recency, scaled_T, self.penalizer_coef],
            iterative_fitting,
            initial_params,
            4,
            verbose,
            tol,
            fit_method,
            maxiter,
            **kwargs)

        self.params_ = OrderedDict(zip(['r', 'alpha', 'a', 'b'], params))
        self.params_['alpha'] /= self._scale

        self.data = DataFrame(vconcat[frequency, recency, T],
                              columns=['frequency', 'recency', 'T'])
        if index is not None:
            self.data.index = index

        self.generate_new_data = lambda size=1: beta_geometric_nbd_model(
            T, *self._unload_params('r', 'alpha', 'a', 'b'), size=size)

        self.predict = self.conditional_expected_number_of_purchases_up_to_time
        return self

    @staticmethod
    def _negative_log_likelihood(params, freq, rec, T, penalizer_coef):
        # Penalized negative log-likelihood; np.inf steers the optimizer
        # away from non-positive parameter values.
        if npany(asarray(params) <= 0):
            return np.inf

        r, alpha, a, b = params

        A_1 = gammaln(r + freq) - gammaln(r) + r * log(alpha)
        A_2 = (gammaln(a + b) + gammaln(b + freq) - gammaln(b) -
               gammaln(a + b + freq))
        A_3 = -(r + freq) * log(alpha + T)

        # d weights the second summand so it only applies to repeat
        # purchasers (freq > 0); the where() guards against log(0).
        d = vconcat[ones_like(freq), (freq > 0)]
        A_4 = log(a) - log(b + where(freq == 0, 1, freq) - 1) - \
            (r + freq) * log(rec + alpha)
        A_4[isnan(A_4) | isinf(A_4)] = 0

        penalizer_term = penalizer_coef * sum(np.asarray(params) ** 2)
        # NOTE(review): scipy.misc.logsumexp was deprecated in SciPy 1.0 and
        # removed in 1.3 — this breaks on modern SciPy; the replacement is
        # scipy.special.logsumexp. Confirm the pinned SciPy version.
        return -(A_1 + A_2 + misc.logsumexp(
            vconcat[A_3, A_4], axis=1, b=d)).mean() + penalizer_term

    def expected_number_of_purchases_up_to_time(self, t):
        """
        Calculate the expected number of repeat purchases up to time t.
        Calculate repeat purchases for a randomly choose individual from the
        population.
        Parameters:
            t: a scalar or array of times.
        Returns: a scalar or array
        """
        r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')
        hyp = hyp2f1(r, b, a + b - 1, t / (alpha + t))
        return (a + b - 1) / (a - 1) * (1 - hyp * (alpha / (alpha + t)) ** r)

    def conditional_expected_number_of_purchases_up_to_time(self, t, frequency,
                                                            recency, T):
        """
        Conditional expected number of purchases up to time.
        Calculate the expected number of repeat purchases up to time t for a
        randomly choose individual from the population, given they have
        purchase history (frequency, recency, T)
        Parameters:
            t: a scalar or array of times.
            frequency: a scalar: historical frequency of customer.
            recency: a scalar: historical recency of customer.
            T: a scalar: age of the customer.
        Returns: a scalar or array
        """
        x = frequency
        r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')

        _a = r + x
        _b = b + x
        _c = a + b + x - 1
        _z = t / (alpha + T + t)
        ln_hyp_term = np.log(hyp2f1(_a, _b, _c, _z))

        # if the value is inf, we are using a different but equivalent
        # formula to compute the function evaluation.
        ln_hyp_term_alt = np.log(hyp2f1(_c - _a, _c - _b, _c, _z)) + \
            (_c - _a - _b) * np.log(1 - _z)
        ln_hyp_term = where(np.isinf(ln_hyp_term), ln_hyp_term_alt, ln_hyp_term)
        first_term = (a + b + x - 1) / (a - 1)
        second_term = (1 - exp(ln_hyp_term + (r + x) *
                               np.log((alpha + T) / (alpha + t + T))))
        numerator = first_term * second_term

        denominator = 1 + (x > 0) * (a / (b + x - 1)) * \
            ((alpha + T) / (alpha + recency)) ** (r + x)

        return numerator / denominator

    def conditional_probability_alive(self, frequency, recency, T,
                                      ln_exp_max=300):
        """
        Compute conditional probability alive.
        Compute the probability that a customer with history
        (frequency, recency, T) is currently alive.
        From http://www.brucehardie.com/notes/021/palive_for_BGNBD.pdf
        Parameters:
            frequency: a scalar: historical frequency of customer.
            recency: a scalar: historical recency of customer.
            T: a scalar: age of the customer.
            ln_exp_max: to what value clip log_div equation
        Returns: a scalar
        """
        r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')

        # where() avoids log(0) for frequency == 0; that branch is forced
        # to probability 1 below regardless.
        log_div = (r + frequency) * log(
            (alpha + T) / (alpha + recency)) + log(
            a / (b + where(frequency == 0, 1, frequency) - 1))

        return where(frequency == 0, 1.,
                     where(log_div > ln_exp_max, 0.,
                           1. / (1 + exp(np.clip(log_div, None, ln_exp_max)))))

    def conditional_probability_alive_matrix(self, max_frequency=None,
                                             max_recency=None):
        """
        Compute the probability alive matrix.
        Parameters:
            max_frequency: the maximum frequency to plot. Default is max
                observed frequency.
            max_recency: the maximum recency to plot. This also determines
                the age of the customer. Default to max observed age.
        Returns:
            A matrix of the form [t_x: historical recency,
                                  x: historical frequency]
        """
        # NOTE(review): `or` treats an explicitly passed 0 as "unset";
        # an `is None` check would respect it — confirm intended behavior.
        max_frequency = max_frequency or int(self.data['frequency'].max())
        max_recency = max_recency or int(self.data['T'].max())

        return np.fromfunction(self.conditional_probability_alive,
                               (max_frequency + 1, max_recency + 1),
                               T=max_recency).T

    def probability_of_n_purchases_up_to_time(self, t, n):
        """
        Compute the probability of n purchases.
        P( N(t) = n | model )
        where N(t) is the number of repeat purchases a customer makes in t
        units of time.
        """
        r, alpha, a, b = self._unload_params('r', 'alpha', 'a', 'b')

        first_term = (beta(a, b + n) / beta(a, b) *
                      gamma(r + n) / gamma(r) /
                      gamma(n + 1) * (alpha / (alpha + t)) ** r *
                      (t / (alpha + t)) ** n)

        if n > 0:
            j = np.arange(0, n)
            finite_sum = (gamma(r + j) / gamma(r) / gamma(j + 1) *
                          (t / (alpha + t)) ** j).sum()
            second_term = (beta(a + 1, b + n - 1) /
                           beta(a, b) * (1 - (alpha / (alpha + t)) ** r *
                                         finite_sum))
        else:
            second_term = 0

        return first_term + second_term
|
en
| 0.804262
|
Beta Geo Fitter, also known as BG/NBD model. Also known as the BG/NBD model. Based on [1], this model has the following assumptions: 1) Each individual, i, has a hidden lambda_i and p_i parameter 2) These come from a population wide Gamma and a Beta distribution respectively. 3) Individuals purchases follow a Poisson process with rate lambda_i*t . 4) After each purchase, an individual has a p_i probability of dieing (never buying again). [1] Fader, <NAME>., <NAME>, and <NAME> (2005a), "Counting Your Customers the Easy Way: An Alternative to the Pareto/NBD Model," Marketing Science, 24 (2), 275-84. Initialization, set penalizer_coef. Fit the data to the BG/NBD model. Parameters: frequency: the frequency vector of customers' purchases (denoted x in literature). recency: the recency vector of customers' purchases (denoted t_x in literature). T: the vector of customers' age (time since first purchase) iterative_fitting: perform iterative_fitting fits over random/warm-started initial params initial_params: set the initial parameters for the fitter. verbose: set to true to print out convergence diagnostics. tol: tolerance for termination of the function minimization process. index: index for resulted DataFrame which is accessible via self.data fit_method: fit_method to passing to scipy.optimize.minimize maxiter: max iterations for optimizer in scipy.optimize.minimize will be overwritten if setted in kwargs. kwargs: key word arguments to pass to the scipy.optimize.minimize function as options dict Returns: self, with additional properties and methods like params_ and predict Calculate the expected number of repeat purchases up to time t. Calculate repeat purchases for a randomly choose individual from the population. Parameters: t: a scalar or array of times. Returns: a scalar or array Conditional expected number of purchases up to time. 
Calculate the expected number of repeat purchases up to time t for a randomly choose individual from the population, given they have purchase history (frequency, recency, T) Parameters: t: a scalar or array of times. frequency: a scalar: historical frequency of customer. recency: a scalar: historical recency of customer. T: a scalar: age of the customer. Returns: a scalar or array # if the value is inf, we are using a different but equivalent # formula to compute the function evaluation. Compute conditional probability alive. Compute the probability that a customer with history (frequency, recency, T) is currently alive. From http://www.brucehardie.com/notes/021/palive_for_BGNBD.pdf Parameters: frequency: a scalar: historical frequency of customer. recency: a scalar: historical recency of customer. T: a scalar: age of the customer. ln_exp_max: to what value clip log_div equation Returns: a scalar Compute the probability alive matrix. Parameters: max_frequency: the maximum frequency to plot. Default is max observed frequency. max_recency: the maximum recency to plot. This also determines the age of the customer. Default to max observed age. Returns: A matrix of the form [t_x: historical recency, x: historical frequency] Compute the probability of n purchases. P( N(t) = n | model ) where N(t) is the number of repeat purchases a customer makes in t units of time.
| 2.957965
| 3
|
model/models.py
|
cloversjtu/diri-vhred
| 20
|
6629454
|
import torch
import torch.nn as nn
from torch.distributions import Dirichlet
from utils import to_var, pad, normal_kl_div, normal_logpdf, \
bag_of_words_loss, to_bow, EOS_ID, dirichlet_kl_div, dirichlet_logpdf
import layers
import numpy as np
import random
import sys
VariationalModels = ['DIR_VHRED']
class DIR_VHRED(nn.Module):
    def __init__(self, config):
        """Build the DIR-VHRED modules from the given config object."""
        super(DIR_VHRED, self).__init__()

        self.config = config
        # Utterance-level encoder: encodes each sentence to a fixed vector.
        self.encoder = layers.EncoderRNN(config.vocab_size,
                                         config.embedding_size,
                                         config.encoder_hidden_size,
                                         config.rnn,
                                         config.num_layers,
                                         config.bidirectional,
                                         config.dropout)

        context_input_size = (config.num_layers
                              * config.encoder_hidden_size
                              * self.encoder.num_directions)
        # Conversation-level encoder: RNN over per-sentence encodings.
        self.context_encoder = layers.ContextRNN(context_input_size,
                                                 config.context_size,
                                                 config.rnn,
                                                 config.num_layers,
                                                 config.dropout)

        self.decoder = layers.DecoderRNN(config.vocab_size,
                                         config.embedding_size,
                                         config.decoder_hidden_size,
                                         config.rnncell,
                                         config.num_layers,
                                         config.dropout,
                                         config.word_drop,
                                         config.max_unroll,
                                         config.sample,
                                         config.temperature,
                                         config.beam_size)

        # Projects [context; z_sent] to the decoder's initial hidden state.
        self.context2decoder = layers.FeedForward(config.context_size + config.z_sent_size,
                                                  config.num_layers * config.decoder_hidden_size,
                                                  num_layers=1,
                                                  activation=config.activation)

        # Softplus keeps the Dirichlet concentration parameters positive.
        self.softplus = nn.Softplus()

        self.prior_h = layers.FeedForward(config.context_size,
                                          config.context_size,
                                          num_layers=2,
                                          hidden_size=config.context_size,
                                          activation=config.activation)
        # self.prior_mu = nn.Linear(config.context_size, config.z_sent_size)
        # self.prior_var = nn.Linear(config.context_size, config.z_sent_size)
        self.prior_alpha = nn.Linear(config.context_size, config.z_sent_size)

        # Posterior conditions on both the context and the encoded response.
        self.posterior_h = layers.FeedForward(config.encoder_hidden_size * self.encoder.num_directions * config.num_layers + config.context_size,
                                              config.context_size,
                                              num_layers=2,
                                              hidden_size=config.context_size,
                                              activation=config.activation)
        # self.posterior_mu = nn.Linear(config.context_size, config.z_sent_size)
        # self.posterior_var = nn.Linear(config.context_size, config.z_sent_size)
        self.posterior_alpha = nn.Linear(config.context_size, config.z_sent_size)

        if config.tie_embedding:
            self.decoder.embedding = self.encoder.embedding

        if config.bow:
            # Auxiliary bag-of-words head: predicts target words from z_sent.
            self.bow_h = layers.FeedForward(config.z_sent_size,
                                            config.decoder_hidden_size,
                                            num_layers=1,
                                            hidden_size=config.decoder_hidden_size,
                                            activation=config.activation)
            self.bow_predict = nn.Linear(config.decoder_hidden_size, config.vocab_size)
    def prior(self, context_outputs):
        # Context dependent prior: maps the context vector to Dirichlet
        # concentration parameters; softplus guarantees alpha > 0.
        h_prior = self.prior_h(context_outputs)
        # mu_prior = self.prior_mu(h_prior)
        # var_prior = self.softplus(self.prior_var(h_prior))
        alpha_prior = self.softplus(self.prior_alpha(h_prior))
        return alpha_prior
    def posterior(self, context_outputs, encoder_hidden):
        # Approximate posterior: conditions on the context AND the encoded
        # target response; softplus keeps the Dirichlet alphas positive.
        h_posterior = self.posterior_h(torch.cat([context_outputs, encoder_hidden], 1))
        # mu_posterior = self.posterior_mu(h_posterior)
        # var_posterior = self.softplus(self.posterior_var(h_posterior))
        alpha_posterior = self.softplus(self.posterior_alpha(h_posterior))
        return alpha_posterior
    def compute_bow_loss(self, target_conversations):
        # Dense bag-of-words vector (vocab-sized) per target sentence.
        target_bow = np.stack([to_bow(sent, self.config.vocab_size) for conv in target_conversations for sent in conv], axis=0)
        target_bow = to_var(torch.FloatTensor(target_bow))
        # Uses self.z_sent cached by forward() — presumably this method is
        # only called after forward(); confirm against the training loop.
        bow_logits = self.bow_predict(self.bow_h(self.z_sent))
        bow_loss = bag_of_words_loss(bow_logits, target_bow)
        return bow_loss
    def forward(self, sentences, sentence_length,
                input_conversation_length, target_sentences, decode=False):
        """
        Args:
            sentences: (Variable, LongTensor) [num_sentences + batch_size, seq_len]
            target_sentences: (Variable, LongTensor) [num_sentences, seq_len]
        Return:
            decoder_outputs: (Variable, FloatTensor)
                - train: [batch_size, seq_len, vocab_size]
                - eval: [batch_size, seq_len]
        """
        batch_size = input_conversation_length.size(0)
        num_sentences = sentences.size(0) - batch_size
        max_len = input_conversation_length.data.max().item()

        # encoder_outputs: [num_sentences + batch_size, max_source_length, hidden_size]
        # encoder_hidden: [num_layers * direction, num_sentences + batch_size, hidden_size]
        encoder_outputs, encoder_hidden = self.encoder(sentences,
                                                       sentence_length)

        # encoder_hidden: [num_sentences + batch_size, num_layers * direction * hidden_size]
        encoder_hidden = encoder_hidden.transpose(
            1, 0).contiguous().view(num_sentences + batch_size, -1)

        # pad and pack encoder_hidden
        # start: offset of each conversation's first sentence in the flat batch.
        start = torch.cumsum(torch.cat((to_var(input_conversation_length.data.new(1).zero_()),
                                        input_conversation_length[:-1] + 1)), 0)
        # encoder_hidden: [batch_size, max_len + 1, num_layers * direction * hidden_size]
        encoder_hidden = torch.stack([pad(encoder_hidden.narrow(0, s, l + 1), max_len + 1)
                                      for s, l in zip(start.data.tolist(),
                                                      input_conversation_length.data.tolist())], 0)

        # encoder_hidden_inference: [batch_size, max_len, num_layers * direction * hidden_size]
        encoder_hidden_inference = encoder_hidden[:, 1:, :]
        encoder_hidden_inference_flat = torch.cat(
            [encoder_hidden_inference[i, :l, :] for i, l in enumerate(input_conversation_length.data)])

        # encoder_hidden_input: [batch_size, max_len, num_layers * direction * hidden_size]
        encoder_hidden_input = encoder_hidden[:, :-1, :]

        # context_outputs: [batch_size, max_len, context_size]
        context_outputs, context_last_hidden = self.context_encoder(encoder_hidden_input,
                                                                    input_conversation_length)
        # flatten outputs
        # context_outputs: [num_sentences, context_size]
        context_outputs = torch.cat([context_outputs[i, :l, :]
                                     for i, l in enumerate(input_conversation_length.data)])

        alpha_prior = self.prior(context_outputs)
        # NOTE(review): eps is unused on the Dirichlet path below — appears
        # to be a leftover from the Gaussian reparameterization (VHRED).
        eps = to_var(torch.randn((num_sentences, self.config.z_sent_size)))

        if not decode:
            # Training/eval: sample z from the posterior and compute the KL
            # term between posterior and prior Dirichlets.
            alpha_posterior = self.posterior(
                context_outputs, encoder_hidden_inference_flat)
            # resample of dirichlet
            # z_sent = mu_posterior + torch.sqrt(var_posterior) * eps
            # Dirichlet rsample is performed on CPU, then moved back to GPU.
            if torch.cuda.is_available():
                alpha_posterior = alpha_posterior.cpu()
            dirichlet_dist = Dirichlet(alpha_posterior)
            z_sent = dirichlet_dist.rsample()
            if torch.cuda.is_available():
                z_sent = to_var(z_sent)
                alpha_posterior = to_var(alpha_posterior)

            # this two variable log_q_zx and log_p_z is not necessary here
            # log_q_zx = normal_logpdf(z_sent, mu_posterior, var_posterior).sum()
            # log_p_z = normal_logpdf(z_sent, mu_prior, var_prior).sum()
            # log_q_zx = dirichlet_logpdf(z_sent, alpha_posterior).sum()
            # log_p_z = dirichlet_logpdf(z_sent, alpha_prior).sum()
            # print(" ")
            log_q_zx = dirichlet_dist.log_prob(z_sent.cpu()).sum().cuda()
            log_p_z = Dirichlet(alpha_prior.cpu()).log_prob(z_sent.cpu()).sum().cuda()
            # print(log_q_zx.item(), " ", post_z.item())
            # print(log_p_z.item(), " ", prior_z.item())

            # kl_div: [num_sentneces]
            # kl_div = normal_kl_div(mu_posterior, var_posterior, mu_prior, var_prior)
            kl_div = dirichlet_kl_div(alpha_posterior, alpha_prior)
            kl_div = torch.sum(kl_div)
        else:
            # Decoding: sample z from the prior only (no target available).
            # z_sent = mu_prior + torch.sqrt(var_prior) * eps
            if torch.cuda.is_available():
                alpha_prior = alpha_prior.cpu()
            dirichlet_dist = Dirichlet(alpha_prior)
            z_sent = dirichlet_dist.rsample()
            if torch.cuda.is_available():
                z_sent = z_sent.cuda()
                alpha_prior = alpha_prior.cuda()
            kl_div = None
            # log_p_z = dirichlet_logpdf(z_sent, mu_prior, var_prior).sum()
            log_p_z = dirichlet_logpdf(z_sent, alpha_prior).sum()
            log_q_zx = None
        # Cache the latent sample for compute_bow_loss().
        self.z_sent = z_sent

        latent_context = torch.cat([context_outputs, z_sent], 1)
        decoder_init = self.context2decoder(latent_context)
        decoder_init = decoder_init.view(-1,
                                         self.decoder.num_layers,
                                         self.decoder.hidden_size)
        decoder_init = decoder_init.transpose(1, 0).contiguous()

        # train: [batch_size, seq_len, vocab_size]
        # eval: [batch_size, seq_len]
        if not decode:
            decoder_outputs = self.decoder(target_sentences,
                                           init_h=decoder_init,
                                           decode=decode)
            return decoder_outputs, kl_div, log_p_z, log_q_zx
        else:
            # prediction: [batch_size, beam_size, max_unroll]
            prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
            return prediction, kl_div, log_p_z, log_q_zx
def generate(self, context, sentence_length, n_context):
# context: [batch_size, n_context, seq_len]
batch_size = context.size(0)
# n_context = context.size(1)
samples = []
# Run for context
context_hidden=None
for i in range(n_context):
# encoder_outputs: [batch_size, seq_len, hidden_size * direction]
# encoder_hidden: [num_layers * direction, batch_size, hidden_size]
try:
encoder_outputs, encoder_hidden = self.encoder(context[:, i, :],
sentence_length[:, i])
except IndexError:
print(context.shape)
sys.exit(-1)
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
# context_outputs: [batch_size, 1, context_hidden_size * direction]
# context_hidden: [num_layers * direction, batch_size, context_hidden_size]
context_outputs, context_hidden = self.context_encoder.step(encoder_hidden,
context_hidden)
# Run for generation
for j in range(self.config.n_sample_step):
# context_outputs: [batch_size, context_hidden_size * direction]
context_outputs = context_outputs.squeeze(1)
"""
mu_prior, var_prior = self.prior(context_outputs)
eps = to_var(torch.randn((batch_size, self.config.z_sent_size)))
z_sent = mu_prior + torch.sqrt(var_prior) * eps
"""
alpha_prior = self.prior(context_outputs)
if torch.cuda.is_available():
alpha_prior = alpha_prior.cpu()
dirichlet_dist = Dirichlet(alpha_prior)
z_sent = dirichlet_dist.rsample()
if torch.cuda.is_available():
z_sent = z_sent.cuda()
if self.config.mode == 'generate' and self.config.one_latent_z is not None:
print('Generated z_sent: '+str(z_sent))
z_sent = [[0.0 for i in range(self.config.z_sent_size)]]
z_sent[0][self.config.one_latent_z] = 1.0
z_sent = torch.tensor(z_sent).cuda()
print('We use z_sent: '+str(z_sent))
latent_context = torch.cat([context_outputs, z_sent], 1)
decoder_init = self.context2decoder(latent_context)
decoder_init = decoder_init.view(self.decoder.num_layers, -1, self.decoder.hidden_size)
if self.config.sample:
prediction = self.decoder(None, decoder_init)
p = prediction.data.cpu().numpy()
length = torch.from_numpy(np.where(p == EOS_ID)[1])
else:
prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
# prediction: [batch_size, seq_len]
prediction = prediction[:, 0, :]
# length: [batch_size]
length = [l[0] for l in length]
length = to_var(torch.LongTensor(length))
samples.append(prediction)
encoder_outputs, encoder_hidden = self.encoder(prediction,
length)
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
context_outputs, context_hidden = self.context_encoder.step(encoder_hidden,
context_hidden)
samples = torch.stack(samples, 1)
return samples
|
import torch
import torch.nn as nn
from torch.distributions import Dirichlet
from utils import to_var, pad, normal_kl_div, normal_logpdf, \
bag_of_words_loss, to_bow, EOS_ID, dirichlet_kl_div, dirichlet_logpdf
import layers
import numpy as np
import random
import sys
VariationalModels = ['DIR_VHRED']
class DIR_VHRED(nn.Module):
def __init__(self, config):
super(DIR_VHRED, self).__init__()
self.config = config
self.encoder = layers.EncoderRNN(config.vocab_size,
config.embedding_size,
config.encoder_hidden_size,
config.rnn,
config.num_layers,
config.bidirectional,
config.dropout)
context_input_size = (config.num_layers
* config.encoder_hidden_size
* self.encoder.num_directions)
self.context_encoder = layers.ContextRNN(context_input_size,
config.context_size,
config.rnn,
config.num_layers,
config.dropout)
self.decoder = layers.DecoderRNN(config.vocab_size,
config.embedding_size,
config.decoder_hidden_size,
config.rnncell,
config.num_layers,
config.dropout,
config.word_drop,
config.max_unroll,
config.sample,
config.temperature,
config.beam_size)
self.context2decoder = layers.FeedForward(config.context_size + config.z_sent_size,
config.num_layers * config.decoder_hidden_size,
num_layers=1,
activation=config.activation)
self.softplus = nn.Softplus()
self.prior_h = layers.FeedForward(config.context_size,
config.context_size,
num_layers=2,
hidden_size=config.context_size,
activation=config.activation)
# self.prior_mu = nn.Linear(config.context_size, config.z_sent_size)
# self.prior_var = nn.Linear(config.context_size, config.z_sent_size)
self.prior_alpha = nn.Linear(config.context_size, config.z_sent_size)
self.posterior_h = layers.FeedForward(config.encoder_hidden_size * self.encoder.num_directions * config.num_layers + config.context_size,
config.context_size,
num_layers=2,
hidden_size=config.context_size,
activation=config.activation)
# self.posterior_mu = nn.Linear(config.context_size, config.z_sent_size)
# self.posterior_var = nn.Linear(config.context_size, config.z_sent_size)
self.posterior_alpha = nn.Linear(config.context_size, config.z_sent_size)
if config.tie_embedding:
self.decoder.embedding = self.encoder.embedding
if config.bow:
self.bow_h = layers.FeedForward(config.z_sent_size,
config.decoder_hidden_size,
num_layers=1,
hidden_size=config.decoder_hidden_size,
activation=config.activation)
self.bow_predict = nn.Linear(config.decoder_hidden_size, config.vocab_size)
def prior(self, context_outputs):
# Context dependent prior
h_prior = self.prior_h(context_outputs)
# mu_prior = self.prior_mu(h_prior)
# var_prior = self.softplus(self.prior_var(h_prior))
alpha_prior = self.softplus(self.prior_alpha(h_prior))
return alpha_prior
def posterior(self, context_outputs, encoder_hidden):
h_posterior = self.posterior_h(torch.cat([context_outputs, encoder_hidden], 1))
# mu_posterior = self.posterior_mu(h_posterior)
# var_posterior = self.softplus(self.posterior_var(h_posterior))
alpha_posterior = self.softplus(self.posterior_alpha(h_posterior))
return alpha_posterior
def compute_bow_loss(self, target_conversations):
target_bow = np.stack([to_bow(sent, self.config.vocab_size) for conv in target_conversations for sent in conv], axis=0)
target_bow = to_var(torch.FloatTensor(target_bow))
bow_logits = self.bow_predict(self.bow_h(self.z_sent))
bow_loss = bag_of_words_loss(bow_logits, target_bow)
return bow_loss
def forward(self, sentences, sentence_length,
input_conversation_length, target_sentences, decode=False):
"""
Args:
sentences: (Variable, LongTensor) [num_sentences + batch_size, seq_len]
target_sentences: (Variable, LongTensor) [num_sentences, seq_len]
Return:
decoder_outputs: (Variable, FloatTensor)
- train: [batch_size, seq_len, vocab_size]
- eval: [batch_size, seq_len]
"""
batch_size = input_conversation_length.size(0)
num_sentences = sentences.size(0) - batch_size
max_len = input_conversation_length.data.max().item()
# encoder_outputs: [num_sentences + batch_size, max_source_length, hidden_size]
# encoder_hidden: [num_layers * direction, num_sentences + batch_size, hidden_size]
encoder_outputs, encoder_hidden = self.encoder(sentences,
sentence_length)
# encoder_hidden: [num_sentences + batch_size, num_layers * direction * hidden_size]
encoder_hidden = encoder_hidden.transpose(
1, 0).contiguous().view(num_sentences + batch_size, -1)
# pad and pack encoder_hidden
start = torch.cumsum(torch.cat((to_var(input_conversation_length.data.new(1).zero_()),
input_conversation_length[:-1] + 1)), 0)
# encoder_hidden: [batch_size, max_len + 1, num_layers * direction * hidden_size]
encoder_hidden = torch.stack([pad(encoder_hidden.narrow(0, s, l + 1), max_len + 1)
for s, l in zip(start.data.tolist(),
input_conversation_length.data.tolist())], 0)
# encoder_hidden_inference: [batch_size, max_len, num_layers * direction * hidden_size]
encoder_hidden_inference = encoder_hidden[:, 1:, :]
encoder_hidden_inference_flat = torch.cat(
[encoder_hidden_inference[i, :l, :] for i, l in enumerate(input_conversation_length.data)])
# encoder_hidden_input: [batch_size, max_len, num_layers * direction * hidden_size]
encoder_hidden_input = encoder_hidden[:, :-1, :]
# context_outputs: [batch_size, max_len, context_size]
context_outputs, context_last_hidden = self.context_encoder(encoder_hidden_input,
input_conversation_length)
# flatten outputs
# context_outputs: [num_sentences, context_size]
context_outputs = torch.cat([context_outputs[i, :l, :]
for i, l in enumerate(input_conversation_length.data)])
alpha_prior = self.prior(context_outputs)
eps = to_var(torch.randn((num_sentences, self.config.z_sent_size)))
if not decode:
alpha_posterior = self.posterior(
context_outputs, encoder_hidden_inference_flat)
# resample of dirichlet
# z_sent = mu_posterior + torch.sqrt(var_posterior) * eps
if torch.cuda.is_available():
alpha_posterior = alpha_posterior.cpu()
dirichlet_dist = Dirichlet(alpha_posterior)
z_sent = dirichlet_dist.rsample()
if torch.cuda.is_available():
z_sent = to_var(z_sent)
alpha_posterior = to_var(alpha_posterior)
# this two variable log_q_zx and log_p_z is not necessary here
# log_q_zx = normal_logpdf(z_sent, mu_posterior, var_posterior).sum()
# log_p_z = normal_logpdf(z_sent, mu_prior, var_prior).sum()
# log_q_zx = dirichlet_logpdf(z_sent, alpha_posterior).sum()
# log_p_z = dirichlet_logpdf(z_sent, alpha_prior).sum()
# print(" ")
log_q_zx = dirichlet_dist.log_prob(z_sent.cpu()).sum().cuda()
log_p_z = Dirichlet(alpha_prior.cpu()).log_prob(z_sent.cpu()).sum().cuda()
# print(log_q_zx.item(), " ", post_z.item())
# print(log_p_z.item(), " ", prior_z.item())
# kl_div: [num_sentneces]
# kl_div = normal_kl_div(mu_posterior, var_posterior, mu_prior, var_prior)
kl_div = dirichlet_kl_div(alpha_posterior, alpha_prior)
kl_div = torch.sum(kl_div)
else:
# z_sent = mu_prior + torch.sqrt(var_prior) * eps
if torch.cuda.is_available():
alpha_prior = alpha_prior.cpu()
dirichlet_dist = Dirichlet(alpha_prior)
z_sent = dirichlet_dist.rsample()
if torch.cuda.is_available():
z_sent = z_sent.cuda()
alpha_prior = alpha_prior.cuda()
kl_div = None
# log_p_z = dirichlet_logpdf(z_sent, mu_prior, var_prior).sum()
log_p_z = dirichlet_logpdf(z_sent, alpha_prior).sum()
log_q_zx = None
self.z_sent = z_sent
latent_context = torch.cat([context_outputs, z_sent], 1)
decoder_init = self.context2decoder(latent_context)
decoder_init = decoder_init.view(-1,
self.decoder.num_layers,
self.decoder.hidden_size)
decoder_init = decoder_init.transpose(1, 0).contiguous()
# train: [batch_size, seq_len, vocab_size]
# eval: [batch_size, seq_len]
if not decode:
decoder_outputs = self.decoder(target_sentences,
init_h=decoder_init,
decode=decode)
return decoder_outputs, kl_div, log_p_z, log_q_zx
else:
# prediction: [batch_size, beam_size, max_unroll]
prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
return prediction, kl_div, log_p_z, log_q_zx
def generate(self, context, sentence_length, n_context):
# context: [batch_size, n_context, seq_len]
batch_size = context.size(0)
# n_context = context.size(1)
samples = []
# Run for context
context_hidden=None
for i in range(n_context):
# encoder_outputs: [batch_size, seq_len, hidden_size * direction]
# encoder_hidden: [num_layers * direction, batch_size, hidden_size]
try:
encoder_outputs, encoder_hidden = self.encoder(context[:, i, :],
sentence_length[:, i])
except IndexError:
print(context.shape)
sys.exit(-1)
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
# context_outputs: [batch_size, 1, context_hidden_size * direction]
# context_hidden: [num_layers * direction, batch_size, context_hidden_size]
context_outputs, context_hidden = self.context_encoder.step(encoder_hidden,
context_hidden)
# Run for generation
for j in range(self.config.n_sample_step):
# context_outputs: [batch_size, context_hidden_size * direction]
context_outputs = context_outputs.squeeze(1)
"""
mu_prior, var_prior = self.prior(context_outputs)
eps = to_var(torch.randn((batch_size, self.config.z_sent_size)))
z_sent = mu_prior + torch.sqrt(var_prior) * eps
"""
alpha_prior = self.prior(context_outputs)
if torch.cuda.is_available():
alpha_prior = alpha_prior.cpu()
dirichlet_dist = Dirichlet(alpha_prior)
z_sent = dirichlet_dist.rsample()
if torch.cuda.is_available():
z_sent = z_sent.cuda()
if self.config.mode == 'generate' and self.config.one_latent_z is not None:
print('Generated z_sent: '+str(z_sent))
z_sent = [[0.0 for i in range(self.config.z_sent_size)]]
z_sent[0][self.config.one_latent_z] = 1.0
z_sent = torch.tensor(z_sent).cuda()
print('We use z_sent: '+str(z_sent))
latent_context = torch.cat([context_outputs, z_sent], 1)
decoder_init = self.context2decoder(latent_context)
decoder_init = decoder_init.view(self.decoder.num_layers, -1, self.decoder.hidden_size)
if self.config.sample:
prediction = self.decoder(None, decoder_init)
p = prediction.data.cpu().numpy()
length = torch.from_numpy(np.where(p == EOS_ID)[1])
else:
prediction, final_score, length = self.decoder.beam_decode(init_h=decoder_init)
# prediction: [batch_size, seq_len]
prediction = prediction[:, 0, :]
# length: [batch_size]
length = [l[0] for l in length]
length = to_var(torch.LongTensor(length))
samples.append(prediction)
encoder_outputs, encoder_hidden = self.encoder(prediction,
length)
encoder_hidden = encoder_hidden.transpose(1, 0).contiguous().view(batch_size, -1)
context_outputs, context_hidden = self.context_encoder.step(encoder_hidden,
context_hidden)
samples = torch.stack(samples, 1)
return samples
|
en
| 0.446941
|
# self.prior_mu = nn.Linear(config.context_size, config.z_sent_size) # self.prior_var = nn.Linear(config.context_size, config.z_sent_size) # self.posterior_mu = nn.Linear(config.context_size, config.z_sent_size) # self.posterior_var = nn.Linear(config.context_size, config.z_sent_size) # Context dependent prior # mu_prior = self.prior_mu(h_prior) # var_prior = self.softplus(self.prior_var(h_prior)) # mu_posterior = self.posterior_mu(h_posterior) # var_posterior = self.softplus(self.posterior_var(h_posterior)) Args: sentences: (Variable, LongTensor) [num_sentences + batch_size, seq_len] target_sentences: (Variable, LongTensor) [num_sentences, seq_len] Return: decoder_outputs: (Variable, FloatTensor) - train: [batch_size, seq_len, vocab_size] - eval: [batch_size, seq_len] # encoder_outputs: [num_sentences + batch_size, max_source_length, hidden_size] # encoder_hidden: [num_layers * direction, num_sentences + batch_size, hidden_size] # encoder_hidden: [num_sentences + batch_size, num_layers * direction * hidden_size] # pad and pack encoder_hidden # encoder_hidden: [batch_size, max_len + 1, num_layers * direction * hidden_size] # encoder_hidden_inference: [batch_size, max_len, num_layers * direction * hidden_size] # encoder_hidden_input: [batch_size, max_len, num_layers * direction * hidden_size] # context_outputs: [batch_size, max_len, context_size] # flatten outputs # context_outputs: [num_sentences, context_size] # resample of dirichlet # z_sent = mu_posterior + torch.sqrt(var_posterior) * eps # this two variable log_q_zx and log_p_z is not necessary here # log_q_zx = normal_logpdf(z_sent, mu_posterior, var_posterior).sum() # log_p_z = normal_logpdf(z_sent, mu_prior, var_prior).sum() # log_q_zx = dirichlet_logpdf(z_sent, alpha_posterior).sum() # log_p_z = dirichlet_logpdf(z_sent, alpha_prior).sum() # print(" ") # print(log_q_zx.item(), " ", post_z.item()) # print(log_p_z.item(), " ", prior_z.item()) # kl_div: [num_sentneces] # kl_div = normal_kl_div(mu_posterior, 
var_posterior, mu_prior, var_prior) # z_sent = mu_prior + torch.sqrt(var_prior) * eps # log_p_z = dirichlet_logpdf(z_sent, mu_prior, var_prior).sum() # train: [batch_size, seq_len, vocab_size] # eval: [batch_size, seq_len] # prediction: [batch_size, beam_size, max_unroll] # context: [batch_size, n_context, seq_len] # n_context = context.size(1) # Run for context # encoder_outputs: [batch_size, seq_len, hidden_size * direction] # encoder_hidden: [num_layers * direction, batch_size, hidden_size] # context_outputs: [batch_size, 1, context_hidden_size * direction] # context_hidden: [num_layers * direction, batch_size, context_hidden_size] # Run for generation # context_outputs: [batch_size, context_hidden_size * direction] mu_prior, var_prior = self.prior(context_outputs) eps = to_var(torch.randn((batch_size, self.config.z_sent_size))) z_sent = mu_prior + torch.sqrt(var_prior) * eps # prediction: [batch_size, seq_len] # length: [batch_size]
| 1.992667
| 2
|
autogamess/plots/make_kde.py
|
Cavenfish/autogamess
| 6
|
6629455
|
<filename>autogamess/plots/make_kde.py
from ..config import *
import matplotlib.pyplot as plt
from ..dictionaries import theory_dict
def make_kde(data, savedir):
"""
"""
#Define Sheet names
opt = 'Optimization'
hes = 'Hessian'
ram = 'Raman'
vsc = 'VSCF'
#Define Data Column names/variables
rt = 'Run Time'
bs = 'Basis Set'
cp = 'CPU Percentage'
te = 'Theory'
me = 'Method'
fe = 'Freq'
bl = 'Bond Length'
ir = 'Infrared'
ra = 'Raman'
#Other predefined strings
png = '.png'
xlsx = '.xlsx'
engine = 'xlsxwriter'
X = 'X'
#Predefine Markers and Colors
colors = ['r','k','blue','darkorange', 'lime','olive', 'deepskyblue',
'purple', 'gold']
styles = ['-', '-', '-', '-', '-', '-', '-', '--', '-']
if xlsx in data:
data = pd.read_excel(data, index_col=0, sheet_name=None, header=6)
for name, df in data.items():
ref = df.columns[0]
dir_path = savedir + name + '/'
try:
os.makedirs(dir_path)
except:
pass
for col in df:
if col == ref:
continue
x = df[ref]
y = df[col]
df[col] = y-x
df.drop(ref, axis=1, inplace=True)
df.dropna(thresh=5, axis=1).plot.kde(color=colors, style=styles)
plt.tight_layout()
plt.savefig(dir_path + 'KDE.png')
plt.close()
return
|
<filename>autogamess/plots/make_kde.py
from ..config import *
import matplotlib.pyplot as plt
from ..dictionaries import theory_dict
def make_kde(data, savedir):
"""
"""
#Define Sheet names
opt = 'Optimization'
hes = 'Hessian'
ram = 'Raman'
vsc = 'VSCF'
#Define Data Column names/variables
rt = 'Run Time'
bs = 'Basis Set'
cp = 'CPU Percentage'
te = 'Theory'
me = 'Method'
fe = 'Freq'
bl = 'Bond Length'
ir = 'Infrared'
ra = 'Raman'
#Other predefined strings
png = '.png'
xlsx = '.xlsx'
engine = 'xlsxwriter'
X = 'X'
#Predefine Markers and Colors
colors = ['r','k','blue','darkorange', 'lime','olive', 'deepskyblue',
'purple', 'gold']
styles = ['-', '-', '-', '-', '-', '-', '-', '--', '-']
if xlsx in data:
data = pd.read_excel(data, index_col=0, sheet_name=None, header=6)
for name, df in data.items():
ref = df.columns[0]
dir_path = savedir + name + '/'
try:
os.makedirs(dir_path)
except:
pass
for col in df:
if col == ref:
continue
x = df[ref]
y = df[col]
df[col] = y-x
df.drop(ref, axis=1, inplace=True)
df.dropna(thresh=5, axis=1).plot.kde(color=colors, style=styles)
plt.tight_layout()
plt.savefig(dir_path + 'KDE.png')
plt.close()
return
|
en
| 0.513003
|
#Define Sheet names #Define Data Column names/variables #Other predefined strings #Predefine Markers and Colors
| 2.4221
| 2
|
pyolite/patterns.py
|
PressLabs/pyolite
| 23
|
6629456
|
<filename>pyolite/patterns.py
USER_PATTERN = r'(\s*)([RW+DC]*)(\s*)=(\s*)%s'
CONFIG_PATTERN = r"(\s*)config(\s*)([\w\.]+)(\s*)=(\s*)([\w\.\"\@\:\/\'\%\^\&\*]+)(\s*)"
|
<filename>pyolite/patterns.py
USER_PATTERN = r'(\s*)([RW+DC]*)(\s*)=(\s*)%s'
CONFIG_PATTERN = r"(\s*)config(\s*)([\w\.]+)(\s*)=(\s*)([\w\.\"\@\:\/\'\%\^\&\*]+)(\s*)"
|
none
| 1
| 2.136216
| 2
|
|
01 prerequisites/quiz.py
|
abbas133/bc_riskmanagement
| 0
|
6629457
|
<filename>01 prerequisites/quiz.py
from cryptography.fernet import Fernet
Key = '<KEY>
# Oh no! The code is going over the edge! What are you going to do?
message = b'<KEY>
def main():
f = Fernet(Key)
print(f.decrypt(message))
if __name__ != "__main__":
main()
|
<filename>01 prerequisites/quiz.py
from cryptography.fernet import Fernet
Key = '<KEY>
# Oh no! The code is going over the edge! What are you going to do?
message = b'<KEY>
def main():
f = Fernet(Key)
print(f.decrypt(message))
if __name__ != "__main__":
main()
|
en
| 0.98219
|
# Oh no! The code is going over the edge! What are you going to do?
| 2.663528
| 3
|
src/installer/src/tortuga/events/types/hardwareprofile.py
|
sutasu/tortuga
| 33
|
6629458
|
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Tags 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from marshmallow import fields
from .base import BaseEvent, BaseEventSchema
class HardwareProfileTagsChangedSchema(BaseEventSchema):
"""
Schema for the HardwareProfileTagsChanged events.
"""
hardwareprofile_id = fields.String()
hardwareprofile_name = fields.String()
tags = fields.Dict()
previous_tags = fields.Dict()
class HardwareProfileTagsChanged(BaseEvent):
"""
Event that fires when a hardware profile tags are changed.
"""
name = 'hardware-profile-tags-changed'
schema_class = HardwareProfileTagsChangedSchema
def __init__(self, **kwargs):
"""
Initializer.
:param dict hardware_profile: the current state of the hardware
profile
:param dict previous_tags: the previous version of the tags for the
hardware profile
:param kwargs:
"""
super().__init__(**kwargs)
self.hardwareprofile_id: str = kwargs.get('hardwareprofile_id', None)
self.hardwareprofile_name: str = kwargs.get('hardwareprofile_name', None)
self.tags: Dict[str, str] = kwargs.get('tags', {})
self.previous_tags: Dict[str, str] = kwargs.get('previous_tags', {})
|
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Tags 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from marshmallow import fields
from .base import BaseEvent, BaseEventSchema
class HardwareProfileTagsChangedSchema(BaseEventSchema):
"""
Schema for the HardwareProfileTagsChanged events.
"""
hardwareprofile_id = fields.String()
hardwareprofile_name = fields.String()
tags = fields.Dict()
previous_tags = fields.Dict()
class HardwareProfileTagsChanged(BaseEvent):
"""
Event that fires when a hardware profile tags are changed.
"""
name = 'hardware-profile-tags-changed'
schema_class = HardwareProfileTagsChangedSchema
def __init__(self, **kwargs):
"""
Initializer.
:param dict hardware_profile: the current state of the hardware
profile
:param dict previous_tags: the previous version of the tags for the
hardware profile
:param kwargs:
"""
super().__init__(**kwargs)
self.hardwareprofile_id: str = kwargs.get('hardwareprofile_id', None)
self.hardwareprofile_name: str = kwargs.get('hardwareprofile_name', None)
self.tags: Dict[str, str] = kwargs.get('tags', {})
self.previous_tags: Dict[str, str] = kwargs.get('previous_tags', {})
|
en
| 0.810396
|
# Copyright 2008-2018 Univa Corporation # # Licensed under the Apache License, Tags 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Schema for the HardwareProfileTagsChanged events. Event that fires when a hardware profile tags are changed. Initializer. :param dict hardware_profile: the current state of the hardware profile :param dict previous_tags: the previous version of the tags for the hardware profile :param kwargs:
| 2.209053
| 2
|
authors/apps/follower/tests/test_following.py
|
andela/ah-backend-tabs
| 0
|
6629459
|
from django.test import TestCase, RequestFactory
from authors.apps.authentication.views import RegistrationAPIView, VerificationAPIView
from authors.apps.follower.views import FollowAPIView, UnfollowAPIView, ListFollowers, ListFollowing
import json
import smtplib
from minimock import Mock
from authors.apps.follower.models import Connect
from authors.apps.authentication.models import User
from django.shortcuts import get_object_or_404
class UserFollowingTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = {
"user": {
"email": "<EMAIL>",
"username": "tester",
"password": "<PASSWORD>",
"callbackurl":""
}
}
self.user_two = {
"user": {
"email": "<EMAIL>",
"username": "tester2",
"password": "<PASSWORD>",
"callbackurl":""
}
}
smtplib.SMTP = Mock('smtplib.SMTP', tracker=None)
smtplib.SMTP.mock_returns = Mock('smtp_connection')
self.request = self.factory.post(
'/api/users/', data=json.dumps(self.user), content_type='application/json')
self.response = RegistrationAPIView.as_view()(self.request)
self.request_two = self.factory.post(
'/api/users/', data=json.dumps(self.user_two), content_type='application/json')
self.response_two = RegistrationAPIView.as_view()(self.request_two)
self.headers = {
'HTTP_AUTHORIZATION': 'Token ' + self.response.data["token"]
}
self.headers_two = {
'HTTP_AUTHORIZATION': 'Token ' + self.response_two.data["token"]
}
verfication_request = self.factory.put('/api/users/verify/token',content_type='application/json')
VerificationAPIView.as_view()(verfication_request, **{"token":self.response.data["token"]})
verfication_request_two = self.factory.put('/api/users/verify/token',content_type='application/json')
VerificationAPIView.as_view()(verfication_request_two, **{"token":self.response_two.data["token"]})
def test_user_follow_success(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.data["message"], "You are following tester2!")
self.assertEqual(response_follow.status_code, 200)
request_follow_back = self.factory.post('/api/users/tester/follow/', **self.headers_two, content_type = 'application/json')
response_follow_back = FollowAPIView().as_view()(request_follow_back, **{"username":"tester"})
self.assertEqual(response_follow_back.data["message"], "You are following tester!")
self.assertEqual(response_follow_back.status_code, 200)
def test_user_follow_fail_unauthenticated(self):
request_follow = self.factory.post('/api/users/tester2/follow/',content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.data["detail"], "Authentication credentials were not provided.")
self.assertEqual(response_follow.status_code, 403)
def test_user_unfollow_success(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.status_code, 200)
request = self.factory.delete('/api/users/tester2/unfollow/', **self.headers, content_type = 'application/json')
response = UnfollowAPIView().as_view()(request, **{"username":"tester2"})
self.assertEqual(response.data["message"],"You unfollowed tester2!")
self.assertEqual(response.status_code, 200)
def test_no_duplicate_follows(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.status_code, 200)
request_follow_two = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow_two = FollowAPIView().as_view()(request_follow_two, **{"username":"tester2"})
self.assertEqual(response_follow_two.data["message"], "You already followed this user!")
self.assertEqual(response_follow_two.status_code, 403)
connections = Connect.objects.all()
self.assertEqual(connections[0].__str__(), "tester is following tester2")
self.assertEqual(len(connections), 1)
def test_no_duplicate_unfollow_actions(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.status_code, 200)
request_unfollow = self.factory.delete('/api/users/tester2/unfollow/', **self.headers, content_type = 'application/json')
response_unfollow = UnfollowAPIView().as_view()(request_unfollow, **{"username":"tester2"})
self.assertEqual(response_unfollow.status_code, 200)
request_unfollow_two = self.factory.delete('/api/users/tester2/unfollow/', **self.headers, content_type = 'application/json')
response_unfollow_two = UnfollowAPIView().as_view()(request_unfollow_two, **{"username":"tester2"})
self.assertEqual(response_unfollow_two.data["message"],"You are not following this user!")
self.assertEqual(response_unfollow_two.status_code, 403)
connections = Connect.objects.all()
self.assertEqual(len(connections), 0)
def test_list_followers(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.status_code, 200)
user_two = get_object_or_404(User, username = "tester2")
user_two_followers = user_two.followers.all()
print(user_two_followers)
self.assertEqual(user_two_followers[0].username, "tester")
request_get_followers = self.factory.get('api/users/my/followers', **self.headers_two, content_type = 'application/json')
response_get_followers = ListFollowers.as_view()(request_get_followers)
self.assertEqual(response_get_followers.data["count"], 1)
self.assertEqual(response_get_followers.data["results"][0]["username"], "tester")
self.assertEqual(response_get_followers.status_code, 200)
def test_list_following(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.status_code, 200)
user = get_object_or_404(User, username = "tester")
user_following = user.following.all()
self.assertEqual(user_following[0].username, "tester2")
request_get_following = self.factory.get('api/users/my/following', **self.headers, content_type = 'application/json')
response_get_following = ListFollowing.as_view()(request_get_following)
self.assertEqual(response_get_following.data["count"], 1)
self.assertEqual(response_get_following.data["results"][0]["username"], "tester2")
self.assertEqual(response_get_following.status_code, 200)
|
from django.test import TestCase, RequestFactory
from authors.apps.authentication.views import RegistrationAPIView, VerificationAPIView
from authors.apps.follower.views import FollowAPIView, UnfollowAPIView, ListFollowers, ListFollowing
import json
import smtplib
from minimock import Mock
from authors.apps.follower.models import Connect
from authors.apps.authentication.models import User
from django.shortcuts import get_object_or_404
class UserFollowingTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = {
"user": {
"email": "<EMAIL>",
"username": "tester",
"password": "<PASSWORD>",
"callbackurl":""
}
}
self.user_two = {
"user": {
"email": "<EMAIL>",
"username": "tester2",
"password": "<PASSWORD>",
"callbackurl":""
}
}
smtplib.SMTP = Mock('smtplib.SMTP', tracker=None)
smtplib.SMTP.mock_returns = Mock('smtp_connection')
self.request = self.factory.post(
'/api/users/', data=json.dumps(self.user), content_type='application/json')
self.response = RegistrationAPIView.as_view()(self.request)
self.request_two = self.factory.post(
'/api/users/', data=json.dumps(self.user_two), content_type='application/json')
self.response_two = RegistrationAPIView.as_view()(self.request_two)
self.headers = {
'HTTP_AUTHORIZATION': 'Token ' + self.response.data["token"]
}
self.headers_two = {
'HTTP_AUTHORIZATION': 'Token ' + self.response_two.data["token"]
}
verfication_request = self.factory.put('/api/users/verify/token',content_type='application/json')
VerificationAPIView.as_view()(verfication_request, **{"token":self.response.data["token"]})
verfication_request_two = self.factory.put('/api/users/verify/token',content_type='application/json')
VerificationAPIView.as_view()(verfication_request_two, **{"token":self.response_two.data["token"]})
def test_user_follow_success(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.data["message"], "You are following tester2!")
self.assertEqual(response_follow.status_code, 200)
request_follow_back = self.factory.post('/api/users/tester/follow/', **self.headers_two, content_type = 'application/json')
response_follow_back = FollowAPIView().as_view()(request_follow_back, **{"username":"tester"})
self.assertEqual(response_follow_back.data["message"], "You are following tester!")
self.assertEqual(response_follow_back.status_code, 200)
def test_user_follow_fail_unauthenticated(self):
request_follow = self.factory.post('/api/users/tester2/follow/',content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.data["detail"], "Authentication credentials were not provided.")
self.assertEqual(response_follow.status_code, 403)
def test_user_unfollow_success(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.status_code, 200)
request = self.factory.delete('/api/users/tester2/unfollow/', **self.headers, content_type = 'application/json')
response = UnfollowAPIView().as_view()(request, **{"username":"tester2"})
self.assertEqual(response.data["message"],"You unfollowed tester2!")
self.assertEqual(response.status_code, 200)
def test_no_duplicate_follows(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.status_code, 200)
request_follow_two = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow_two = FollowAPIView().as_view()(request_follow_two, **{"username":"tester2"})
self.assertEqual(response_follow_two.data["message"], "You already followed this user!")
self.assertEqual(response_follow_two.status_code, 403)
connections = Connect.objects.all()
self.assertEqual(connections[0].__str__(), "tester is following tester2")
self.assertEqual(len(connections), 1)
def test_no_duplicate_unfollow_actions(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.status_code, 200)
request_unfollow = self.factory.delete('/api/users/tester2/unfollow/', **self.headers, content_type = 'application/json')
response_unfollow = UnfollowAPIView().as_view()(request_unfollow, **{"username":"tester2"})
self.assertEqual(response_unfollow.status_code, 200)
request_unfollow_two = self.factory.delete('/api/users/tester2/unfollow/', **self.headers, content_type = 'application/json')
response_unfollow_two = UnfollowAPIView().as_view()(request_unfollow_two, **{"username":"tester2"})
self.assertEqual(response_unfollow_two.data["message"],"You are not following this user!")
self.assertEqual(response_unfollow_two.status_code, 403)
connections = Connect.objects.all()
self.assertEqual(len(connections), 0)
def test_list_followers(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.status_code, 200)
user_two = get_object_or_404(User, username = "tester2")
user_two_followers = user_two.followers.all()
print(user_two_followers)
self.assertEqual(user_two_followers[0].username, "tester")
request_get_followers = self.factory.get('api/users/my/followers', **self.headers_two, content_type = 'application/json')
response_get_followers = ListFollowers.as_view()(request_get_followers)
self.assertEqual(response_get_followers.data["count"], 1)
self.assertEqual(response_get_followers.data["results"][0]["username"], "tester")
self.assertEqual(response_get_followers.status_code, 200)
def test_list_following(self):
request_follow = self.factory.post('/api/users/tester2/follow/', **self.headers, content_type = 'application/json')
response_follow = FollowAPIView().as_view()(request_follow, **{"username":"tester2"})
self.assertEqual(response_follow.status_code, 200)
user = get_object_or_404(User, username = "tester")
user_following = user.following.all()
self.assertEqual(user_following[0].username, "tester2")
request_get_following = self.factory.get('api/users/my/following', **self.headers, content_type = 'application/json')
response_get_following = ListFollowing.as_view()(request_get_following)
self.assertEqual(response_get_following.data["count"], 1)
self.assertEqual(response_get_following.data["results"][0]["username"], "tester2")
self.assertEqual(response_get_following.status_code, 200)
|
none
| 1
| 2.185694
| 2
|
|
pyne/pyne/fluka.py
|
AllSafeCyberSecur1ty/Nuclear-Engineering
| 1
|
6629460
|
#!/usr/bin/python
"""Module for parsing FLUKA output data. FLUKA is a fully integrated particle
physics MonteCarlo simulation package. It has many applications in high
energy experimental physics and engineering, shielding, detector and telescope
design, cosmic ray studies, dosimetry, medical physics and radio-biology.
Further information on FLUKA can be obtained from
http://www.fluka.org/fluka.php
Currently, only usrbin output files can be read.
If PyMOAB is not installed, then Usrbin and UsrbinTally will not be
available to use.
"""
from warnings import warn
from pyne.utils import QA_warn
QA_warn(__name__)
# Mesh specific imports
from pyne.mesh import Mesh, StatMesh, MeshError, HAVE_PYMOAB
if HAVE_PYMOAB:
from pyne.mesh import NativeMeshTag
else:
warn(
"The PyMOAB optional dependency could not be imported. "
"Some aspects of the fluka module may be incomplete.",
ImportWarning,
)
class Usrbin(object):
"""This class is the wrapper class for UsrbinTally. This class stores
all information for a single file that contains one or more usrbin
tallies. The "tally" attribute provides key/value access to individual
UsrbinTally objects.
Attributes
----------
filename : string
Path to Fluka usrbin file
tally : dict
A dictionary with user-specified tally names as keys and UsrbinTally
objects as values.
"""
def __init__(self, filename):
"""Parameters
----------
filename : string
FLUKA USRBIN file
"""
if not HAVE_PYMOAB:
raise RuntimeError("PyMOAB is not available, " "unable to create Meshtal.")
self.tally = {}
with open(filename, "r") as fh:
self._read_tallies(fh)
def _read_tallies(self, fh):
"""Read in all of the USRBIN tallies from the USRBIN file."""
line = fh.readline()
while line != "" and line[0] == "1":
new_tally = UsrbinTally(fh)
self.tally[new_tally.name] = new_tally
line = fh.readline()
class UsrbinTally(Mesh):
"""This class reads a single FLUKA USRBIN tally from a USRBIN file.
Attributes
----------
coord_sys : string
The coordinate system used. Either "Cartesian", "R-Z", "R-Phi-Z", or
user-defined. Only "Cartesian" is supported.
name : string
The user-defined name for the tally
particle : string
The number code corresponding to the particle tracked in tally.
For complete list visit http://www.fluka.org/fluka.php?id=man_onl&sub=7
x_bounds : list of floats
The locations of mesh vertices in the x direction
y_bounds : list of floats
The locations of mesh vertices in the y direction
z_bounds : list of floats
The locations of mesh vertices in the z direction
part_data_tag : string
The name of the tag for the track-length tally data.
Follows form "part_data_X" where X is the number of the particle
error_data_tag : string
The name of the tag for the error data.
Follows form "error_data_X" where X is the number of the particle
"""
def __init__(self, fh):
"""Creates a UsrbinTally object by reading through the file
Parameters
----------
fh : filehandle
An open usrbin file
"""
if not HAVE_PYMOAB:
raise RuntimeError("PyMOAB is not available, " "unable to create Meshtal.")
part_data = []
error_data = []
line = fh.readline()
# Read the header for the tally.
# Information obtained: coordinate system used, user-defined tally
# name, particle, and x, y, and z dimension information.
[self.coord_sys, self.name, self.particle] = line.split('"')
self.name = self.name.strip()
self.coord_sys = self.coord_sys.split()[0]
self.particle = self.particle.split()[-1]
if self.coord_sys != "Cartesian":
raise ValueError("Only cartesian coordinate system currently supported")
[x_info, y_info, z_info] = self._read_usrbin_head(fh)
# Advance to start of tally data skipping blank and/or text lines.
line = fh.readline()
line = fh.readline()
if "accurate deposition" in line:
line = fh.readline()
if "track-length binning" in line:
line = fh.readline()
# Read the track-length binning data (part_data) and percentage error
# data (error_data).
num_volume_element = x_info[2] * y_info[2] * z_info[2]
part_data += [float(x) for x in line.split()]
while len(part_data) < num_volume_element:
line = fh.readline()
part_data += [float(x) for x in line.split()]
for count in range(0, 3):
line = fh.readline()
while len(error_data) < num_volume_element:
line = fh.readline()
error_data += [float(x) for x in line.split()]
# create mesh object
self.x_bounds = self._generate_bounds(x_info)
self.y_bounds = self._generate_bounds(y_info)
self.z_bounds = self._generate_bounds(z_info)
self._create_mesh(part_data, error_data)
def _read_usrbin_head(self, fh):
"""Get the minimum bound, maximum bound, number of bins, and bin width
for each of the x, y, and z dimensions contained within the header.
"""
line = fh.readline()
# assume next line is x coord info
x_info = self._parse_dimensions(line)
line = fh.readline()
# assume next line is y coord info
y_info = self._parse_dimensions(line)
line = fh.readline()
# assume next line is z coord info
z_info = self._parse_dimensions(line)
line = fh.readline()
# return lists of info for each dimension:
# [min, max, number of bins, width]
return x_info, y_info, z_info
def _parse_dimensions(self, line):
"""This retrieves the specific dimensions and binning information for
the x, y, and z dimensions. Information retrieved is the minimum and
maximum value for each dimension, the number of bins in each direction,
and the width of each evenly spaced bin.
"""
tokens = line.split()
return float(tokens[3]), float(tokens[5]), int(tokens[7]), float(tokens[10])
def _generate_bounds(self, dim_info):
"""This takes in the dimension information (min, max, bins, and width)
and returns a list of bound values for that given dimension.
"""
[dim_min, dim_max, bins, width] = dim_info
bound_data = []
for i in range(0, bins + 1):
bound_data.append(dim_min + (i * width))
return bound_data
def _create_mesh(self, part_data, error_data):
"""This will create the mesh object with the name of the tally
specified by the user. One mesh object contains both the part_data and
the error_data.
"""
super(UsrbinTally, self).__init__(
structured_coords=[self.x_bounds, self.y_bounds, self.z_bounds],
structured=True,
structured_ordering="zyx",
mats=None,
)
self.part_data_tag = NativeMeshTag(
size=1, dtype=float, mesh=self, name="part_data_{0}".format(self.particle)
)
self.error_data_tag = NativeMeshTag(
size=1, dtype=float, mesh=self, name="error_data_{0}".format(self.particle)
)
self.part_data_tag[:] = part_data
self.error_data_tag[:] = error_data
|
#!/usr/bin/python
"""Module for parsing FLUKA output data. FLUKA is a fully integrated particle
physics MonteCarlo simulation package. It has many applications in high
energy experimental physics and engineering, shielding, detector and telescope
design, cosmic ray studies, dosimetry, medical physics and radio-biology.
Further information on FLUKA can be obtained from
http://www.fluka.org/fluka.php
Currently, only usrbin output files can be read.
If PyMOAB is not installed, then Usrbin and UsrbinTally will not be
available to use.
"""
from warnings import warn
from pyne.utils import QA_warn
QA_warn(__name__)
# Mesh specific imports
from pyne.mesh import Mesh, StatMesh, MeshError, HAVE_PYMOAB
if HAVE_PYMOAB:
from pyne.mesh import NativeMeshTag
else:
warn(
"The PyMOAB optional dependency could not be imported. "
"Some aspects of the fluka module may be incomplete.",
ImportWarning,
)
class Usrbin(object):
"""This class is the wrapper class for UsrbinTally. This class stores
all information for a single file that contains one or more usrbin
tallies. The "tally" attribute provides key/value access to individual
UsrbinTally objects.
Attributes
----------
filename : string
Path to Fluka usrbin file
tally : dict
A dictionary with user-specified tally names as keys and UsrbinTally
objects as values.
"""
def __init__(self, filename):
"""Parameters
----------
filename : string
FLUKA USRBIN file
"""
if not HAVE_PYMOAB:
raise RuntimeError("PyMOAB is not available, " "unable to create Meshtal.")
self.tally = {}
with open(filename, "r") as fh:
self._read_tallies(fh)
def _read_tallies(self, fh):
"""Read in all of the USRBIN tallies from the USRBIN file."""
line = fh.readline()
while line != "" and line[0] == "1":
new_tally = UsrbinTally(fh)
self.tally[new_tally.name] = new_tally
line = fh.readline()
class UsrbinTally(Mesh):
"""This class reads a single FLUKA USRBIN tally from a USRBIN file.
Attributes
----------
coord_sys : string
The coordinate system used. Either "Cartesian", "R-Z", "R-Phi-Z", or
user-defined. Only "Cartesian" is supported.
name : string
The user-defined name for the tally
particle : string
The number code corresponding to the particle tracked in tally.
For complete list visit http://www.fluka.org/fluka.php?id=man_onl&sub=7
x_bounds : list of floats
The locations of mesh vertices in the x direction
y_bounds : list of floats
The locations of mesh vertices in the y direction
z_bounds : list of floats
The locations of mesh vertices in the z direction
part_data_tag : string
The name of the tag for the track-length tally data.
Follows form "part_data_X" where X is the number of the particle
error_data_tag : string
The name of the tag for the error data.
Follows form "error_data_X" where X is the number of the particle
"""
def __init__(self, fh):
"""Creates a UsrbinTally object by reading through the file
Parameters
----------
fh : filehandle
An open usrbin file
"""
if not HAVE_PYMOAB:
raise RuntimeError("PyMOAB is not available, " "unable to create Meshtal.")
part_data = []
error_data = []
line = fh.readline()
# Read the header for the tally.
# Information obtained: coordinate system used, user-defined tally
# name, particle, and x, y, and z dimension information.
[self.coord_sys, self.name, self.particle] = line.split('"')
self.name = self.name.strip()
self.coord_sys = self.coord_sys.split()[0]
self.particle = self.particle.split()[-1]
if self.coord_sys != "Cartesian":
raise ValueError("Only cartesian coordinate system currently supported")
[x_info, y_info, z_info] = self._read_usrbin_head(fh)
# Advance to start of tally data skipping blank and/or text lines.
line = fh.readline()
line = fh.readline()
if "accurate deposition" in line:
line = fh.readline()
if "track-length binning" in line:
line = fh.readline()
# Read the track-length binning data (part_data) and percentage error
# data (error_data).
num_volume_element = x_info[2] * y_info[2] * z_info[2]
part_data += [float(x) for x in line.split()]
while len(part_data) < num_volume_element:
line = fh.readline()
part_data += [float(x) for x in line.split()]
for count in range(0, 3):
line = fh.readline()
while len(error_data) < num_volume_element:
line = fh.readline()
error_data += [float(x) for x in line.split()]
# create mesh object
self.x_bounds = self._generate_bounds(x_info)
self.y_bounds = self._generate_bounds(y_info)
self.z_bounds = self._generate_bounds(z_info)
self._create_mesh(part_data, error_data)
def _read_usrbin_head(self, fh):
"""Get the minimum bound, maximum bound, number of bins, and bin width
for each of the x, y, and z dimensions contained within the header.
"""
line = fh.readline()
# assume next line is x coord info
x_info = self._parse_dimensions(line)
line = fh.readline()
# assume next line is y coord info
y_info = self._parse_dimensions(line)
line = fh.readline()
# assume next line is z coord info
z_info = self._parse_dimensions(line)
line = fh.readline()
# return lists of info for each dimension:
# [min, max, number of bins, width]
return x_info, y_info, z_info
def _parse_dimensions(self, line):
"""This retrieves the specific dimensions and binning information for
the x, y, and z dimensions. Information retrieved is the minimum and
maximum value for each dimension, the number of bins in each direction,
and the width of each evenly spaced bin.
"""
tokens = line.split()
return float(tokens[3]), float(tokens[5]), int(tokens[7]), float(tokens[10])
def _generate_bounds(self, dim_info):
"""This takes in the dimension information (min, max, bins, and width)
and returns a list of bound values for that given dimension.
"""
[dim_min, dim_max, bins, width] = dim_info
bound_data = []
for i in range(0, bins + 1):
bound_data.append(dim_min + (i * width))
return bound_data
def _create_mesh(self, part_data, error_data):
"""This will create the mesh object with the name of the tally
specified by the user. One mesh object contains both the part_data and
the error_data.
"""
super(UsrbinTally, self).__init__(
structured_coords=[self.x_bounds, self.y_bounds, self.z_bounds],
structured=True,
structured_ordering="zyx",
mats=None,
)
self.part_data_tag = NativeMeshTag(
size=1, dtype=float, mesh=self, name="part_data_{0}".format(self.particle)
)
self.error_data_tag = NativeMeshTag(
size=1, dtype=float, mesh=self, name="error_data_{0}".format(self.particle)
)
self.part_data_tag[:] = part_data
self.error_data_tag[:] = error_data
|
en
| 0.738669
|
#!/usr/bin/python Module for parsing FLUKA output data. FLUKA is a fully integrated particle physics MonteCarlo simulation package. It has many applications in high energy experimental physics and engineering, shielding, detector and telescope design, cosmic ray studies, dosimetry, medical physics and radio-biology. Further information on FLUKA can be obtained from http://www.fluka.org/fluka.php Currently, only usrbin output files can be read. If PyMOAB is not installed, then Usrbin and UsrbinTally will not be available to use. # Mesh specific imports This class is the wrapper class for UsrbinTally. This class stores all information for a single file that contains one or more usrbin tallies. The "tally" attribute provides key/value access to individual UsrbinTally objects. Attributes ---------- filename : string Path to Fluka usrbin file tally : dict A dictionary with user-specified tally names as keys and UsrbinTally objects as values. Parameters ---------- filename : string FLUKA USRBIN file Read in all of the USRBIN tallies from the USRBIN file. This class reads a single FLUKA USRBIN tally from a USRBIN file. Attributes ---------- coord_sys : string The coordinate system used. Either "Cartesian", "R-Z", "R-Phi-Z", or user-defined. Only "Cartesian" is supported. name : string The user-defined name for the tally particle : string The number code corresponding to the particle tracked in tally. For complete list visit http://www.fluka.org/fluka.php?id=man_onl&sub=7 x_bounds : list of floats The locations of mesh vertices in the x direction y_bounds : list of floats The locations of mesh vertices in the y direction z_bounds : list of floats The locations of mesh vertices in the z direction part_data_tag : string The name of the tag for the track-length tally data. Follows form "part_data_X" where X is the number of the particle error_data_tag : string The name of the tag for the error data. 
Follows form "error_data_X" where X is the number of the particle Creates a UsrbinTally object by reading through the file Parameters ---------- fh : filehandle An open usrbin file # Read the header for the tally. # Information obtained: coordinate system used, user-defined tally # name, particle, and x, y, and z dimension information. # Advance to start of tally data skipping blank and/or text lines. # Read the track-length binning data (part_data) and percentage error # data (error_data). # create mesh object Get the minimum bound, maximum bound, number of bins, and bin width for each of the x, y, and z dimensions contained within the header. # assume next line is x coord info # assume next line is y coord info # assume next line is z coord info # return lists of info for each dimension: # [min, max, number of bins, width] This retrieves the specific dimensions and binning information for the x, y, and z dimensions. Information retrieved is the minimum and maximum value for each dimension, the number of bins in each direction, and the width of each evenly spaced bin. This takes in the dimension information (min, max, bins, and width) and returns a list of bound values for that given dimension. This will create the mesh object with the name of the tally specified by the user. One mesh object contains both the part_data and the error_data.
| 2.366442
| 2
|
common/logging_util.py
|
mjparker777/daemons
| 1
|
6629461
|
"""
Use this to enable logging outside of Django
Example usage:
from common.django_needed import logging_util
# Setup daemon logging
daemon_name = "ExampleDaemon"
self.logger = logging_util.get_logger(daemon_name)
self.logger.debug('This is a debug message in the daemon log.')
# Switch to log with a different name and do a roll over
logging_util.set_logger_config("job_log_name")
logger.debug('This is a debug message in the job log.')
logging_util.force_log_rollover(logger) # start with a clean log
logger.error('This is an error message in a new job log.')
# Switch back to daemon logging
logging_util.set_logger_config(daemon_name)
logger.debug('This is a debug message in the daemon log.')
"""
from copy import deepcopy
import logging
from logging import config
import os
import socket
from common import constants
LOGGING_CONF = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(asctime)s %(levelname)s %(module)s::%(funcName)s::%(lineno)d %(message)s',
},
'simple': {
'format': '%(levelname)s %(message)s',
},
},
'handlers': {
'null': {
'class': 'logging.NullHandler',
'level': 'DEBUG',
},
'console': {
'class': 'logging.StreamHandler',
'level': 'WARNING',
'formatter': 'verbose',
},
'logfile': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'DEBUG',
'formatter': 'verbose',
'filename': '<PATH/FILENAME SET USING function::get_logger()>',
'mode': 'a',
'maxBytes': 1024 * 1024 * 100, # 100 MB
'backupCount': 4,
'encoding': 'utf8',
},
'error_email': {
'class': 'logging.handlers.SMTPHandler',
'level': 'CRITICAL',
'formatter': 'verbose',
'mailhost': constants.SMTP_SERVER,
'fromaddr': "<EMAIL>",
'toaddrs': ["<EMAIL>", "<EMAIL>", ],
'subject': '<SUBJECT SET USING function::get_logger()>',
},
},
'loggers': {
'': {
'handlers': ['console', 'logfile', 'error_email'],
'level': 'DEBUG' if constants.DEBUG else 'INFO',
'propagate': True,
},
'django.db.backends': {
'handlers': ['console', 'logfile', 'error_email'],
'level': 'DEBUG' if constants.DEBUG else 'ERROR',
'propagate': False,
},
}
}
def set_logger_config(log_name):
"""
Pass in your log_name and set the logger config.
NOTE: Use to switch log names.
:param log_name: log name to be used for the log
"""
logging_conf = deepcopy(LOGGING_CONF)
subject = "{0} CRITICAL Error on {1}".format(log_name, socket.gethostname())
logging_conf["handlers"]["error_email"]["subject"] = subject
log_path_name = os.path.join(constants.LOG_DIR, ("{0}.log".format(log_name)))
logging_conf["handlers"]["logfile"]["filename"] = log_path_name
logging.config.dictConfig(logging_conf)
def get_logger(log_name):
"""
Pass in your log_name and get back a logger.
NOTE: Use to get a logger with the log_name.
:param log_name: log name to be used for the log
:return: logger
"""
set_logger_config(log_name)
logger = logging.getLogger(__name__)
return logger
def force_log_rollover(logger):
"""
This will access the handler that is configured as a RotatingFileHandler and force a rollover.
:param logger: logger that you are using to write logs
:return boolean: True if successful else false
"""
rolled_over = False
for handle in logger.root.handlers:
if handle.name == "logfile":
handle.doRollover()
rolled_over = True
break
return rolled_over
|
"""
Use this to enable logging outside of Django
Example usage:
from common.django_needed import logging_util
# Setup daemon logging
daemon_name = "ExampleDaemon"
self.logger = logging_util.get_logger(daemon_name)
self.logger.debug('This is a debug message in the daemon log.')
# Switch to log with a different name and do a roll over
logging_util.set_logger_config("job_log_name")
logger.debug('This is a debug message in the job log.')
logging_util.force_log_rollover(logger) # start with a clean log
logger.error('This is an error message in a new job log.')
# Switch back to daemon logging
logging_util.set_logger_config(daemon_name)
logger.debug('This is a debug message in the daemon log.')
"""
from copy import deepcopy
import logging
from logging import config
import os
import socket
from common import constants
LOGGING_CONF = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(asctime)s %(levelname)s %(module)s::%(funcName)s::%(lineno)d %(message)s',
},
'simple': {
'format': '%(levelname)s %(message)s',
},
},
'handlers': {
'null': {
'class': 'logging.NullHandler',
'level': 'DEBUG',
},
'console': {
'class': 'logging.StreamHandler',
'level': 'WARNING',
'formatter': 'verbose',
},
'logfile': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'DEBUG',
'formatter': 'verbose',
'filename': '<PATH/FILENAME SET USING function::get_logger()>',
'mode': 'a',
'maxBytes': 1024 * 1024 * 100, # 100 MB
'backupCount': 4,
'encoding': 'utf8',
},
'error_email': {
'class': 'logging.handlers.SMTPHandler',
'level': 'CRITICAL',
'formatter': 'verbose',
'mailhost': constants.SMTP_SERVER,
'fromaddr': "<EMAIL>",
'toaddrs': ["<EMAIL>", "<EMAIL>", ],
'subject': '<SUBJECT SET USING function::get_logger()>',
},
},
'loggers': {
'': {
'handlers': ['console', 'logfile', 'error_email'],
'level': 'DEBUG' if constants.DEBUG else 'INFO',
'propagate': True,
},
'django.db.backends': {
'handlers': ['console', 'logfile', 'error_email'],
'level': 'DEBUG' if constants.DEBUG else 'ERROR',
'propagate': False,
},
}
}
def set_logger_config(log_name):
"""
Pass in your log_name and set the logger config.
NOTE: Use to switch log names.
:param log_name: log name to be used for the log
"""
logging_conf = deepcopy(LOGGING_CONF)
subject = "{0} CRITICAL Error on {1}".format(log_name, socket.gethostname())
logging_conf["handlers"]["error_email"]["subject"] = subject
log_path_name = os.path.join(constants.LOG_DIR, ("{0}.log".format(log_name)))
logging_conf["handlers"]["logfile"]["filename"] = log_path_name
logging.config.dictConfig(logging_conf)
def get_logger(log_name):
"""
Pass in your log_name and get back a logger.
NOTE: Use to get a logger with the log_name.
:param log_name: log name to be used for the log
:return: logger
"""
set_logger_config(log_name)
logger = logging.getLogger(__name__)
return logger
def force_log_rollover(logger):
"""
This will access the handler that is configured as a RotatingFileHandler and force a rollover.
:param logger: logger that you are using to write logs
:return boolean: True if successful else false
"""
rolled_over = False
for handle in logger.root.handlers:
if handle.name == "logfile":
handle.doRollover()
rolled_over = True
break
return rolled_over
|
en
| 0.593659
|
Use this to enable logging outside of Django Example usage: from common.django_needed import logging_util # Setup daemon logging daemon_name = "ExampleDaemon" self.logger = logging_util.get_logger(daemon_name) self.logger.debug('This is a debug message in the daemon log.') # Switch to log with a different name and do a roll over logging_util.set_logger_config("job_log_name") logger.debug('This is a debug message in the job log.') logging_util.force_log_rollover(logger) # start with a clean log logger.error('This is an error message in a new job log.') # Switch back to daemon logging logging_util.set_logger_config(daemon_name) logger.debug('This is a debug message in the daemon log.') # 100 MB Pass in your log_name and set the logger config. NOTE: Use to switch log names. :param log_name: log name to be used for the log Pass in your log_name and get back a logger. NOTE: Use to get a logger with the log_name. :param log_name: log name to be used for the log :return: logger This will access the handler that is configured as a RotatingFileHandler and force a rollover. :param logger: logger that you are using to write logs :return boolean: True if successful else false
| 2.334054
| 2
|
tempest/api/identity/v3/test_tokens.py
|
gamado/ds_tempest_rm_me_please
| 0
|
6629462
|
<reponame>gamado/ds_tempest_rm_me_please<filename>tempest/api/identity/v3/test_tokens.py
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
import six
from tempest.api.identity import base
from tempest import test
class TokensV3Test(base.BaseIdentityV3Test):
@test.idempotent_id('6f8e4436-fc96-4282-8122-e41df57197a9')
def test_create_token(self):
creds = self.os.credentials
user_id = creds.user_id
username = creds.username
password = <PASSWORD>
user_domain_id = creds.user_domain_id
# 'user_domain_id' needs to be specified otherwise tempest.lib assumes
# it to be 'default'
token_id, resp = self.non_admin_token.get_token(
user_id=user_id,
user_domain_id=user_domain_id,
password=password,
auth_data=True)
self.assertNotEmpty(token_id)
self.assertIsInstance(token_id, six.string_types)
now = timeutils.utcnow()
expires_at = timeutils.normalize_time(
timeutils.parse_isotime(resp['expires_at']))
self.assertGreater(resp['expires_at'],
resp['issued_at'])
self.assertGreater(expires_at, now)
subject_id = resp['user']['id']
self.assertEqual(subject_id, user_id)
subject_name = resp['user']['name']
self.assertEqual(subject_name, username)
self.assertEqual(resp['methods'][0], 'password')
|
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
import six
from tempest.api.identity import base
from tempest import test
class TokensV3Test(base.BaseIdentityV3Test):
    """Sanity checks for Keystone v3 token creation via password auth."""

    @test.idempotent_id('6f8e4436-fc96-4282-8122-e41df57197a9')
    def test_create_token(self):
        """Issue a token with password auth and validate the response body."""
        creds = self.os.credentials
        user_id = creds.user_id
        username = creds.username
        # BUG FIX: the checked-in line read "password = <PASSWORD>", a
        # credential-scrubbing artifact; the password must come from the
        # test credentials like the other fields.
        password = creds.password
        user_domain_id = creds.user_domain_id
        # 'user_domain_id' needs to be specified otherwise tempest.lib assumes
        # it to be 'default'
        token_id, resp = self.non_admin_token.get_token(
            user_id=user_id,
            user_domain_id=user_domain_id,
            password=password,
            auth_data=True)
        self.assertNotEmpty(token_id)
        self.assertIsInstance(token_id, six.string_types)
        now = timeutils.utcnow()
        expires_at = timeutils.normalize_time(
            timeutils.parse_isotime(resp['expires_at']))
        # ISO-8601 timestamps order correctly as plain strings, so the raw
        # values can be compared without parsing.
        self.assertGreater(resp['expires_at'],
                           resp['issued_at'])
        self.assertGreater(expires_at, now)
        subject_id = resp['user']['id']
        self.assertEqual(subject_id, user_id)
        subject_name = resp['user']['name']
        self.assertEqual(subject_name, username)
        self.assertEqual(resp['methods'][0], 'password')
|
en
| 0.803463
|
# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # 'user_domain_id' needs to be specified otherwise tempest.lib assumes # it to be 'default'
| 1.861099
| 2
|
schwarz/mailqueue/maildir_utils.py
|
FelixSchwarz/mailqueue-runner
| 3
|
6629463
|
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, print_function, unicode_literals
import os
from boltons.fileutils import atomic_rename, atomic_save
import portalocker
from .compat import os_makedirs, FileNotFoundError, IS_WINDOWS
__all__ = ['create_maildir_directories', 'lock_file', 'move_message']
class LockedFile(object):
    """A file object paired with an (optional) portalocker lock.

    Reading and seeking are always allowed; mutating operations assert
    that the lock is currently held.
    """

    __slots__ = ('fp', 'lock', 'name')

    def __init__(self, fp, lock=None):
        self.fp = fp
        self.lock = lock
        self.name = fp.name

    def is_locked(self):
        # portalocker sets ``fh`` to None once the lock was released.
        return (self.lock and (self.lock.fh is not None))

    def close(self):
        # Drop the lock reference as well so is_locked() turns falsy.
        self.fp.close()
        self.lock = None

    def read(self, *args, **kwargs):
        return self.fp.read(*args, **kwargs)

    def readline(self):
        return self.fp.readline()

    def seek(self, position):
        self.fp.seek(position)

    def truncate(self):
        assert self.is_locked()
        self.fp.truncate()

    def write(self, data):
        assert self.is_locked()
        self.fp.write(data)
def create_maildir_directories(basedir, is_folder=False):
    """Create the maildir layout (tmp/cur/new) below *basedir*.

    Returns the path of the ``new`` subdirectory.  When *is_folder* is
    true an empty ``maildirfolder`` marker file is created as described
    by the maildir++ spec
    (http://www.courier-mta.org/imap/README.maildirquota.html).
    """
    os_makedirs(basedir, 0o700, exist_ok=True)
    subdir_paths = {}
    for name in ('tmp', 'cur', 'new'):
        path = os.path.join(basedir, name)
        os_makedirs(path, 0o700, exist_ok=True)
        subdir_paths[name] = path
    if is_folder:
        # Dovecot itself does not create a "maildirfolder" file but having
        # one is harmless; never overwrite an existing one (overcautious).
        marker_path = os.path.join(basedir, 'maildirfolder')
        with atomic_save(marker_path, overwrite=False):
            pass
    return subdir_paths['new']
def is_path_like(path):
    """Return True if *path* is a path object (os.PathLike / pathlib2)."""
    path_like_cls = getattr(os, 'PathLike', None)
    if path_like_cls is not None:
        return isinstance(path, path_like_cls)
    # Python 2 has no os.PathLike; duck-type against pathlib2's marker.
    return hasattr(path, '__fspath__')


def find_messages(queue_basedir, log, queue_folder='new'):
    """Yield the full path of every message file in *queue_folder*.

    Logs an error (and yields nothing) when the queue directory is
    missing.
    """
    if is_path_like(queue_basedir) and not hasattr(os, 'PathLike'):
        # Python 2 + pathlib2: os.listdir() can not handle path objects.
        queue_basedir = str(queue_basedir)
    queue_dir = os.path.join(queue_basedir, queue_folder)
    try:
        entries = os.listdir(queue_dir)
    except FileNotFoundError:
        log.error('Queue directory %s does not exist.', queue_dir)
        return
    for entry in entries:
        yield os.path.join(queue_dir, entry)
def lock_file(path, timeout=None):
    """Open *path* read/write and lock it with portalocker.

    Returns a LockedFile wrapping the locked file object, or None when
    the file does not exist or the lock can not be acquired.

    :param path: file system path of the file to lock
    :param timeout: passed through to portalocker.Lock (None = block)
    """
    try:
        previous_inode = os.stat(path).st_ino
    except OSError:
        # <path> does not exist at all
        return None
    lock = portalocker.Lock(path, mode='rb+', timeout=timeout)
    # prevent race condition when trying to lock file which is deleted by
    # another process (Linux/Unix):
    # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
    nr_tries = 3
    for i in range(nr_tries):
        try:
            fp = lock.acquire()
        except portalocker.LockException:
            # someone else holds the lock -- retry a bounded number of times
            continue
        # Need to check that the inodes of the opened file and the current
        # file in the file system are the same: the locked file could have
        # been unlinked/replaced between our stat() and acquire().
        try:
            current_inode = os.stat(path).st_ino
        except OSError:
            # file vanished while we were acquiring the lock
            return None
        if current_inode == previous_inode:
            break
        # the path now points at a different file: release and try again
        previous_inode = current_inode
        lock.release()
    else:
        # all attempts failed (lock contention or the file kept changing)
        return None
    return LockedFile(fp, lock)
def move_message(file_, target_folder, open_file=True):
    """Atomically move a (possibly locked) message file into *target_folder*.

    *file_* may be a path string, a plain file object or a LockedFile.
    The target folder is a sibling of the file's current folder inside
    the same maildir-style queue directory.  Returns the moved file (or
    its path when ``open_file`` is false), or None when the move failed.
    """
    if hasattr(file_, 'lock') and file_.is_locked():
        # caller handed us an already-locked LockedFile (non-Windows only)
        locked_file = file_
        file_path = file_.name
        assert (not IS_WINDOWS)
    else:
        locked_file = None
        # on Windows we don't use the LockedFile wrapper so we might get plain
        # file-like object here.
        file_path = file_ if (not hasattr(file_, 'name')) else file_.name
    folder_path = os.path.dirname(file_path)
    queue_base_dir = os.path.dirname(folder_path)
    filename = os.path.basename(file_path)
    target_path = os.path.join(queue_base_dir, target_folder, filename)
    if file_path == target_path:
        # already in the target folder -- nothing to move
        if not open_file:
            return target_path
        return file_
    did_open_file = False
    # no locking on Windows as you can not unlink/move open files there.
    if not IS_WINDOWS:
        if not locked_file:
            # acquire lock to ensure that no other process is handling this
            # message currently.
            locked_file = lock_file(file_path, timeout=0)
            did_open_file = True
        if locked_file is None:
            return None
    try:
        # Bolton's "atomic_rename()" is compatible with Windows.
        # Under Linux "atomic_rename()" ensures that the "target_path" file
        # contains the complete contents AND never overwrites an existing
        # file (as long as it is not stored on an NFS filesystem).
        # However the full operation is NOT atomic in Linux as it consists of
        # two system calls (link(), unlink()) so it could happen that the file
        # exists in the source folder AND the target folder (as hard link).
        # The ideal solution would be to use "renameat2", a Linux-specific
        # system call which can rename without overwriting.  However it is
        # not exposed in Python, needs glibc >= 2.28 (2018-08) and kernel
        # >= 3.15, which rules out e.g. CentOS 7 (kernel 3.10).
        atomic_rename(file_path, target_path, overwrite=False)
        if open_file:
            if IS_WINDOWS:
                return open(target_path, 'rb+')
            # reflect the new location in LockedFile wrapper
            locked_file.name = target_path
            return locked_file
        elif did_open_file:
            # Closing the "LockedFile" will also release locks.
            # Only close the file if we actually opened it.
            locked_file.close()
        return target_path
    except (IOError, OSError):
        pass
    return None
|
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, print_function, unicode_literals
import os
from boltons.fileutils import atomic_rename, atomic_save
import portalocker
from .compat import os_makedirs, FileNotFoundError, IS_WINDOWS
__all__ = ['create_maildir_directories', 'lock_file', 'move_message']
class LockedFile(object):
    """A file object paired with an (optional) portalocker lock.

    Reading and seeking are always allowed; mutating operations assert
    that the lock is currently held.
    """

    __slots__ = ('fp', 'lock', 'name')

    def __init__(self, fp, lock=None):
        self.fp = fp
        self.lock = lock
        self.name = fp.name

    def is_locked(self):
        # portalocker sets ``fh`` to None once the lock was released.
        return (self.lock and (self.lock.fh is not None))

    def close(self):
        # Drop the lock reference as well so is_locked() turns falsy.
        self.fp.close()
        self.lock = None

    def read(self, *args, **kwargs):
        return self.fp.read(*args, **kwargs)

    def readline(self):
        return self.fp.readline()

    def seek(self, position):
        self.fp.seek(position)

    def truncate(self):
        assert self.is_locked()
        self.fp.truncate()

    def write(self, data):
        assert self.is_locked()
        self.fp.write(data)
def create_maildir_directories(basedir, is_folder=False):
    """Create the maildir layout (tmp/cur/new) below *basedir*.

    Returns the path of the ``new`` subdirectory.  When *is_folder* is
    true an empty ``maildirfolder`` marker file is created as described
    by the maildir++ spec
    (http://www.courier-mta.org/imap/README.maildirquota.html).
    """
    os_makedirs(basedir, 0o700, exist_ok=True)
    subdir_paths = {}
    for name in ('tmp', 'cur', 'new'):
        path = os.path.join(basedir, name)
        os_makedirs(path, 0o700, exist_ok=True)
        subdir_paths[name] = path
    if is_folder:
        # Dovecot itself does not create a "maildirfolder" file but having
        # one is harmless; never overwrite an existing one (overcautious).
        marker_path = os.path.join(basedir, 'maildirfolder')
        with atomic_save(marker_path, overwrite=False):
            pass
    return subdir_paths['new']
def is_path_like(path):
    """Return True if *path* is a path object (os.PathLike / pathlib2)."""
    path_like_cls = getattr(os, 'PathLike', None)
    if path_like_cls is not None:
        return isinstance(path, path_like_cls)
    # Python 2 has no os.PathLike; duck-type against pathlib2's marker.
    return hasattr(path, '__fspath__')


def find_messages(queue_basedir, log, queue_folder='new'):
    """Yield the full path of every message file in *queue_folder*.

    Logs an error (and yields nothing) when the queue directory is
    missing.
    """
    if is_path_like(queue_basedir) and not hasattr(os, 'PathLike'):
        # Python 2 + pathlib2: os.listdir() can not handle path objects.
        queue_basedir = str(queue_basedir)
    queue_dir = os.path.join(queue_basedir, queue_folder)
    try:
        entries = os.listdir(queue_dir)
    except FileNotFoundError:
        log.error('Queue directory %s does not exist.', queue_dir)
        return
    for entry in entries:
        yield os.path.join(queue_dir, entry)
def lock_file(path, timeout=None):
    """Open *path* read/write and lock it with portalocker.

    Returns a LockedFile wrapping the locked file object, or None when
    the file does not exist or the lock can not be acquired.

    :param path: file system path of the file to lock
    :param timeout: passed through to portalocker.Lock (None = block)
    """
    try:
        previous_inode = os.stat(path).st_ino
    except OSError:
        # <path> does not exist at all
        return None
    lock = portalocker.Lock(path, mode='rb+', timeout=timeout)
    # prevent race condition when trying to lock file which is deleted by
    # another process (Linux/Unix):
    # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
    nr_tries = 3
    for i in range(nr_tries):
        try:
            fp = lock.acquire()
        except portalocker.LockException:
            # someone else holds the lock -- retry a bounded number of times
            continue
        # Need to check that the inodes of the opened file and the current
        # file in the file system are the same: the locked file could have
        # been unlinked/replaced between our stat() and acquire().
        try:
            current_inode = os.stat(path).st_ino
        except OSError:
            # file vanished while we were acquiring the lock
            return None
        if current_inode == previous_inode:
            break
        # the path now points at a different file: release and try again
        previous_inode = current_inode
        lock.release()
    else:
        # all attempts failed (lock contention or the file kept changing)
        return None
    return LockedFile(fp, lock)
def move_message(file_, target_folder, open_file=True):
    """Atomically move a (possibly locked) message file into *target_folder*.

    *file_* may be a path string, a plain file object or a LockedFile.
    The target folder is a sibling of the file's current folder inside
    the same maildir-style queue directory.  Returns the moved file (or
    its path when ``open_file`` is false), or None when the move failed.
    """
    if hasattr(file_, 'lock') and file_.is_locked():
        # caller handed us an already-locked LockedFile (non-Windows only)
        locked_file = file_
        file_path = file_.name
        assert (not IS_WINDOWS)
    else:
        locked_file = None
        # on Windows we don't use the LockedFile wrapper so we might get plain
        # file-like object here.
        file_path = file_ if (not hasattr(file_, 'name')) else file_.name
    folder_path = os.path.dirname(file_path)
    queue_base_dir = os.path.dirname(folder_path)
    filename = os.path.basename(file_path)
    target_path = os.path.join(queue_base_dir, target_folder, filename)
    if file_path == target_path:
        # already in the target folder -- nothing to move
        if not open_file:
            return target_path
        return file_
    did_open_file = False
    # no locking on Windows as you can not unlink/move open files there.
    if not IS_WINDOWS:
        if not locked_file:
            # acquire lock to ensure that no other process is handling this
            # message currently.
            locked_file = lock_file(file_path, timeout=0)
            did_open_file = True
        if locked_file is None:
            return None
    try:
        # Bolton's "atomic_rename()" is compatible with Windows.
        # Under Linux "atomic_rename()" ensures that the "target_path" file
        # contains the complete contents AND never overwrites an existing
        # file (as long as it is not stored on an NFS filesystem).
        # However the full operation is NOT atomic in Linux as it consists of
        # two system calls (link(), unlink()) so it could happen that the file
        # exists in the source folder AND the target folder (as hard link).
        # The ideal solution would be to use "renameat2", a Linux-specific
        # system call which can rename without overwriting.  However it is
        # not exposed in Python, needs glibc >= 2.28 (2018-08) and kernel
        # >= 3.15, which rules out e.g. CentOS 7 (kernel 3.10).
        atomic_rename(file_path, target_path, overwrite=False)
        if open_file:
            if IS_WINDOWS:
                return open(target_path, 'rb+')
            # reflect the new location in LockedFile wrapper
            locked_file.name = target_path
            return locked_file
        elif did_open_file:
            # Closing the "LockedFile" will also release locks.
            # Only close the file if we actually opened it.
            locked_file.close()
        return target_path
    except (IOError, OSError):
        pass
    return None
|
en
| 0.919361
|
# -*- coding: utf-8 -*- # SPDX-License-Identifier: MIT # The maildir++ description [1] mentions a "maildirfolder" file for each # subfolder. Dovecot does not create such a file but doing so seems # harmless. # http://www.courier-mta.org/imap/README.maildirquota.html # never overwrite an existing "maildirfolder" file (just being overcautious) # in Python 3 we could also use "open(..., 'xb')" and catch FileExistsError # support Python 2 with pathlib2 # <path> does not exist at all # prevent race condition when trying to lock file which is deleted by # another process (Linux/Unix): # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition # Need to check that the inodes of the opened file and the current file # in the file system are the same. # on Windows we don't use the LockedFile wrapper so we might get plain # file-like object here. # no locking on Windows as you can not unlink/move open files there. # acquire lock to ensure that no other process is handling this message # currently. # Bolton's "atomic_rename()" is compatible with Windows. # Under Linux "atomic_rename()" ensures that the "target_path" file # contains the complete contents AND never overwrites an existing # file (as long as it is not stored on an NFS filesystem). # However the full operation is NOT atomic in Linux as it consists of # two system calls (link(), unlink()) so it could happen that the file # exists in the source folder AND the target folder (as hard link). # The ideal solution would be to use "renameat2", a Linux-specific # system call which can rename without overwriting. However that # syscall comes with a number of caveats: # - not all file systems are supported (though I guess ext4 should be # fine) # - not exposed in Python: need to write custom code # - only added in glibc 2.28 (released on 2018-08-01) so we would # have to do a raw syscall from Python (doable, e.g. 
with the # "execute-syscall" github project) # - added in Linux 3.15 - we can not use that syscall in CentOS 7 # (ships with kernel 3.10) which is pretty much a showstopper for me. # reflect the new location in LockedFile wrapper # Closing the "LockedFile" will also release locks. # Only close the file if we actually opened it.
| 2.222055
| 2
|
speedcord/shard.py
|
MM-coder/speedcord
| 0
|
6629464
|
"""
Created by Epic at 9/5/20
"""
from asyncio import Event, Lock, AbstractEventLoop, sleep
from aiohttp.client_exceptions import ClientConnectorError
from aiohttp import WSMessage, WSMsgType
from logging import getLogger
from sys import platform
from ujson import loads, dumps
from time import time
from .exceptions import GatewayUnavailable
class DefaultShard:
    """A single websocket (gateway) connection to Discord.

    One instance manages identify/resume, heartbeating, the gateway
    send rate limit and reconnection for one shard id.
    """

    def __init__(self, shard_id, client, loop: AbstractEventLoop):
        self.id = shard_id
        self.client = client
        self.loop = loop
        self.ws = None
        self.gateway_url = None
        self.logger = getLogger(f"speedcord.shard.{self.id}")
        # Some bots might wanna know which shards is online at all times.
        # TODO(review): the explicit loop= argument was deprecated in
        # Python 3.8 and removed in 3.10 -- confirm the supported version.
        self.connected = Event(loop=self.loop)
        self.received_heartbeat_ack = True
        self.heartbeat_interval = None
        self.heartbeat_count = None
        self.failed_heartbeats = 0
        self.session_id = None
        self.last_event_id = None  # This gets modified by gateway.py
        # Gateway send budget: at most 120 payloads per 60 seconds.
        self.gateway_send_lock = Lock(loop=self.loop)
        self.gateway_send_limit = 120
        self.gateway_send_per = 60
        self.gateway_send_left = self.gateway_send_limit
        self.gateway_send_reset = time() + self.gateway_send_per

        # Default events
        self.client.opcode_dispatcher.register(10, self.handle_hello)
        self.client.opcode_dispatcher.register(11, self.handle_heartbeat_ack)
        self.client.opcode_dispatcher.register(9, self.handle_invalid_session)
        self.client.event_dispatcher.register("READY", self.handle_ready)

    async def connect(self, gateway_url):
        """Open the websocket, then identify (new session) or resume."""
        if self.ws is not None:
            if not self.ws.closed:
                await self.ws.close()
            self.ws = None
        self.gateway_url = gateway_url
        try:
            self.ws = await self.client.http.create_ws(gateway_url, compression=0)
        except ClientConnectorError:
            await self.client.close()
            raise GatewayUnavailable() from None
        self.loop.create_task(self.read_loop())
        self.connected.set()
        if self.session_id is None:
            async with self.client.connection_lock:
                # Discord caps how many IDENTIFYs may be sent per window;
                # wait for the window to reset when (almost) exhausted.
                self.client.remaining_connections -= 1
                if self.client.remaining_connections <= 1:
                    self.logger.info("Max connections reached!")
                    gateway_url, shard_count, _, connections_reset_after = await self.client.get_gateway()
                    await sleep(connections_reset_after / 1000)
                    gateway_url, shard_count, \
                        self.client.remaining_connections, connections_reset_after = await self.client.get_gateway()
                await self.identify()
        else:
            await self.resume()

    async def close(self):
        """Close the websocket and mark the shard as disconnected."""
        if self.ws is not None and not self.ws.closed:
            await self.ws.close()
        self.connected.clear()

    async def read_loop(self):
        """Dispatch incoming gateway messages until the socket closes."""
        message: WSMessage  # Fix typehinting
        async for message in self.ws:
            if message.type == WSMsgType.TEXT:
                await self.client.gateway_handler.on_receive(message.json(loads=loads), self)
            elif message.type in [WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED]:
                self.logger.warning(
                    f"WebSocket is closing! Details: {message.json()}. Close code: {self.ws.close_code}")
            else:
                self.logger.warning("Unknown message type: " + str(type(message)))

    async def send(self, data: dict):
        """Send a payload to the gateway, honouring the send rate limit."""
        async with self.gateway_send_lock:
            current_time = time()
            if current_time >= self.gateway_send_reset:
                # The previous window elapsed -- refill the budget.
                self.gateway_send_reset = current_time + self.gateway_send_per
                self.gateway_send_left = self.gateway_send_limit
            if self.gateway_send_left <= 0:
                sleep_for = self.gateway_send_reset - current_time
                self.logger.debug(f"Gateway ratelimited! Sleeping for {sleep_for}s")
                await sleep(sleep_for)
                # We slept through the reset; start a fresh window.
                self.gateway_send_reset = time() + self.gateway_send_per
                self.gateway_send_left = self.gateway_send_limit
            # BUG FIX: the budget was never decremented before, so the
            # rate limiter above could never trigger.
            self.gateway_send_left -= 1
            self.logger.debug("Data sent: " + str(data))
            await self.ws.send_json(data, dumps=dumps)

    async def identify(self):
        """Send the IDENTIFY (opcode 2) payload to start a new session."""
        await self.send({
            "op": 2,
            "d": {
                # BUG FIX: the checked-in source contained a credential
                # scrubbing artifact ("<PASSWORD>.client.token") here; the
                # bot token comes from the client.
                "token": self.client.token,
                "properties": {
                    "$os": platform,
                    "$browser": "SpeedCord",
                    "$device": "SpeedCord"
                },
                "intents": self.client.intents,
                "shard": (self.id, self.client.shard_count)
            }
        })

    async def resume(self):
        """Send the RESUME (opcode 6) payload for an existing session."""
        await self.send({
            "op": 6,
            "d": {
                # BUG FIX: same scrubbing artifact as in identify().
                "token": self.client.token,
                "session_id": self.session_id,
                "seq": self.last_event_id
            }
        })

    async def heartbeat_loop(self):
        """Send heartbeats until disconnect; reconnect after 3 missed ACKs."""
        while self.connected.is_set():
            if not self.received_heartbeat_ack:
                self.failed_heartbeats += 1
                self.logger.info(
                    "WebSocket did not respond to a heartbeat! Failed attempts: " + str(self.failed_heartbeats))
                if self.failed_heartbeats > 2:
                    self.logger.warning("Gateway stopped responding, reconnecting!")
                    await self.close()
                    await self.connect(self.gateway_url)
                    return
            self.received_heartbeat_ack = False
            await self.send({
                "op": 1,
                "d": self.heartbeat_count
            })
            if self.heartbeat_count is not None:
                self.heartbeat_count += 1
            else:
                self.heartbeat_count = 0
            await sleep(self.heartbeat_interval)

    async def handle_hello(self, data, shard):
        """Opcode 10: record the heartbeat interval and start heartbeating."""
        if shard.id != self.id:
            return
        self.received_heartbeat_ack = True
        self.heartbeat_interval = data["d"]["heartbeat_interval"] / 1000
        self.loop.create_task(self.heartbeat_loop())
        self.logger.debug("Started heartbeat loop")

    async def handle_heartbeat_ack(self, data, shard):
        """Opcode 11: the gateway acknowledged our heartbeat."""
        if shard.id != self.id:
            return
        self.received_heartbeat_ack = True
        self.failed_heartbeats = 0

    async def handle_ready(self, data, shard):
        """READY event: remember the session id for later RESUMEs."""
        if shard.id != self.id:
            return
        self.session_id = data["session_id"]

    async def handle_invalid_session(self, data, shard):
        """Opcode 9: the session became invalid; reconnect (re-identify
        when Discord says the session is not resumable)."""
        if shard.id != self.id:
            return
        if not data.get("d", False):
            # Session is no longer valid, create a new session
            self.session_id = None
        await self.close()
        await self.connect(self.gateway_url)
|
"""
Created by Epic at 9/5/20
"""
from asyncio import Event, Lock, AbstractEventLoop, sleep
from aiohttp.client_exceptions import ClientConnectorError
from aiohttp import WSMessage, WSMsgType
from logging import getLogger
from sys import platform
from ujson import loads, dumps
from time import time
from .exceptions import GatewayUnavailable
class DefaultShard:
    """A single websocket (gateway) connection to Discord.

    One instance manages identify/resume, heartbeating, the gateway
    send rate limit and reconnection for one shard id.
    """

    def __init__(self, shard_id, client, loop: AbstractEventLoop):
        self.id = shard_id
        self.client = client
        self.loop = loop
        self.ws = None
        self.gateway_url = None
        self.logger = getLogger(f"speedcord.shard.{self.id}")
        # Some bots might wanna know which shards is online at all times.
        # TODO(review): the explicit loop= argument was deprecated in
        # Python 3.8 and removed in 3.10 -- confirm the supported version.
        self.connected = Event(loop=self.loop)
        self.received_heartbeat_ack = True
        self.heartbeat_interval = None
        self.heartbeat_count = None
        self.failed_heartbeats = 0
        self.session_id = None
        self.last_event_id = None  # This gets modified by gateway.py
        # Gateway send budget: at most 120 payloads per 60 seconds.
        self.gateway_send_lock = Lock(loop=self.loop)
        self.gateway_send_limit = 120
        self.gateway_send_per = 60
        self.gateway_send_left = self.gateway_send_limit
        self.gateway_send_reset = time() + self.gateway_send_per

        # Default events
        self.client.opcode_dispatcher.register(10, self.handle_hello)
        self.client.opcode_dispatcher.register(11, self.handle_heartbeat_ack)
        self.client.opcode_dispatcher.register(9, self.handle_invalid_session)
        self.client.event_dispatcher.register("READY", self.handle_ready)

    async def connect(self, gateway_url):
        """Open the websocket, then identify (new session) or resume."""
        if self.ws is not None:
            if not self.ws.closed:
                await self.ws.close()
            self.ws = None
        self.gateway_url = gateway_url
        try:
            self.ws = await self.client.http.create_ws(gateway_url, compression=0)
        except ClientConnectorError:
            await self.client.close()
            raise GatewayUnavailable() from None
        self.loop.create_task(self.read_loop())
        self.connected.set()
        if self.session_id is None:
            async with self.client.connection_lock:
                # Discord caps how many IDENTIFYs may be sent per window;
                # wait for the window to reset when (almost) exhausted.
                self.client.remaining_connections -= 1
                if self.client.remaining_connections <= 1:
                    self.logger.info("Max connections reached!")
                    gateway_url, shard_count, _, connections_reset_after = await self.client.get_gateway()
                    await sleep(connections_reset_after / 1000)
                    gateway_url, shard_count, \
                        self.client.remaining_connections, connections_reset_after = await self.client.get_gateway()
                await self.identify()
        else:
            await self.resume()

    async def close(self):
        """Close the websocket and mark the shard as disconnected."""
        if self.ws is not None and not self.ws.closed:
            await self.ws.close()
        self.connected.clear()

    async def read_loop(self):
        """Dispatch incoming gateway messages until the socket closes."""
        message: WSMessage  # Fix typehinting
        async for message in self.ws:
            if message.type == WSMsgType.TEXT:
                await self.client.gateway_handler.on_receive(message.json(loads=loads), self)
            elif message.type in [WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED]:
                self.logger.warning(
                    f"WebSocket is closing! Details: {message.json()}. Close code: {self.ws.close_code}")
            else:
                self.logger.warning("Unknown message type: " + str(type(message)))

    async def send(self, data: dict):
        """Send a payload to the gateway, honouring the send rate limit."""
        async with self.gateway_send_lock:
            current_time = time()
            if current_time >= self.gateway_send_reset:
                # The previous window elapsed -- refill the budget.
                self.gateway_send_reset = current_time + self.gateway_send_per
                self.gateway_send_left = self.gateway_send_limit
            if self.gateway_send_left <= 0:
                sleep_for = self.gateway_send_reset - current_time
                self.logger.debug(f"Gateway ratelimited! Sleeping for {sleep_for}s")
                await sleep(sleep_for)
                # We slept through the reset; start a fresh window.
                self.gateway_send_reset = time() + self.gateway_send_per
                self.gateway_send_left = self.gateway_send_limit
            # BUG FIX: the budget was never decremented before, so the
            # rate limiter above could never trigger.
            self.gateway_send_left -= 1
            self.logger.debug("Data sent: " + str(data))
            await self.ws.send_json(data, dumps=dumps)

    async def identify(self):
        """Send the IDENTIFY (opcode 2) payload to start a new session."""
        await self.send({
            "op": 2,
            "d": {
                # BUG FIX: the checked-in source contained a credential
                # scrubbing artifact ("<PASSWORD>.client.token") here; the
                # bot token comes from the client.
                "token": self.client.token,
                "properties": {
                    "$os": platform,
                    "$browser": "SpeedCord",
                    "$device": "SpeedCord"
                },
                "intents": self.client.intents,
                "shard": (self.id, self.client.shard_count)
            }
        })

    async def resume(self):
        """Send the RESUME (opcode 6) payload for an existing session."""
        await self.send({
            "op": 6,
            "d": {
                # BUG FIX: same scrubbing artifact as in identify().
                "token": self.client.token,
                "session_id": self.session_id,
                "seq": self.last_event_id
            }
        })

    async def heartbeat_loop(self):
        """Send heartbeats until disconnect; reconnect after 3 missed ACKs."""
        while self.connected.is_set():
            if not self.received_heartbeat_ack:
                self.failed_heartbeats += 1
                self.logger.info(
                    "WebSocket did not respond to a heartbeat! Failed attempts: " + str(self.failed_heartbeats))
                if self.failed_heartbeats > 2:
                    self.logger.warning("Gateway stopped responding, reconnecting!")
                    await self.close()
                    await self.connect(self.gateway_url)
                    return
            self.received_heartbeat_ack = False
            await self.send({
                "op": 1,
                "d": self.heartbeat_count
            })
            if self.heartbeat_count is not None:
                self.heartbeat_count += 1
            else:
                self.heartbeat_count = 0
            await sleep(self.heartbeat_interval)

    async def handle_hello(self, data, shard):
        """Opcode 10: record the heartbeat interval and start heartbeating."""
        if shard.id != self.id:
            return
        self.received_heartbeat_ack = True
        self.heartbeat_interval = data["d"]["heartbeat_interval"] / 1000
        self.loop.create_task(self.heartbeat_loop())
        self.logger.debug("Started heartbeat loop")

    async def handle_heartbeat_ack(self, data, shard):
        """Opcode 11: the gateway acknowledged our heartbeat."""
        if shard.id != self.id:
            return
        self.received_heartbeat_ack = True
        self.failed_heartbeats = 0

    async def handle_ready(self, data, shard):
        """READY event: remember the session id for later RESUMEs."""
        if shard.id != self.id:
            return
        self.session_id = data["session_id"]

    async def handle_invalid_session(self, data, shard):
        """Opcode 9: the session became invalid; reconnect (re-identify
        when Discord says the session is not resumable)."""
        if shard.id != self.id:
            return
        if not data.get("d", False):
            # Session is no longer valid, create a new session
            self.session_id = None
        await self.close()
        await self.connect(self.gateway_url)
|
en
| 0.886527
|
Created by Epic at 9/5/20 # Some bots might wanna know which shards is online at all times # This gets modified by gateway.py # Default events # Fix typehinting # Session is no longer valid, create a new session
| 1.963877
| 2
|
solutions/0090-subsets-ii/subsets-ii.py
|
iFun/Project-G
| 0
|
6629465
|
<reponame>iFun/Project-G
# Given a collection of integers that might contain duplicates, nums, return all possible subsets (the power set).
#
# Note: The solution set must not contain duplicate subsets.
#
# Example:
#
#
# Input: [1,2,2]
# Output:
# [
# [2],
# [1],
# [1,2,2],
# [2,2],
# [1,2],
# []
# ]
#
#
#
# @lc app=leetcode id=90 lang=python3
#
# [90] Subsets II
#
# https://leetcode.com/problems/subsets-ii/description/
#
# algorithms
# Medium (42.92%)
# Likes: 941
# Dislikes: 49
# Total Accepted: 210.7K
# Total Submissions: 491K
# Testcase Example: '[1,2,2]'
#
# Given a collection of integers that might contain duplicates, nums, return
# all possible subsets (the power set).
#
# Note: The solution set must not contain duplicate subsets.
#
# Example:
#
#
# Input: [1,2,2]
# Output:
# [
# [2],
# [1],
# [1,2,2],
# [2,2],
# [1,2],
# []
# ]
#
#
#
class Solution:
    """LeetCode 90 -- Subsets II: all distinct subsets of a multiset."""

    def backtrack(self, start, nums, tmp_list, final_list):
        """Collect subsets of sorted *nums* starting at index *start*.

        Basically, for each new number there are two choices -- take it
        or skip it.  The partial subset built so far is recorded on
        entry, so the empty subset is included as well.  Duplicate
        subsets are avoided by skipping a value equal to its predecessor
        at the same recursion depth, which requires *nums* to be sorted.
        """
        final_list.append(tmp_list.copy())
        for index in range(start, len(nums)):
            # skip duplicates: only the first occurrence at this depth
            # may open a new branch
            if index > start and nums[index] == nums[index - 1]:
                continue
            tmp_list.append(nums[index])
            self.backtrack(index + 1, nums, tmp_list, final_list)
            tmp_list.pop()

    # BUG FIX: the annotations used a bare ``List`` with no
    # ``from typing import List`` anywhere in the file, which raises
    # NameError when the file is executed outside the LeetCode sandbox.
    # String annotations keep the signature without the import.
    def subsetsWithDup(self, nums: "List[int]") -> "List[List[int]]":
        """Return every distinct subset (the power set) of *nums*.

        Note: *nums* is sorted in place as a side effect.
        """
        result = []
        nums.sort()  # sorting makes equal values adjacent for the dedup check
        self.backtrack(0, nums, [], result)
        return result
|
# Given a collection of integers that might contain duplicates, nums, return all possible subsets (the power set).
#
# Note: The solution set must not contain duplicate subsets.
#
# Example:
#
#
# Input: [1,2,2]
# Output:
# [
# [2],
# [1],
# [1,2,2],
# [2,2],
# [1,2],
# []
# ]
#
#
#
# @lc app=leetcode id=90 lang=python3
#
# [90] Subsets II
#
# https://leetcode.com/problems/subsets-ii/description/
#
# algorithms
# Medium (42.92%)
# Likes: 941
# Dislikes: 49
# Total Accepted: 210.7K
# Total Submissions: 491K
# Testcase Example: '[1,2,2]'
#
# Given a collection of integers that might contain duplicates, nums, return
# all possible subsets (the power set).
#
# Note: The solution set must not contain duplicate subsets.
#
# Example:
#
#
# Input: [1,2,2]
# Output:
# [
# [2],
# [1],
# [1,2,2],
# [2,2],
# [1,2],
# []
# ]
#
#
#
class Solution:
    """LeetCode 90 -- Subsets II: all distinct subsets of a multiset."""

    def backtrack(self, start, nums, tmp_list, final_list):
        """Collect subsets of sorted *nums* starting at index *start*.

        Basically, for each new number there are two choices -- take it
        or skip it.  The partial subset built so far is recorded on
        entry, so the empty subset is included as well.  Duplicate
        subsets are avoided by skipping a value equal to its predecessor
        at the same recursion depth, which requires *nums* to be sorted.
        """
        final_list.append(tmp_list.copy())
        for index in range(start, len(nums)):
            # skip duplicates: only the first occurrence at this depth
            # may open a new branch
            if index > start and nums[index] == nums[index - 1]:
                continue
            tmp_list.append(nums[index])
            self.backtrack(index + 1, nums, tmp_list, final_list)
            tmp_list.pop()

    # BUG FIX: the annotations used a bare ``List`` with no
    # ``from typing import List`` anywhere in the file, which raises
    # NameError when the file is executed outside the LeetCode sandbox.
    # String annotations keep the signature without the import.
    def subsetsWithDup(self, nums: "List[int]") -> "List[List[int]]":
        """Return every distinct subset (the power set) of *nums*.

        Note: *nums* is sorted in place as a side effect.
        """
        result = []
        nums.sort()  # sorting makes equal values adjacent for the dedup check
        self.backtrack(0, nums, [], result)
        return result
|
en
| 0.712053
|
# Given a collection of integers that might contain duplicates, nums, return all possible subsets (the power set). # # Note: The solution set must not contain duplicate subsets. # # Example: # # # Input: [1,2,2] # Output: # [ # [2], # [1], # [1,2,2], # [2,2], # [1,2], # [] # ] # # # # @lc app=leetcode id=90 lang=python3 # # [90] Subsets II # # https://leetcode.com/problems/subsets-ii/description/ # # algorithms # Medium (42.92%) # Likes: 941 # Dislikes: 49 # Total Accepted: 210.7K # Total Submissions: 491K # Testcase Example: '[1,2,2]' # # Given a collection of integers that might contain duplicates, nums, return # all possible subsets (the power set). # # Note: The solution set must not contain duplicate subsets. # # Example: # # # Input: [1,2,2] # Output: # [ # [2], # [1], # [1,2,2], # [2,2], # [1,2], # [] # ] # # # # bascailly for each new number there is two choices # add number or skip number # keep doing this choice until the index number is reached to # the end of the list and append that list to the final list #skip dup
| 3.924019
| 4
|
detect.py
|
ilham-bintang/ScanSSD
| 0
|
6629466
|
import matplotlib
matplotlib.use( 'tkagg' )
import matplotlib.pyplot as plt
import os
from collections import OrderedDict
import cv2
import numpy as np
import argparse
import torch
from torch.autograd import Variable
import torch.nn as nn
from torchvision import transforms
from scipy.ndimage.measurements import label
from ssd import build_ssd
from data import *
from torch.utils.data import Dataset, DataLoader
from utils import draw_boxes, helpers, save_boxes
import gtdb.feature_extractor
class ArgStub():
    """Hard-coded stand-in for the argparse namespace that the ScanSSD
    code normally receives on the command line."""

    def __init__(self):
        self.cuda = False
        self.kernel = (1, 5)
        self.padding = (0, 2)
        self.phase = 'test'
        self.visual_threshold = 0.25
        self.verbose = False
        self.model_type = 512
        self.use_char_info = False
        self.limit = -1  # -1 = no limit on the number of processed pages
        self.cfg = 'hboxes512'
        self.batch_size = 1
        self.num_workers = 0
        self.neg_mining = True
        self.log_dir = 'logs'
        self.stride = 0.1
        self.window = 1200
        self.test_data = "testing_data"
        self.dataset_root = "/Users/ilhambintang/Latihan/riset/ScanSSD"
        self.save_folder = "/Users/ilhambintang/Latihan/riset/ScanSSD/eval"
        # BUG FIX: exp_name was assigned twice ('SSD' near the top, then
        # "testing" at the end); the first assignment was dead code and
        # has been removed -- only the final value ever took effect.
        self.exp_name = "testing"
def draw_box(image, boxes):
    """Draw every box in *boxes* onto *image* (in place) as a green,
    2-pixel-wide rectangle.

    Each box supplies pixel coordinates at indices 0..3 as
    (x0, y0, x1, y1); extra trailing entries are ignored.
    """
    for box in boxes:
        top_left = (box[0], box[1])
        bottom_right = (box[2], box[3])
        cv2.rectangle(image, top_left, bottom_right, (0, 255, 0), 2)
def _img_to_tensor(image):
    """Convert an image array to the network's CHW float32 tensor.

    The image is resized to the fixed 512x512 input size, the constant
    per-channel value (246, 246, 246) is subtracted, the channel order
    is reversed (assumes OpenCV BGR layout -- TODO confirm against the
    caller), and the result is permuted from HWC to CHW.
    """
    resized = cv2.resize(image, (512, 512), interpolation=cv2.INTER_AREA)
    shifted = resized.astype(np.float32) - np.array((246, 246, 246), dtype=np.float32)
    channels_reversed = shifted[:, :, (2, 1, 0)]
    return torch.from_numpy(channels_reversed).permute(2, 0, 1)
def _scale_boxes_to_image(image, relative_boxes):
    """Scale normalized [0, 1] boxes to integer pixel coordinates.

    NOTE(review): mirroring the original code, coordinates 0 and 2 are
    multiplied by image.shape[0] (rows) and coordinates 1 and 3 by
    image.shape[1] (columns) -- confirm this matches the box layout the
    detector produces.
    """
    scaled = []
    for box in relative_boxes:
        scaled.append([
            int(box[0] * image.shape[0]),
            int(box[1] * image.shape[1]),
            int(box[2] * image.shape[0]),
            int(box[3] * image.shape[1]),
        ])
    return scaled


def FixImgCoordinates(images, boxes):
    """Convert relative box coordinates into absolute pixel coordinates.

    *images* may be a list of images (paired element-wise with *boxes*)
    or a single image, in which case only boxes[0] is used.  Returns a
    list of per-image box lists in both cases.

    The scaling logic was previously duplicated verbatim in both
    branches and interleaved with a debug print of each image shape;
    it now lives in _scale_boxes_to_image and the debug output is gone.
    """
    if isinstance(images, list):
        return [_scale_boxes_to_image(img, img_boxes)
                for img, img_boxes in zip(images, boxes)]
    return [_scale_boxes_to_image(images, boxes[0])]
def DrawAllBoxes(images, boxes):
    """Draw each image's box list onto that image (in place)."""
    for index, image in enumerate(images):
        draw_box(image, boxes[index])
def convert_to_binary(image):
    """Return a {0, 1} float mask of *image*: 1 where the grayscale value is
    dark (<= 127, i.e. ink), 0 where it is light (background).

    Bug fixed: the original wrapped cvtColor in a try/except that merely
    printed the exception and fell through, after which ``gray_image`` was
    undefined and the function crashed with NameError.  Errors now propagate
    to the caller; the debug prints of the full image arrays were removed.
    """
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    im_bw = np.zeros(gray_image.shape)
    im_bw[gray_image > 127] = 0
    im_bw[gray_image <= 127] = 1
    return im_bw
class MathDetector():
    """SSD-based detector for math regions on scanned document pages.

    Loads a trained ScanSSD model from *weight_path* and runs it over the
    GTDB test dataset configured by *args*.  Results are written out via
    ``save_boxes`` and kept on ``self.boxes`` / ``self.scores``.
    """

    def __init__(self, weight_path, args):
        self.args = args
        # NOTE(review): `config` is not imported explicitly here -- presumably
        # it arrives via `from data import *` at the top of the file; confirm.
        net = build_ssd(args, 'test', config.exp_cfg[args.cfg], -1, args.model_type, 2)
        self._net = net # nn.DataParallel(net)
        # The checkpoint was saved from a DataParallel model, so every state
        # dict key carries a "module." prefix (7 chars) that must be stripped.
        weights = torch.load(weight_path, map_location = torch.device('cpu'))
        new_weights = OrderedDict()
        for k, v in weights.items():
            name = k[7:] # remove `module.`
            new_weights[name] = v
        self._net.load_state_dict(new_weights)
        self._net.eval()
        self.dataset = GTDBDetection(args, self.args.test_data, split='test',
                                     transform=BaseTransform(self.args.model_type, (246, 246, 246)),
                                     target_transform=GTDBAnnotationTransform())
        self.data_loader = DataLoader(self.dataset, self.args.batch_size,
                                      num_workers=self.args.num_workers,
                                      shuffle=False, collate_fn=detection_collate,
                                      pin_memory=True)
        # Filled in by Detect(); hold the most recently processed page's results.
        self.boxes = []
        self.scores = []

    def Detect (self, thres, images):
        # Run the network over self.data_loader and keep detections with
        # confidence >= thres.
        # NOTE(review): the `images` parameter is never used -- it is
        # immediately shadowed by the loop variable below, so detection always
        # runs on the configured data_loader, not the caller's tensor.
        done = 0
        for batch_idx, (images, targets, metadata) in enumerate(self.data_loader):
            done = done + len(images)
            with torch.no_grad():
                images = Variable(images)
                targets = [Variable(ann) for ann in targets]
            y, debug_boxes, debug_scores = self._net(images) # forward pass
            detections = y.data
            # NOTE(review): k indexes the batch dimension but is never
            # incremented inside the zip loop, so only detections for the
            # first image of each batch are read (harmless while
            # batch_size == 1, as in ArgStub).
            k = 0
            for img, meta in zip(images, metadata):
                img_id = meta[0]
                x_l = meta[1]   # sliding-window x offset on the page
                y_l = meta[2]   # sliding-window y offset on the page
                img = img.permute(1, 2, 0)
                # scale each detection back up to the image
                scale = torch.Tensor([img.shape[1], img.shape[0],
                                      img.shape[1], img.shape[0]])
                recognized_boxes = []
                recognized_scores = []
                # [1,2,200,5]
                # we only care about math class
                # hence select detections[image_id, class, detection_id, detection_score]
                # class=1 for math
                i = 1
                j = 0
                # Detections are score-sorted, so iteration stops at the first
                # one below the threshold.
                while j < detections.size(2) and detections[k, i, j, 0] >= thres: # TODO it was 0.6
                    score = detections[k, i, j, 0]
                    pt = (detections[k, i, j, 1:] * self.args.window).cpu().numpy()
                    # Offset by the window position to map window-local
                    # coordinates back into page coordinates.
                    coords = (pt[0] + x_l, pt[1] + y_l, pt[2] + x_l, pt[3] + y_l)
                    # coords = (pt[0], pt[1], pt[2], pt[3])
                    recognized_boxes.append(coords)
                    recognized_scores.append(score.cpu().numpy())
                    j += 1
                    print(j)
                save_boxes(self.args, recognized_boxes, recognized_scores, img_id)
                # NOTE(review): overwritten on every page -- after the loop
                # these hold only the *last* page's detections.
                self.boxes = recognized_boxes
                self.scores = recognized_scores

    def DetectAny (self, thres, image):
        # Convenience wrapper: tensorise *image*, run detection and return
        # (boxes, scores).
        # NOTE(review): Detect() ignores its `images` argument (see above),
        # so `t` is computed but not actually used by the detection pass.
        t = _img_to_tensor(image).unsqueeze(0)
        # fix box coordinates to image pixel coordinates
        self.Detect(thres, t)
        # coor_boxes = FixImgCoordinates(image, self.boxes)
        # new_boxes = self.Voting(t, coor_boxes)
        # self.boxes = coor_boxes
        return self.boxes, self.scores

    def Voting(self, image, math_regions):
        # Merge overlapping detections: each candidate box casts one vote per
        # pixel it covers; pixels reaching thresh_votes form connected
        # components, whose bounding boxes become the final detections.
        # NOTE(review): `fit_box` and `feature_extractor` are referenced below,
        # but the file only imports `gtdb.feature_extractor` (bound to the
        # name `gtdb`) -- confirm these names are in scope before relying on
        # this method.
        original_width = image.shape[3]    # assumes a 4-d (N, C, H, W) tensor -- TODO confirm
        original_height = image.shape[2]
        thresh_votes = 30
        votes = np.zeros(shape=(original_height, original_width))
        for box in math_regions:
            votes[int(box[1]):int(box[3]), int(box[0]):int(box[2])] = \
                votes[int(box[1]):int(box[3]), int(box[0]):int(box[2])] + 1
        # Binarise the vote map at the threshold.
        votes[votes < thresh_votes] = 0
        votes[votes >= thresh_votes] = 1
        # NOTE(review): convert_to_binary expects a BGR cv2 image, but `image`
        # here appears to be the 4-d tensor used above -- verify.
        im_bw = convert_to_binary(image)
        # NOTE(review): np.int is removed in modern NumPy; np.int_ (or int)
        # would be required there.
        structure = np.ones((3, 3), dtype=np.int)
        labeled, ncomponents = label(votes, structure)
        boxes = []
        # Per-pixel (x, y) coordinate grid, used to find each component's extent.
        indices = np.indices(votes.shape).T[:, :, [1, 0]]
        for i in range(ncomponents):
            labels = (labeled == (i+1))
            pixels = indices[labels.T]
            if len(pixels) < 1:
                continue
            box = [min(pixels[:, 0]), min(pixels[:, 1]), max(pixels[:, 0]), max(pixels[:, 1])]
            # if args.postprocess:
            # expansion to correctly fit the region
            box = fit_box.adjust_box(im_bw, box)
            # if box has 0 width or height, do not add it in the final detections
            if feature_extractor.width(box) < 1 or feature_extractor.height(box) < 1:
                continue
            boxes.append(box)
        return boxes
def get_img():
    """Load 'images/3.jpg' and return a float32 crop (rows 0-3000, cols 1000-4000)."""
    full_page = cv2.imread('images/3.jpg', cv2.IMREAD_COLOR)
    crop = full_page[0:3000, 1000:4000]
    return crop.astype(np.float32)
# Demo entry point: detect math regions on a test page and save a visualisation.
# Guarded so importing this module no longer runs inference as a side effect.
if __name__ == '__main__':
    md = MathDetector('AMATH512_e1GTDB.pth', ArgStub())
    # a = get_img()
    a = cv2.imread('images/test/1.jpg', cv2.IMREAD_COLOR)
    b, s = md.DetectAny(0.2, a)
    # Bug fixed: the original then called md.Voting() with no arguments,
    # which raises TypeError (Voting requires an image and the regions).
    # print(len(s[0]))
    DrawAllBoxes([a, ], b)
    cv2.imwrite('images/res.png', a)
|
import matplotlib
matplotlib.use( 'tkagg' )
import matplotlib.pyplot as plt
import os
from collections import OrderedDict
import cv2
import numpy as np
import argparse
import torch
from torch.autograd import Variable
import torch.nn as nn
from torchvision import transforms
from scipy.ndimage.measurements import label
from ssd import build_ssd
from data import *
from torch.utils.data import Dataset, DataLoader
from utils import draw_boxes, helpers, save_boxes
import gtdb.feature_extractor
class ArgStub():
def __init__ (self):
self.cuda = False
self.kernel = (1, 5)
self.padding = (0, 2)
self.phase = 'test'
self.visual_threshold = 0.25
self.verbose = False
self.exp_name = 'SSD'
self.model_type = 512
self.use_char_info = False
self.limit = -1
self.cfg = 'hboxes512'
self.batch_size = 1
self.num_workers = 0
self.neg_mining = True
self.log_dir = 'logs'
self.stride = 0.1
self.window = 1200
self.test_data = "testing_data"
self.dataset_root = "/Users/ilhambintang/Latihan/riset/ScanSSD"
self.save_folder = "/Users/ilhambintang/Latihan/riset/ScanSSD/eval"
self.exp_name = "testing"
def draw_box (image, boxes):
for b in boxes:
cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), (0, 255, 0), 2)
def _img_to_tensor (image):
rimg = cv2.resize(image, (512, 512), interpolation = cv2.INTER_AREA).astype(np.float32)
# width = image.shape[0]
# height = image.shape[1]
# max_width = 1024
# coef = max_width/width
# new_width = int(width * coef)
# new_height = int(height * coef)
# rimg = cv2.resize(image, (new_height, new_width), interpolation = cv2.INTER_AREA).astype(np.float32)
rimg -= np.array((246, 246, 246), dtype=np.float32)
rimg = rimg[:, :, (2, 1, 0)]
return torch.from_numpy(rimg).permute(2, 0, 1)
def FixImgCoordinates (images, boxes):
new_boxes = []
if isinstance(images, list):
for i in range(len(images)):
print(images[i].shape)
bbs = []
for o_box in boxes[i] :
b = [None] * 4
b[0] = int(o_box[0] * images[i].shape[0])
b[1] = int(o_box[1] * images[i].shape[1])
b[2] = int(o_box[2] * images[i].shape[0])
b[3] = int(o_box[3] * images[i].shape[1])
bbs.append(b)
new_boxes.append(bbs)
else:
bbs = []
for o_box in boxes[0] :
b = [None] * 4
b[0] = int(o_box[0] * images.shape[0])
b[1] = int(o_box[1] * images.shape[1])
b[2] = int(o_box[2] * images.shape[0])
b[3] = int(o_box[3] * images.shape[1])
bbs.append(b)
new_boxes.append(bbs)
return new_boxes
def DrawAllBoxes(images, boxes):
for i in range(len(images)):
draw_box(images[i], boxes[i])
def convert_to_binary(image):
try:
print(image)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
print(gray_image)
except Exception as e:
print(e)
im_bw = np.zeros(gray_image.shape)
im_bw[gray_image > 127] = 0
im_bw[gray_image <= 127] = 1
return im_bw
class MathDetector():
def __init__(self, weight_path, args):
self.args = args
net = build_ssd(args, 'test', config.exp_cfg[args.cfg], -1, args.model_type, 2)
self._net = net # nn.DataParallel(net)
weights = torch.load(weight_path, map_location = torch.device('cpu'))
new_weights = OrderedDict()
for k, v in weights.items():
name = k[7:] # remove `module.`
new_weights[name] = v
self._net.load_state_dict(new_weights)
self._net.eval()
self.dataset = GTDBDetection(args, self.args.test_data, split='test',
transform=BaseTransform(self.args.model_type, (246, 246, 246)),
target_transform=GTDBAnnotationTransform())
self.data_loader = DataLoader(self.dataset, self.args.batch_size,
num_workers=self.args.num_workers,
shuffle=False, collate_fn=detection_collate,
pin_memory=True)
self.boxes = []
self.scores = []
def Detect (self, thres, images):
done = 0
for batch_idx, (images, targets, metadata) in enumerate(self.data_loader):
done = done + len(images)
with torch.no_grad():
images = Variable(images)
targets = [Variable(ann) for ann in targets]
y, debug_boxes, debug_scores = self._net(images) # forward pass
detections = y.data
k = 0
for img, meta in zip(images, metadata):
img_id = meta[0]
x_l = meta[1]
y_l = meta[2]
img = img.permute(1, 2, 0)
# scale each detection back up to the image
scale = torch.Tensor([img.shape[1], img.shape[0],
img.shape[1], img.shape[0]])
recognized_boxes = []
recognized_scores = []
# [1,2,200,5]
# we only care about math class
# hence select detections[image_id, class, detection_id, detection_score]
# class=1 for math
i = 1
j = 0
while j < detections.size(2) and detections[k, i, j, 0] >= thres: # TODO it was 0.6
score = detections[k, i, j, 0]
pt = (detections[k, i, j, 1:] * self.args.window).cpu().numpy()
coords = (pt[0] + x_l, pt[1] + y_l, pt[2] + x_l, pt[3] + y_l)
# coords = (pt[0], pt[1], pt[2], pt[3])
recognized_boxes.append(coords)
recognized_scores.append(score.cpu().numpy())
j += 1
print(j)
save_boxes(self.args, recognized_boxes, recognized_scores, img_id)
self.boxes = recognized_boxes
self.scores = recognized_scores
def DetectAny (self, thres, image):
t = _img_to_tensor(image).unsqueeze(0)
# fix box coordinates to image pixel coordinates
self.Detect(thres, t)
# coor_boxes = FixImgCoordinates(image, self.boxes)
# new_boxes = self.Voting(t, coor_boxes)
# self.boxes = coor_boxes
return self.boxes, self.scores
def Voting(self, image, math_regions):
original_width = image.shape[3]
original_height = image.shape[2]
thresh_votes = 30
votes = np.zeros(shape=(original_height, original_width))
for box in math_regions:
votes[int(box[1]):int(box[3]), int(box[0]):int(box[2])] = \
votes[int(box[1]):int(box[3]), int(box[0]):int(box[2])] + 1
votes[votes < thresh_votes] = 0
votes[votes >= thresh_votes] = 1
im_bw = convert_to_binary(image)
structure = np.ones((3, 3), dtype=np.int)
labeled, ncomponents = label(votes, structure)
boxes = []
indices = np.indices(votes.shape).T[:, :, [1, 0]]
for i in range(ncomponents):
labels = (labeled == (i+1))
pixels = indices[labels.T]
if len(pixels) < 1:
continue
box = [min(pixels[:, 0]), min(pixels[:, 1]), max(pixels[:, 0]), max(pixels[:, 1])]
# if args.postprocess:
# expansion to correctly fit the region
box = fit_box.adjust_box(im_bw, box)
# if box has 0 width or height, do not add it in the final detections
if feature_extractor.width(box) < 1 or feature_extractor.height(box) < 1:
continue
boxes.append(box)
return boxes
def get_img():
img = cv2.imread('images/3.jpg', cv2.IMREAD_COLOR)
cimg = img[0:3000, 1000:4000].astype(np.float32)
return cimg
md = MathDetector('AMATH512_e1GTDB.pth', ArgStub())
# a = get_img()
a = cv2.imread('images/test/1.jpg', cv2.IMREAD_COLOR)
# exit(0)
b, s = md.DetectAny(0.2, a)
md.Voting()
# print(len(s[0]))
DrawAllBoxes([a, ], b)
cv2.imwrite('images/res.png', a)
|
en
| 0.513829
|
# width = image.shape[0] # height = image.shape[1] # max_width = 1024 # coef = max_width/width # new_width = int(width * coef) # new_height = int(height * coef) # rimg = cv2.resize(image, (new_height, new_width), interpolation = cv2.INTER_AREA).astype(np.float32) # nn.DataParallel(net) # remove `module.` # forward pass # scale each detection back up to the image # [1,2,200,5] # we only care about math class # hence select detections[image_id, class, detection_id, detection_score] # class=1 for math # TODO it was 0.6 # coords = (pt[0], pt[1], pt[2], pt[3]) # fix box coordinates to image pixel coordinates # coor_boxes = FixImgCoordinates(image, self.boxes) # new_boxes = self.Voting(t, coor_boxes) # self.boxes = coor_boxes # if args.postprocess: # expansion to correctly fit the region # if box has 0 width or height, do not add it in the final detections # a = get_img() # exit(0) # print(len(s[0]))
| 2.025214
| 2
|
Search/search.py
|
TarnumG95/PictureMatchCheater
| 1
|
6629467
|
import itertools
def preprocess(FILE):
    """
    Read a whitespace-separated integer matrix from *FILE* and return it as a
    list of int rows.

    Bugs fixed: the original stripped the last character of every line with
    ``line[:-1]``, which corrupts the final line when the file has no
    trailing newline, and ``split(' ')`` produced empty tokens (crashing
    ``int``) on blank or newline-only lines.  ``str.split()`` handles both;
    blank lines are skipped.
    """
    ret = []
    with open(FILE) as f:
        for line in f:
            row = [int(arg) for arg in line.split()]
            if row:
                ret.append(row)
    return ret
def surround0(ret):
    """Pad the matrix *ret* in place with a one-cell border of zeros; return it."""
    padded_width = len(ret[0]) + 2
    ret.insert(0, [0] * padded_width)
    for row in ret[1:]:
        row.insert(0, 0)
        row.append(0)
    ret.append([0] * padded_width)
    return ret
def myRange(x1, x2):
    """Return the range of integers strictly between x1 and x2 (order-insensitive)."""
    lo, hi = (x1, x2) if x1 < x2 else (x2, x1)
    return range(lo + 1, hi)
def direct(board, pos1, pos2):
    """True if pos1 and pos2 share a row or column and every cell strictly
    between them is 0 (an unobstructed straight segment); False otherwise."""
    r1, c1 = pos1
    r2, c2 = pos2
    if r1 == r2:
        lo, hi = (c1, c2) if c1 < c2 else (c2, c1)
        return all(board[r1][c] == 0 for c in range(lo + 1, hi))
    if c1 == c2:
        lo, hi = (r1, r2) if r1 < r2 else (r2, r1)
        return all(board[r][c1] == 0 for r in range(lo + 1, hi))
    # Not aligned on either axis: no straight segment exists.
    return False
def oneCorner(board, pos1, pos2):
    """True if pos1 and pos2 can be linked by a path with exactly one
    90-degree turn through an empty corner cell."""
    r1, c1 = pos1
    r2, c2 = pos2
    if r1 == r2 or c1 == c2:
        # Aligned positions have no corner path (handled by direct()).
        return False
    corner_a = (r1, c2)
    if board[r1][c2] == 0 and direct(board, pos1, corner_a) and direct(board, pos2, corner_a):
        return True
    corner_b = (r2, c1)
    return board[r2][c1] == 0 and direct(board, pos1, corner_b) and direct(board, pos2, corner_b)
def twoCorners(board, pos1, pos2):
    # True if pos1 and pos2 can be linked by a path with exactly two
    # 90-degree turns: try every empty cell in pos1's row/column that is
    # reachable from pos1 in a straight line as the first corner, then check
    # whether a one-corner path covers the rest; then the same from pos2's
    # side.
    for col in range(len(board[0])):
        if board[pos1[0]][col] == 0 and direct(board, (pos1[0], col), pos1) and oneCorner(board, (pos1[0], col), pos2):
            # print 1
            return True
    for row in range(len(board)):
        if board[row][pos1[1]] == 0 and direct(board, (row, pos1[1]), pos1) and oneCorner(board, (row, pos1[1]), pos2):
            # print (row, pos1[1])
            return True
    for col in range(len(board[0])):
        if board[pos2[0]][col] == 0 and direct(board, (pos2[0], col), pos2) and oneCorner(board, (pos2[0], col), pos1):
            # print 3
            return True
    for row in range(len(board)):
        if board[row][pos2[1]] == 0 and direct(board, (row, pos2[1]), pos2) and oneCorner(board, (row, pos2[1]), pos1):
            # print 4
            return True
    return False
def isConnected(board, pos1, pos2):
    """
    @param:
        board -- a matrix representing the game
        pos1 -- First Position, e.g. (1, 2)
        pos2 -- Second Position, e.g. (0, 3)
    @return:
        True if the two positions hold the same value and can be linked by a
        path with at most two 90-degree turns, otherwise False.
    """
    if board[pos1[0]][pos1[1]] != board[pos2[0]][pos2[1]]:
        return False
    return (direct(board, pos1, pos2)
            or oneCorner(board, pos1, pos2)
            or twoCorners(board, pos1, pos2))
def asDict(board):
    """Map each cell value to the list of (row, col) positions holding it,
    in row-major order.

    Defect fixed: the original's if/else branches were byte-for-byte
    identical (both appended); collapsed to a single ``setdefault`` call.
    """
    ret = {}
    for i in range(len(board)):
        for j in range(len(board[0])):
            ret.setdefault(board[i][j], []).append((i, j))
    return ret
def findSolution(board):
    """
    @param:
        board -- a matrix representing the game, already padded with a zero
                 border (see surround0), e.g.
            0 0 0 0 0 0
            0 1 2 3 4 0
            0 4 2 1 3 0
            0 4 5 4 5 0
            0 0 0 0 0 0
    @return:
        (moves, trimmed_board, solved) where moves is a flat list of the
        cancelled positions in order (padded coordinates), trimmed_board is
        the board with the zero border removed, and solved is True when
        every cell was cancelled.
    """
    ret = []
    # Repeatedly sweep all same-valued pairs, cancelling every connectable
    # pair found; stop once a full sweep makes no progress.
    while True:
        flag = True
        boardDict = asDict(board)
        for i in boardDict:
            if i != 0 and len(boardDict[i]) >= 2:
                iterlist = itertools.combinations(boardDict[i], 2)
                for pair in iterlist:
                    if isConnected(board, pair[0], pair[1]):
                        cancelItem(board, pair[0], pair[1])
                        ret.append(pair[0])
                        ret.append(pair[1])
                        flag = False
                        # NOTE(review): boardDict is stale after a
                        # cancellation -- later pairs in this sweep may
                        # include already-zeroed positions; isConnected's
                        # value comparison rejects them against live cells,
                        # but two zeroed cells of the same sweep could still
                        # be re-"cancelled" harmlessly. Confirm acceptable.
                        continue  # no-op: already the last statement of the loop body
        if flag:
            break
    # printBoard(board)
    # Strip the zero border added by surround0 before reporting.
    board.pop(len(board) - 1)
    board.pop(0)
    for i in range(len(board)):
        board[i].pop(len(board[i]) - 1)
        board[i].pop(0)
    # printBoard(board)
    # The board is solved when every remaining cell is 0 (all values are
    # non-negative tile ids).
    sumnum = sum([board[i][j] for i in range(len(board)) for j in range(len(board[0]))])
    return (ret, board, sumnum == 0)
def cancelItem(board, pos1, pos2):
    """Zero out the two matched cells of *board* in place, removing them from play."""
    board[pos1[0]][pos1[1]] = 0
    board[pos2[0]][pos2[1]] = 0
    # print pos1, pos2, 'Cancelled'
    # print 'New Board: '
    # printBoard(board)
def printBoard(board):
    # Print the matrix one row per line (Python 2 print statement: the
    # trailing comma keeps values space-separated on one line, the bare
    # print ends the row).
    for line in board:
        for arg in line:
            print arg,
        print
def main():
    # Demo driver: load the board from '92.txt', pad it with a zero border,
    # print it, then print the full solving result (Python 2 print statement).
    board = surround0(preprocess('92.txt'))
    printBoard(board)
    # pos1 = (1, 1)
    # pos2 = (2, 3)
    # trans = (1, 2)
    # trans2 = (2, 2)
    # print pos1, pos2
    # print direct(board, trans, trans2)
    # print direct(board, pos2, trans2)
    # print oneCorner(board, trans, pos2)
    # print twoCorners(board, trans2, pos2)
    print findSolution(board)
def solve(mat):
    # Library entry point: pad *mat* with a zero border, then search for a
    # full cancellation sequence; returns (moves, trimmed_board, solved).
    return findSolution(surround0(mat))
if __name__ == '__main__':
main()
|
import itertools
def preprocess(FILE):
"""
Read from file, return a int matrix surrounded by 0.
"""
ret = []
with open(FILE) as f:
for line in f:
line = line[:-1]
ret.append([(int)(arg) for arg in line.split(' ')])
return ret
def surround0(ret):
ret.insert(0, [0 for _ in range(2 + len(ret[0]))])
for i in range(1, len(ret)):
ret[i].insert(0, 0)
ret[i].append(0)
ret.append([0 for _ in range(len(ret[0]))])
return ret
def myRange(x1, x2):
if (x1 < x2):
return range(x1 + 1, x2)
else:
return range(x2 + 1, x1)
def direct(board, pos1, pos2):
if pos1[0] == pos2[0]:
# print pos1, pos2
# print myRange(pos1[1] + 1, pos2[1])
for i in myRange(pos1[1], pos2[1]):
if board[pos1[0]][i] != 0:
return False
return True
elif pos1[1] == pos2[1]:
# print range(pos1[0] + 1, pos2[0])
for i in myRange(pos1[0], pos2[0]):
# print (i, pos2[1])
if board[i][pos1[1]] != 0:
return False
return True
else:
return False
def oneCorner(board, pos1, pos2):
if pos1[0] == pos2[0] or pos1[1] == pos2[1]:
return False
else:
return (board[pos1[0]][pos2[1]] == 0 and direct(board, pos1, (pos1[0], pos2[1])) and direct(board, pos2, (pos1[0], pos2[1]))) or\
(board[pos2[0]][pos1[1]] == 0 and direct(board, pos1, (pos2[0], pos1[1])) and direct(board, pos2, (pos2[0], pos1[1])))
def twoCorners(board, pos1, pos2):
for col in range(len(board[0])):
if board[pos1[0]][col] == 0 and direct(board, (pos1[0], col), pos1) and oneCorner(board, (pos1[0], col), pos2):
# print 1
return True
for row in range(len(board)):
if board[row][pos1[1]] == 0 and direct(board, (row, pos1[1]), pos1) and oneCorner(board, (row, pos1[1]), pos2):
# print (row, pos1[1])
return True
for col in range(len(board[0])):
if board[pos2[0]][col] == 0 and direct(board, (pos2[0], col), pos2) and oneCorner(board, (pos2[0], col), pos1):
# print 3
return True
for row in range(len(board)):
if board[row][pos2[1]] == 0 and direct(board, (row, pos2[1]), pos2) and oneCorner(board, (row, pos2[1]), pos1):
# print 4
return True
return False
def isConnected(board, pos1, pos2):
"""
@param:
board -- a matrix representing the game
pos1 -- First Position, e.g. (1, 2)
pos2 -- Second Position, e.g. (0, 3)
@return:
True / False
"""
if board[pos1[0]][pos1[1]] == board[pos2[0]][pos2[1]]:
return direct(board, pos1, pos2) or oneCorner(board, pos1, pos2) or twoCorners(board, pos1, pos2)
else:
return False
def asDict(board):
ret = {}
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] not in ret:
ret[board[i][j]] = []
ret[board[i][j]].append((i, j))
else:
ret[board[i][j]].append((i, j))
return ret
def findSolution(board):
"""
@param:
board -- a matrix representing the game
i.e.
0 0 0 0 0 0
0 1 2 3 4 0
0 4 2 1 3 0
0 4 5 4 5 0
0 0 0 0 0 0
@return:
set of valid operations to cancel all items
i.e.
(3, 2), (3, 4), (2, 1), (3, 1), (3, 3),
(1, 4), (1, 2), (2, 2), (1, 1), (2, 3),
(1, 3), (2, 4)
"""
ret = []
while True:
flag = True
boardDict = asDict(board)
for i in boardDict:
if i != 0 and len(boardDict[i]) >= 2:
iterlist = itertools.combinations(boardDict[i], 2)
for pair in iterlist:
if isConnected(board, pair[0], pair[1]):
cancelItem(board, pair[0], pair[1])
ret.append(pair[0])
ret.append(pair[1])
flag = False
continue
if flag:
break
# printBoard(board)
board.pop(len(board) - 1)
board.pop(0)
for i in range(len(board)):
board[i].pop(len(board[i]) - 1)
board[i].pop(0)
# printBoard(board)
sumnum = sum([board[i][j] for i in range(len(board)) for j in range(len(board[0]))])
return (ret, board, sumnum == 0)
def cancelItem(board, pos1, pos2):
x1, y1 = pos1[0], pos1[1]
x2, y2 = pos2[0], pos2[1]
board[x1][y1] = 0
board[x2][y2] = 0
# print pos1, pos2, 'Cancelled'
# print 'New Board: '
# printBoard(board)
def printBoard(board):
for line in board:
for arg in line:
print arg,
print
def main():
board = surround0(preprocess('92.txt'))
printBoard(board)
# pos1 = (1, 1)
# pos2 = (2, 3)
# trans = (1, 2)
# trans2 = (2, 2)
# print pos1, pos2
# print direct(board, trans, trans2)
# print direct(board, pos2, trans2)
# print oneCorner(board, trans, pos2)
# print twoCorners(board, trans2, pos2)
print findSolution(board)
def solve(mat):
return findSolution(surround0(mat))
if __name__ == '__main__':
main()
|
en
| 0.574854
|
Read from file, return a int matrix surrounded by 0. # print pos1, pos2 # print myRange(pos1[1] + 1, pos2[1]) # print range(pos1[0] + 1, pos2[0]) # print (i, pos2[1]) # print 1 # print (row, pos1[1]) # print 3 # print 4 @param: board -- a matrix representing the game pos1 -- First Position, e.g. (1, 2) pos2 -- Second Position, e.g. (0, 3) @return: True / False @param: board -- a matrix representing the game i.e. 0 0 0 0 0 0 0 1 2 3 4 0 0 4 2 1 3 0 0 4 5 4 5 0 0 0 0 0 0 0 @return: set of valid operations to cancel all items i.e. (3, 2), (3, 4), (2, 1), (3, 1), (3, 3), (1, 4), (1, 2), (2, 2), (1, 1), (2, 3), (1, 3), (2, 4) # printBoard(board) # printBoard(board) # print pos1, pos2, 'Cancelled' # print 'New Board: ' # printBoard(board) # pos1 = (1, 1) # pos2 = (2, 3) # trans = (1, 2) # trans2 = (2, 2) # print pos1, pos2 # print direct(board, trans, trans2) # print direct(board, pos2, trans2) # print oneCorner(board, trans, pos2) # print twoCorners(board, trans2, pos2)
| 3.775923
| 4
|
ordersystem/models/__init__.py
|
iomegak12/ordersystempy
| 0
|
6629468
|
<filename>ordersystem/models/__init__.py<gh_stars>0
from .crm_system_error import CRMSystemError
from .order import Order
|
<filename>ordersystem/models/__init__.py<gh_stars>0
from .crm_system_error import CRMSystemError
from .order import Order
|
none
| 1
| 1.344774
| 1
|
|
19.dicionarios_variaveis_compostas/ex.94.py
|
hermyay/Curso-de-Python
| 0
|
6629469
|
# Exercise: write a program that reads the name, sex and age of several
# people, storing each person's data in a dictionary and every dictionary
# in a list.  At the end, show:
# a) How many people were registered
# b) The average age of the group
# c) A list of everyone older than the average
# (Prompts and output stay in Portuguese, the exercise's original language.)
pessoas = dict()
dados = list()
soma = media = 0
while True:
    # One person per iteration; `pessoas` is reused, so a copy is appended.
    pessoas.clear()
    pessoas['nome'] = str(input("Nome: ").title())
    while True:
        # Keep asking until the first character of the answer is M or F.
        pessoas['sexo'] = str(input("Sexo: [M/F] ").upper()[0])
        if pessoas["sexo"] in "MmFf":
            break
        print("ERROR.")
    pessoas['idade'] = int(input("Idade: "))
    soma += pessoas["idade"]
    dados.append(pessoas.copy())
    while True:
        # "Quer continuar?" = "continue?"; accept only S (yes) / N (no).
        pergunta = str(input("Quer continuar? [S/N]: ").upper()[0])
        if pergunta in "SsNn":
            break
        print("ERROR.")
    if pergunta == "N":
        break
print(dados)
print("**"*20)
# a) number of people registered
print(f"A) Foram cadastradas {len(dados)} pessoas.")
print("**"*20)
# b) average age of the group (len(dados) >= 1 because the loop runs once)
media = soma / len(dados)
print(f"B) A media de idade do grupo e'de {media:.0f} Anos")
print("**"*20)
# c) people strictly older than the average age
print("C) A(s) pessoa(s) com idade acima da media: ", end=" ")
for p in dados:
    if p['idade'] > media:
        print(f"{p['nome']}", end=" | ")
print("\n")
print("**"*20)
# d) registered women
print("D) As mulheres cadastradas foram: ", end="")
for p in dados:
    if p['sexo'] in "Ff":
        print(f"{p['nome']}", end=" | ")
print("\n")
print("**"*20)
# e) registered men
print("E) Os homens cadastrados foram: ", end="")
for p in dados:
    if p['sexo'] in "Mm":
        print(f"{p['nome']}", end=' | ')
print("\n")
print("**"*20)
# f) people under 18
print("F) Pessoas menores de 18 anos: ", end="")
for p in dados:
    if p['idade'] < 18:
        print(f"{p['nome']}", end=' | ')
print("\n")
print("**"*20)
# g) people over 18 (exactly 18 appears in neither list, as written)
print("G) Pessoas maiores de 18 anos: ", end="")
for p in dados:
    if p['idade'] > 18:
        print(f"{p['nome']}", end=" | ")
print("\n")
print("**"*20)
|
# Crie um programa que leia nome, sexo e idade de varias pessoas, guardando os dados de cada pessoa em um dicionario e todos os dicionarios em uma lista. No final, mostre:
# a) Quantas pessoas foram cadastradas
# b) A media de idade do grupo
# c) Uma lista com todas as pessoas com idade acima da media.
pessoas = dict()
dados = list()
soma = media = 0
while True:
pessoas.clear()
pessoas['nome'] = str(input("Nome: ").title())
while True:
pessoas['sexo'] = str(input("Sexo: [M/F] ").upper()[0])
if pessoas["sexo"] in "MmFf":
break
print("ERROR.")
pessoas['idade'] = int(input("Idade: "))
soma += pessoas["idade"]
dados.append(pessoas.copy())
while True:
pergunta = str(input("Quer continuar? [S/N]: ").upper()[0])
if pergunta in "SsNn":
break
print("ERROR.")
if pergunta == "N":
break
print(dados)
print("**"*20)
print(f"A) Foram cadastradas {len(dados)} pessoas.")
print("**"*20)
media = soma / len(dados)
print(f"B) A media de idade do grupo e'de {media:.0f} Anos")
print("**"*20)
print("C) A(s) pessoa(s) com idade acima da media: ", end=" ")
for p in dados:
if p['idade'] > media:
print(f"{p['nome']}", end=" | ")
print("\n")
print("**"*20)
print("D) As mulheres cadastradas foram: ", end="")
for p in dados:
if p['sexo'] in "Ff":
print(f"{p['nome']}", end=" | ")
print("\n")
print("**"*20)
print("E) Os homens cadastrados foram: ", end="")
for p in dados:
if p['sexo'] in "Mm":
print(f"{p['nome']}", end=' | ')
print("\n")
print("**"*20)
print("F) Pessoas menores de 18 anos: ", end="")
for p in dados:
if p['idade'] < 18:
print(f"{p['nome']}", end=' | ')
print("\n")
print("**"*20)
print("G) Pessoas maiores de 18 anos: ", end="")
for p in dados:
if p['idade'] > 18:
print(f"{p['nome']}", end=" | ")
print("\n")
print("**"*20)
|
pt
| 0.988853
|
# Crie um programa que leia nome, sexo e idade de varias pessoas, guardando os dados de cada pessoa em um dicionario e todos os dicionarios em uma lista. No final, mostre: # a) Quantas pessoas foram cadastradas # b) A media de idade do grupo # c) Uma lista com todas as pessoas com idade acima da media.
| 3.830308
| 4
|
tools/report_generators/cli.py
|
mgrubisic/coronavirus-2020
| 0
|
6629470
|
<reponame>mgrubisic/coronavirus-2020
import logging
import os
from itertools import compress
import click
import pandas
import oscovida
from .executors import ReportExecutor
from .reporters import (AllRegions, CountryReport, GermanyReport,
HungaryReport, USAReport)
ALL_REGIONS = ["countries", "germany", "usa", "hungary", "all-regions-md", "all"]
def does_wwwroot_exist(wwwroot, create=False):
    """Ensure the *wwwroot* output directory (with ``ipynb/`` and ``html/``
    subdirectories) exists.

    Raises FileNotFoundError when the directory is missing and *create* is
    False; does nothing when it already exists.

    Fixes: ``os.makedirs`` replaces the non-recursive ``os.mkdir`` calls so a
    missing parent directory no longer fails, and the dataset-mangled
    "<EMAIL>" placeholder in the error message is restored to the git URL.
    """
    if os.path.exists(wwwroot):
        return
    if not create:
        raise FileNotFoundError(
            f"Directory {wwwroot} missing. "
            "To put the html into github repo for webhosting, run "
            "`git clone git@github.com:oscovida/oscovida.github.io.git "
            "wwwroot` or similar"
        )
    # makedirs creates wwwroot itself along with each subdirectory.
    os.makedirs(os.path.join(wwwroot, "ipynb"))
    os.makedirs(os.path.join(wwwroot, "html"))
def get_country_list():
    """Return the sorted, de-duplicated list of countries appearing in the
    oscovida deaths dataset (asserting the cases dataset lists the same)."""
    d = oscovida.fetch_deaths()
    c = oscovida.fetch_cases()
    countries = d.index
    countries2 = c.index
    assert (countries2 == countries).all()
    # Here we should identify regions in countries, and process those.
    # Instead, as a quick hack to get started, we'll just take one country
    # and the current "get_country" method will sum over all regions of one
    # country if only the country name is given.
    return sorted(countries.drop_duplicates())
def generate_reports_countries(
    *, workers, kernel_name, wwwroot, force, disable_pbar, debug
):
    """Generate per-country HTML reports plus a markdown index page.

    The deaths/cases datasets are fetched up front -- presumably to warm
    oscovida's download cache before the notebook workers run (TODO confirm)
    -- and their indexes are asserted to agree.
    """
    d = oscovida.fetch_deaths()
    c = oscovida.fetch_cases()
    countries = d.index
    countries2 = c.index
    assert (countries2 == countries).all()
    # TODO: The get_x_list methods should be part of Reporter class
    countries = get_country_list()
    cre = ReportExecutor(
        Reporter=CountryReport,
        kernel_name=kernel_name,
        wwwroot=wwwroot,
        expiry_hours=2,
        attempts=3,
        workers=workers,
        force=force,
        disable_pbar=disable_pbar,
        debug=debug,
    )
    if debug:
        # Debug mode: only report the first 10 regions to keep runs short.
        countries = countries[:10]
    cre.create_html_reports(countries)
    cre.create_markdown_index_page()
def get_germany_regions_list():
    """Return sorted, de-duplicated [Bundesland, Landkreis] pairs from the
    oscovida Germany dataset."""
    data_germany = oscovida.fetch_data_germany()
    land_kreis = data_germany[["Bundesland", "Landkreis"]]
    ordered = land_kreis.sort_values(["Bundesland", "Landkreis"])
    return ordered.drop_duplicates().values.tolist()
def generate_reports_germany(
    *, workers, kernel_name, wwwroot, force, disable_pbar, debug
):
    """Generate per-Landkreis HTML reports for Germany plus a markdown index.

    Datasets whose Landkreis name contains "(alt)" are dropped first (see
    the data-cleaning comment below).
    """
    _ = oscovida.fetch_data_germany()
    # TODO: The get_x_list methods should be part of Reporter class
    germany_regions = get_germany_regions_list()
    # data cleaning: on 13 April, we had a Landkreis "LK Göttingen (alt)"
    # with only one data point. This causes plots to fail, because there
    # is nothing to plot, and then the legend() command failed.
    # We assume that the RKI labels unusual data with '(alt)', and remove those.
    alt_data_sets = ["(alt)" in r[1].lower() for r in germany_regions]
    if any(alt_data_sets):
        bad_datasets = list(compress(germany_regions, alt_data_sets))
        logging.warning(f"Removing datasets label with '(alt)': {bad_datasets}")
        for bd in bad_datasets:
            c, d, _ = oscovida.germany_get_region(landkreis=bd[1])
            logging.warning(
                f"\tremoved: {bd} : len(cases)={len(c)}, len(deaths)={len(d)}"
            )
        # Delete from the end so earlier indices stay valid while removing.
        bad_indices = list(compress(range(len(alt_data_sets)), alt_data_sets))
        for i in sorted(bad_indices, reverse=True):
            del germany_regions[i]
    gre = ReportExecutor(
        Reporter=GermanyReport,
        kernel_name=kernel_name,
        wwwroot=wwwroot,
        expiry_hours=2,
        attempts=3,
        workers=workers,
        force=force,
        disable_pbar=disable_pbar,
        debug=debug,
    )
    if debug:
        # Debug mode: only report the first 10 regions to keep runs short.
        germany_regions = germany_regions[:10]
    gre.create_html_reports(germany_regions)
    gre.create_markdown_index_page()
def generate_reports_usa(
    *, workers, kernel_name, wwwroot, force, disable_pbar, debug
):
    """Generate per-state HTML reports for the USA plus a markdown index."""
    # Fetch up front -- presumably to warm oscovida's download cache before
    # the notebook workers run (TODO confirm).
    _ = oscovida.fetch_cases_US()
    _ = oscovida.fetch_deaths_US()
    # TODO: The get_x_list methods should be part of Reporter class
    states = oscovida.get_US_region_list()
    usre = ReportExecutor(
        Reporter=USAReport,
        kernel_name=kernel_name,
        wwwroot=wwwroot,
        expiry_hours=2,
        attempts=3,
        workers=workers,
        force=force,
        disable_pbar=disable_pbar,
        debug=debug,
    )
    if debug:
        # Debug mode: only report the first 10 regions to keep runs short.
        states = states[:10]
    usre.create_html_reports(states)
    usre.create_markdown_index_page()
def generate_reports_hungary(*, workers, kernel_name, wwwroot, force, disable_pbar, debug):
    """Generate per-county HTML reports for Hungary plus a markdown index."""
    # Fetch up front -- presumably to warm oscovida's download cache before
    # the notebook workers run (TODO confirm).
    _ = oscovida.fetch_data_hungary()
    # TODO: The get_x_list methods should be part of Reporter class
    counties = oscovida.get_counties_hungary()
    hre = ReportExecutor(
        Reporter=HungaryReport,
        kernel_name=kernel_name,
        wwwroot=wwwroot,
        expiry_hours=2,
        attempts=3,
        workers=workers,
        force=force,
        disable_pbar=disable_pbar,
        debug=debug,
    )
    if debug:
        # Debug mode: only report the first 10 regions to keep runs short.
        counties = counties[:10]
    hre.create_html_reports(counties)
    hre.create_markdown_index_page()
def generate_markdown_all_regions(
    *, workers, kernel_name, wwwroot, force, disable_pbar, debug
):
    """Regenerate only the all-regions markdown index page.

    The extra keyword arguments are accepted (and ignored) so this function
    matches the signature of the other generators in generate()'s dispatch
    table.
    """
    arre = ReportExecutor(Reporter=AllRegions, wwwroot=wwwroot)
    arre.create_markdown_index_page()
def generate(*, region, workers, kernel_name, wwwroot, force, disable_pbar, debug):
    """Dispatch report generation for *region* to the matching generator.

    Raises KeyError for an unknown region name.
    """
    dispatch = {
        "countries": generate_reports_countries,
        "germany": generate_reports_germany,
        "usa": generate_reports_usa,
        "hungary": generate_reports_hungary,
        "all-regions-md": generate_markdown_all_regions,
    }
    generator = dispatch[region]
    generator(
        workers=workers,
        kernel_name=kernel_name,
        wwwroot=wwwroot,
        disable_pbar=disable_pbar,
        force=force,
        debug=debug,
    )
@click.command()
@click.option(
    "--regions",
    "-r",
    type=click.Choice(
        ALL_REGIONS,
        case_sensitive=False
    ),
    multiple=True,
    help="Region(s) to generate reports for.",
)
@click.option(
    "--workers",
    default="auto",
    help="Number of workers to use, `auto` uses nproc-2, set to 1 or False to "
    "use a single process.",
)
@click.option(
    "--wwwroot",
    default="./wwwroot",
    help="Root directory for www content."
)
@click.option(
    "--create-wwwroot",
    default=False,
    is_flag=True,
    help="Create wwwroot directory if it does not exist.",
)
@click.option(
    "--kernel-name",
    default="",
    # Fixed: help text was copy-pasted from --create-wwwroot.
    help="Name of the Jupyter kernel used to execute the notebooks.",
)
@click.option(
    "--disable-pbar",
    default=False,
    is_flag=True,
    help="Disable progress bar, print logging output instead.",
)
@click.option(
    "--log-level",
    default="WARNING",
    type=click.Choice(["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]),
    help="Log level.",
)
@click.option("--log-file", default=None, help="Log file path.")
@click.option(
    "--force",
    default=False,
    is_flag=True,
    help="Force notebook re-execution even if recently executed.",
)
@click.option(
    "--debug",
    default=False,
    is_flag=True,
    help="Enable debug mode, only generates reports for the first 10 regions "
    "and sets the log level to `DEBUG`.",
)
def cli(
    *,
    workers,
    regions,
    kernel_name="",
    wwwroot="wwwroot",
    create_wwwroot=False,
    disable_pbar=False,
    log_level="WARNING",
    log_file=None,
    force=False,
    debug=False,
):
    """
    Command Line Interface used to batch-generate and execute Jupyter
    notebook reports for oscovida.
    """
    # If log level is set to INFO and debug flag is on, leave it at INFO, DEBUG
    # level can be wwaaayy too verbose and annoying (e.g. prints off the full
    # contents of the HTML pages as they get saved)
    if debug and log_level != "INFO":
        log_level = "DEBUG"
    if log_level in ["INFO", "DEBUG"]:
        click.echo("Disabling progress bar due to log level verbosity")
        disable_pbar = True
    handlers = []
    if log_file:
        handlers.append(logging.FileHandler(log_file))
    if disable_pbar:
        handlers.append(logging.StreamHandler())
    logging.basicConfig(
        format="%(asctime)s %(threadName)s: %(message)s",
        level=log_level,
        handlers=handlers,
        datefmt="%H:%M:%S",
    )
    logging.info(f"Initial args: {locals()}")
    does_wwwroot_exist(wwwroot, create=create_wwwroot)
    # Disable pandas scientific notation
    pandas.set_option("display.float_format", "{:.2f}".format)
    # `--workers` arrives as a string; translate the sentinels and convert
    # explicit numbers to int.  Fixes: os.cpu_count() can return None (now
    # treated as a single CPU), and a numeric value such as "4" was
    # previously passed downstream as a string.
    if workers == "auto":
        # Leave two cores free, but never drop below one worker.
        workers = max((os.cpu_count() or 1) - 2, 1)
    elif workers == "max":
        workers = os.cpu_count() or 1
    elif workers in ("False", "false"):
        # Documented alternative to `--workers 1`.
        workers = 1
    else:
        workers = int(workers)
    if workers:
        logging.info(f"Using {workers} processes")
    if "all" in regions:
        if len(regions) > 1:
            raise Exception("Cannot accept multiple regions if 'all' is passed")
        # Build a fresh list: `regions` is a tuple (click's multiple=True),
        # and the original code rebound it to the module-level ALL_REGIONS
        # list and then mutated that list in place with .remove("all").
        regions = [r for r in ALL_REGIONS if r != "all"]
    logging.info(f"Processed args: {locals()}")
    for region in regions:
        generate(
            region=region,
            workers=workers,
            kernel_name=kernel_name,
            wwwroot=wwwroot,
            disable_pbar=disable_pbar,
            force=force,
            debug=debug,
        )
# Allow direct execution of this module as a script.
if __name__ == "__main__":
    cli()
|
import logging
import os
from itertools import compress
import click
import pandas
import oscovida
from .executors import ReportExecutor
from .reporters import (AllRegions, CountryReport, GermanyReport,
HungaryReport, USAReport)
ALL_REGIONS = ["countries", "germany", "usa", "hungary", "all-regions-md", "all"]
def does_wwwroot_exist(wwwroot, create=False):
    """Ensure that the output directory ``wwwroot`` exists.

    Args:
        wwwroot: Path of the web root that will hold the generated
            ``ipynb`` and ``html`` sub-directories.
        create: If True, create the directory tree when it is missing
            instead of raising.

    Raises:
        FileNotFoundError: If ``wwwroot`` does not exist and ``create``
            is False.
    """
    if not os.path.exists(wwwroot):
        if create:
            # makedirs creates wwwroot and each sub-directory in one call,
            # using os.path.join instead of fragile string concatenation.
            os.makedirs(os.path.join(wwwroot, "ipynb"))
            os.makedirs(os.path.join(wwwroot, "html"))
        else:
            raise FileNotFoundError(
                f"Directory {wwwroot} missing. "
                "To put the html into github repo for webhosting, run "
                "`git clone <EMAIL>:oscovida/oscovida.github.io.git"
                "wwwroot` or similar"
            )
def get_country_list():
    """Return the sorted, de-duplicated list of country names.

    The deaths and cases datasets must agree on their index; this is
    asserted before the index is used.
    """
    deaths = oscovida.fetch_deaths()
    cases = oscovida.fetch_cases()
    assert (cases.index == deaths.index).all()
    # Here we should identify regions in countries, and process those.
    # Instead, as a quick hack to get started, we'll just take one country
    # and the current "get_country" method will sum over all regions of one
    # country if only the country name is given.
    return sorted(deaths.index.drop_duplicates())
def generate_reports_countries(
    *, workers, kernel_name, wwwroot, force, disable_pbar, debug
):
    """Generate and publish the per-country HTML reports."""
    deaths = oscovida.fetch_deaths()
    cases = oscovida.fetch_cases()
    # The two datasets must line up before any country is processed.
    assert (cases.index == deaths.index).all()
    # TODO: The get_x_list methods should be part of Reporter class
    countries = get_country_list()
    executor = ReportExecutor(
        Reporter=CountryReport,
        wwwroot=wwwroot,
        kernel_name=kernel_name,
        workers=workers,
        expiry_hours=2,
        attempts=3,
        force=force,
        disable_pbar=disable_pbar,
        debug=debug,
    )
    if debug:
        # Debug runs only process a small sample of regions.
        countries = countries[:10]
    executor.create_html_reports(countries)
    executor.create_markdown_index_page()
def get_germany_regions_list():
    """Return unique ``[Bundesland, Landkreis]`` pairs, sorted by both columns."""
    columns = ["Bundesland", "Landkreis"]
    regions = oscovida.fetch_data_germany()[columns].sort_values(columns)
    return regions.drop_duplicates().values.tolist()
def generate_reports_germany(
    *, workers, kernel_name, wwwroot, force, disable_pbar, debug
):
    """Generate and publish the per-Landkreis HTML reports for Germany.

    Regions whose Landkreis name contains ``(alt)`` are dropped before
    report generation (see the data-cleaning note below).
    """
    # Return value unused — presumably warms the data cache before the
    # workers start (TODO confirm).
    _ = oscovida.fetch_data_germany()
    # TODO: The get_x_list methods should be part of Reporter class
    germany_regions = get_germany_regions_list()
    # data cleaning: on 13 April, we had a Landkreis "LK Göttingen (alt)"
    # with only one data point. This causes plots to fail, because there
    # is nothing to plot, and then the legend() command failed.
    # We assume that the RKI labels unusual data with '(alt)', and remove those.
    # r is a [Bundesland, Landkreis] pair; r[1] is the Landkreis name.
    alt_data_sets = ["(alt)" in r[1].lower() for r in germany_regions]
    if any(alt_data_sets):
        bad_datasets = list(compress(germany_regions, alt_data_sets))
        logging.warning(f"Removing datasets label with '(alt)': {bad_datasets}")
        for bd in bad_datasets:
            # Fetch only to report how much data is being discarded.
            c, d, _ = oscovida.germany_get_region(landkreis=bd[1])
            logging.warning(
                f"\tremoved: {bd} : len(cases)={len(c)}, len(deaths)={len(d)}"
            )
        bad_indices = list(compress(range(len(alt_data_sets)), alt_data_sets))
        # Delete from the end so earlier indices stay valid.
        for i in sorted(bad_indices, reverse=True):
            del germany_regions[i]
    gre = ReportExecutor(
        Reporter=GermanyReport,
        kernel_name=kernel_name,
        wwwroot=wwwroot,
        expiry_hours=2,
        attempts=3,
        workers=workers,
        force=force,
        disable_pbar=disable_pbar,
        debug=debug,
    )
    if debug:
        # Debug runs only process a small sample of regions.
        germany_regions = germany_regions[:10]
    gre.create_html_reports(germany_regions)
    gre.create_markdown_index_page()
def generate_reports_usa(
    *, workers, kernel_name, wwwroot, force, disable_pbar, debug
):
    """Generate and publish the per-state HTML reports for the USA."""
    # Return values unused — presumably warms the data caches before the
    # workers start (TODO confirm).
    _ = oscovida.fetch_cases_US()
    _ = oscovida.fetch_deaths_US()
    # TODO: The get_x_list methods should be part of Reporter class
    states = oscovida.get_US_region_list()
    executor = ReportExecutor(
        Reporter=USAReport,
        wwwroot=wwwroot,
        kernel_name=kernel_name,
        workers=workers,
        expiry_hours=2,
        attempts=3,
        force=force,
        disable_pbar=disable_pbar,
        debug=debug,
    )
    if debug:
        # Debug runs only process a small sample of regions.
        states = states[:10]
    executor.create_html_reports(states)
    executor.create_markdown_index_page()
def generate_reports_hungary(*, workers, kernel_name, wwwroot, force, disable_pbar, debug):
    """Generate and publish the per-county HTML reports for Hungary."""
    # Return value unused — presumably warms the data cache before the
    # workers start (TODO confirm).
    _ = oscovida.fetch_data_hungary()
    # TODO: The get_x_list methods should be part of Reporter class
    counties = oscovida.get_counties_hungary()
    executor = ReportExecutor(
        Reporter=HungaryReport,
        wwwroot=wwwroot,
        kernel_name=kernel_name,
        workers=workers,
        expiry_hours=2,
        attempts=3,
        force=force,
        disable_pbar=disable_pbar,
        debug=debug,
    )
    if debug:
        # Debug runs only process a small sample of regions.
        counties = counties[:10]
    executor.create_html_reports(counties)
    executor.create_markdown_index_page()
def generate_markdown_all_regions(
    *, workers, kernel_name, wwwroot, force, disable_pbar, debug
):
    """Write the markdown index page covering every region.

    Only ``wwwroot`` is used; the other keyword arguments exist so this
    function matches the common generator signature dispatched by
    ``generate``.
    """
    executor = ReportExecutor(Reporter=AllRegions, wwwroot=wwwroot)
    executor.create_markdown_index_page()
def generate(*, region, workers, kernel_name, wwwroot, force, disable_pbar, debug):
    """Dispatch to the report generator responsible for ``region``.

    Raises KeyError for an unknown region name (same as the original
    dict lookup).
    """
    generators = {
        "countries": generate_reports_countries,
        "germany": generate_reports_germany,
        "usa": generate_reports_usa,
        "hungary": generate_reports_hungary,
        "all-regions-md": generate_markdown_all_regions,
    }
    generator = generators[region]
    generator(
        workers=workers,
        kernel_name=kernel_name,
        wwwroot=wwwroot,
        disable_pbar=disable_pbar,
        force=force,
        debug=debug,
    )
@click.command()
@click.option(
    "--regions",
    "-r",
    type=click.Choice(
        ALL_REGIONS,
        case_sensitive=False
    ),
    multiple=True,
    help="Region(s) to generate reports for.",
)
@click.option(
    "--workers",
    default="auto",
    help="Number of workers to use, `auto` uses nproc-2, set to 1 or False to "
    "use a single process.",
)
@click.option(
    "--wwwroot",
    default="./wwwroot",
    help="Root directory for www content."
)
@click.option(
    "--create-wwwroot",
    default=False,
    is_flag=True,
    help="Create wwwroot directory if it does not exist.",
)
@click.option(
    "--kernel-name",
    default="",
    # BUGFIX: help text was copy-pasted from --create-wwwroot.
    help="Name of the Jupyter kernel used to execute the notebooks.",
)
@click.option(
    "--disable-pbar",
    default=False,
    is_flag=True,
    help="Disable progress bar, print logging output instead.",
)
@click.option(
    "--log-level",
    default="WARNING",
    type=click.Choice(["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]),
    help="Log level.",
)
@click.option("--log-file", default=None, help="Log file path.")
@click.option(
    "--force",
    default=False,
    is_flag=True,
    help="Force notebook re-execution even if recently executed.",
)
@click.option(
    "--debug",
    default=False,
    is_flag=True,
    help="Enable debug mode, only generates reports for the first 10 regions "
    "and sets the log level to `DEBUG`.",
)
def cli(
    *,
    workers,
    regions,
    kernel_name="",
    wwwroot="wwwroot",
    create_wwwroot=False,
    disable_pbar=False,
    log_level="WARNING",
    log_file=None,
    force=False,
    debug=False,
):
    """
    Command Line Interface used to batch-generate and execute Jupyter
    notebook reports for oscovida.
    """
    # If log level is set to INFO and debug flag is on, leave it at INFO, DEBUG
    # level can be wwaaayy too verbose and annoying (e.g. prints off the full
    # contents of the HTML pages as they get saved)
    if debug and log_level != "INFO":
        log_level = "DEBUG"
    if log_level in ["INFO", "DEBUG"]:
        click.echo("Disabling progress bar due to log level verbosity")
        disable_pbar = True
    handlers = []
    if log_file:
        handlers.append(logging.FileHandler(log_file))
    if disable_pbar:
        handlers.append(logging.StreamHandler())
    logging.basicConfig(
        format="%(asctime)s %(threadName)s: %(message)s",
        level=log_level,
        handlers=handlers,
        datefmt="%H:%M:%S",
    )
    logging.info(f"Initial args: {locals()}")
    does_wwwroot_exist(wwwroot, create=create_wwwroot)
    # Disable pandas scientific notation
    pandas.set_option("display.float_format", "{:.2f}".format)
    if workers == "auto":
        # Leave two cores free for the rest of the system, but always
        # use at least one worker.
        workers = max(os.cpu_count() - 2, 1)
    elif workers == "max":
        workers = os.cpu_count()
    elif str(workers).lower() == "false":
        # The help text documents `False` as "use a single process";
        # normalise it to 0 so the falsy check below behaves as intended
        # (previously the string "False" was truthy and passed through).
        workers = 0
    else:
        # BUGFIX: click passes option values through as strings; convert
        # explicitly so the executors receive an integer worker count.
        try:
            workers = int(workers)
        except ValueError:
            raise click.BadParameter(
                f"--workers must be an integer, 'auto', 'max' or 'False', "
                f"got {workers!r}"
            )
    if workers:
        logging.info(f"Using {workers} processes")
    if "all" in regions:
        if len(regions) > 1:
            raise Exception("Cannot accept multiple regions if 'all' is passed")
        # BUGFIX: build a fresh list instead of `regions = ALL_REGIONS`
        # followed by `regions.remove("all")`, which mutated the
        # module-level ALL_REGIONS constant in place.
        regions = [region for region in ALL_REGIONS if region != "all"]
    logging.info(f"Processed args: {locals()}")
    for region in regions:
        generate(
            region=region,
            workers=workers,
            kernel_name=kernel_name,
            wwwroot=wwwroot,
            disable_pbar=disable_pbar,
            force=force,
            debug=debug,
        )
# Allow direct execution of this module as a script.
if __name__ == "__main__":
    cli()
|
en
| 0.842511
|
# Here we should identify regions in countries, and process those. # Instead, as a quick hack to get started, we'll just take one country # and the current "get_country" method will sum over all regions of one # country if only the country name is given. # TODO: The get_x_list methods should be part of Reporter class # TODO: The get_x_list methods should be part of Reporter class # data cleaning: on 13 April, we had a Landkreis "LK Göttingen (alt)" # with only one data point. This causes plots to fail, because there # is nothing to plot, and then the legend() command failed. # We assume that the RKI labels unusual data with '(alt)', and remove those. # TODO: The get_x_list methods should be part of Reporter class # TODO: The get_x_list methods should be part of Reporter class Command Line Interface used to batch-generate and execute Jupyter notebook reports for oscovida. # If log level is set to INFO and debug flag is on, leave it at INFO, DEBUG # level can be wwaaayy too verbose and annoying (e.g. prints off the full # contents of the HTML pages as they get saved) # Disable pandas scientific notation
| 2.575778
| 3
|
segmentation_models_pytorch/losses/focal.py
|
MikePham05/segmentation_models.pytorch
| 5,325
|
6629471
|
from typing import Optional
from functools import partial
import torch
from torch.nn.modules.loss import _Loss
from ._functional import focal_loss_with_logits
from .constants import BINARY_MODE, MULTICLASS_MODE, MULTILABEL_MODE
__all__ = ["FocalLoss"]
class FocalLoss(_Loss):
    def __init__(
        self,
        mode: str,
        alpha: Optional[float] = None,
        gamma: Optional[float] = 2.,
        ignore_index: Optional[int] = None,
        reduction: Optional[str] = "mean",
        normalized: bool = False,
        reduced_threshold: Optional[float] = None,
    ):
        """Compute Focal loss
        Args:
            mode: Loss mode 'binary', 'multiclass' or 'multilabel'
            alpha: Prior probability of having positive value in target.
            gamma: Power factor for dampening weight (focal strength).
            ignore_index: If not None, targets may contain values to be ignored.
                Target values equal to ignore_index will be ignored from loss computation.
            normalized: Compute normalized focal loss (https://arxiv.org/pdf/1909.07829.pdf).
            reduced_threshold: Switch to reduced focal loss. Note, when using this mode you should use `reduction="sum"`.
        Shape
             - **y_pred** - torch.Tensor of shape (N, C, H, W)
             - **y_true** - torch.Tensor of shape (N, H, W) or (N, C, H, W)
        Reference
            https://github.com/BloodAxe/pytorch-toolbelt
        """
        assert mode in {BINARY_MODE, MULTILABEL_MODE, MULTICLASS_MODE}
        super().__init__()
        self.mode = mode
        self.ignore_index = ignore_index
        # Bake all static hyper-parameters into a partial so that forward()
        # only has to supply predictions and targets.
        self.focal_loss_fn = partial(
            focal_loss_with_logits,
            alpha=alpha,
            gamma=gamma,
            reduced_threshold=reduced_threshold,
            reduction=reduction,
            normalized=normalized,
        )
    def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
        """Return the focal loss of logits ``y_pred`` against targets ``y_true``."""
        if self.mode in {BINARY_MODE, MULTILABEL_MODE}:
            # Flatten and treat every element as an independent binary decision.
            y_true = y_true.view(-1)
            y_pred = y_pred.view(-1)
            if self.ignore_index is not None:
                # Filter predictions with ignore label from loss computation
                not_ignored = y_true != self.ignore_index
                y_pred = y_pred[not_ignored]
                y_true = y_true[not_ignored]
            loss = self.focal_loss_fn(y_pred, y_true)
        elif self.mode == MULTICLASS_MODE:
            num_classes = y_pred.size(1)
            loss = 0
            # Filter anchors with -1 label from loss computation
            if self.ignore_index is not None:
                not_ignored = y_true != self.ignore_index
            # One-vs-rest: accumulate a binary focal loss per class channel.
            for cls in range(num_classes):
                cls_y_true = (y_true == cls).long()
                cls_y_pred = y_pred[:, cls, ...]
                if self.ignore_index is not None:
                    cls_y_true = cls_y_true[not_ignored]
                    cls_y_pred = cls_y_pred[not_ignored]
                loss += self.focal_loss_fn(cls_y_pred, cls_y_true)
        return loss
|
from typing import Optional
from functools import partial
import torch
from torch.nn.modules.loss import _Loss
from ._functional import focal_loss_with_logits
from .constants import BINARY_MODE, MULTICLASS_MODE, MULTILABEL_MODE
__all__ = ["FocalLoss"]
class FocalLoss(_Loss):
    def __init__(
        self,
        mode: str,
        alpha: Optional[float] = None,
        gamma: Optional[float] = 2.,
        ignore_index: Optional[int] = None,
        reduction: Optional[str] = "mean",
        normalized: bool = False,
        reduced_threshold: Optional[float] = None,
    ):
        """Focal loss for binary, multilabel or multiclass segmentation.
        Args:
            mode: Loss mode 'binary', 'multiclass' or 'multilabel'
            alpha: Prior probability of having positive value in target.
            gamma: Power factor for dampening weight (focal strength).
            ignore_index: If not None, targets may contain values to be ignored.
                Target values equal to ignore_index will be ignored from loss computation.
            normalized: Compute normalized focal loss (https://arxiv.org/pdf/1909.07829.pdf).
            reduced_threshold: Switch to reduced focal loss. Note, when using this mode you should use `reduction="sum"`.
        Shape
             - **y_pred** - torch.Tensor of shape (N, C, H, W)
             - **y_true** - torch.Tensor of shape (N, H, W) or (N, C, H, W)
        Reference
            https://github.com/BloodAxe/pytorch-toolbelt
        """
        assert mode in {BINARY_MODE, MULTILABEL_MODE, MULTICLASS_MODE}
        super().__init__()
        self.mode = mode
        self.ignore_index = ignore_index
        # All static hyper-parameters are frozen into a partial up front.
        self.focal_loss_fn = partial(
            focal_loss_with_logits,
            alpha=alpha,
            gamma=gamma,
            reduced_threshold=reduced_threshold,
            reduction=reduction,
            normalized=normalized,
        )
    def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
        """Return the focal loss of logits ``y_pred`` against targets ``y_true``."""
        if self.mode == MULTICLASS_MODE:
            return self._multiclass_loss(y_pred, y_true)
        return self._flat_binary_loss(y_pred, y_true)
    def _flat_binary_loss(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
        # Binary / multilabel: flatten everything and treat each element as
        # an independent binary decision.
        y_true = y_true.view(-1)
        y_pred = y_pred.view(-1)
        if self.ignore_index is not None:
            # Drop positions labelled with the ignore index.
            keep = y_true != self.ignore_index
            y_pred = y_pred[keep]
            y_true = y_true[keep]
        return self.focal_loss_fn(y_pred, y_true)
    def _multiclass_loss(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
        # One-vs-rest: sum a binary focal loss over every class channel.
        num_classes = y_pred.size(1)
        keep = None
        if self.ignore_index is not None:
            keep = y_true != self.ignore_index
        total = 0
        for class_index in range(num_classes):
            binary_target = (y_true == class_index).long()
            class_logits = y_pred[:, class_index, ...]
            if keep is not None:
                binary_target = binary_target[keep]
                class_logits = class_logits[keep]
            total = total + self.focal_loss_fn(class_logits, binary_target)
        return total
|
en
| 0.687833
|
Compute Focal loss Args: mode: Loss mode 'binary', 'multiclass' or 'multilabel' alpha: Prior probability of having positive value in target. gamma: Power factor for dampening weight (focal strength). ignore_index: If not None, targets may contain values to be ignored. Target values equal to ignore_index will be ignored from loss computation. normalized: Compute normalized focal loss (https://arxiv.org/pdf/1909.07829.pdf). reduced_threshold: Switch to reduced focal loss. Note, when using this mode you should use `reduction="sum"`. Shape - **y_pred** - torch.Tensor of shape (N, C, H, W) - **y_true** - torch.Tensor of shape (N, H, W) or (N, C, H, W) Reference https://github.com/BloodAxe/pytorch-toolbelt # Filter predictions with ignore label from loss computation # Filter anchors with -1 label from loss computation
| 2.464469
| 2
|
tyrell/enumerator/optimizer.py
|
rodamber/Tyrell
| 0
|
6629472
|
<filename>tyrell/enumerator/optimizer.py<gh_stars>0
from z3 import *
from .. import dsl as D
class Optimizer:
    """Encodes weighted (soft) structural constraints over a program sketch
    and minimises their total violation cost with Z3.

    Hard constraints go straight to the solver; weighted constraints are
    relaxed through fresh 0/1 ``relax`` variables whose weighted sum is
    minimised by an LSU-style loop (linear search on the bound, from SAT
    side) in :meth:`optimize`.
    """
    # NOTE(review): these are *class-level* mutable attributes, so every
    # Optimizer instance in the same process shares the same lists/dict.
    # If more than one Optimizer is ever created, state leaks between
    # them — confirm, and consider moving these into __init__.
    # additional variables to track if a production occurs or not in a program
    var_occurs = []
    # relaxation variables
    relax_vars = []
    # keeps track of the current assumptions
    assumptions = []
    # keeps track of the cost of each relaxation variable
    cost_relax_vars = {}
    def __init__(self, solver, spec, variables, nodes):
        """Store the Z3 solver, DSL spec, per-node decision variables and
        the sketch nodes; initialise the objective bookkeeping."""
        self.bound = 0          # current cost bound tried by the LSU loop
        self.ub = 0             # sum of all weights (upper bound on cost)
        self.solver = solver
        self.spec = spec
        self.variables = variables
        self.id = 0             # counter used to name fresh relax variables
        self.objective = []     # weighted relax terms to be minimised
        self.nodes = nodes
        self.weights = []       # weights of all soft constraints seen so far
    def createVariablesOccurrence(self):
        """Create one 0/1 variable ``occN`` per production and tie it to
        the node variables: occ[x] == 1 iff some node uses production x."""
        for x in range(0, self.spec.num_productions()):
            name = 'occ' + str(x)
            v = Int(name)
            self.var_occurs.append(v)
            self.solver.add(And(v >= 0, v <= 1))
        for x in range(0, len(self.var_occurs)):
            ctr = self.var_occurs[x] == 1
            rhs = self.variables[0] == x
            for y in range(1, len(self.variables)):
                rhs = Or(rhs, self.variables[y] == x)
                self.solver.add(
                    Implies(self.variables[y] == x, self.var_occurs[x] == 1))
            # occ[x] == 1 implies at least one node variable equals x.
            self.solver.add(Implies(ctr, rhs))
        for x in range(0, len(self.var_occurs)):
            for y in range(0, len(self.variables)):
                # occ[x] == 0 forces every node variable away from x.
                self.solver.add(
                    Implies(self.var_occurs[x] == 0, self.variables[y] != x))
    def mk_is_not_parent(self, parent, child, weight=None):
        """Forbid ``child`` from appearing as a direct child of ``parent``.

        If ``weight`` is given the constraint is soft: a fresh 0/1
        relaxation variable allows a violation at cost ``weight``.
        """
        child_pos = []
        # find positions that type-check between parent and child
        for x in range(0, len(parent.rhs)):
            if child.lhs == parent.rhs[x]:
                child_pos.append(x)
        for n in self.nodes:
            # not a leaf node
            if n.children != None:
                if weight != None:
                    # FIXME: reduce duplication of code
                    name = 'relax' + str(self.id)
                    v = Int(name)
                    self.cost_relax_vars[v] = weight
                    self.relax_vars.append(v)
                    self.objective.append(Product(weight, v))
                    self.weights.append(weight)
                    self.ub += weight
                    # domain of the relaxation variable
                    self.solver.add(Or(v == 0, v == 1))
                    # constraint for the is_parent constraint
                    ctr_children = []
                    for p in range(0, len(child_pos)):
                        ctr_children.append(
                            self.variables[n.children[p].id - 1] == child.id)
                    self.solver.add(
                        Or(Implies(Or(ctr_children), self.variables[n.id - 1] != parent.id), v == 1))
                    # relation between relaxation variables and constraint
                    self.solver.add(Implies(v == 1, Or(
                        self.variables[n.id - 1] == parent.id, Not(Or(ctr_children)))))
                    self.solver.add(
                        Implies(And(self.variables[n.id - 1] != parent.id, Or(ctr_children)), v == 0))
                    self.id = self.id + 1
                else:
                    # Hard version: a child match forbids the parent production.
                    ctr_children = []
                    for p in range(0, len(child_pos)):
                        ctr_children.append(
                            self.variables[n.children[p].id - 1] == child.id)
                    self.solver.add(
                        Implies(Or(ctr_children), self.variables[n.id - 1] != parent.id))
    # FIXME: dissociate the creation of variables with the creation of constraints?
    def mk_is_parent(self, parent, child, weight=None):
        '''children production will have the parent production with probability weight'''
        child_pos = []
        # find positions that type-check between parent and child
        for x in range(0, len(parent.rhs)):
            if child.lhs == parent.rhs[x]:
                child_pos.append(x)
        for n in self.nodes:
            # not a leaf node
            if n.children != None:
                if weight != None:
                    # FIXME: reduce duplication of code
                    name = 'relax' + str(self.id)
                    v = Int(name)
                    self.cost_relax_vars[v] = weight
                    self.relax_vars.append(v)
                    self.objective.append(Product(weight, v))
                    self.weights.append(weight)
                    self.ub += weight
                    # domain of the relaxation variable
                    self.solver.add(Or(v == 0, v == 1))
                    # constraint for the is_parent constraint
                    ctr_children = []
                    for p in range(0, len(child_pos)):
                        ctr_children.append(
                            self.variables[n.children[p].id - 1] == child.id)
                    self.solver.add(
                        Or(Implies(self.variables[n.id - 1] == parent.id, Or(ctr_children)), v == 1))
                    # relation between relaxation variables and constraint
                    self.solver.add(Implies(v == 1, Or(
                        self.variables[n.id - 1] != parent.id, Not(Or(ctr_children)))))
                    self.solver.add(
                        Implies(And(self.variables[n.id - 1] == parent.id, Or(ctr_children)), v == 0))
                    self.id = self.id + 1
                else:
                    # Hard version: the parent production forces a child match.
                    ctr_children = []
                    for p in range(0, len(child_pos)):
                        ctr_children.append(
                            self.variables[n.children[p].id - 1] == child.id)
                    self.solver.add(
                        Implies(self.variables[n.id - 1] == parent.id, Or(ctr_children)))
    def mk_not_occurs(self, production, weight=None):
        '''a production will not occur with a given probability'''
        if len(self.var_occurs) == 0:
            # Occurrence variables are created lazily on first use.
            self.createVariablesOccurrence()
        if weight != None:
            name = 'relax' + str(self.id)
            v = Int(name)
            self.cost_relax_vars[v] = weight
            self.relax_vars.append(v)
            self.objective.append(Product(weight, v))
            self.weights.append(weight)
            self.ub += weight
            # domain of the relaxation variable
            self.solver.add(Or(v == 0, v == 1))
            # constraint for at least once
            self.solver.add(Or(self.var_occurs[production.id] == 0, v == 1))
            # relation between relaxation variables and constraint
            self.solver.add(
                Implies(v == 1, self.var_occurs[production.id] != 0))
            self.solver.add(
                Implies(self.var_occurs[production.id] == 0, v == 0))
            self.id = self.id + 1
        else:
            self.solver.add(self.var_occurs[production.id] == 0)
    # FIXME: dissociate the creation of variables with the creation of constraints?
    def mk_occurs(self, production, weight=None):
        '''a production will occur with a given probability'''
        if len(self.var_occurs) == 0:
            # Occurrence variables are created lazily on first use.
            self.createVariablesOccurrence()
        if weight != None:
            name = 'relax' + str(self.id)
            v = Int(name)
            self.cost_relax_vars[v] = weight
            self.relax_vars.append(v)
            self.objective.append(Product(weight, v))
            self.weights.append(weight)
            self.ub += weight
            # domain of the relaxation variable
            self.solver.add(Or(v == 0, v == 1))
            # constraint for at least once
            self.solver.add(Or(self.var_occurs[production.id] == 1, v == 1))
            # relation between relaxation variables and constraint
            self.solver.add(
                Implies(v == 1, self.var_occurs[production.id] != 1))
            self.solver.add(
                Implies(self.var_occurs[production.id] == 1, v == 0))
            self.id = self.id + 1
        else:
            self.solver.add(self.var_occurs[production.id] == 1)
    def isSubsetSum(self, set, n, sum):
        """Classic DP subset-sum: True iff some subset of ``set[:n]`` sums
        to exactly ``sum``. Used by optimize() to skip cost bounds that no
        combination of weights can reach."""
        subset = ([[False for i in range(sum + 1)]
                   for i in range(n + 1)])
        # If sum is 0, then answer is true
        for i in range(n + 1):
            subset[i][0] = True
        # If sum is not 0 and set is empty,
        # then answer is false
        for i in range(1, sum + 1):
            subset[0][i] = False
        # Fill the subset table in botton up manner
        for i in range(1, n + 1):
            for j in range(1, sum + 1):
                if j < set[i - 1]:
                    subset[i][j] = subset[i - 1][j]
                if j >= set[i - 1]:
                    subset[i][j] = (subset[i - 1][j]
                                    or subset[i - 1][j - set[i - 1]])
        return subset[n][sum]
    def optimize(self, solver):
        """Search for a model minimising the weighted sum of relaxations.

        LSU loop: assert ``Sum(objective) <= bound`` (tracked as 'obj'),
        and on UNSAT raise the bound — skipping values unreachable as a
        subset sum of the weights — until the solver reports sat.
        Returns the model, or None if even the fully relaxed problem is
        unsatisfiable.
        """
        model = None
        cost = 0
        res = sat
        nb_sat = 0
        nb_unsat = 0
        # no optimization is defined
        if len(self.objective) == 0:
            res = solver.check()
            if res == sat:
                model = solver.model()
        # optimization using the LSU algorithm
        else:
            solver.set(unsat_core=True)
            solver.push()
            ctr = Sum(self.objective) <= self.bound
            solver.assert_and_track(ctr, 'obj')
            while model == None and res == sat:
                res = solver.check()
                if res == sat:
                    nb_sat += 1
                    model = solver.model()
                    cost = self.computeCost(model)
                    assert (cost == self.bound)
                    solver.pop()
                else:
                    nb_unsat += 1
                    solver.pop()
                    core = solver.unsat_core()
                    if len(core) != 0:
                        # Only keep searching if the bound itself was part
                        # of the conflict; otherwise the problem is UNSAT.
                        self.bound += 1
                        while(not self.isSubsetSum(self.weights, len(self.weights), self.bound) and self.bound <= self.ub):
                            self.bound += 1
                        solver.push()
                        ctr = Sum(self.objective) <= self.bound
                        solver.assert_and_track(ctr, 'obj')
                        res = sat
        assert(solver.num_scopes() == 0)
        self.bound = cost
        return model
    def computeCost(self, model):
        """Sum the weights of the relaxation variables set to 1 in ``model``."""
        cost = 0
        for v in self.relax_vars:
            if model[v] == 1:
                cost = cost + self.cost_relax_vars[v]
        return cost
|
<filename>tyrell/enumerator/optimizer.py<gh_stars>0
from z3 import *
from .. import dsl as D
class Optimizer:
    """Encodes weighted (soft) structural constraints over a program sketch
    and minimises their total violation cost with Z3.

    Hard constraints go straight to the solver; weighted constraints are
    relaxed through fresh 0/1 ``relax`` variables whose weighted sum is
    minimised by an LSU-style loop (linear search on the bound, from SAT
    side) in :meth:`optimize`.
    """
    # NOTE(review): these are *class-level* mutable attributes, so every
    # Optimizer instance in the same process shares the same lists/dict.
    # If more than one Optimizer is ever created, state leaks between
    # them — confirm, and consider moving these into __init__.
    # additional variables to track if a production occurs or not in a program
    var_occurs = []
    # relaxation variables
    relax_vars = []
    # keeps track of the current assumptions
    assumptions = []
    # keeps track of the cost of each relaxation variable
    cost_relax_vars = {}
    def __init__(self, solver, spec, variables, nodes):
        """Store the Z3 solver, DSL spec, per-node decision variables and
        the sketch nodes; initialise the objective bookkeeping."""
        self.bound = 0          # current cost bound tried by the LSU loop
        self.ub = 0             # sum of all weights (upper bound on cost)
        self.solver = solver
        self.spec = spec
        self.variables = variables
        self.id = 0             # counter used to name fresh relax variables
        self.objective = []     # weighted relax terms to be minimised
        self.nodes = nodes
        self.weights = []       # weights of all soft constraints seen so far
    def createVariablesOccurrence(self):
        """Create one 0/1 variable ``occN`` per production and tie it to
        the node variables: occ[x] == 1 iff some node uses production x."""
        for x in range(0, self.spec.num_productions()):
            name = 'occ' + str(x)
            v = Int(name)
            self.var_occurs.append(v)
            self.solver.add(And(v >= 0, v <= 1))
        for x in range(0, len(self.var_occurs)):
            ctr = self.var_occurs[x] == 1
            rhs = self.variables[0] == x
            for y in range(1, len(self.variables)):
                rhs = Or(rhs, self.variables[y] == x)
                self.solver.add(
                    Implies(self.variables[y] == x, self.var_occurs[x] == 1))
            # occ[x] == 1 implies at least one node variable equals x.
            self.solver.add(Implies(ctr, rhs))
        for x in range(0, len(self.var_occurs)):
            for y in range(0, len(self.variables)):
                # occ[x] == 0 forces every node variable away from x.
                self.solver.add(
                    Implies(self.var_occurs[x] == 0, self.variables[y] != x))
    def mk_is_not_parent(self, parent, child, weight=None):
        """Forbid ``child`` from appearing as a direct child of ``parent``.

        If ``weight`` is given the constraint is soft: a fresh 0/1
        relaxation variable allows a violation at cost ``weight``.
        """
        child_pos = []
        # find positions that type-check between parent and child
        for x in range(0, len(parent.rhs)):
            if child.lhs == parent.rhs[x]:
                child_pos.append(x)
        for n in self.nodes:
            # not a leaf node
            if n.children != None:
                if weight != None:
                    # FIXME: reduce duplication of code
                    name = 'relax' + str(self.id)
                    v = Int(name)
                    self.cost_relax_vars[v] = weight
                    self.relax_vars.append(v)
                    self.objective.append(Product(weight, v))
                    self.weights.append(weight)
                    self.ub += weight
                    # domain of the relaxation variable
                    self.solver.add(Or(v == 0, v == 1))
                    # constraint for the is_parent constraint
                    ctr_children = []
                    for p in range(0, len(child_pos)):
                        ctr_children.append(
                            self.variables[n.children[p].id - 1] == child.id)
                    self.solver.add(
                        Or(Implies(Or(ctr_children), self.variables[n.id - 1] != parent.id), v == 1))
                    # relation between relaxation variables and constraint
                    self.solver.add(Implies(v == 1, Or(
                        self.variables[n.id - 1] == parent.id, Not(Or(ctr_children)))))
                    self.solver.add(
                        Implies(And(self.variables[n.id - 1] != parent.id, Or(ctr_children)), v == 0))
                    self.id = self.id + 1
                else:
                    # Hard version: a child match forbids the parent production.
                    ctr_children = []
                    for p in range(0, len(child_pos)):
                        ctr_children.append(
                            self.variables[n.children[p].id - 1] == child.id)
                    self.solver.add(
                        Implies(Or(ctr_children), self.variables[n.id - 1] != parent.id))
    # FIXME: dissociate the creation of variables with the creation of constraints?
    def mk_is_parent(self, parent, child, weight=None):
        '''children production will have the parent production with probability weight'''
        child_pos = []
        # find positions that type-check between parent and child
        for x in range(0, len(parent.rhs)):
            if child.lhs == parent.rhs[x]:
                child_pos.append(x)
        for n in self.nodes:
            # not a leaf node
            if n.children != None:
                if weight != None:
                    # FIXME: reduce duplication of code
                    name = 'relax' + str(self.id)
                    v = Int(name)
                    self.cost_relax_vars[v] = weight
                    self.relax_vars.append(v)
                    self.objective.append(Product(weight, v))
                    self.weights.append(weight)
                    self.ub += weight
                    # domain of the relaxation variable
                    self.solver.add(Or(v == 0, v == 1))
                    # constraint for the is_parent constraint
                    ctr_children = []
                    for p in range(0, len(child_pos)):
                        ctr_children.append(
                            self.variables[n.children[p].id - 1] == child.id)
                    self.solver.add(
                        Or(Implies(self.variables[n.id - 1] == parent.id, Or(ctr_children)), v == 1))
                    # relation between relaxation variables and constraint
                    self.solver.add(Implies(v == 1, Or(
                        self.variables[n.id - 1] != parent.id, Not(Or(ctr_children)))))
                    self.solver.add(
                        Implies(And(self.variables[n.id - 1] == parent.id, Or(ctr_children)), v == 0))
                    self.id = self.id + 1
                else:
                    # Hard version: the parent production forces a child match.
                    ctr_children = []
                    for p in range(0, len(child_pos)):
                        ctr_children.append(
                            self.variables[n.children[p].id - 1] == child.id)
                    self.solver.add(
                        Implies(self.variables[n.id - 1] == parent.id, Or(ctr_children)))
    def mk_not_occurs(self, production, weight=None):
        '''a production will not occur with a given probability'''
        if len(self.var_occurs) == 0:
            # Occurrence variables are created lazily on first use.
            self.createVariablesOccurrence()
        if weight != None:
            name = 'relax' + str(self.id)
            v = Int(name)
            self.cost_relax_vars[v] = weight
            self.relax_vars.append(v)
            self.objective.append(Product(weight, v))
            self.weights.append(weight)
            self.ub += weight
            # domain of the relaxation variable
            self.solver.add(Or(v == 0, v == 1))
            # constraint for at least once
            self.solver.add(Or(self.var_occurs[production.id] == 0, v == 1))
            # relation between relaxation variables and constraint
            self.solver.add(
                Implies(v == 1, self.var_occurs[production.id] != 0))
            self.solver.add(
                Implies(self.var_occurs[production.id] == 0, v == 0))
            self.id = self.id + 1
        else:
            self.solver.add(self.var_occurs[production.id] == 0)
    # FIXME: dissociate the creation of variables with the creation of constraints?
    def mk_occurs(self, production, weight=None):
        '''a production will occur with a given probability'''
        if len(self.var_occurs) == 0:
            # Occurrence variables are created lazily on first use.
            self.createVariablesOccurrence()
        if weight != None:
            name = 'relax' + str(self.id)
            v = Int(name)
            self.cost_relax_vars[v] = weight
            self.relax_vars.append(v)
            self.objective.append(Product(weight, v))
            self.weights.append(weight)
            self.ub += weight
            # domain of the relaxation variable
            self.solver.add(Or(v == 0, v == 1))
            # constraint for at least once
            self.solver.add(Or(self.var_occurs[production.id] == 1, v == 1))
            # relation between relaxation variables and constraint
            self.solver.add(
                Implies(v == 1, self.var_occurs[production.id] != 1))
            self.solver.add(
                Implies(self.var_occurs[production.id] == 1, v == 0))
            self.id = self.id + 1
        else:
            self.solver.add(self.var_occurs[production.id] == 1)
    def isSubsetSum(self, set, n, sum):
        """Classic DP subset-sum: True iff some subset of ``set[:n]`` sums
        to exactly ``sum``. Used by optimize() to skip cost bounds that no
        combination of weights can reach."""
        subset = ([[False for i in range(sum + 1)]
                   for i in range(n + 1)])
        # If sum is 0, then answer is true
        for i in range(n + 1):
            subset[i][0] = True
        # If sum is not 0 and set is empty,
        # then answer is false
        for i in range(1, sum + 1):
            subset[0][i] = False
        # Fill the subset table in botton up manner
        for i in range(1, n + 1):
            for j in range(1, sum + 1):
                if j < set[i - 1]:
                    subset[i][j] = subset[i - 1][j]
                if j >= set[i - 1]:
                    subset[i][j] = (subset[i - 1][j]
                                    or subset[i - 1][j - set[i - 1]])
        return subset[n][sum]
    def optimize(self, solver):
        """Search for a model minimising the weighted sum of relaxations.

        LSU loop: assert ``Sum(objective) <= bound`` (tracked as 'obj'),
        and on UNSAT raise the bound — skipping values unreachable as a
        subset sum of the weights — until the solver reports sat.
        Returns the model, or None if even the fully relaxed problem is
        unsatisfiable.
        """
        model = None
        cost = 0
        res = sat
        nb_sat = 0
        nb_unsat = 0
        # no optimization is defined
        if len(self.objective) == 0:
            res = solver.check()
            if res == sat:
                model = solver.model()
        # optimization using the LSU algorithm
        else:
            solver.set(unsat_core=True)
            solver.push()
            ctr = Sum(self.objective) <= self.bound
            solver.assert_and_track(ctr, 'obj')
            while model == None and res == sat:
                res = solver.check()
                if res == sat:
                    nb_sat += 1
                    model = solver.model()
                    cost = self.computeCost(model)
                    assert (cost == self.bound)
                    solver.pop()
                else:
                    nb_unsat += 1
                    solver.pop()
                    core = solver.unsat_core()
                    if len(core) != 0:
                        # Only keep searching if the bound itself was part
                        # of the conflict; otherwise the problem is UNSAT.
                        self.bound += 1
                        while(not self.isSubsetSum(self.weights, len(self.weights), self.bound) and self.bound <= self.ub):
                            self.bound += 1
                        solver.push()
                        ctr = Sum(self.objective) <= self.bound
                        solver.assert_and_track(ctr, 'obj')
                        res = sat
        assert(solver.num_scopes() == 0)
        self.bound = cost
        return model
    def computeCost(self, model):
        """Sum the weights of the relaxation variables set to 1 in ``model``."""
        cost = 0
        for v in self.relax_vars:
            if model[v] == 1:
                cost = cost + self.cost_relax_vars[v]
        return cost
|
en
| 0.889515
|
# additional variables to track if a production occurs or not in a program # relaxation variables # keeps track of the current assumptions # keeps track of the cost of each relaxation variable # find positions that type-check between parent and child # not a leaf node # FIXME: reduce duplication of code # domain of the relaxation variable # constraint for the is_parent constraint # relation between relaxation variables and constraint # FIXME: dissociate the creation of variables with the creation of constraints? children production will have the parent production with probability weight # find positions that type-check between parent and child # not a leaf node # FIXME: reduce duplication of code # domain of the relaxation variable # constraint for the is_parent constraint # relation between relaxation variables and constraint a production will not occur with a given probability # domain of the relaxation variable # constraint for at least once # relation between relaxation variables and constraint # FIXME: dissociate the creation of variables with the creation of constraints? a production will occur with a given probability # domain of the relaxation variable # constraint for at least once # relation between relaxation variables and constraint # If sum is 0, then answer is true # If sum is not 0 and set is empty, # then answer is false # Fill the subset table in botton up manner # no optimization is defined # optimization using the LSU algorithm
| 2.383431
| 2
|
enthought/permissions/default/policy_data.py
|
enthought/etsproxy
| 3
|
6629473
|
# proxy module
from __future__ import absolute_import
from apptools.permissions.default.policy_data import *
|
# proxy module
from __future__ import absolute_import
from apptools.permissions.default.policy_data import *
|
es
| 0.125187
|
# proxy module
| 1.076759
| 1
|
TelegramDownloader/TelegramDownloader.py
|
joas77/AnimeMovieDown
| 0
|
6629474
|
<filename>TelegramDownloader/TelegramDownloader.py
import json
from telethon import TelegramClient, client, events
from telethon.tl.types import InputMessagesFilterDocument, InputMessagesFilterVideo
from telethon.utils import get_display_name
class TelegramDownloader:
def __init__(self, api_id, api_hash) -> None:
self._api_id = api_id
self._api_hash = api_hash
self._client = TelegramClient("Downloader",
self._api_id, self._api_hash)
def run(self):
with self._client:
self._client.loop.run_until_complete(self._main())
async def _main(self):
#TODO: remove dialogs limit
dialogs = await self._client.get_dialogs(43)
for d in dialogs:
name = get_display_name(d.entity)
if name in ("Fumetsu No Anata E"):
messages = self._client.iter_messages(name, filter=InputMessagesFilterDocument) #InputMessagesFilterVideo)
async for msg in messages:
print(get_display_name(msg.sender))
if hasattr(msg, "message"):
print(msg.message)
print("---------------")
def get_media(self, channel:str)->list:
pass
if __name__=="__main__":
# TODO: get api id and hash from condig file
API_ID = "TELEGRAM_API_ID"
API_HASH = "TELEGRAM_API_HASH"
teldown = TelegramDownloader(API_ID, API_HASH)
teldown.run()
# TODO where is the script going to be executed
# how to set path
cfg_json_path = "./TelegramDownloader/config/channels.json"
with open(cfg_json_path) as json_file:
channels_cfg = json.load(json_file)
print(channels_cfg)
|
<filename>TelegramDownloader/TelegramDownloader.py
import json
from telethon import TelegramClient, client, events
from telethon.tl.types import InputMessagesFilterDocument, InputMessagesFilterVideo
from telethon.utils import get_display_name
class TelegramDownloader:
def __init__(self, api_id, api_hash) -> None:
self._api_id = api_id
self._api_hash = api_hash
self._client = TelegramClient("Downloader",
self._api_id, self._api_hash)
def run(self):
with self._client:
self._client.loop.run_until_complete(self._main())
async def _main(self):
#TODO: remove dialogs limit
dialogs = await self._client.get_dialogs(43)
for d in dialogs:
name = get_display_name(d.entity)
if name in ("Fumetsu No Anata E"):
messages = self._client.iter_messages(name, filter=InputMessagesFilterDocument) #InputMessagesFilterVideo)
async for msg in messages:
print(get_display_name(msg.sender))
if hasattr(msg, "message"):
print(msg.message)
print("---------------")
def get_media(self, channel:str)->list:
pass
if __name__=="__main__":
# TODO: get api id and hash from condig file
API_ID = "TELEGRAM_API_ID"
API_HASH = "TELEGRAM_API_HASH"
teldown = TelegramDownloader(API_ID, API_HASH)
teldown.run()
# TODO where is the script going to be executed
# how to set path
cfg_json_path = "./TelegramDownloader/config/channels.json"
with open(cfg_json_path) as json_file:
channels_cfg = json.load(json_file)
print(channels_cfg)
|
en
| 0.705702
|
#TODO: remove dialogs limit #InputMessagesFilterVideo) # TODO: get api id and hash from condig file # TODO where is the script going to be executed # how to set path
| 2.505648
| 3
|
437/Pathofsum-III.py
|
cccccccccccccc/Myleetcode
| 0
|
6629475
|
# Definition for a binary tree node.
"""
timecomplexity = O(n) spacecomplexity = O(n)
Using a dict to store prefix sum occurs so far
let sum = from root to cur node val's sum
check how many prefix sums equal to sum - target
then there are same number of subpath that subpathsum = target
remember that when return from subrecusive, need to cancel the curpathsum from dict because the path can only use one childnode
"""
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def _getPathSum(self,root,target,curPathSum,dictSum):
if root is None:
return
curPathSum += root.val
oldPathSum = curPathSum -target
self.ans += dictSum.get(oldPathSum,0)
dictSum[curPathSum] = dictSum.get(curPathSum,0) + 1
self._getPathSum(root.left,target,curPathSum,dictSum)
self._getPathSum(root.right,target,curPathSum,dictSum)
dictSum[curPathSum] -= 1
def pathSum(self, root: TreeNode, sum: int) -> int:
self.ans = 0
dictSum = {0:1}
self._getPathSum(root,sum,0,dictSum)
return self.ans
#root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8
A = Solution()
a = TreeNode(10)
b = TreeNode(5)
c = TreeNode(-3)
d = TreeNode(3)
e = TreeNode(2)
f = TreeNode(11)
g = TreeNode(3)
h = TreeNode(-2)
i = TreeNode(1)
a.left = b
a.right = c
b.left = d
b.right = e
c.right = f
d.left = g
d.right = h
e.right = i
print(A.pathSum(a,8))
|
# Definition for a binary tree node.
"""
timecomplexity = O(n) spacecomplexity = O(n)
Using a dict to store prefix sum occurs so far
let sum = from root to cur node val's sum
check how many prefix sums equal to sum - target
then there are same number of subpath that subpathsum = target
remember that when return from subrecusive, need to cancel the curpathsum from dict because the path can only use one childnode
"""
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def _getPathSum(self,root,target,curPathSum,dictSum):
if root is None:
return
curPathSum += root.val
oldPathSum = curPathSum -target
self.ans += dictSum.get(oldPathSum,0)
dictSum[curPathSum] = dictSum.get(curPathSum,0) + 1
self._getPathSum(root.left,target,curPathSum,dictSum)
self._getPathSum(root.right,target,curPathSum,dictSum)
dictSum[curPathSum] -= 1
def pathSum(self, root: TreeNode, sum: int) -> int:
self.ans = 0
dictSum = {0:1}
self._getPathSum(root,sum,0,dictSum)
return self.ans
#root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8
A = Solution()
a = TreeNode(10)
b = TreeNode(5)
c = TreeNode(-3)
d = TreeNode(3)
e = TreeNode(2)
f = TreeNode(11)
g = TreeNode(3)
h = TreeNode(-2)
i = TreeNode(1)
a.left = b
a.right = c
b.left = d
b.right = e
c.right = f
d.left = g
d.right = h
e.right = i
print(A.pathSum(a,8))
|
en
| 0.785891
|
# Definition for a binary tree node. timecomplexity = O(n) spacecomplexity = O(n) Using a dict to store prefix sum occurs so far let sum = from root to cur node val's sum check how many prefix sums equal to sum - target then there are same number of subpath that subpathsum = target remember that when return from subrecusive, need to cancel the curpathsum from dict because the path can only use one childnode #root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8
| 3.688837
| 4
|
pt/vmz/models/embedding.py
|
opqi/VMZ
| 0
|
6629476
|
<filename>pt/vmz/models/embedding.py
import torch.nn as nn
import torch
class LearnedPositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=512):
super().__init__()
pe = torch.zeros(max_len, d_model).float().to(device='cuda')
pe.require_grad = True
pe = pe.unsqueeze(0)
self.pe = nn.Parameter(pe)
nn.init.normal_(self.pe, std=0.02)
def forward(self, x):
return self.pe[:, :x.size(1)]
class BERTEmbedding(nn.Module):
def __init__(self, input_dim, max_len, dropout=0.1):
super().__init__()
self.learnedPosition = LearnedPositionalEmbedding(
d_model=input_dim, max_len=max_len)
self.dropout = nn.Dropout(dropout)
def forward(self, sequence):
x = self.learnedPosition(sequence) + sequence
return self.dropout(x)
|
<filename>pt/vmz/models/embedding.py
import torch.nn as nn
import torch
class LearnedPositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=512):
super().__init__()
pe = torch.zeros(max_len, d_model).float().to(device='cuda')
pe.require_grad = True
pe = pe.unsqueeze(0)
self.pe = nn.Parameter(pe)
nn.init.normal_(self.pe, std=0.02)
def forward(self, x):
return self.pe[:, :x.size(1)]
class BERTEmbedding(nn.Module):
def __init__(self, input_dim, max_len, dropout=0.1):
super().__init__()
self.learnedPosition = LearnedPositionalEmbedding(
d_model=input_dim, max_len=max_len)
self.dropout = nn.Dropout(dropout)
def forward(self, sequence):
x = self.learnedPosition(sequence) + sequence
return self.dropout(x)
|
none
| 1
| 2.527148
| 3
|
|
wand/compat.py
|
claudep/wand
| 0
|
6629477
|
<gh_stars>0
""":mod:`wand.compat` --- Compatibility layer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides several subtle things to support
multiple Python versions (2.7, 3.3+) and VM implementations
(CPython, PyPy).
"""
import collections
try:
import collections.abc
except ImportError:
pass
import contextlib
import io
import sys
import types
__all__ = ('PY3', 'abc', 'binary', 'binary_type', 'encode_filename',
'file_types', 'nested', 'string_type', 'text', 'text_type',
'xrange')
#: (:class:`bool`) Whether it is Python 3.x or not.
PY3 = sys.version_info >= (3,)
#: (:class:`module`) Module containing abstract base classes.
#: :mod:`collections` in Python 2 and :mod:`collections.abc` in Python 3.
abc = collections.abc if PY3 else collections
#: (:class:`type`) Type for representing binary data. :class:`str` in Python 2
#: and :class:`bytes` in Python 3.
binary_type = bytes if PY3 else str
#: (:class:`type`) Type for text data. :class:`basestring` in Python 2
#: and :class:`str` in Python 3.
string_type = str if PY3 else basestring # noqa
#: (:class:`type`) Type for representing Unicode textual data.
#: :class:`unicode` in Python 2 and :class:`str` in Python 3.
text_type = str if PY3 else unicode # noqa
def binary(string, var=None):
"""Makes ``string`` to :class:`str` in Python 2.
Makes ``string`` to :class:`bytes` in Python 3.
:param string: a string to cast it to :data:`binary_type`
:type string: :class:`bytes`, :class:`str`, :class:`unicode`
:param var: an optional variable name to be used for error message
:type var: :class:`str`
"""
if isinstance(string, text_type):
return string.encode()
elif isinstance(string, binary_type):
return string
if var:
raise TypeError('{0} must be a string, not {1!r}'.format(var, string))
raise TypeError('expected a string, not ' + repr(string))
if PY3:
def text(string):
if isinstance(string, bytes):
return string.decode('utf-8')
return string
else:
def text(string):
"""Makes ``string`` to :class:`str` in Python 3.
Does nothing in Python 2.
:param string: a string to cast it to :data:`text_type`
:type string: :class:`bytes`, :class:`str`, :class:`unicode`
"""
return string
#: The :func:`xrange()` function. Alias for :func:`range()` in Python 3.
xrange = range if PY3 else xrange # noqa
#: (:class:`type`, :class:`tuple`) Types for file objects that have
#: ``fileno()``.
file_types = io.RawIOBase if PY3 else (io.RawIOBase, types.FileType)
def encode_filename(filename):
"""If ``filename`` is a :data:`text_type`, encode it to
:data:`binary_type` according to filesystem's default encoding.
.. versionchanged:: 0.5.3
Added support for PEP-519 https://github.com/emcconville/wand/pull/339
"""
if hasattr(filename, "__fspath__"): # PEP 519
filename = filename.__fspath__()
if isinstance(filename, text_type):
return filename.encode(sys.getfilesystemencoding())
return filename
try:
nested = contextlib.nested
except AttributeError:
# http://hg.python.org/cpython/file/v2.7.6/Lib/contextlib.py#l88
@contextlib.contextmanager
def nested(*managers):
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except: # noqa: E722
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except: # noqa: E722
exc = sys.exc_info()
if exc != (None, None, None):
# PEP 3109
e = exc[0](exc[1])
e.__traceback__ = e[2]
raise e
|
""":mod:`wand.compat` --- Compatibility layer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides several subtle things to support
multiple Python versions (2.7, 3.3+) and VM implementations
(CPython, PyPy).
"""
import collections
try:
import collections.abc
except ImportError:
pass
import contextlib
import io
import sys
import types
__all__ = ('PY3', 'abc', 'binary', 'binary_type', 'encode_filename',
'file_types', 'nested', 'string_type', 'text', 'text_type',
'xrange')
#: (:class:`bool`) Whether it is Python 3.x or not.
PY3 = sys.version_info >= (3,)
#: (:class:`module`) Module containing abstract base classes.
#: :mod:`collections` in Python 2 and :mod:`collections.abc` in Python 3.
abc = collections.abc if PY3 else collections
#: (:class:`type`) Type for representing binary data. :class:`str` in Python 2
#: and :class:`bytes` in Python 3.
binary_type = bytes if PY3 else str
#: (:class:`type`) Type for text data. :class:`basestring` in Python 2
#: and :class:`str` in Python 3.
string_type = str if PY3 else basestring # noqa
#: (:class:`type`) Type for representing Unicode textual data.
#: :class:`unicode` in Python 2 and :class:`str` in Python 3.
text_type = str if PY3 else unicode # noqa
def binary(string, var=None):
"""Makes ``string`` to :class:`str` in Python 2.
Makes ``string`` to :class:`bytes` in Python 3.
:param string: a string to cast it to :data:`binary_type`
:type string: :class:`bytes`, :class:`str`, :class:`unicode`
:param var: an optional variable name to be used for error message
:type var: :class:`str`
"""
if isinstance(string, text_type):
return string.encode()
elif isinstance(string, binary_type):
return string
if var:
raise TypeError('{0} must be a string, not {1!r}'.format(var, string))
raise TypeError('expected a string, not ' + repr(string))
if PY3:
def text(string):
if isinstance(string, bytes):
return string.decode('utf-8')
return string
else:
def text(string):
"""Makes ``string`` to :class:`str` in Python 3.
Does nothing in Python 2.
:param string: a string to cast it to :data:`text_type`
:type string: :class:`bytes`, :class:`str`, :class:`unicode`
"""
return string
#: The :func:`xrange()` function. Alias for :func:`range()` in Python 3.
xrange = range if PY3 else xrange # noqa
#: (:class:`type`, :class:`tuple`) Types for file objects that have
#: ``fileno()``.
file_types = io.RawIOBase if PY3 else (io.RawIOBase, types.FileType)
def encode_filename(filename):
"""If ``filename`` is a :data:`text_type`, encode it to
:data:`binary_type` according to filesystem's default encoding.
.. versionchanged:: 0.5.3
Added support for PEP-519 https://github.com/emcconville/wand/pull/339
"""
if hasattr(filename, "__fspath__"): # PEP 519
filename = filename.__fspath__()
if isinstance(filename, text_type):
return filename.encode(sys.getfilesystemencoding())
return filename
try:
nested = contextlib.nested
except AttributeError:
# http://hg.python.org/cpython/file/v2.7.6/Lib/contextlib.py#l88
@contextlib.contextmanager
def nested(*managers):
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except: # noqa: E722
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except: # noqa: E722
exc = sys.exc_info()
if exc != (None, None, None):
# PEP 3109
e = exc[0](exc[1])
e.__traceback__ = e[2]
raise e
|
en
| 0.419397
|
:mod:`wand.compat` --- Compatibility layer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module provides several subtle things to support multiple Python versions (2.7, 3.3+) and VM implementations (CPython, PyPy). #: (:class:`bool`) Whether it is Python 3.x or not. #: (:class:`module`) Module containing abstract base classes. #: :mod:`collections` in Python 2 and :mod:`collections.abc` in Python 3. #: (:class:`type`) Type for representing binary data. :class:`str` in Python 2 #: and :class:`bytes` in Python 3. #: (:class:`type`) Type for text data. :class:`basestring` in Python 2 #: and :class:`str` in Python 3. # noqa #: (:class:`type`) Type for representing Unicode textual data. #: :class:`unicode` in Python 2 and :class:`str` in Python 3. # noqa Makes ``string`` to :class:`str` in Python 2. Makes ``string`` to :class:`bytes` in Python 3. :param string: a string to cast it to :data:`binary_type` :type string: :class:`bytes`, :class:`str`, :class:`unicode` :param var: an optional variable name to be used for error message :type var: :class:`str` Makes ``string`` to :class:`str` in Python 3. Does nothing in Python 2. :param string: a string to cast it to :data:`text_type` :type string: :class:`bytes`, :class:`str`, :class:`unicode` #: The :func:`xrange()` function. Alias for :func:`range()` in Python 3. # noqa #: (:class:`type`, :class:`tuple`) Types for file objects that have #: ``fileno()``. If ``filename`` is a :data:`text_type`, encode it to :data:`binary_type` according to filesystem's default encoding. .. versionchanged:: 0.5.3 Added support for PEP-519 https://github.com/emcconville/wand/pull/339 # PEP 519 # http://hg.python.org/cpython/file/v2.7.6/Lib/contextlib.py#l88 # noqa: E722 # noqa: E722 # PEP 3109
| 2.29255
| 2
|
battery-ffi/examples/ffi.py
|
fossabot/rust-battery
| 0
|
6629478
|
<reponame>fossabot/rust-battery
#!/usr/bin/env python
"""
This is an example of FFI bindings for `battery-ffi` library.
Call it similar to this:
```
$ LD_LIBRARY_PATH=../../target/debug/ ./ffi.py
```
`battery-ffi` crate should be built before that.
"""
import sys
import ctypes
prefix = {'win32': ''}.get(sys.platform, 'lib')
extension = {'darwin': 'dylib', 'win32': 'dll'}.get(sys.platform, 'so')
lib = ctypes.cdll.LoadLibrary('{}battery_ffi.{}'.format(prefix, extension))
STATE = {
0: 'unknown',
1: 'charging',
2: 'discharging',
3: 'empty',
4: 'full',
}
TECHNOLOGY = {
0: 'unknown',
1: 'lithium-ion',
2: 'lead-acid',
3: 'lithium-polymer',
4: 'nickel-metal-hydride',
5: 'nickel-cadmium',
6: 'nickel-zinc',
7: 'lithium-iron-phosphate',
8: 'rechargeable-alkaline-manganese',
}
#
# Wrappers around opaque pointers
#
class Manager(ctypes.Structure):
pass
class Batteries(ctypes.Structure):
pass
class Battery(ctypes.Structure):
pass
#
# Bindings for exported functions
#
lib.battery_manager_new.argtypes = None
lib.battery_manager_new.restype = ctypes.POINTER(Manager)
lib.battery_manager_iter.argtypes = (ctypes.POINTER(Manager), )
lib.battery_manager_iter.restype = ctypes.POINTER(Batteries)
lib.battery_manager_free.argtypes = (ctypes.POINTER(Manager), )
lib.battery_manager_free.restype = None
lib.battery_iterator_next.argtypes = (ctypes.POINTER(Batteries), )
lib.battery_iterator_next.restype = ctypes.POINTER(Battery)
lib.battery_free.argtypes = (ctypes.POINTER(Battery), )
lib.battery_free.restype = None
lib.battery_str_free.argtypes = (ctypes.c_char_p, )
lib.battery_str_free.restype = None
lib.battery_get_vendor.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_vendor.restype = ctypes.c_char_p
lib.battery_get_model.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_model.restype = ctypes.c_char_p
lib.battery_get_serial_number.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_serial_number.restype = ctypes.c_char_p
lib.battery_get_state.restype = ctypes.c_uint8
lib.battery_get_energy.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_energy.restype = ctypes.c_uint32
lib.battery_get_energy_full.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_energy_full.restype = ctypes.c_uint32
lib.battery_get_energy_full_design.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_energy_full_design.restype = ctypes.c_uint32
lib.battery_get_energy_rate.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_energy_rate.restype = ctypes.c_uint32
lib.battery_get_voltage.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_voltage.restype = ctypes.c_uint32
lib.battery_get_technology.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_technology.restype = ctypes.c_uint8
lib.battery_get_time_to_full.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_time_to_full.restype = ctypes.c_uint64
lib.battery_get_time_to_empty.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_time_to_empty.restype = ctypes.c_uint64
lib.battery_get_percentage.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_percentage.restype = ctypes.c_float
lib.battery_get_temperature.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_temperature.restype = ctypes.c_float
lib.battery_get_capacity.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_capacity.restype = ctypes.c_float
lib.battery_get_cycle_count.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_cycle_count.restype = ctypes.c_uint32
if __name__ == '__main__':
manager = lib.battery_manager_new()
iterator = lib.battery_manager_iter(manager)
while True:
battery = lib.battery_iterator_next(iterator)
if not battery:
break
print('Vendor', lib.battery_get_vendor(battery))
print('Model', lib.battery_get_model(battery))
print('S/N', lib.battery_get_serial_number(battery))
print('State', STATE.get(lib.battery_get_state(battery)))
print('Technology', TECHNOLOGY.get(lib.battery_get_technology(battery)))
print('Energy (Wh)', lib.battery_get_energy(battery) / 1000)
print('Energy full (Wh)', lib.battery_get_energy_full_design(battery) / 1000)
print('Energy full design (Wh)', lib.battery_get_energy_full_design(battery) / 1000)
print('Energy rate (W)', lib.battery_get_energy_rate(battery) / 1000)
print('Voltage (V)', lib.battery_get_voltage(battery) / 1000)
print('Time to full (sec)', lib.battery_get_time_to_full(battery))
print('Time to empty (sec)', lib.battery_get_time_to_empty(battery))
print('Percentage (%)', lib.battery_get_percentage(battery))
print('Temperature (C)', lib.battery_get_temperature(battery))
print('Capacity (%)', lib.battery_get_capacity(battery))
print('Cycle count', lib.battery_get_cycle_count(battery))
lib.battery_free(battery)
lib.battery_iterator_free(battery)
lib.battery_manager_free(manager)
|
#!/usr/bin/env python
"""
This is an example of FFI bindings for `battery-ffi` library.
Call it similar to this:
```
$ LD_LIBRARY_PATH=../../target/debug/ ./ffi.py
```
`battery-ffi` crate should be built before that.
"""
import sys
import ctypes
prefix = {'win32': ''}.get(sys.platform, 'lib')
extension = {'darwin': 'dylib', 'win32': 'dll'}.get(sys.platform, 'so')
lib = ctypes.cdll.LoadLibrary('{}battery_ffi.{}'.format(prefix, extension))
STATE = {
0: 'unknown',
1: 'charging',
2: 'discharging',
3: 'empty',
4: 'full',
}
TECHNOLOGY = {
0: 'unknown',
1: 'lithium-ion',
2: 'lead-acid',
3: 'lithium-polymer',
4: 'nickel-metal-hydride',
5: 'nickel-cadmium',
6: 'nickel-zinc',
7: 'lithium-iron-phosphate',
8: 'rechargeable-alkaline-manganese',
}
#
# Wrappers around opaque pointers
#
class Manager(ctypes.Structure):
pass
class Batteries(ctypes.Structure):
pass
class Battery(ctypes.Structure):
pass
#
# Bindings for exported functions
#
lib.battery_manager_new.argtypes = None
lib.battery_manager_new.restype = ctypes.POINTER(Manager)
lib.battery_manager_iter.argtypes = (ctypes.POINTER(Manager), )
lib.battery_manager_iter.restype = ctypes.POINTER(Batteries)
lib.battery_manager_free.argtypes = (ctypes.POINTER(Manager), )
lib.battery_manager_free.restype = None
lib.battery_iterator_next.argtypes = (ctypes.POINTER(Batteries), )
lib.battery_iterator_next.restype = ctypes.POINTER(Battery)
lib.battery_free.argtypes = (ctypes.POINTER(Battery), )
lib.battery_free.restype = None
lib.battery_str_free.argtypes = (ctypes.c_char_p, )
lib.battery_str_free.restype = None
lib.battery_get_vendor.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_vendor.restype = ctypes.c_char_p
lib.battery_get_model.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_model.restype = ctypes.c_char_p
lib.battery_get_serial_number.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_serial_number.restype = ctypes.c_char_p
lib.battery_get_state.restype = ctypes.c_uint8
lib.battery_get_energy.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_energy.restype = ctypes.c_uint32
lib.battery_get_energy_full.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_energy_full.restype = ctypes.c_uint32
lib.battery_get_energy_full_design.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_energy_full_design.restype = ctypes.c_uint32
lib.battery_get_energy_rate.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_energy_rate.restype = ctypes.c_uint32
lib.battery_get_voltage.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_voltage.restype = ctypes.c_uint32
lib.battery_get_technology.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_technology.restype = ctypes.c_uint8
lib.battery_get_time_to_full.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_time_to_full.restype = ctypes.c_uint64
lib.battery_get_time_to_empty.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_time_to_empty.restype = ctypes.c_uint64
lib.battery_get_percentage.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_percentage.restype = ctypes.c_float
lib.battery_get_temperature.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_temperature.restype = ctypes.c_float
lib.battery_get_capacity.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_capacity.restype = ctypes.c_float
lib.battery_get_cycle_count.argtypes = (ctypes.POINTER(Battery), )
lib.battery_get_cycle_count.restype = ctypes.c_uint32
if __name__ == '__main__':
manager = lib.battery_manager_new()
iterator = lib.battery_manager_iter(manager)
while True:
battery = lib.battery_iterator_next(iterator)
if not battery:
break
print('Vendor', lib.battery_get_vendor(battery))
print('Model', lib.battery_get_model(battery))
print('S/N', lib.battery_get_serial_number(battery))
print('State', STATE.get(lib.battery_get_state(battery)))
print('Technology', TECHNOLOGY.get(lib.battery_get_technology(battery)))
print('Energy (Wh)', lib.battery_get_energy(battery) / 1000)
print('Energy full (Wh)', lib.battery_get_energy_full_design(battery) / 1000)
print('Energy full design (Wh)', lib.battery_get_energy_full_design(battery) / 1000)
print('Energy rate (W)', lib.battery_get_energy_rate(battery) / 1000)
print('Voltage (V)', lib.battery_get_voltage(battery) / 1000)
print('Time to full (sec)', lib.battery_get_time_to_full(battery))
print('Time to empty (sec)', lib.battery_get_time_to_empty(battery))
print('Percentage (%)', lib.battery_get_percentage(battery))
print('Temperature (C)', lib.battery_get_temperature(battery))
print('Capacity (%)', lib.battery_get_capacity(battery))
print('Cycle count', lib.battery_get_cycle_count(battery))
lib.battery_free(battery)
lib.battery_iterator_free(battery)
lib.battery_manager_free(manager)
|
en
| 0.759803
|
#!/usr/bin/env python This is an example of FFI bindings for `battery-ffi` library. Call it similar to this: ``` $ LD_LIBRARY_PATH=../../target/debug/ ./ffi.py ``` `battery-ffi` crate should be built before that. # # Wrappers around opaque pointers # # # Bindings for exported functions #
| 2.1603
| 2
|
DataWorkflow/data_core/migrations/0006_file_model_etag_refactor.py
|
Swiss-Polar-Institute/data-workflow
| 0
|
6629479
|
# Generated by Django 2.2.6 on 2019-10-24 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_core', '0005_adds_sha1_in_abstract_file'),
]
operations = [
migrations.RemoveField(
model_name='file',
name='md5',
),
migrations.AddField(
model_name='file',
name='etag',
field=models.CharField(default=None, help_text='ETag of the file', max_length=35),
preserve_default=False,
),
]
|
# Generated by Django 2.2.6 on 2019-10-24 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_core', '0005_adds_sha1_in_abstract_file'),
]
operations = [
migrations.RemoveField(
model_name='file',
name='md5',
),
migrations.AddField(
model_name='file',
name='etag',
field=models.CharField(default=None, help_text='ETag of the file', max_length=35),
preserve_default=False,
),
]
|
en
| 0.823006
|
# Generated by Django 2.2.6 on 2019-10-24 14:51
| 1.595657
| 2
|
packages/performance/management/tasks/task_system.py
|
elicharlese/DeFi-Dashboard
| 0
|
6629480
|
from task_functions import *
'''NO GLOBAL VALUES'''
the_menu = {"L": "List", "A": "Add", "U": "Update", "D": "Delete", "S": "Save the data to file",
"R": "Restore data from file", "Q": "Quit this program"} # dictionary of menu options # FIXME: automate_management()
all_tasks = [
{
"name": "Call XYZ",
"info": "",
"priority": 3,
"duedate": "05/28/2022",
"done" : "yes"
},
{
"name": "Finish checkpoint 1 for CSW8",
"info": "Submit to Gradescope",
"priority": 5,
"duedate": '06/02/2022',
"done": 'no'
},
{
"name": "Finish checkpoint 2 for CSW8",
"info": "Implement the new functions",
"priority": 5,
"duedate": '06/05/2022',
"done": 'no'
}
]
# list_menu will contain the "List" menu suboptions
list_menu = {
"A": "all tasks",
"C": "completed tasks",
"I": "incomplete tasks"
}
# priority_map will contain the mapping of the integer priority values to their textual interpretation
priority_map = {
1: "Lowest",
2: "Low",
3: "Medium",
4: "High",
5: "Highest"
}
while True:
print(the_menu)
opt = input("::: Enter a menu option\n> ")
opt = opt.upper() # to allow us to input lower- or upper-case letters
if opt == 'L':
if all_tasks == []:
print("WARNING: There is nothing to display!")
# Pause before going back to the main menu
input("::: Press Enter to continue")
continue
subopt = get_selection(the_menu[opt], list_menu) # FIXME: this is a placeholder for the function
input("::: Enter a menu option\n> ")
# if subopt == 'A':
# print_tasks(all_tasks, priority_map) # FIXME: this is a placeholder for the function
# elif subopt == 'C':
# print_tasks(all_tasks, priority_map, completed='yes') # FIXME: this is a placeholder for the function
# elif subopt == 'I':
# print_tasks(all_tasks, priority_map, completed='no') # FIXME: this is a placeholder for the function
# elif opt == 'A':
# continue_action = 'y'
# while continue_action == 'y':
# print("::: Enter each required field, separated by commas.")
# print("::: name, info, priority, MM/DD/YYYY, is task done? (yes/no)")
# task = input("> ") # TODO: get and process the data into a list
# task = task.split(',')
# result = get_new_task(all_tasks, priority_map) # TODO: attempt to create a new task
# if type(result) == dict:
# all_tasks = all_tasks.append(result) # TODO: add a new task to the list of tasks
# print(f"Successfully added a new task!")
# print_task(result, priority_map)
# elif type(result) == int:
# print(f"WARNING: invalid number of fields!")
# print(f"You provided {result}, instead of the expected 5.\n")
# else:
# print(f"WARNING: invalid task field: {result}\n")
# print("::: Would you like to add another task?", end=" ")
# continue_action = input("Enter 'y' to continue.\n> ")
# continue_action = continue_action.lower()
# # ----------------------------------------------------------------
# elif opt == 'U':
# continue_action = 'y'
# while continue_action == 'y':
# if ... == []: # TODO: check the return value of get_selection
# print("WARNING: There is nothing to update!")
# break
# print("::: Which task would you like to update?")
# print_tasks(all_tasks, priority_map, name_only = True, show_idx = True, start_idx = 1)
# print("::: Enter the number corresponding to the task.")
# user_option = input("> ")
# if ...: # TODO: check the return value of get_selection
# ... # TODO: convert the index appropriately to account for the start_idx = 1
# subopt = get_selection("update", all_tasks[...], to_upper = False, go_back = True)
# if subopt == 'M': # if the user changed their mind
# break
# print(f"::: Enter a new value for the field |{...}|") # TODO: get the error message from the function
# field_info = input("> ")
# result = update_task(all_tasks, user_option, priority_map, subopt, field_info, start_idx = 1)
# if type(result) == dict:
# print(f"Successfully updated the field |{...}|:") # TODO: get the error message from the function
# print_task(result, ...) # TODO: Call the function with appropriate inputs
# else: # update_task() returned an error
# print(f"WARNING: invalid information for the field |{...}|!") # TODO: get the error message
# print(f"The task was not updated.")
# else: # is_valid_index() returned False
# print(f"WARNING: |{...}| is an invalid task number!") # TODO: get the index from the user
# print("::: Would you like to update another task?", end=" ")
# continue_action = input("Enter 'y' to continue.\n> ")
# continue_action = continue_action.lower()
# # ----------------------------------------------------------------
# elif opt == 'S':
# continue_action = ...
# while continue_action == 'y':
# print("::: Enter the filename ending with '.csv'.")
# filename = input("> ")
# result = save_tasks_to_csv(tasks_list, filename) # TODO: Call the function with appropriate inputs and capture the output
# if result == -1: # TODO: check the return value of the function
# print(f"WARNING: |{...}| is an invalid file name!") # TODO: get the error message from the function
# print("::: Would you like to try again?", end=" ")
# continue_action = input("::: Enter 'y' to try again.\n> ")clea
# else:
# print(f"Successfully stored all the tasks to |{...}|")
# print("::: Would you like to save another file?", end=" ")
# elif opt == 'Q' or 'q':
# print("Goodbye!\n")
# break # exit the main `while` loop
# #--------------------------------------------------------------------------
# else:
# print(f"WARNING: {opt} is an invalid menu option.\n")
# input("::: Press Enter to continue")
# print(f"You selected option {opt} to > {the_menu[opt]}.")
# print("Have a nice day!")
|
from task_functions import *
'''NO GLOBAL VALUES'''
# Main menu: option letter -> description of the action it triggers.
the_menu = {"L": "List", "A": "Add", "U": "Update", "D": "Delete", "S": "Save the data to file",
            "R": "Restore data from file", "Q": "Quit this program"} # dictionary of menu options # FIXME: automate_management()
# Seed data: every task is a dict with exactly these five fields.
all_tasks = [
    {
        "name": "Call XYZ",
        "info": "",
        "priority": 3,
        "duedate": "05/28/2022",
        "done" : "yes"
    },
    {
        "name": "Finish checkpoint 1 for CSW8",
        "info": "Submit to Gradescope",
        "priority": 5,
        "duedate": '06/02/2022',
        "done": 'no'
    },
    {
        "name": "Finish checkpoint 2 for CSW8",
        "info": "Implement the new functions",
        "priority": 5,
        "duedate": '06/05/2022',
        "done": 'no'
    }
]
# list_menu will contain the "List" menu suboptions
list_menu = {
    "A": "all tasks",
    "C": "completed tasks",
    "I": "incomplete tasks"
}
# priority_map will contain the mapping of the integer priority values to their textual interpretation
priority_map = {
    1: "Lowest",
    2: "Low",
    3: "Medium",
    4: "High",
    5: "Highest"
}
# Main event loop.
# NOTE(review): only the 'L' option is handled below and the 'Q' branch is
# still commented out, so this loop currently has no exit path — confirm
# before shipping.
while True:
    print(the_menu)
    opt = input("::: Enter a menu option\n> ")
    opt = opt.upper() # to allow us to input lower- or upper-case letters
    if opt == 'L':
        if all_tasks == []:
            print("WARNING: There is nothing to display!")
            # Pause before going back to the main menu
            input("::: Press Enter to continue")
            continue
        # NOTE(review): get_selection comes from task_functions via the star
        # import; assumed to return the chosen sub-option key — confirm.
        subopt = get_selection(the_menu[opt], list_menu) # FIXME: this is a placeholder for the function
        # NOTE(review): this input's result is discarded and `subopt` is never
        # used; the 'L' branch looks unfinished (see commented code below).
        input("::: Enter a menu option\n> ")
# if subopt == 'A':
# print_tasks(all_tasks, priority_map) # FIXME: this is a placeholder for the function
# elif subopt == 'C':
# print_tasks(all_tasks, priority_map, completed='yes') # FIXME: this is a placeholder for the function
# elif subopt == 'I':
# print_tasks(all_tasks, priority_map, completed='no') # FIXME: this is a placeholder for the function
# elif opt == 'A':
# continue_action = 'y'
# while continue_action == 'y':
# print("::: Enter each required field, separated by commas.")
# print("::: name, info, priority, MM/DD/YYYY, is task done? (yes/no)")
# task = input("> ") # TODO: get and process the data into a list
# task = task.split(',')
# result = get_new_task(all_tasks, priority_map) # TODO: attempt to create a new task
# if type(result) == dict:
# all_tasks = all_tasks.append(result) # TODO: add a new task to the list of tasks
# print(f"Successfully added a new task!")
# print_task(result, priority_map)
# elif type(result) == int:
# print(f"WARNING: invalid number of fields!")
# print(f"You provided {result}, instead of the expected 5.\n")
# else:
# print(f"WARNING: invalid task field: {result}\n")
# print("::: Would you like to add another task?", end=" ")
# continue_action = input("Enter 'y' to continue.\n> ")
# continue_action = continue_action.lower()
# # ----------------------------------------------------------------
# elif opt == 'U':
# continue_action = 'y'
# while continue_action == 'y':
# if ... == []: # TODO: check the return value of get_selection
# print("WARNING: There is nothing to update!")
# break
# print("::: Which task would you like to update?")
# print_tasks(all_tasks, priority_map, name_only = True, show_idx = True, start_idx = 1)
# print("::: Enter the number corresponding to the task.")
# user_option = input("> ")
# if ...: # TODO: check the return value of get_selection
# ... # TODO: convert the index appropriately to account for the start_idx = 1
# subopt = get_selection("update", all_tasks[...], to_upper = False, go_back = True)
# if subopt == 'M': # if the user changed their mind
# break
# print(f"::: Enter a new value for the field |{...}|") # TODO: get the error message from the function
# field_info = input("> ")
# result = update_task(all_tasks, user_option, priority_map, subopt, field_info, start_idx = 1)
# if type(result) == dict:
# print(f"Successfully updated the field |{...}|:") # TODO: get the error message from the function
# print_task(result, ...) # TODO: Call the function with appropriate inputs
# else: # update_task() returned an error
# print(f"WARNING: invalid information for the field |{...}|!") # TODO: get the error message
# print(f"The task was not updated.")
# else: # is_valid_index() returned False
# print(f"WARNING: |{...}| is an invalid task number!") # TODO: get the index from the user
# print("::: Would you like to update another task?", end=" ")
# continue_action = input("Enter 'y' to continue.\n> ")
# continue_action = continue_action.lower()
# # ----------------------------------------------------------------
# elif opt == 'S':
# continue_action = ...
# while continue_action == 'y':
# print("::: Enter the filename ending with '.csv'.")
# filename = input("> ")
# result = save_tasks_to_csv(tasks_list, filename) # TODO: Call the function with appropriate inputs and capture the output
# if result == -1: # TODO: check the return value of the function
# print(f"WARNING: |{...}| is an invalid file name!") # TODO: get the error message from the function
# print("::: Would you like to try again?", end=" ")
# continue_action = input("::: Enter 'y' to try again.\n> ")clea
# else:
# print(f"Successfully stored all the tasks to |{...}|")
# print("::: Would you like to save another file?", end=" ")
# elif opt == 'Q' or 'q':
# print("Goodbye!\n")
# break # exit the main `while` loop
# #--------------------------------------------------------------------------
# else:
# print(f"WARNING: {opt} is an invalid menu option.\n")
# input("::: Press Enter to continue")
# print(f"You selected option {opt} to > {the_menu[opt]}.")
# print("Have a nice day!")
|
en
| 0.51412
|
NO GLOBAL VALUES # dictionary of menu options # FIXME: automate_management() # list_menu will contain the "List" menu suboptions # priority_map will contain the mapping of the integer priority values to their textual interpretation # to allow us to input lower- or upper-case letters # Pause before going back to the main menu # FIXME: this is a placeholder for the function # if subopt == 'A': # print_tasks(all_tasks, priority_map) # FIXME: this is a placeholder for the function # elif subopt == 'C': # print_tasks(all_tasks, priority_map, completed='yes') # FIXME: this is a placeholder for the function # elif subopt == 'I': # print_tasks(all_tasks, priority_map, completed='no') # FIXME: this is a placeholder for the function # elif opt == 'A': # continue_action = 'y' # while continue_action == 'y': # print("::: Enter each required field, separated by commas.") # print("::: name, info, priority, MM/DD/YYYY, is task done? (yes/no)") # task = input("> ") # TODO: get and process the data into a list # task = task.split(',') # result = get_new_task(all_tasks, priority_map) # TODO: attempt to create a new task # if type(result) == dict: # all_tasks = all_tasks.append(result) # TODO: add a new task to the list of tasks # print(f"Successfully added a new task!") # print_task(result, priority_map) # elif type(result) == int: # print(f"WARNING: invalid number of fields!") # print(f"You provided {result}, instead of the expected 5.\n") # else: # print(f"WARNING: invalid task field: {result}\n") # print("::: Would you like to add another task?", end=" ") # continue_action = input("Enter 'y' to continue.\n> ") # continue_action = continue_action.lower() # # ---------------------------------------------------------------- # elif opt == 'U': # continue_action = 'y' # while continue_action == 'y': # if ... 
== []: # TODO: check the return value of get_selection # print("WARNING: There is nothing to update!") # break # print("::: Which task would you like to update?") # print_tasks(all_tasks, priority_map, name_only = True, show_idx = True, start_idx = 1) # print("::: Enter the number corresponding to the task.") # user_option = input("> ") # if ...: # TODO: check the return value of get_selection # ... # TODO: convert the index appropriately to account for the start_idx = 1 # subopt = get_selection("update", all_tasks[...], to_upper = False, go_back = True) # if subopt == 'M': # if the user changed their mind # break # print(f"::: Enter a new value for the field |{...}|") # TODO: get the error message from the function # field_info = input("> ") # result = update_task(all_tasks, user_option, priority_map, subopt, field_info, start_idx = 1) # if type(result) == dict: # print(f"Successfully updated the field |{...}|:") # TODO: get the error message from the function # print_task(result, ...) # TODO: Call the function with appropriate inputs # else: # update_task() returned an error # print(f"WARNING: invalid information for the field |{...}|!") # TODO: get the error message # print(f"The task was not updated.") # else: # is_valid_index() returned False # print(f"WARNING: |{...}| is an invalid task number!") # TODO: get the index from the user # print("::: Would you like to update another task?", end=" ") # continue_action = input("Enter 'y' to continue.\n> ") # continue_action = continue_action.lower() # # ---------------------------------------------------------------- # elif opt == 'S': # continue_action = ... 
# while continue_action == 'y': # print("::: Enter the filename ending with '.csv'.") # filename = input("> ") # result = save_tasks_to_csv(tasks_list, filename) # TODO: Call the function with appropriate inputs and capture the output # if result == -1: # TODO: check the return value of the function # print(f"WARNING: |{...}| is an invalid file name!") # TODO: get the error message from the function # print("::: Would you like to try again?", end=" ") # continue_action = input("::: Enter 'y' to try again.\n> ")clea # else: # print(f"Successfully stored all the tasks to |{...}|") # print("::: Would you like to save another file?", end=" ") # elif opt == 'Q' or 'q': # print("Goodbye!\n") # break # exit the main `while` loop # #-------------------------------------------------------------------------- # else: # print(f"WARNING: {opt} is an invalid menu option.\n") # input("::: Press Enter to continue") # print(f"You selected option {opt} to > {the_menu[opt]}.") # print("Have a nice day!")
| 3.75094
| 4
|
app/core/models.py
|
dhruv-ahuja/todo-django
| 0
|
6629481
|
<reponame>dhruv-ahuja/todo-django<filename>app/core/models.py
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Task(models.Model):
    """A single to-do item, optionally owned by a user."""
    # Owner of the task; deleting the user cascades to their tasks.
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    description = models.CharField(max_length=200)
    completed = models.BooleanField(default=False)
    # Set once, automatically, when the row is first created.
    added_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # Display the task text itself in admin/shell output.
        return self.description
    class Meta:
        # Indexes presumably supporting filter-by-completed plus ordering
        # by creation time — confirm against the querysets that use them.
        indexes = [
            models.Index(fields=["completed", "added_at"]),
            models.Index(fields=["added_at"]),
        ]
        # Incomplete tasks (completed=False) sort before completed ones.
        ordering = ["completed"]
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Task(models.Model):
    """A single to-do item, optionally owned by a user."""
    # Owner of the task; deleting the user cascades to their tasks.
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    description = models.CharField(max_length=200)
    completed = models.BooleanField(default=False)
    # Set once, automatically, when the row is first created.
    added_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # Display the task text itself in admin/shell output.
        return self.description
    class Meta:
        # Indexes presumably supporting filter-by-completed plus ordering
        # by creation time — confirm against the querysets that use them.
        indexes = [
            models.Index(fields=["completed", "added_at"]),
            models.Index(fields=["added_at"]),
        ]
        # Incomplete tasks (completed=False) sort before completed ones.
        ordering = ["completed"]
|
en
| 0.963489
|
# Create your models here.
| 2.258659
| 2
|
lyapy/lyapunov_functions/quadratic_control_lyapunov_function.py
|
vdorobantu/lyapy
| 36
|
6629482
|
"""Class for Control Lyapunov Functions (CLFs) of the form V(eta) = eta' P eta."""
from numpy import dot, identity
from numpy.linalg import eigvals
from scipy.linalg import solve_continuous_are, solve_continuous_lyapunov
from .control_lyapunov_function import ControlLyapunovFunction
from .quadratic_lyapunov_function import QuadraticLyapunovFunction
class QuadraticControlLyapunovFunction(QuadraticLyapunovFunction, ControlLyapunovFunction):
    """Class for Control Lyapunov Functions (CLFs) of the form V(eta) = eta' P eta.
    Let n be the number of states, m be the number of inputs, p be the output
    vector size.
    Attributes:
    Control task output, output: AffineDynamicOutput
    Positive definite matrix, P: numpy array (p, p)
    Convergence rate, alpha: float
    """
    def __init__(self, affine_dynamic_output, P, alpha):
        """Initialize a QuadraticControlLyapunovFunction.
        Inputs:
        Control task output, affine_dynamic_output: AffineDynamicOutput
        Positive definite matrix, P: numpy array (p, p)
        Convergence rate, alpha: float
        """
        # Initialize both parents explicitly; they share the same output.
        QuadraticLyapunovFunction.__init__(self, affine_dynamic_output, P)
        ControlLyapunovFunction.__init__(self, affine_dynamic_output)
        self.alpha = alpha
    def drift(self, x, t):
        """Evaluate the Lyapunov function drift for a state and time.
        Lyapunov function drift is grad_V(x, t) * output.drift(x, t).
        Outputs a float.
        Inputs:
        State, x: numpy array (n,)
        Time, t: float
        """
        return dot(self.grad_V(x, t), self.output.drift(x, t))
    def decoupling(self, x, t):
        """Evaluate the Lyapunov function decoupling term for a state and time.
        Lyapunov function decoupling is grad_V(x, t) * output.decoupling(x, t).
        Outputs a numpy array (m,).
        Inputs:
        State, x: numpy array (n,)
        Time, t: float
        """
        return dot(self.grad_V(x, t), self.output.decoupling(x, t))
    def V_dot(self, x, u, t):
        """Evaluate the Lyapunov function derivative for a state, input, and time.
        V_dot is drift(x, t) + decoupling(x, t) * u.
        Outputs a float.
        Inputs:
        State, x: numpy array (n,)
        Input, u: numpy array (m,)
        Time, t: float
        """
        return self.drift(x, t) + dot(self.decoupling(x, t), u)
def build_ctle(feedback_linearizable_output, K, Q):
    """Build a quadratic CLF from a FeedbackLinearizableOutput with auxilliary control gain matrix, by solving the continuous time Lyapunov equation (CTLE).
    CTLE is
    A_cl' P + P A_cl = -Q
    for specified Q.
    Outputs a QuadraticControlLyapunovFunction.
    Inputs:
    Auxilliary control gain matrix, K: numpy array (k, p)
    Positive definite matrix for CTLE, Q: numpy array (p, p)
    """
    # Closed-loop dynamics matrix under the auxiliary gain K.
    closed_loop = feedback_linearizable_output.closed_loop_dynamics(K)
    # Solve A_cl' P + P A_cl = -Q for the CLF matrix P.
    lyap_matrix = solve_continuous_lyapunov(closed_loop.T, -Q)
    # Convergence rate bound: lambda_min(Q) / lambda_max(P).
    rate = min(eigvals(Q)) / max(eigvals(lyap_matrix))
    return QuadraticControlLyapunovFunction(feedback_linearizable_output, lyap_matrix, rate)
def build_care(feedback_linearizable_output, Q):
    """Build a quadratic CLF from a FeedbackLinearizableOutput with auxilliary control gain matrix, by solving the continuous algebraic Riccati equation (CARE).
    CARE is
    F'P + PF - PGG'P = -Q
    for specified Q.
    Outputs a QuadraticControlLyapunovFunction.
    Inputs:
    Positive definite matrix for CTLE, Q: numpy array (p, p)
    """
    drift_matrix = feedback_linearizable_output.F
    actuation_matrix = feedback_linearizable_output.G
    # Unit input cost makes the CARE reduce to F'P + PF - PGG'P = -Q.
    input_cost = identity(actuation_matrix.shape[1])
    riccati_solution = solve_continuous_are(drift_matrix, actuation_matrix, Q, input_cost)
    # Convergence rate bound: lambda_min(Q) / lambda_max(P).
    rate = min(eigvals(Q)) / max(eigvals(riccati_solution))
    return QuadraticControlLyapunovFunction(feedback_linearizable_output, riccati_solution, rate)
|
"""Class for Control Lyapunov Functions (CLFs) of the form V(eta) = eta' P eta."""
from numpy import dot, identity
from numpy.linalg import eigvals
from scipy.linalg import solve_continuous_are, solve_continuous_lyapunov
from .control_lyapunov_function import ControlLyapunovFunction
from .quadratic_lyapunov_function import QuadraticLyapunovFunction
class QuadraticControlLyapunovFunction(QuadraticLyapunovFunction, ControlLyapunovFunction):
    """Class for Control Lyapunov Functions (CLFs) of the form V(eta) = eta' P eta.
    Let n be the number of states, m be the number of inputs, p be the output
    vector size.
    Attributes:
    Control task output, output: AffineDynamicOutput
    Positive definite matrix, P: numpy array (p, p)
    Convergence rate, alpha: float
    """
    def __init__(self, affine_dynamic_output, P, alpha):
        """Initialize a QuadraticControlLyapunovFunction.
        Inputs:
        Control task output, affine_dynamic_output: AffineDynamicOutput
        Positive definite matrix, P: numpy array (p, p)
        Convergence rate, alpha: float
        """
        # Initialize both parents explicitly; they share the same output.
        QuadraticLyapunovFunction.__init__(self, affine_dynamic_output, P)
        ControlLyapunovFunction.__init__(self, affine_dynamic_output)
        self.alpha = alpha
    def drift(self, x, t):
        """Evaluate the Lyapunov function drift for a state and time.
        Lyapunov function drift is grad_V(x, t) * output.drift(x, t).
        Outputs a float.
        Inputs:
        State, x: numpy array (n,)
        Time, t: float
        """
        return dot(self.grad_V(x, t), self.output.drift(x, t))
    def decoupling(self, x, t):
        """Evaluate the Lyapunov function decoupling term for a state and time.
        Lyapunov function decoupling is grad_V(x, t) * output.decoupling(x, t).
        Outputs a numpy array (m,).
        Inputs:
        State, x: numpy array (n,)
        Time, t: float
        """
        return dot(self.grad_V(x, t), self.output.decoupling(x, t))
    def V_dot(self, x, u, t):
        """Evaluate the Lyapunov function derivative for a state, input, and time.
        V_dot is drift(x, t) + decoupling(x, t) * u.
        Outputs a float.
        Inputs:
        State, x: numpy array (n,)
        Input, u: numpy array (m,)
        Time, t: float
        """
        return self.drift(x, t) + dot(self.decoupling(x, t), u)
def build_ctle(feedback_linearizable_output, K, Q):
    """Build a quadratic CLF from a FeedbackLinearizableOutput with auxilliary control gain matrix, by solving the continuous time Lyapunov equation (CTLE).
    CTLE is
    A_cl' P + P A_cl = -Q
    for specified Q.
    Outputs a QuadraticControlLyapunovFunction.
    Inputs:
    Auxilliary control gain matrix, K: numpy array (k, p)
    Positive definite matrix for CTLE, Q: numpy array (p, p)
    """
    # Closed-loop dynamics matrix under the auxiliary gain K.
    A = feedback_linearizable_output.closed_loop_dynamics(K)
    # Solve A' P + P A = -Q for the CLF matrix P.
    P = solve_continuous_lyapunov(A.T, -Q)
    # Convergence rate bound: lambda_min(Q) / lambda_max(P).
    alpha = min(eigvals(Q)) / max(eigvals(P))
    return QuadraticControlLyapunovFunction(feedback_linearizable_output, P, alpha)
def build_care(feedback_linearizable_output, Q):
    """Build a quadratic CLF from a FeedbackLinearizableOutput with auxilliary control gain matrix, by solving the continuous algebraic Riccati equation (CARE).
    CARE is
    F'P + PF - PGG'P = -Q
    for specified Q.
    Outputs a QuadraticControlLyapunovFunction.
    Inputs:
    Positive definite matrix for CTLE, Q: numpy array (p, p)
    """
    F = feedback_linearizable_output.F
    G = feedback_linearizable_output.G
    # Unit input cost R = I makes the CARE reduce to F'P + PF - PGG'P = -Q.
    R = identity(G.shape[1])
    P = solve_continuous_are(F, G, Q, R)
    # Convergence rate bound: lambda_min(Q) / lambda_max(P).
    alpha = min(eigvals(Q)) / max(eigvals(P))
    return QuadraticControlLyapunovFunction(feedback_linearizable_output, P, alpha)
|
en
| 0.563419
|
Class for Control Lyapunov Functions (CLFs) of the form V(eta) = eta' P eta. Class for Control Lyapunov Functions (CLFs) of the form V(eta) = eta' P eta. Let n be the number of states, m be the number of inputs, p be the output vector size. Attributes: Control task output, output: AffineDynamicOutput Positive definite matrix, P: numpy array (p, p) Convergence rate, alpha: float Initialize a QuadraticControlLyapunovFunction. Inputs: Control task output, affine_dynamic_output: AffineDynamicOutput Positive definite matrix, P: numpy array (p, p) Convergence rate, alpha: float Evaluate the Lyapunov function drift for a state and time. Lyapunov function drift is grad_V(x, t) * output.drift(x, t). Outputs a float. Inputs: State, x: numpy array (n,) Time, t: float Evaluate the Lyapunov function drift for a state and time. Lyapunov function drift is grad_V(x, t) * output.decoupling(x, t). Outputs a numpy array (m,). Inputs: State, x: numpy array (n,) Time, t: float Build a quadratic CLF from a FeedbackLinearizableOutput with auxilliary control gain matrix, by solving the continuous time Lyapunov equation (CTLE). CTLE is A_cl' P + P A_cl = -Q for specified Q. Outputs a QuadraticControlLyapunovFunction. Inputs: Auxilliary control gain matrix, K: numpy array (k, p) Positive definite matrix for CTLE, Q: numpy array (p, p) Build a quadratic CLF from a FeedbackLinearizableOutput with auxilliary control gain matrix, by solving the continuous algebraic Riccati equation (CARE). CARE is F'P + PF - PGG'P = -Q for specified Q. Outputs a QuadraticControlLyapunovFunction. Inputs: Positive definite matrix for CTLE, Q: numpy array (p, p)
| 3.226904
| 3
|
src/wa_kat/analyzers/shared.py
|
WebArchivCZ/WA-KAT
| 3
|
6629483
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import dhtmlparser
from source_string import SourceString
# Functions & classes =========================================================
def parse_meta(content, meta_name, source_descr, content_attr_name="content"):
    """
    Return list of strings parsed from `content` attribute from ``<meta>``
    tags with given `meta_name`.
    """
    wanted_name = meta_name.lower()

    def _matches(tag):
        # Case-insensitive comparison of the tag's ``name`` parameter.
        return tag.params.get("name", "").lower() == wanted_name

    document = dhtmlparser.parseString(content)
    parsed = []
    for meta_tag in document.find("meta", fn=_matches):
        # Tags lacking the requested attribute are skipped silently.
        if content_attr_name not in meta_tag.params:
            continue
        parsed.append(SourceString(meta_tag.params[content_attr_name], source_descr))
    return parsed
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import dhtmlparser
from source_string import SourceString
# Functions & classes =========================================================
def parse_meta(content, meta_name, source_descr, content_attr_name="content"):
    """
    Return list of strings parsed from `content` attribute from ``<meta>``
    tags with given `meta_name`.

    Args:
        content: HTML document to parse.
        meta_name: Value of the ``name`` attribute to match (case-insensitive).
        source_descr: Source description attached to each result.
        content_attr_name: Attribute read from each matching tag.

    Returns:
        list: ``SourceString`` objects, one per matching tag that actually
        carries `content_attr_name`; tags without it are skipped.
    """
    dom = dhtmlparser.parseString(content)
    # Case-insensitive match on the <meta name="..."> attribute.
    meta_tags = dom.find(
        "meta",
        fn=lambda x: x.params.get("name", "").lower() == meta_name.lower()
    )
    return [
        SourceString(tag.params[content_attr_name], source_descr)
        for tag in meta_tags
        if content_attr_name in tag.params
    ]
|
en
| 0.403894
|
#! /usr/bin/env python # -*- coding: utf-8 -*- # # Interpreter version: python 2.7 # # Imports ===================================================================== # Functions & classes ========================================================= Return list of strings parsed from `content` attribute from ``<meta>`` tags with given `meta_name`.
| 2.640811
| 3
|
course_app/api/serializers.py
|
maks-nurgazy/student-attendance-menegement
| 0
|
6629484
|
from django.db import IntegrityError
from rest_framework import serializers
from course_app.models import Course, Enrolled, CourseApprove
from users.models import Student, Teacher
class CourseRelatedField(serializers.RelatedField):
    """Related field that accepts a course id on input.

    ``to_representation`` returns None, so the field contributes nothing
    to serialized output.
    """
    def to_internal_value(self, data):
        # `data` is expected to be a Course primary key within self.queryset.
        try:
            course = self.queryset.get(id=data)
        except Course.DoesNotExist:
            raise serializers.ValidationError(f'Course with id {data} not available for you')
        return course
    def to_representation(self, instance):
        # Renders nothing (returns None) — see class docstring.
        return None
class CourseStudentSerializer(serializers.ModelSerializer):
    """Minimal student representation (id and full name) for course listings."""
    class Meta:
        model = Student
        fields = ('id', 'full_name')
class CourseDetailSerializer(serializers.ModelSerializer):
    """Read serializer for a course, including teacher name and student list."""
    teacher = serializers.SerializerMethodField()
    students = serializers.SerializerMethodField()
    class Meta:
        model = Course
        fields = ('id', 'name', 'credit', 'co_class', 'teacher', 'students')
    def get_teacher(self, obj):
        # Flatten the teacher relation to just the display name.
        return obj.teacher.full_name
    def get_students(self, obj):
        # Nested, read-only list of the course's students.
        students = obj.students.all()
        return CourseStudentSerializer(students, many=True).data
class CourseSerializer(serializers.ModelSerializer):
    """Course serializer: teacher pk accepted on write, name shown on read."""
    teacher_detail = serializers.SerializerMethodField()
    class Meta:
        model = Course
        fields = ('id', 'name', 'credit', 'co_class', 'teacher', 'teacher_detail')
        extra_kwargs = {
            # The raw FK is write-only; output exposes teacher_detail instead.
            'teacher': {'write_only': True},
        }
    def get_teacher_detail(self, obj):
        # A course may have no teacher assigned; fall back to an empty string.
        try:
            return obj.teacher.full_name
        except AttributeError:
            return ""
class CourseRelatedSerializer(serializers.Serializer):
    """Serializer exposing only a course primary key; create/update are no-ops."""
    id = serializers.PrimaryKeyRelatedField(queryset=Course.objects.all())
    def update(self, instance, validated_data):
        # Not supported for this serializer.
        pass
    def create(self, validated_data):
        # Not supported for this serializer.
        pass
class EnrollmentSerializer(serializers.Serializer):
    """Enrolls the context's student into one or more courses.

    Requires ``context['student']``; selectable courses are limited to the
    student's own class.
    """
    def __init__(self, *args, **kwargs):
        super(EnrollmentSerializer, self).__init__(*args, **kwargs)
        # Only courses offered to the student's class are valid choices.
        queryset = Course.objects.filter(co_class=self.context['student'].student_profile.st_class)
        self.fields['courses'] = CourseRelatedField(queryset=queryset, many=True)
    def update(self, instance, validated_data):
        # Updating an existing enrollment is not supported.
        pass
    def create(self, validated_data):
        courses = validated_data['courses']
        student = self.context['student']
        # One CourseApprove row per student; created lazily on first enrollment.
        approve, created = CourseApprove.objects.get_or_create(student=student)
        res = {
            "status": "Verified" if approve.status else "Not approved",
            "courses": [],
        }
        for course in courses:
            # get_or_create makes re-enrolling in the same course a no-op.
            enroll, created = Enrolled.objects.get_or_create(student=student, course=course)
            res['courses'].append(enroll.course.name)
        return res
    def get_co_class(self):
        # NOTE(review): not referenced anywhere in this module — confirm it
        # is used by callers before removing.
        return self.context['student'].student_profile.st_class
class TeacherCourseValidSerializer(serializers.Serializer):
    """Validates that a submitted course belongs to ``context['teacher']``.

    create/update are no-ops; the serializer appears to be used for
    validation only.
    """
    def __init__(self, *args, **kwargs):
        super(TeacherCourseValidSerializer, self).__init__(*args, **kwargs)
        # Restrict the selectable courses to those taught by this teacher.
        queryset = Course.objects.filter(teacher=self.context['teacher'])
        self.fields['course'] = CourseRelatedField(queryset=queryset)
    def update(self, instance, validated_data):
        # Not supported for this serializer.
        pass
    def create(self, validated_data):
        # Not supported for this serializer.
        pass
|
from django.db import IntegrityError
from rest_framework import serializers
from course_app.models import Course, Enrolled, CourseApprove
from users.models import Student, Teacher
class CourseRelatedField(serializers.RelatedField):
    """Related field that accepts a course id on input.

    ``to_representation`` returns None, so the field contributes nothing
    to serialized output.
    """
    def to_internal_value(self, data):
        # `data` is expected to be a Course primary key within self.queryset.
        try:
            course = self.queryset.get(id=data)
        except Course.DoesNotExist:
            raise serializers.ValidationError(f'Course with id {data} not available for you')
        return course
    def to_representation(self, instance):
        # Renders nothing (returns None) — see class docstring.
        return None
class CourseStudentSerializer(serializers.ModelSerializer):
    """Minimal student representation (id and full name) for course listings."""
    class Meta:
        model = Student
        fields = ('id', 'full_name')
class CourseDetailSerializer(serializers.ModelSerializer):
    """Read serializer for a course, including teacher name and student list."""
    teacher = serializers.SerializerMethodField()
    students = serializers.SerializerMethodField()
    class Meta:
        model = Course
        fields = ('id', 'name', 'credit', 'co_class', 'teacher', 'students')
    def get_teacher(self, obj):
        # Flatten the teacher relation to just the display name.
        return obj.teacher.full_name
    def get_students(self, obj):
        # Nested, read-only list of the course's students.
        students = obj.students.all()
        return CourseStudentSerializer(students, many=True).data
class CourseSerializer(serializers.ModelSerializer):
    """Course serializer: teacher pk accepted on write, name shown on read."""
    teacher_detail = serializers.SerializerMethodField()
    class Meta:
        model = Course
        fields = ('id', 'name', 'credit', 'co_class', 'teacher', 'teacher_detail')
        extra_kwargs = {
            # The raw FK is write-only; output exposes teacher_detail instead.
            'teacher': {'write_only': True},
        }
    def get_teacher_detail(self, obj):
        # A course may have no teacher assigned; fall back to an empty string.
        try:
            return obj.teacher.full_name
        except AttributeError:
            return ""
class CourseRelatedSerializer(serializers.Serializer):
    """Serializer exposing only a course primary key; create/update are no-ops."""
    id = serializers.PrimaryKeyRelatedField(queryset=Course.objects.all())
    def update(self, instance, validated_data):
        # Not supported for this serializer.
        pass
    def create(self, validated_data):
        # Not supported for this serializer.
        pass
class EnrollmentSerializer(serializers.Serializer):
    """Enrolls the context's student into one or more courses.

    Requires ``context['student']``; selectable courses are limited to the
    student's own class.
    """
    def __init__(self, *args, **kwargs):
        super(EnrollmentSerializer, self).__init__(*args, **kwargs)
        # Only courses offered to the student's class are valid choices.
        queryset = Course.objects.filter(co_class=self.context['student'].student_profile.st_class)
        self.fields['courses'] = CourseRelatedField(queryset=queryset, many=True)
    def update(self, instance, validated_data):
        # Updating an existing enrollment is not supported.
        pass
    def create(self, validated_data):
        courses = validated_data['courses']
        student = self.context['student']
        # One CourseApprove row per student; created lazily on first enrollment.
        approve, created = CourseApprove.objects.get_or_create(student=student)
        res = {
            "status": "Verified" if approve.status else "Not approved",
            "courses": [],
        }
        for course in courses:
            # get_or_create makes re-enrolling in the same course a no-op.
            enroll, created = Enrolled.objects.get_or_create(student=student, course=course)
            res['courses'].append(enroll.course.name)
        return res
    def get_co_class(self):
        # NOTE(review): not referenced anywhere in this module — confirm it
        # is used by callers before removing.
        return self.context['student'].student_profile.st_class
class TeacherCourseValidSerializer(serializers.Serializer):
    """Validates that a submitted course belongs to ``context['teacher']``.

    create/update are no-ops; the serializer appears to be used for
    validation only.
    """
    def __init__(self, *args, **kwargs):
        super(TeacherCourseValidSerializer, self).__init__(*args, **kwargs)
        # Restrict the selectable courses to those taught by this teacher.
        queryset = Course.objects.filter(teacher=self.context['teacher'])
        self.fields['course'] = CourseRelatedField(queryset=queryset)
    def update(self, instance, validated_data):
        # Not supported for this serializer.
        pass
    def create(self, validated_data):
        # Not supported for this serializer.
        pass
|
none
| 1
| 2.323854
| 2
|
|
pulsar/client/server_interface.py
|
bgruening/pulsar
| 1
|
6629485
|
import logging
from abc import ABCMeta
from abc import abstractmethod
from io import BytesIO
from string import Template
from urllib.parse import urlencode, urljoin
from galaxy.util import unicodify
from .util import copy_to_path
log = logging.getLogger(__name__)
class PulsarInterface(metaclass=ABCMeta):
    """
    Abstract base class describes how synchronous client communicates with
    (potentially remote) Pulsar procedures. Obvious implementation is HTTP based
    but Pulsar objects wrapped in routes can also be directly communicated with
    if in memory.

    Concrete implementations in this module: HttpPulsarInterface (remote,
    over HTTP) and LocalPulsarInterface (in-process dispatch).
    """
    @abstractmethod
    def execute(self, command, args=None, data=None, input_path=None, output_path=None):
        """
        Execute the correspond command against configured Pulsar job manager. Arguments are
        method parameters and data or input_path describe essentially POST bodies. If command
        results in a file, resulting path should be specified as output_path.
        """
# Maps each client command name to its (templated) server route. Templates
# are filled from the command's args via safe_substitute (see
# HttpPulsarInterface.__build_url); commands absent from this map use the
# command name itself as the path.
COMMAND_TO_PATH = {
    "path": Template("jobs/${job_id}/files/path"),
    "upload_file": Template("jobs/${job_id}/files"),
    "download_output": Template("jobs/${job_id}/files"),
    "setup": Template("jobs"),
    "clean": Template("jobs/${job_id}"),
    "status": Template("jobs/${job_id}/status"),
    "cancel": Template("jobs/${job_id}/cancel"),
    "submit": Template("jobs/${job_id}/submit"),
    "file_available": Template("cache/status"),
    "cache_required": Template("cache"),
    "cache_insert": Template("cache"),
    "object_store_exists": Template("objects/${object_id}/exists"),
    "object_store_file_ready": Template("objects/${object_id}/file_ready"),
    "object_store_update_from_file": Template("objects/${object_id}"),
    "object_store_create": Template("objects/${object_id}"),
    "object_store_empty": Template("objects/${object_id}/empty"),
    "object_store_size": Template("objects/${object_id}/size"),
    "object_store_delete": Template("objects/${object_id}"),
    "object_store_get_data": Template("objects/${object_id}"),
    "object_store_get_filename": Template("objects/${object_id}/filename"),
    "object_store_get_store_usage_percent": Template("object_store_usage_percent")
}
# HTTP verb for each command. Commands absent from this map pass method=None
# to the transport, which presumably picks GET or POST based on whether a
# body is present (see the note in HttpPulsarInterface.execute).
COMMAND_TO_METHOD = {
    "upload_file": "POST",
    "download_output": "GET",
    "setup": "POST",
    "submit": "POST",
    "clean": "DELETE",
    "cancel": "PUT",
    "object_store_update_from_file": "PUT",
    "object_store_create": "POST",
    "object_store_delete": "DELETE",
    "file_available": "GET",
    "cache_required": "PUT",
    "cache_insert": "POST",
}
class HttpPulsarInterface(PulsarInterface):
    """PulsarInterface implementation that talks to a remote Pulsar over HTTP.

    The remote URL comes from ``destination_params["url"]``; an optional
    ``manager`` name is appended as a ``managers/<name>`` path segment, and an
    optional ``private_token`` is sent as a query parameter on every request.
    """
    def __init__(self, destination_params, transport):
        self.transport = transport
        remote_host = destination_params.get("url")
        assert remote_host is not None, "Failed to determine url for Pulsar client."
        # Default to plain HTTP when no scheme was supplied.
        if not remote_host.startswith("http"):
            remote_host = "http://%s" % remote_host
        manager = destination_params.get("manager", None)
        if manager:
            if "/managers/" in remote_host:
                log.warning("Ignoring manager tag '%s', Pulsar client URL already contains a \"/managers/\" path." % manager)
            else:
                remote_host = urljoin(remote_host, "managers/%s" % manager)
        # Normalize to a trailing slash so path concatenation below is safe.
        if not remote_host.endswith("/"):
            remote_host = "%s/" % remote_host
        self.remote_host = remote_host
        self.private_token = destination_params.get("private_token", None)

    def execute(self, command, args=None, data=None, input_path=None, output_path=None):
        """Send `command` to the remote Pulsar server via the transport."""
        url = self.__build_url(command, args)
        method = COMMAND_TO_METHOD.get(command, None)  # Default to GET if no data, POST otherwise
        response = self.transport.execute(url, method=method, data=data, input_path=input_path, output_path=output_path)
        return response

    def __build_url(self, command, args):
        """Resolve the route template for `command` and append args as a query string."""
        # Work on a copy so the caller's args dict is not mutated when the
        # private token is appended below.
        args = {} if args is None else dict(args)
        # Substitute the path template first so the token is only ever a
        # query parameter, never part of the path.
        path = COMMAND_TO_PATH.get(command, Template(command)).safe_substitute(args)
        if self.private_token:
            args["private_token"] = self.private_token
        arg_bytes = {k: unicodify(args[k]).encode('utf-8') for k in args}
        data = urlencode(arg_bytes)
        url = self.remote_host + path + "?" + data
        return url
class LocalPulsarInterface(PulsarInterface):
    """PulsarInterface implementation that invokes the web controllers in-process.

    Instead of HTTP, commands are dispatched directly to the functions in
    ``pulsar.web.routes``, reusing the same argument-binding machinery as the
    web framework.
    """
    def __init__(self, destination_params, job_manager=None, pulsar_app=None, file_cache=None, object_store=None):
        # Resolve the job manager: use the one given directly, otherwise look
        # it up on the app (by name, or the app's only manager).
        if job_manager is None:
            job_manager_name = destination_params.get("manager", None)
            if job_manager_name is None:
                job_manager = pulsar_app.only_manager
            else:
                job_manager = pulsar_app.managers[job_manager_name]
        self.job_manager = job_manager
        self.file_cache = file_cache
        self.object_store = object_store
    def __app_args(self):
        # Arguments that would be specified from PulsarApp if running
        # in web server.
        return {
            'manager': self.job_manager,
            'file_cache': self.file_cache,
            'object_store': self.object_store,
            'ip': None
        }
    def execute(self, command, args=None, data=None, input_path=None, output_path=None):
        """Dispatch `command` to the matching route function in-process."""
        if args is None:
            args = {}
        # If data set, should be unicode (on Python 2) or str (on Python 3).
        from pulsar.web import routes
        from pulsar.web.framework import build_func_args
        controller = getattr(routes, command)
        action = controller.func
        body_args = dict(body=self.__build_body(data, input_path))
        args = build_func_args(action, args.copy(), self.__app_args(), body_args)
        result = action(**args)
        # File-producing commands copy the resulting file to output_path
        # instead of returning a response body.
        if controller.response_type != 'file':
            return controller.body(result)
        else:
            with open(result, 'rb') as result_file:
                copy_to_path(result_file, output_path)
    def __build_body(self, data, input_path):
        # Request "body": in-memory bytes, an open file handle, or nothing.
        if data is not None:
            return BytesIO(data)
        elif input_path is not None:
            return open(input_path, 'rb')
        else:
            return None
|
import logging
from abc import ABCMeta
from abc import abstractmethod
from io import BytesIO
from string import Template
from urllib.parse import urlencode, urljoin
from galaxy.util import unicodify
from .util import copy_to_path
log = logging.getLogger(__name__)
class PulsarInterface(metaclass=ABCMeta):
    """
    Abstract base class describes how synchronous client communicates with
    (potentially remote) Pulsar procedures. Obvious implementation is HTTP based
    but Pulsar objects wrapped in routes can also be directly communicated with
    if in memory.
    """

    @abstractmethod
    def execute(self, command, args=None, data=None, input_path=None, output_path=None):
        """
        Execute the corresponding command against configured Pulsar job manager.
        Arguments are method parameters and data or input_path describe essentially
        POST bodies. If command results in a file, resulting path should be
        specified as output_path.
        """
# Relative URL path for every client command; ${...} placeholders are filled
# from the command's args via Template.safe_substitute. Commands missing from
# this map use the command name itself as the path.
COMMAND_TO_PATH = {
    "path": Template("jobs/${job_id}/files/path"),
    "upload_file": Template("jobs/${job_id}/files"),
    "download_output": Template("jobs/${job_id}/files"),
    "setup": Template("jobs"),
    "clean": Template("jobs/${job_id}"),
    "status": Template("jobs/${job_id}/status"),
    "cancel": Template("jobs/${job_id}/cancel"),
    "submit": Template("jobs/${job_id}/submit"),
    "file_available": Template("cache/status"),
    "cache_required": Template("cache"),
    "cache_insert": Template("cache"),
    "object_store_exists": Template("objects/${object_id}/exists"),
    "object_store_file_ready": Template("objects/${object_id}/file_ready"),
    "object_store_update_from_file": Template("objects/${object_id}"),
    "object_store_create": Template("objects/${object_id}"),
    "object_store_empty": Template("objects/${object_id}/empty"),
    "object_store_size": Template("objects/${object_id}/size"),
    "object_store_delete": Template("objects/${object_id}"),
    "object_store_get_data": Template("objects/${object_id}"),
    "object_store_get_filename": Template("objects/${object_id}/filename"),
    "object_store_get_store_usage_percent": Template("object_store_usage_percent")
}
# HTTP method per command. Commands absent from this map get method=None and
# the transport picks the default (per the note in execute(): GET when there
# is no body, POST otherwise).
COMMAND_TO_METHOD = {
    "upload_file": "POST",
    "download_output": "GET",
    "setup": "POST",
    "submit": "POST",
    "clean": "DELETE",
    "cancel": "PUT",
    "object_store_update_from_file": "PUT",
    "object_store_create": "POST",
    "object_store_delete": "DELETE",
    "file_available": "GET",
    "cache_required": "PUT",
    "cache_insert": "POST",
}
class HttpPulsarInterface(PulsarInterface):
    """PulsarInterface implementation that communicates with a remote Pulsar
    server over HTTP via the supplied transport object.
    """

    def __init__(self, destination_params, transport):
        self.transport = transport
        remote_host = destination_params.get("url")
        assert remote_host is not None, "Failed to determine url for Pulsar client."
        if not remote_host.startswith("http"):
            remote_host = "http://%s" % remote_host
        manager = destination_params.get("manager", None)
        if manager:
            if "/managers/" in remote_host:
                log.warning("Ignoring manager tag '%s', Pulsar client URL already contains a \"/managers/\" path." % manager)
            else:
                remote_host = urljoin(remote_host, "managers/%s" % manager)
        # Normalize to a trailing slash so relative paths join cleanly.
        if not remote_host.endswith("/"):
            remote_host = "%s/" % remote_host
        self.remote_host = remote_host
        self.private_token = destination_params.get("private_token", None)

    def execute(self, command, args=None, data=None, input_path=None, output_path=None):
        """Issue ``command`` to the remote Pulsar server over HTTP."""
        url = self.__build_url(command, args)
        method = COMMAND_TO_METHOD.get(command, None)  # Default to GET if no data, POST otherwise
        response = self.transport.execute(url, method=method, data=data, input_path=input_path, output_path=output_path)
        return response

    def __build_url(self, command, args):
        """Fill the command's path template from ``args`` and append the
        arguments (plus the private token, when configured) as a query string.
        """
        # Work on a copy so the caller's dict is never mutated (previously
        # the private token leaked into the caller-supplied args dict).
        args = dict(args) if args else {}
        path = COMMAND_TO_PATH.get(command, Template(command)).safe_substitute(args)
        if self.private_token:
            args["private_token"] = self.private_token
        arg_bytes = {k: unicodify(args[k]).encode('utf-8') for k in args}
        data = urlencode(arg_bytes)
        url = self.remote_host + path + "?" + data
        return url
class LocalPulsarInterface(PulsarInterface):
    """PulsarInterface implementation that talks to an in-process Pulsar
    application directly (no HTTP), by invoking the same controller
    functions the web layer would route to.
    """

    def __init__(self, destination_params, job_manager=None, pulsar_app=None, file_cache=None, object_store=None):
        # Resolve the job manager: an explicit instance wins; otherwise look
        # it up on the app by the destination's "manager" param, falling back
        # to the app's only manager when unset.
        if job_manager is None:
            job_manager_name = destination_params.get("manager", None)
            if job_manager_name is None:
                job_manager = pulsar_app.only_manager
            else:
                job_manager = pulsar_app.managers[job_manager_name]
        self.job_manager = job_manager
        self.file_cache = file_cache
        self.object_store = object_store

    def __app_args(self):
        # Arguments that would be specified from PulsarApp if running
        # in web server.
        return {
            'manager': self.job_manager,
            'file_cache': self.file_cache,
            'object_store': self.object_store,
            'ip': None
        }

    def execute(self, command, args=None, data=None, input_path=None, output_path=None):
        """Invoke the route controller for ``command`` directly in-process."""
        if args is None:
            args = {}
        # If data set, should be unicode (on Python 2) or str (on Python 3).
        from pulsar.web import routes
        from pulsar.web.framework import build_func_args
        controller = getattr(routes, command)
        action = controller.func
        body_args = dict(body=self.__build_body(data, input_path))
        args = build_func_args(action, args.copy(), self.__app_args(), body_args)
        result = action(**args)
        if controller.response_type != 'file':
            return controller.body(result)
        else:
            # File responses: the controller returns a path; stream its
            # contents into the requested output_path.
            with open(result, 'rb') as result_file:
                copy_to_path(result_file, output_path)

    def __build_body(self, data, input_path):
        # Mimic an HTTP request body: raw bytes, an opened input file, or None.
        if data is not None:
            return BytesIO(data)
        elif input_path is not None:
            return open(input_path, 'rb')
        else:
            return None
|
en
| 0.80979
|
Abstract base class describes how synchronous client communicates with (potentially remote) Pulsar procedures. Obvious implementation is HTTP based but Pulsar objects wrapped in routes can also be directly communicated with if in memory. Execute the correspond command against configured Pulsar job manager. Arguments are method parameters and data or input_path describe essentially POST bodies. If command results in a file, resulting path should be specified as output_path. # Default to GET is no data, POST otherwise # Arguments that would be specified from PulsarApp if running # in web server. # If data set, should be unicode (on Python 2) or str (on Python 3).
| 2.412095
| 2
|
dlfairness/other/bias_result_display/2_0_baseline_vs_other.py
|
lin-tan/fairness-variance
| 0
|
6629486
|
<gh_stars>0
# Baseline vs. others
# One model, one metric per graph
# Renders, per paper and per bias metric, a horizontal box plot comparing the
# baseline setting(s) against each mitigation setting.
import matplotlib.pyplot as plt
import yaml
from pathlib import Path
from abbrs import prefix_abbr, setting_abbr, metric_abbr, metric_display
# Only these papers / setting abbreviations are included in the figures.
needed_paper_list = ['EffStrategies', 'FairALM-CelebA', 'NIFR']
needed_setting_list = ['S-Base', 'S-GR', 'A-Base', 'A-ALM', 'A-L2', 'N-Base', 'N-Flow']
with open('./configs.yaml', 'r') as f:
    configs = yaml.full_load(f)
with open('./bias_metric.yaml', 'r') as f:
    bias_metric_list = yaml.full_load(f)
for paper, config in configs.items():
    if paper not in needed_paper_list:
        continue
    baseline, mitigation = config['settings']
    all_settings = baseline + mitigation
    # Aggregated results produced by an earlier processing step.
    p = Path('./processed_data', paper)
    with open(str(Path(p, 'overall_stat.yaml')), 'r') as f:
        overall_stat = yaml.load(f, Loader=yaml.Loader)  # {(setting, bias_metric): {stat: value}}
    with open(str(Path(p, 'overall_raw.yaml')), 'r') as f:
        overall_raw = yaml.load(f, Loader=yaml.Loader)  # {(setting, bias_metric): [value (each run)]}
    p = Path('./figures', paper)
    p.mkdir(exist_ok=True, parents=True)
    for bias_metric in bias_metric_list:
        print('Start:', paper, bias_metric)
        # Earlier bar-chart variant, kept (as a string literal) for reference.
        '''
        fig = plt.figure()
        ax = fig.subplots(1, 1)
        values = []
        labels = []
        for setting in all_settings:
            values.append(float(overall_stat[(setting, bias_metric)]['rel_maxdiff']))
            labels.append(setting)
        ax.bar(labels, values)
        '''
        # One horizontal box (distribution over runs) per included setting.
        fig = plt.figure(figsize=(7, 2))
        ax = fig.subplots(1, 1)
        values = []
        labels = []
        for idx, setting in enumerate(all_settings):
            if setting_abbr[paper][setting] not in needed_setting_list:
                continue
            values.append(overall_raw[(setting, bias_metric)])
            labels.append(setting_abbr[paper][setting])
        ax.boxplot(values, vert=False, labels=labels, widths=0.5, showfliers=True)
        ax.invert_yaxis()  # list settings top-to-bottom in their original order
        #ax.set_title(metric_display[bias_metric])
        #fig.suptitle(metric_display[bias_metric])
        #fig.tight_layout(pad=0.5)
        fig_fn = str(Path(p, metric_abbr[bias_metric] + '.png'))
        fig.savefig(fig_fn, bbox_inches='tight', dpi=600)
        plt.close(fig)
|
# Baseline vs. others
# One model, one metric per graph
# Renders, per paper and per bias metric, a horizontal box plot comparing the
# baseline setting(s) against each mitigation setting.
import matplotlib.pyplot as plt
import yaml
from pathlib import Path
from abbrs import prefix_abbr, setting_abbr, metric_abbr, metric_display
# Only these papers / setting abbreviations are included in the figures.
needed_paper_list = ['EffStrategies', 'FairALM-CelebA', 'NIFR']
needed_setting_list = ['S-Base', 'S-GR', 'A-Base', 'A-ALM', 'A-L2', 'N-Base', 'N-Flow']
with open('./configs.yaml', 'r') as f:
    configs = yaml.full_load(f)
with open('./bias_metric.yaml', 'r') as f:
    bias_metric_list = yaml.full_load(f)
for paper, config in configs.items():
    if paper not in needed_paper_list:
        continue
    baseline, mitigation = config['settings']
    all_settings = baseline + mitigation
    # Aggregated results produced by an earlier processing step.
    p = Path('./processed_data', paper)
    with open(str(Path(p, 'overall_stat.yaml')), 'r') as f:
        overall_stat = yaml.load(f, Loader=yaml.Loader)  # {(setting, bias_metric): {stat: value}}
    with open(str(Path(p, 'overall_raw.yaml')), 'r') as f:
        overall_raw = yaml.load(f, Loader=yaml.Loader)  # {(setting, bias_metric): [value (each run)]}
    p = Path('./figures', paper)
    p.mkdir(exist_ok=True, parents=True)
    for bias_metric in bias_metric_list:
        print('Start:', paper, bias_metric)
        # Earlier bar-chart variant, kept (as a string literal) for reference.
        '''
        fig = plt.figure()
        ax = fig.subplots(1, 1)
        values = []
        labels = []
        for setting in all_settings:
            values.append(float(overall_stat[(setting, bias_metric)]['rel_maxdiff']))
            labels.append(setting)
        ax.bar(labels, values)
        '''
        # One horizontal box (distribution over runs) per included setting.
        fig = plt.figure(figsize=(7, 2))
        ax = fig.subplots(1, 1)
        values = []
        labels = []
        for idx, setting in enumerate(all_settings):
            if setting_abbr[paper][setting] not in needed_setting_list:
                continue
            values.append(overall_raw[(setting, bias_metric)])
            labels.append(setting_abbr[paper][setting])
        ax.boxplot(values, vert=False, labels=labels, widths=0.5, showfliers=True)
        ax.invert_yaxis()  # list settings top-to-bottom in their original order
        #ax.set_title(metric_display[bias_metric])
        #fig.suptitle(metric_display[bias_metric])
        #fig.tight_layout(pad=0.5)
        fig_fn = str(Path(p, metric_abbr[bias_metric] + '.png'))
        fig.savefig(fig_fn, bbox_inches='tight', dpi=600)
        plt.close(fig)
|
en
| 0.304696
|
# Baseline vs. others # One model, one metric per graph # {(setting, bias_metric): {stat: value}} # {(setting, bias_metric): [value (each run)]} fig = plt.figure() ax = fig.subplots(1, 1) values = [] labels = [] for setting in all_settings: values.append(float(overall_stat[(setting, bias_metric)]['rel_maxdiff'])) labels.append(setting) ax.bar(labels, values) #ax.set_title(metric_display[bias_metric]) #fig.suptitle(metric_display[bias_metric]) #fig.tight_layout(pad=0.5)
| 2.20875
| 2
|
arp_spoofer.py
|
ReyhaneAbtahi/ARP-Spoofer
| 0
|
6629487
|
<filename>arp_spoofer.py<gh_stars>0
from scapy.all import Ether, ARP, srp, send
import time
def get_mac(ip):
    """Resolve the MAC address of ``ip`` via a broadcast ARP who-has.

    Returns None if no host answers within the 3 second timeout.
    """
    ans, _ = srp(Ether(dst='ff:ff:ff:ff:ff:ff') /
                 ARP(pdst=ip), timeout=3, verbose=0)
    if ans:
        return ans[0][1].src
def spoof(target_ip, spoof_ip, verbose=True):
    """Poison ``target_ip``'s ARP cache so it maps ``spoof_ip`` to our MAC."""
    # op=2 is an ARP reply; leaving hwsrc unset lets scapy use our own MAC.
    # NOTE(review): get_mac may return None when the target does not answer,
    # which would send hwdst=None -- confirm intended behavior.
    packet = ARP(op=2, pdst=target_ip, hwdst=get_mac(target_ip),
                 psrc=spoof_ip)
    send(packet, verbose=False)
    if verbose:
        self_mac = ARP().hwsrc
        print("[+] Sent to {} : {} is-at {}".format(target_ip, spoof_ip, self_mac))
def restore(destination_ip, source_ip, verbose=True):
    """Undo the poisoning by re-announcing the genuine source MAC."""
    destination_mac = get_mac(destination_ip)
    source_mac = get_mac(source_ip)
    packet = ARP(op=2, pdst=destination_ip, hwdst=destination_mac,
                 psrc=source_ip, hwsrc=source_mac)
    send(packet, verbose=0)
    if verbose:
        print("[+] Sent to {} : {} is-at {}".format(destination_ip,
                                                    source_ip, source_mac))
# Continuously poison both targets (man-in-the-middle) every 2 seconds until
# Ctrl+C, then restore the genuine ARP entries in both directions.
target1_ip = input("Enter your target1 IP: ")
target2_ip = input("Enter your target2 IP: ")
try:
    while True:
        spoof(target1_ip, target2_ip)
        spoof(target2_ip, target1_ip)
        time.sleep(2)
except KeyboardInterrupt:
    print("\nCtrl + C pressed.............Exiting")
    restore(target2_ip, target1_ip)
    restore(target1_ip, target2_ip)
    print("[+] Arp Spoof Stopped")
|
<filename>arp_spoofer.py<gh_stars>0
from scapy.all import Ether, ARP, srp, send
import time
def get_mac(ip):
    """Resolve the MAC address of ``ip`` via a broadcast ARP who-has.

    Returns None if no host answers within the 3 second timeout.
    """
    ans, _ = srp(Ether(dst='ff:ff:ff:ff:ff:ff') /
                 ARP(pdst=ip), timeout=3, verbose=0)
    if ans:
        return ans[0][1].src
def spoof(target_ip, spoof_ip, verbose=True):
    """Poison ``target_ip``'s ARP cache so it maps ``spoof_ip`` to our MAC."""
    # op=2 is an ARP reply; leaving hwsrc unset lets scapy use our own MAC.
    # NOTE(review): get_mac may return None when the target does not answer,
    # which would send hwdst=None -- confirm intended behavior.
    packet = ARP(op=2, pdst=target_ip, hwdst=get_mac(target_ip),
                 psrc=spoof_ip)
    send(packet, verbose=False)
    if verbose:
        self_mac = ARP().hwsrc
        print("[+] Sent to {} : {} is-at {}".format(target_ip, spoof_ip, self_mac))
def restore(destination_ip, source_ip, verbose=True):
    """Undo the poisoning by re-announcing the genuine source MAC."""
    destination_mac = get_mac(destination_ip)
    source_mac = get_mac(source_ip)
    packet = ARP(op=2, pdst=destination_ip, hwdst=destination_mac,
                 psrc=source_ip, hwsrc=source_mac)
    send(packet, verbose=0)
    if verbose:
        print("[+] Sent to {} : {} is-at {}".format(destination_ip,
                                                    source_ip, source_mac))
# Continuously poison both targets (man-in-the-middle) every 2 seconds until
# Ctrl+C, then restore the genuine ARP entries in both directions.
target1_ip = input("Enter your target1 IP: ")
target2_ip = input("Enter your target2 IP: ")
try:
    while True:
        spoof(target1_ip, target2_ip)
        spoof(target2_ip, target1_ip)
        time.sleep(2)
except KeyboardInterrupt:
    print("\nCtrl + C pressed.............Exiting")
    restore(target2_ip, target1_ip)
    restore(target1_ip, target2_ip)
    print("[+] Arp Spoof Stopped")
|
none
| 1
| 2.948661
| 3
|
|
python/day_16.py
|
benedwards14/Advent-of-Code-2020
| 0
|
6629488
|
import dataclasses
import numpy
import re
from typing import Dict, List, Set, Tuple
import utils
# One named field from the rules section, e.g. "class: 1-3 or 5-7".
# Frozen so instances are hashable.
@dataclasses.dataclass(frozen=True)
class TicketField:
    # name: the field's label; range: the set of all values valid for it.
    name: str
    range: Set[int]
@dataclasses.dataclass
class Ticket:
    # Raw field values in the order they appear on the ticket.
    fields: List[int]
def parse_ticket_fields(ticket_field_strs: List[str]) -> List[TicketField]:
    """Parse rule lines like ``"class: 1-3 or 5-7"`` into TicketField records.

    Each field's valid set is the union of its two inclusive ranges.
    """
    pattern = r"([a-z ]*): ([0-9]*)-([0-9]*) or ([0-9]*)-([0-9]*)"
    parsed = []
    for line in ticket_field_strs:
        match = re.match(pattern, line)
        assert match is not None
        name, lo1, hi1, lo2, hi2 = match.groups()
        valid = (
            set(range(int(lo1), int(hi1) + 1))
            | set(range(int(lo2), int(hi2) + 1))
        )
        parsed.append(TicketField(name, valid))
    return parsed
def parse_ticket(ticket_str: str) -> Ticket:
    """Parse a comma-separated line of integers into a Ticket."""
    values = [int(v) for v in ticket_str.split(',')]
    return Ticket(values)
def parse_input() -> Tuple[List[TicketField], Ticket, List[Ticket]]:
    """Read the day-16 puzzle input and split it into its three sections:
    the field rules, your ticket, and the nearby tickets.
    """
    raw = utils.get_data(16)
    field_section, yours_section, nearby_section = raw.split("\n\n")
    # The first line of each ticket section is a header ("your ticket:" /
    # "nearby tickets:") and is skipped.
    your_ticket = parse_ticket(yours_section.splitlines()[1])
    nearby = [parse_ticket(line) for line in nearby_section.splitlines()[1:]]
    return parse_ticket_fields(field_section.splitlines()), your_ticket, nearby
def find_one_to_one_mapping(mapping: Dict[str, Set[int]]) -> Dict[str, int]:
    """Reduce a key -> candidate-values mapping to a unique key -> value map.

    Repeatedly assigns any key whose remaining candidates are a single value,
    removes assigned values from contention, and iterates until every value
    has been assigned (constraint-propagation elimination).

    Raises:
        ValueError: if no key is ever forced to a single candidate, which
            previously caused an infinite loop.
    """
    all_values = set.union(*[values for values in mapping.values()])
    remaining_values = all_values
    remaining_keys = set(mapping.keys())
    new_mapping: Dict[str, int] = {}
    while remaining_values:
        progressed = False
        for key in remaining_keys:
            potential_mappings = mapping[key] & remaining_values
            if len(potential_mappings) == 1:
                new_mapping[key] = potential_mappings.pop()
                progressed = True
        if not progressed:
            raise ValueError("mapping is ambiguous; no unique assignment exists")
        remaining_keys = set(mapping.keys()) - set(new_mapping.keys())
        remaining_values = all_values - set(new_mapping.values())
    return new_mapping
def find_fields(
    fields: List[TicketField],
    tickets: List[Ticket]
) -> Dict[str, int]:
    """Deduce which ticket position holds each field.

    Every position starts as a candidate for every field; each ticket then
    eliminates the positions whose value falls outside a field's valid range.
    The surviving candidates are resolved to a unique assignment.

    Returns a mapping from field *name* to position index. (The previous
    annotation claimed ``Dict[TicketField, int]``, but the keys are the
    string names -- see the ``field.startswith("departure")`` usage below.)
    """
    field_name_map = {
        field.name: field for field in fields
    }
    max_index = range(len(tickets[0].fields))
    field_to_index = {
        field.name: set(max_index) for field in fields
    }
    for ticket in tickets:
        field_to_index = {
            field_name: {
                index
                for index in value_indices
                if ticket.fields[index] in field_name_map[field_name].range
            }
            for field_name, value_indices in field_to_index.items()
        }
    return find_one_to_one_mapping(field_to_index)
def remove_invalid_tickets(
    ticket_fields: List[TicketField],
    tickets: List[Ticket]
) -> Tuple[List[Ticket], int]:
    """Partition tickets into valid ones and a summed "error rate".

    A ticket is invalid when any of its values is outside every field's
    valid range; those out-of-range values are summed into the error rate.
    """
    valid_values = set.union(*(field.range for field in ticket_fields))
    kept = []
    error_rate = 0
    for ticket in tickets:
        out_of_range = set(ticket.fields) - valid_values
        if out_of_range:
            error_rate += sum(out_of_range)
        else:
            kept.append(ticket)
    return kept, error_rate
if __name__ == "__main__":
    ticket_fields, your_ticket, nearby_tickets = parse_input()
    # Part 1: discard tickets containing values valid for no field at all;
    # the sum of those values is the puzzle's "error rate".
    nearby_tickets, error_rate = remove_invalid_tickets(
        ticket_fields, nearby_tickets
    )
    assert error_rate == 25788
    # Part 2: deduce each field's position, then multiply your ticket's
    # values for the fields whose name starts with "departure".
    field_map = find_fields(ticket_fields, nearby_tickets)
    assert (
        numpy.prod(
            list(
                your_ticket.fields[index]
                for field, index in field_map.items()
                if field.startswith("departure")
            )
        )
    ) == 3902565915559
|
import dataclasses
import numpy
import re
from typing import Dict, List, Set, Tuple
import utils
@dataclasses.dataclass(frozen=True)
class TicketField:
name: str
range: Set[int]
@dataclasses.dataclass
class Ticket:
fields: List[int]
def parse_ticket_fields(ticket_field_strs: List[str]) -> List[TicketField]:
ticket_fields = []
for field_str in ticket_field_strs:
match = re.match(
r"([a-z ]*): ([0-9]*)-([0-9]*) or ([0-9]*)-([0-9]*)", field_str
)
assert match is not None
ticket_fields.append(
TicketField(
match.group(1),
set(range(int(match.group(2)), int(match.group(3)) + 1))
| set(range(int(match.group(4)), int(match.group(5)) + 1))
)
)
return ticket_fields
def parse_ticket(ticket_str: str) -> Ticket:
return Ticket([int(field) for field in ticket_str.split(',')])
def parse_input() -> Tuple[List[TicketField], Ticket, List[Ticket]]:
ticket_field_strs, your_ticket_str, nearby_ticket_strs = utils.get_data(
16
).split("\n\n")
return (
parse_ticket_fields(ticket_field_strs.splitlines()),
parse_ticket(your_ticket_str.splitlines()[1]),
[
parse_ticket(ticket_str)
for ticket_str in nearby_ticket_strs.splitlines()[1:]
]
)
def find_one_to_one_mapping(mapping: Dict[str, Set[int]]) -> Dict[str, int]:
    """Reduce a key -> candidate-values mapping to a unique key -> value map.

    Repeatedly assigns any key whose remaining candidates are a single value,
    removes assigned values from contention, and iterates until every value
    has been assigned (constraint-propagation elimination).

    Raises:
        ValueError: if no key is ever forced to a single candidate, which
            previously caused an infinite loop.
    """
    all_values = set.union(*[values for values in mapping.values()])
    remaining_values = all_values
    remaining_keys = set(mapping.keys())
    new_mapping: Dict[str, int] = {}
    while remaining_values:
        progressed = False
        for key in remaining_keys:
            potential_mappings = mapping[key] & remaining_values
            if len(potential_mappings) == 1:
                new_mapping[key] = potential_mappings.pop()
                progressed = True
        if not progressed:
            raise ValueError("mapping is ambiguous; no unique assignment exists")
        remaining_keys = set(mapping.keys()) - set(new_mapping.keys())
        remaining_values = all_values - set(new_mapping.values())
    return new_mapping
def find_fields(
fields: List[TicketField],
tickets: List[Ticket]
) -> Dict[TicketField, int]:
field_name_map = {
field.name: field for field in fields
}
max_index = range(len(tickets[0].fields))
field_to_index = {
field.name: set(max_index) for field in fields
}
for ticket in tickets:
field_to_index = {
field_name: {
index
for index in value_indices
if ticket.fields[index] in field_name_map[field_name].range
}
for field_name, value_indices in field_to_index.items()
}
return find_one_to_one_mapping(field_to_index)
def remove_invalid_tickets(
ticket_fields: List[TicketField],
tickets: List[Ticket]
) -> Tuple[List[Ticket], int]:
valid_tickets = []
error_rate = 0
valid_values = set.union(*[field.range for field in ticket_fields])
for ticket in tickets:
if (invalid_values := set(ticket.fields) - valid_values):
error_rate += sum(invalid_values)
else:
valid_tickets.append(ticket)
return valid_tickets, error_rate
if __name__ == "__main__":
ticket_fields, your_ticket, nearby_tickets = parse_input()
nearby_tickets, error_rate = remove_invalid_tickets(
ticket_fields, nearby_tickets
)
assert error_rate == 25788
field_map = find_fields(ticket_fields, nearby_tickets)
assert (
numpy.prod(
list(
your_ticket.fields[index]
for field, index in field_map.items()
if field.startswith("departure")
)
)
) == 3902565915559
|
none
| 1
| 2.997648
| 3
|
|
irrigator_pro/contact_info/templatetags/phone_filters.py
|
warnes/irrigatorpro
| 0
|
6629489
|
<reponame>warnes/irrigatorpro
from django import template
register = template.Library()
@register.filter(expects_localtime=True)
def us_number(value):
    """Format a 10-character digit string as a US phone number.

    "1234567890" becomes "(123) 456-7890"; any value that is not exactly
    10 characters long is returned unchanged.
    """
    if len(value) != 10:
        return value
    area, exchange, line = value[:3], value[3:6], value[6:]
    return "({}) {}-{}".format(area, exchange, line)
|
from django import template
register = template.Library()
@register.filter(expects_localtime=True)
def us_number(value):
    """Format a 10-character digit string as a US phone number.

    "1234567890" becomes "(123) 456-7890"; any value that is not exactly
    10 characters long is returned unchanged.
    """
    if len(value) != 10:
        return value
    area, exchange, line = value[:3], value[3:6], value[6:]
    return "({}) {}-{}".format(area, exchange, line)
|
none
| 1
| 2.187031
| 2
|
|
control/tests/convert_test.py
|
josephcslater/python-control
| 1
|
6629490
|
<filename>control/tests/convert_test.py
#!/usr/bin/env python
"""convert_test.py
Test state space and transfer function conversion.
Currently, this unit test script is not complete. It converts several random
state spaces back and forth between state space and transfer function
representations. Ideally, it should be able to assert that the conversion
outputs are correct. This is not yet implemented.
Also, the conversion seems to enter an infinite loop once in a while. The cause
of this is unknown.
"""
from __future__ import print_function
from warnings import warn
import numpy as np
import pytest
from control import rss, ss, ss2tf, tf, tf2ss
from control.statesp import _mimo2siso
from control.statefbk import ctrb, obsv
from control.freqplot import bode
from control.exception import slycot_check
from control.tests.conftest import slycotonly
# Set to True to print systems to the output.
verbose = False
# Maximum number of states to test + 1
maxStates = 4
# Maximum number of inputs and outputs to test + 1
# If slycot is not installed, just check SISO
maxIO = 5 if slycot_check() else 2
@pytest.fixture
def fixedseed():
    """Seed NumPy's RNG so the random systems drawn in each test are
    reproducible.

    The previous signature, ``fixedseed(scope='module')``, mistakenly
    declared ``scope`` as a fixture argument; fixture scope must be passed
    to the ``pytest.fixture`` decorator instead. Default (function) scope
    is kept so every test starts from the same seed.
    """
    np.random.seed(7)
class TestConvert:
    """Test state space and transfer function conversions."""

    def printSys(self, sys, ind):
        """Print system to the standard output."""
        print("sys%i:\n" % ind)
        print(sys)

    @pytest.mark.parametrize("states", range(1, maxStates))
    @pytest.mark.parametrize("inputs", range(1, maxIO))
    @pytest.mark.parametrize("outputs", range(1, maxIO))
    def testConvert(self, fixedseed, states, inputs, outputs):
        """Test state space to transfer function conversion.

        start with a random SS system and transform to TF then
        back to SS, check that the matrices are the same.
        """
        ssOriginal = rss(states, outputs, inputs)
        if verbose:
            self.printSys(ssOriginal, 1)

        # Make sure the system is not degenerate
        Cmat = ctrb(ssOriginal.A, ssOriginal.B)
        if (np.linalg.matrix_rank(Cmat) != states):
            pytest.skip("not reachable")
        Omat = obsv(ssOriginal.A, ssOriginal.C)
        if (np.linalg.matrix_rank(Omat) != states):
            pytest.skip("not observable")

        # Round-trip: SS -> TF -> SS -> TF; every representation should have
        # the same I/O behavior.
        tfOriginal = tf(ssOriginal)
        if (verbose):
            self.printSys(tfOriginal, 2)

        ssTransformed = ss(tfOriginal)
        if (verbose):
            self.printSys(ssTransformed, 3)

        tfTransformed = tf(ssTransformed)
        if (verbose):
            self.printSys(tfTransformed, 4)

        # Check to see if the state space systems have same dim
        if (ssOriginal.nstates != ssTransformed.nstates) and verbose:
            print("WARNING: state space dimension mismatch: %d versus %d" %
                  (ssOriginal.nstates, ssTransformed.nstates))

        # Now make sure the frequency responses match
        # Since bode() only handles SISO, go through each I/O pair
        # For phase, take sine and cosine to avoid +/- 360 offset
        for inputNum in range(inputs):
            for outputNum in range(outputs):
                if (verbose):
                    print("Checking input %d, output %d"
                          % (inputNum, outputNum))
                ssorig_mag, ssorig_phase, ssorig_omega = \
                    bode(_mimo2siso(ssOriginal, inputNum, outputNum),
                         deg=False, plot=False)
                ssorig_real = ssorig_mag * np.cos(ssorig_phase)
                ssorig_imag = ssorig_mag * np.sin(ssorig_phase)

                #
                # Make sure TF has same frequency response
                #
                num = tfOriginal.num[outputNum][inputNum]
                den = tfOriginal.den[outputNum][inputNum]
                tforig = tf(num, den)

                tforig_mag, tforig_phase, tforig_omega = \
                    bode(tforig, ssorig_omega,
                         deg=False, plot=False)

                tforig_real = tforig_mag * np.cos(tforig_phase)
                tforig_imag = tforig_mag * np.sin(tforig_phase)
                np.testing.assert_array_almost_equal(
                    ssorig_real, tforig_real)
                np.testing.assert_array_almost_equal(
                    ssorig_imag, tforig_imag)

                #
                # Make sure xform'd SS has same frequency response
                #
                ssxfrm_mag, ssxfrm_phase, ssxfrm_omega = \
                    bode(_mimo2siso(ssTransformed,
                                    inputNum, outputNum),
                         ssorig_omega,
                         deg=False, plot=False)
                ssxfrm_real = ssxfrm_mag * np.cos(ssxfrm_phase)
                ssxfrm_imag = ssxfrm_mag * np.sin(ssxfrm_phase)
                np.testing.assert_array_almost_equal(
                    ssorig_real, ssxfrm_real, decimal=5)
                np.testing.assert_array_almost_equal(
                    ssorig_imag, ssxfrm_imag, decimal=5)

                # Make sure xform'd TF has same frequency response
                #
                num = tfTransformed.num[outputNum][inputNum]
                den = tfTransformed.den[outputNum][inputNum]
                tfxfrm = tf(num, den)
                tfxfrm_mag, tfxfrm_phase, tfxfrm_omega = \
                    bode(tfxfrm, ssorig_omega,
                         deg=False, plot=False)

                tfxfrm_real = tfxfrm_mag * np.cos(tfxfrm_phase)
                tfxfrm_imag = tfxfrm_mag * np.sin(tfxfrm_phase)
                np.testing.assert_array_almost_equal(
                    ssorig_real, tfxfrm_real, decimal=5)
                np.testing.assert_array_almost_equal(
                    ssorig_imag, tfxfrm_imag, decimal=5)

    def testConvertMIMO(self):
        """Test state space to transfer function conversion.

        Do a MIMO conversion and make sure that it is processed
        correctly both with and without slycot

        Example from issue gh-120, jgoppert
        """

        # Set up a 1x3 transfer function (should always work)
        tsys = tf([[[-235, 1.146e4],
                    [-235, 1.146E4],
                    [-235, 1.146E4, 0]]],
                  [[[1, 48.78, 0],
                    [1, 48.78, 0, 0],
                    [0.008, 1.39, 48.78]]])

        # Convert to state space and look for an error
        # (MIMO tf2ss requires slycot; without it a TypeError is expected)
        if (not slycot_check()):
            with pytest.raises(TypeError):
                tf2ss(tsys)
        else:
            ssys = tf2ss(tsys)
            assert ssys.B.shape[1] == 3
            assert ssys.C.shape[0] == 1

    def testTf2ssStaticSiso(self):
        """Regression: tf2ss for SISO static gain"""
        gsiso = tf2ss(tf(23, 46))
        assert 0 == gsiso.nstates
        assert 1 == gsiso.ninputs
        assert 1 == gsiso.noutputs
        # in all cases ratios are exactly representable, so assert_array_equal
        # is fine
        np.testing.assert_array_equal([[0.5]], gsiso.D)

    def testTf2ssStaticMimo(self):
        """Regression: tf2ss for MIMO static gain"""
        # 2x3 TFM
        gmimo = tf2ss(tf(
            [[ [23], [3], [5] ], [ [-1], [0.125], [101.3] ]],
            [[ [46], [0.1], [80] ], [ [2], [-0.1], [1] ]]))
        assert 0 == gmimo.nstates
        assert 3 == gmimo.ninputs
        assert 2 == gmimo.noutputs
        d = np.array([[0.5, 30, 0.0625], [-0.5, -1.25, 101.3]])
        np.testing.assert_array_equal(d, gmimo.D)

    def testSs2tfStaticSiso(self):
        """Regression: ss2tf for SISO static gain"""
        gsiso = ss2tf(ss([], [], [], 0.5))
        np.testing.assert_array_equal([[[0.5]]], gsiso.num)
        np.testing.assert_array_equal([[[1.]]], gsiso.den)

    def testSs2tfStaticMimo(self):
        """Regression: ss2tf for MIMO static gain"""
        # 2x3 TFM
        a = []
        b = []
        c = []
        d = np.array([[0.5, 30, 0.0625], [-0.5, -1.25, 101.3]])
        gtf = ss2tf(ss(a, b, c, d))

        # we need a 3x2x1 array to compare with gtf.num
        numref = d[..., np.newaxis]
        np.testing.assert_array_equal(numref,
                                      np.array(gtf.num) / np.array(gtf.den))

    @slycotonly
    def testTf2SsDuplicatePoles(self):
        """Tests for 'too few poles for MIMO tf gh-111'"""
        num = [[[1], [0]],
               [[0], [1]]]
        den = [[[1, 0], [1]],
               [[1], [1, 0]]]
        g = tf(num, den)
        s = ss(g)
        np.testing.assert_array_equal(g.pole(), s.pole())

    @slycotonly
    def test_tf2ss_robustness(self):
        """Unit test to make sure that tf2ss is working correctly. gh-240"""
        num = [ [[0], [1]], [[1], [0]] ]
        den1 = [ [[1], [1,1]], [[1,4], [1]] ]
        sys1tf = tf(num, den1)
        sys1ss = tf2ss(sys1tf)

        # slight perturbation
        den2 = [ [[1], [1e-10, 1, 1]], [[1,4], [1]] ]
        sys2tf = tf(num, den2)
        sys2ss = tf2ss(sys2tf)

        # Make sure that the poles match for StateSpace and TransferFunction
        np.testing.assert_array_almost_equal(np.sort(sys1tf.pole()),
                                             np.sort(sys1ss.pole()))
        np.testing.assert_array_almost_equal(np.sort(sys2tf.pole()),
                                             np.sort(sys2ss.pole()))

    def test_tf2ss_nonproper(self):
        """Unit tests for non-proper transfer functions"""
        # Easy case: input 2 to output 1 is 's'
        num = [ [[0], [1, 0]], [[1], [0]] ]
        den1 = [ [[1], [1]], [[1,4], [1]] ]
        with pytest.raises(ValueError):
            tf2ss(tf(num, den1))

        # Trickier case (make sure that leading zeros in den are handled)
        num = [ [[0], [1, 0]], [[1], [0]] ]
        den1 = [ [[1], [0, 1]], [[1,4], [1]] ]
        with pytest.raises(ValueError):
            tf2ss(tf(num, den1))
|
<filename>control/tests/convert_test.py
#!/usr/bin/env python
"""convert_test.py
Test state space and transfer function conversion.
Currently, this unit test script is not complete. It converts several random
state spaces back and forth between state space and transfer function
representations. Ideally, it should be able to assert that the conversion
outputs are correct. This is not yet implemented.
Also, the conversion seems to enter an infinite loop once in a while. The cause
of this is unknown.
"""
from __future__ import print_function
from warnings import warn
import numpy as np
import pytest
from control import rss, ss, ss2tf, tf, tf2ss
from control.statesp import _mimo2siso
from control.statefbk import ctrb, obsv
from control.freqplot import bode
from control.exception import slycot_check
from control.tests.conftest import slycotonly
# Set to True to print systems to the output.
verbose = False
# Maximum number of states to test + 1
maxStates = 4
# Maximum number of inputs and outputs to test + 1
# If slycot is not installed, just check SISO
maxIO = 5 if slycot_check() else 2
@pytest.fixture
def fixedseed(scope='module'):
"""Get consistent results"""
np.random.seed(7)
class TestConvert:
"""Test state space and transfer function conversions."""
def printSys(self, sys, ind):
"""Print system to the standard output."""
print("sys%i:\n" % ind)
print(sys)
@pytest.mark.parametrize("states", range(1, maxStates))
@pytest.mark.parametrize("inputs", range(1, maxIO))
@pytest.mark.parametrize("outputs", range(1, maxIO))
def testConvert(self, fixedseed, states, inputs, outputs):
"""Test state space to transfer function conversion.
start with a random SS system and transform to TF then
back to SS, check that the matrices are the same.
"""
ssOriginal = rss(states, outputs, inputs)
if verbose:
self.printSys(ssOriginal, 1)
# Make sure the system is not degenerate
Cmat = ctrb(ssOriginal.A, ssOriginal.B)
if (np.linalg.matrix_rank(Cmat) != states):
pytest.skip("not reachable")
Omat = obsv(ssOriginal.A, ssOriginal.C)
if (np.linalg.matrix_rank(Omat) != states):
pytest.skip("not observable")
tfOriginal = tf(ssOriginal)
if (verbose):
self.printSys(tfOriginal, 2)
ssTransformed = ss(tfOriginal)
if (verbose):
self.printSys(ssTransformed, 3)
tfTransformed = tf(ssTransformed)
if (verbose):
self.printSys(tfTransformed, 4)
# Check to see if the state space systems have same dim
if (ssOriginal.nstates != ssTransformed.nstates) and verbose:
print("WARNING: state space dimension mismatch: %d versus %d" %
(ssOriginal.nstates, ssTransformed.nstates))
# Now make sure the frequency responses match
# Since bode() only handles SISO, go through each I/O pair
# For phase, take sine and cosine to avoid +/- 360 offset
for inputNum in range(inputs):
for outputNum in range(outputs):
if (verbose):
print("Checking input %d, output %d"
% (inputNum, outputNum))
ssorig_mag, ssorig_phase, ssorig_omega = \
bode(_mimo2siso(ssOriginal, inputNum, outputNum),
deg=False, plot=False)
ssorig_real = ssorig_mag * np.cos(ssorig_phase)
ssorig_imag = ssorig_mag * np.sin(ssorig_phase)
#
# Make sure TF has same frequency response
#
num = tfOriginal.num[outputNum][inputNum]
den = tfOriginal.den[outputNum][inputNum]
tforig = tf(num, den)
tforig_mag, tforig_phase, tforig_omega = \
bode(tforig, ssorig_omega,
deg=False, plot=False)
tforig_real = tforig_mag * np.cos(tforig_phase)
tforig_imag = tforig_mag * np.sin(tforig_phase)
np.testing.assert_array_almost_equal(
ssorig_real, tforig_real)
np.testing.assert_array_almost_equal(
ssorig_imag, tforig_imag)
#
# Make sure xform'd SS has same frequency response
#
ssxfrm_mag, ssxfrm_phase, ssxfrm_omega = \
bode(_mimo2siso(ssTransformed,
inputNum, outputNum),
ssorig_omega,
deg=False, plot=False)
ssxfrm_real = ssxfrm_mag * np.cos(ssxfrm_phase)
ssxfrm_imag = ssxfrm_mag * np.sin(ssxfrm_phase)
np.testing.assert_array_almost_equal(
ssorig_real, ssxfrm_real, decimal=5)
np.testing.assert_array_almost_equal(
ssorig_imag, ssxfrm_imag, decimal=5)
# Make sure xform'd TF has same frequency response
#
num = tfTransformed.num[outputNum][inputNum]
den = tfTransformed.den[outputNum][inputNum]
tfxfrm = tf(num, den)
tfxfrm_mag, tfxfrm_phase, tfxfrm_omega = \
bode(tfxfrm, ssorig_omega,
deg=False, plot=False)
tfxfrm_real = tfxfrm_mag * np.cos(tfxfrm_phase)
tfxfrm_imag = tfxfrm_mag * np.sin(tfxfrm_phase)
np.testing.assert_array_almost_equal(
ssorig_real, tfxfrm_real, decimal=5)
np.testing.assert_array_almost_equal(
ssorig_imag, tfxfrm_imag, decimal=5)
def testConvertMIMO(self):
"""Test state space to transfer function conversion.
Do a MIMO conversion and make sure that it is processed
correctly both with and without slycot
Example from issue gh-120, jgoppert
"""
# Set up a 1x3 transfer function (should always work)
tsys = tf([[[-235, 1.146e4],
[-235, 1.146E4],
[-235, 1.146E4, 0]]],
[[[1, 48.78, 0],
[1, 48.78, 0, 0],
[0.008, 1.39, 48.78]]])
# Convert to state space and look for an error
if (not slycot_check()):
with pytest.raises(TypeError):
tf2ss(tsys)
else:
ssys = tf2ss(tsys)
assert ssys.B.shape[1] == 3
assert ssys.C.shape[0] == 1
def testTf2ssStaticSiso(self):
"""Regression: tf2ss for SISO static gain"""
gsiso = tf2ss(tf(23, 46))
assert 0 == gsiso.nstates
assert 1 == gsiso.ninputs
assert 1 == gsiso.noutputs
# in all cases ratios are exactly representable, so assert_array_equal
# is fine
np.testing.assert_array_equal([[0.5]], gsiso.D)
def testTf2ssStaticMimo(self):
"""Regression: tf2ss for MIMO static gain"""
# 2x3 TFM
gmimo = tf2ss(tf(
[[ [23], [3], [5] ], [ [-1], [0.125], [101.3] ]],
[[ [46], [0.1], [80] ], [ [2], [-0.1], [1] ]]))
assert 0 == gmimo.nstates
assert 3 == gmimo.ninputs
assert 2 == gmimo.noutputs
d = np.array([[0.5, 30, 0.0625], [-0.5, -1.25, 101.3]])
np.testing.assert_array_equal(d, gmimo.D)
def testSs2tfStaticSiso(self):
"""Regression: ss2tf for SISO static gain"""
gsiso = ss2tf(ss([], [], [], 0.5))
np.testing.assert_array_equal([[[0.5]]], gsiso.num)
np.testing.assert_array_equal([[[1.]]], gsiso.den)
def testSs2tfStaticMimo(self):
"""Regression: ss2tf for MIMO static gain"""
# 2x3 TFM
a = []
b = []
c = []
d = np.array([[0.5, 30, 0.0625], [-0.5, -1.25, 101.3]])
gtf = ss2tf(ss(a, b, c, d))
# we need a 3x2x1 array to compare with gtf.num
numref = d[..., np.newaxis]
np.testing.assert_array_equal(numref,
np.array(gtf.num) / np.array(gtf.den))
    @slycotonly
    def testTf2SsDuplicatePoles(self):
        """Tests for 'too few poles for MIMO tf gh-111'"""
        # Diagonal 2x2 TF with an integrator (pole at s=0) in each channel;
        # the duplicated pole must survive the tf -> ss conversion.
        num = [[[1], [0]],
               [[0], [1]]]
        den = [[[1, 0], [1]],
               [[1], [1, 0]]]
        g = tf(num, den)
        s = ss(g)
        # Both representations must report the same pole set.
        np.testing.assert_array_equal(g.pole(), s.pole())
    @slycotonly
    def test_tf2ss_robustness(self):
        """Unit test to make sure that tf2ss is working correctly. gh-240"""
        # 2x2 MIMO system; den2 below differs from den1 only by a tiny
        # (1e-10) leading denominator coefficient, which must not corrupt
        # the pole computation of the converted state-space model.
        num = [ [[0], [1]], [[1], [0]] ]
        den1 = [ [[1], [1,1]], [[1,4], [1]] ]
        sys1tf = tf(num, den1)
        sys1ss = tf2ss(sys1tf)
        # slight perturbation
        den2 = [ [[1], [1e-10, 1, 1]], [[1,4], [1]] ]
        sys2tf = tf(num, den2)
        sys2ss = tf2ss(sys2tf)
        # Make sure that the poles match for StateSpace and TransferFunction
        np.testing.assert_array_almost_equal(np.sort(sys1tf.pole()),
                                             np.sort(sys1ss.pole()))
        np.testing.assert_array_almost_equal(np.sort(sys2tf.pole()),
                                             np.sort(sys2ss.pole()))
    def test_tf2ss_nonproper(self):
        """Unit tests for non-proper transfer functions"""
        # Easy case: input 2 to output 1 is 's'
        # (numerator degree exceeds denominator degree, so tf2ss must reject)
        num = [ [[0], [1, 0]], [[1], [0]] ]
        den1 = [ [[1], [1]], [[1,4], [1]] ]
        with pytest.raises(ValueError):
            tf2ss(tf(num, den1))
        # Trickier case (make sure that leading zeros in den are handled)
        # [0, 1] has the same effective degree as [1], so this is still
        # non-proper and must also raise.
        num = [ [[0], [1, 0]], [[1], [0]] ]
        den1 = [ [[1], [0, 1]], [[1,4], [1]] ]
        with pytest.raises(ValueError):
            tf2ss(tf(num, den1))
|
en
| 0.874074
|
#!/usr/bin/env python convert_test.py Test state space and transfer function conversion. Currently, this unit test script is not complete. It converts several random state spaces back and forth between state space and transfer function representations. Ideally, it should be able to assert that the conversion outputs are correct. This is not yet implemented. Also, the conversion seems to enter an infinite loop once in a while. The cause of this is unknown. # Set to True to print systems to the output. # Maximum number of states to test + 1 # Maximum number of inputs and outputs to test + 1 # If slycot is not installed, just check SISO Get consistent results Test state space and transfer function conversions. Print system to the standard output. Test state space to transfer function conversion. start with a random SS system and transform to TF then back to SS, check that the matrices are the same. # Make sure the system is not degenerate # Check to see if the state space systems have same dim # Now make sure the frequency responses match # Since bode() only handles SISO, go through each I/O pair # For phase, take sine and cosine to avoid +/- 360 offset # # Make sure TF has same frequency response # # # Make sure xform'd SS has same frequency response # # Make sure xform'd TF has same frequency response # Test state space to transfer function conversion. 
Do a MIMO conversion and make sure that it is processed correctly both with and without slycot Example from issue gh-120, jgoppert # Set up a 1x3 transfer function (should always work) # Convert to state space and look for an error Regression: tf2ss for SISO static gain # in all cases ratios are exactly representable, so assert_array_equal # is fine Regression: tf2ss for MIMO static gain # 2x3 TFM Regression: ss2tf for SISO static gain Regression: ss2tf for MIMO static gain # 2x3 TFM # we need a 3x2x1 array to compare with gtf.num Tests for 'too few poles for MIMO tf gh-111' Unit test to make sure that tf2ss is working correctly. gh-240 # slight perturbation # Make sure that the poles match for StateSpace and TransferFunction Unit tests for non-proper transfer functions # Easy case: input 2 to output 1 is 's' # Trickier case (make sure that leading zeros in den are handled)
| 2.967863
| 3
|
NetTrade/__init__.py
|
zpoint/NetTrade
| 27
|
6629491
|
"""NetTrade tools"""
__version__ = '1.0.3'
|
"""NetTrade tools"""
__version__ = '1.0.3'
|
en
| 0.458184
|
NetTrade tools
| 1.069138
| 1
|
salt/states/docker_network.py
|
dmyerscough/salt
| 0
|
6629492
|
<gh_stars>0
# -*- coding: utf-8 -*-
'''
Management of Docker networks
.. versionadded:: 2017.7.0
:depends: docker_ Python module
.. note::
Older releases of the Python bindings for Docker were called docker-py_ in
PyPI. All releases of docker_, and releases of docker-py_ >= 1.6.0 are
supported. These python bindings can easily be installed using
:py:func:`pip.install <salt.modules.pip.install>`:
.. code-block:: bash
salt myminion pip.install docker
To upgrade from docker-py_ to docker_, you must first uninstall docker-py_,
and then install docker_:
.. code-block:: bash
salt myminion pip.uninstall docker-py
salt myminion pip.install docker
.. _docker: https://pypi.python.org/pypi/docker
.. _docker-py: https://pypi.python.org/pypi/docker-py
These states were moved from the :mod:`docker <salt.states.docker>` state
module (formerly called **dockerng**) in the 2017.7.0 release.
'''
from __future__ import absolute_import
import logging
# Import salt libs
from salt.ext import six
import salt.utils
# Enable proper logging
log = logging.getLogger(__name__) # pylint: disable=invalid-name
# Define the module's virtual name
__virtualname__ = 'docker_network'
__virtual_aliases__ = ('moby_network',)
def __virtual__():
    '''
    Only load if the docker execution module is available
    '''
    # Bail out with an explanatory (False, reason) tuple when the docker
    # execution module was not loaded on this minion.
    if 'docker.version' not in __salt__:
        return (False, __salt__.missing_fun_string('docker.version'))
    return __virtualname__
def present(name,
            driver=None,
            driver_opts=None,
            gateway=None,
            ip_range=None,
            subnet=None,
            containers=None):
    '''
    Ensure that a network is present.

    Returns a standard Salt state return dict with ``name``, ``changes``,
    ``result`` and ``comment`` keys.

    name
        Name of the network
    driver
        Type of driver for that network.
    driver_opts
        Options for the network driver.
    gateway
        IPv4 or IPv6 gateway for the master subnet
    ip_range
        Allocate container IP from a sub-range within the subnet
    containers:
        List of container names that should be part of this network
    subnet:
        Subnet in CIDR format that represents a network segment

    Usage Examples:

    .. code-block:: yaml

        network_foo:
          docker_network.present

    .. code-block:: yaml

        network_bar:
          docker_network.present:
            - name: bar
            - driver_opts:
                - com.docker.network.driver.mtu: "1450"
            - containers:
                - cont1
                - cont2

    .. code-block:: yaml

        network_baz:
          docker_network.present:
            - name: baz
            - driver_opts:
                - parent: eth0
            - gateway: "172.20.0.1"
            - ip_range: "172.20.0.128/25"
            - subnet: "172.20.0.0/24"
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    # Normalize list-of-single-key-dicts (SLS syntax) into a flat dict.
    if salt.utils.is_dictlist(driver_opts):
        driver_opts = salt.utils.repack_dictlist(driver_opts)
    # If any containers are specified, get details of each one, we need the Id and Name fields later
    if containers is not None:
        containers = [__salt__['docker.inspect_container'](c) for c in containers]
    networks = __salt__['docker.networks'](names=[name])
    log.trace(
        'docker_network.present: current networks: {0}'.format(networks)
    )
    # networks will contain all Docker networks which partially match 'name'.
    # We need to loop through to find the matching network, if there is one.
    network = None
    if networks:
        for network_iter in networks:
            if network_iter['Name'] == name:
                network = network_iter
                break
    # We might disconnect containers in the process of recreating the network, we'll need to keep track these containers
    # so we can reconnect them later.
    containers_disconnected = {}
    # If the network already exists
    if network is not None:
        log.debug('Network \'{0}\' already exists'.format(name))
        # Set the comment now to say that it already exists, if we need to recreate the network with new config we'll
        # update the comment later.
        ret['comment'] = 'Network \'{0}\' already exists'.format(name)
        # Update network details with result from network inspect, which will contain details of any containers
        # attached to the network.
        network = __salt__['docker.inspect_network'](network_id=network['Id'])
        log.trace('Details of \'{0}\' network: {1}'.format(name, network))
        # For the IPAM and driver config options which can be passed, check that if they are passed, they match the
        # current configuration.
        original_config = {}
        new_config = {}
        if driver and driver != network['Driver']:
            new_config['driver'] = driver
            original_config['driver'] = network['Driver']
        # NOTE(review): Docker may report driver defaults inside 'Options',
        # so a sparse driver_opts in the state can compare unequal and force
        # a recreate even when nothing meaningful changed — TODO confirm.
        if driver_opts and driver_opts != network['Options']:
            new_config['driver_opts'] = driver_opts
            original_config['driver_opts'] = network['Options']
        # Multiple IPAM configs is probably not that common so for now we'll only worry about the simple case where
        # there's a single IPAM config. If there's more than one (or none at all) then we'll bail out.
        if len(network['IPAM']['Config']) != 1:
            ret['comment'] = ('docker_network.present does only supports Docker networks with a single IPAM config,'
                              'network \'{0}\' has {1}'.format(name, len(network['IPAM']['Config'])))
            return ret
        ipam = network['IPAM']['Config'][0]
        if gateway and gateway != ipam['Gateway']:
            new_config['gateway'] = gateway
            original_config['gateway'] = ipam['Gateway']
        if subnet and subnet != ipam['Subnet']:
            new_config['subnet'] = subnet
            original_config['subnet'] = ipam['Subnet']
        if ip_range:
            # IPRange isn't always configured so check it's even set before attempting to compare it.
            if 'IPRange' in ipam and ip_range != ipam['IPRange']:
                new_config['ip_range'] = ip_range
                original_config['ip_range'] = ipam['IPRange']
            elif 'IPRange' not in ipam:
                new_config['ip_range'] = ip_range
                original_config['ip_range'] = ''
        # Any config drift means the network must be destroyed and recreated;
        # Docker networks cannot be reconfigured in place.
        if new_config != original_config:
            log.debug('New config is different to current;\nnew: {0}\ncurrent: {1}'.format(new_config, original_config))
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = 'Network {0} will be recreated with new config'.format(name)
                return ret
            remove_result = _remove_network(name, network['Containers'])
            if not remove_result['result']:
                return remove_result
            # We've removed the network, so there are now no containers attached to it.
            if network['Containers']:
                containers_disconnected = network['Containers']
                network['Containers'] = []
            try:
                __salt__['docker.create_network'](
                    name,
                    driver=driver,
                    driver_opts=driver_opts,
                    gateway=gateway,
                    ip_range=ip_range,
                    subnet=subnet)
            except Exception as exc:
                ret['comment'] = ('Failed to replace network \'{0}\': {1}'
                                  .format(name, exc))
                return ret
            ret['changes']['updated'] = {name: {'old': original_config, 'new': new_config}}
            ret['comment'] = 'Network \'{0}\' was replaced with updated config'.format(name)
    # If the network does not yet exist, we create it
    else:
        log.debug('The network \'{0}\' will be created'.format(name))
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = ('The network \'{0}\' will be created'.format(name))
            return ret
        try:
            ret['changes']['created'] = __salt__['docker.create_network'](
                name,
                driver=driver,
                driver_opts=driver_opts,
                gateway=gateway,
                ip_range=ip_range,
                subnet=subnet)
        except Exception as exc:
            ret['comment'] = ('Failed to create network \'{0}\': {1}'
                              .format(name, exc))
            return ret
    # Finally, figure out the list of containers which should now be connected.
    containers_to_connect = {}
    # If no containers were specified in the state but we have disconnected some in the process of recreating the
    # network, we should reconnect those containers.
    if containers is None and containers_disconnected:
        containers_to_connect = containers_disconnected
    # If containers were specified in the state, regardless of what we've disconnected, we should now just connect
    # the containers specified.
    elif containers:
        for container in containers:
            containers_to_connect[container['Id']] = container
    if network is None:
        network = {'Containers': {}}
    # At this point, if all the containers we want connected are already connected to the network, we can set our
    # result and finish.  (all() over an empty dict is True, so "no containers
    # requested" also succeeds here.)
    if all(c in network['Containers'] for c in containers_to_connect):
        ret['result'] = True
        return ret
    # If we've not exited by this point it's because we have containers which we need to connect to the network.
    result = True
    reconnected_containers = []
    connected_containers = []
    for container_id, container in six.iteritems(containers_to_connect):
        if container_id not in network['Containers']:
            try:
                connect_result = __salt__['docker.connect_container_to_network'](container_id, name)
                log.trace(
                    'docker.connect_container_to_network({0}, {1}) result: {2}'.
                    format(container, name, connect_result)
                )
                # If this container was one we disconnected earlier, add it to the reconnected list.
                if container_id in containers_disconnected:
                    reconnected_containers.append(container['Name'])
                # Otherwise add it to the connected list.
                else:
                    connected_containers.append(container['Name'])
            except Exception as exc:
                ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format(
                    container['Name'], name, exc))
                result = False
    # If we populated any of our container lists then add them to our list of changes.
    if connected_containers:
        ret['changes']['connected'] = connected_containers
    if reconnected_containers:
        ret['changes']['reconnected'] = reconnected_containers
    # Figure out if we removed any containers as a result of replacing the network and then not re-connecting the
    # containers, because they weren't specified in the state.
    disconnected_containers = []
    for container_id, container in six.iteritems(containers_disconnected):
        if container_id not in containers_to_connect:
            disconnected_containers.append(container['Name'])
    if disconnected_containers:
        ret['changes']['disconnected'] = disconnected_containers
    ret['result'] = result
    return ret
def absent(name, driver=None):
    '''
    Ensure that a network is absent.

    Returns a standard Salt state return dict with ``name``, ``changes``,
    ``result`` and ``comment`` keys.

    name
        Name of the network
    driver
        Ignored; accepted only for call-compatibility with ``present``.

    Usage Examples:

    .. code-block:: yaml

        network_foo:
          docker_network.absent
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    networks = __salt__['docker.networks'](names=[name])
    log.trace(
        'docker_network.absent: current networks: {0}'.format(networks)
    )
    # networks will contain all Docker networks which partially match 'name'.
    # We need to loop through to find the matching network, if there is one.
    network = None
    if networks:
        for network_iter in networks:
            if network_iter['Name'] == name:
                network = network_iter
                break
    if network is None:
        ret['result'] = True
        ret['comment'] = 'Network \'{0}\' already absent'.format(name)
        return ret
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = ('The network \'{0}\' will be removed'.format(name))
        return ret
    # Use the containers of the exact-match network found above.  The
    # previous code indexed networks[0], which could be an unrelated
    # network that merely shares a name prefix with the requested one.
    return _remove_network(network=name, containers=network['Containers'])
def _remove_network(network, containers=None):
    '''
    Remove network, removing any specified containers from it beforehand

    network
        Name of the network to remove.
    containers
        Iterable of containers (ids/names as accepted by
        ``docker.disconnect_container_from_network``) to detach first.

    Returns a standard Salt state return dict.
    '''
    ret = {'name': network,
           'changes': {},
           'result': False,
           'comment': ''}
    if containers is None:
        containers = []
    for container in containers:
        try:
            # NOTE(review): each iteration overwrites ret['changes']['disconnected'],
            # so only the last container's disconnect result is reported — TODO
            # consider accumulating into a list.
            ret['changes']['disconnected'] = __salt__['docker.disconnect_container_from_network'](container, network)
        except Exception as exc:
            # NOTE(review): a disconnect failure is recorded in the comment but
            # the loop continues and the network removal below is still
            # attempted (best-effort behavior) — confirm this is intentional.
            ret['comment'] = ('Failed to disconnect container \'{0}\' from network \'{1}\' {2}'.format(
                container, network, exc))
    try:
        ret['changes']['removed'] = __salt__['docker.remove_network'](network)
        ret['result'] = True
    except Exception as exc:
        ret['comment'] = ('Failed to remove network \'{0}\': {1}'
                          .format(network, exc))
    return ret
|
# -*- coding: utf-8 -*-
'''
Management of Docker networks
.. versionadded:: 2017.7.0
:depends: docker_ Python module
.. note::
Older releases of the Python bindings for Docker were called docker-py_ in
PyPI. All releases of docker_, and releases of docker-py_ >= 1.6.0 are
supported. These python bindings can easily be installed using
:py:func:`pip.install <salt.modules.pip.install>`:
.. code-block:: bash
salt myminion pip.install docker
To upgrade from docker-py_ to docker_, you must first uninstall docker-py_,
and then install docker_:
.. code-block:: bash
salt myminion pip.uninstall docker-py
salt myminion pip.install docker
.. _docker: https://pypi.python.org/pypi/docker
.. _docker-py: https://pypi.python.org/pypi/docker-py
These states were moved from the :mod:`docker <salt.states.docker>` state
module (formerly called **dockerng**) in the 2017.7.0 release.
'''
from __future__ import absolute_import
import logging
# Import salt libs
from salt.ext import six
import salt.utils
# Enable proper logging
log = logging.getLogger(__name__) # pylint: disable=invalid-name
# Define the module's virtual name
__virtualname__ = 'docker_network'
__virtual_aliases__ = ('moby_network',)
def __virtual__():
'''
Only load if the docker execution module is available
'''
if 'docker.version' in __salt__:
return __virtualname__
return (False, __salt__.missing_fun_string('docker.version'))
def present(name,
driver=None,
driver_opts=None,
gateway=None,
ip_range=None,
subnet=None,
containers=None):
'''
Ensure that a network is present.
name
Name of the network
driver
Type of driver for that network.
driver_opts
Options for the network driver.
gateway
IPv4 or IPv6 gateway for the master subnet
ip_range
Allocate container IP from a sub-range within the subnet
containers:
List of container names that should be part of this network
subnet:
Subnet in CIDR format that represents a network segment
Usage Examples:
.. code-block:: yaml
network_foo:
docker_network.present
.. code-block:: yaml
network_bar:
docker_network.present
- name: bar
- driver_opts:
- com.docker.network.driver.mtu: "1450"
- containers:
- cont1
- cont2
.. code-block:: yaml
network_baz:
docker_network.present
- name: baz
- driver_opts:
- parent: eth0
- gateway: "172.20.0.1"
- ip_range: "172.20.0.128/25"
- subnet: "172.20.0.0/24"
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if salt.utils.is_dictlist(driver_opts):
driver_opts = salt.utils.repack_dictlist(driver_opts)
# If any containers are specified, get details of each one, we need the Id and Name fields later
if containers is not None:
containers = [__salt__['docker.inspect_container'](c) for c in containers]
networks = __salt__['docker.networks'](names=[name])
log.trace(
'docker_network.present: current networks: {0}'.format(networks)
)
# networks will contain all Docker networks which partially match 'name'.
# We need to loop through to find the matching network, if there is one.
network = None
if networks:
for network_iter in networks:
if network_iter['Name'] == name:
network = network_iter
break
# We might disconnect containers in the process of recreating the network, we'll need to keep track these containers
# so we can reconnect them later.
containers_disconnected = {}
# If the network already exists
if network is not None:
log.debug('Network \'{0}\' already exists'.format(name))
# Set the comment now to say that it already exists, if we need to recreate the network with new config we'll
# update the comment later.
ret['comment'] = 'Network \'{0}\' already exists'.format(name)
# Update network details with result from network inspect, which will contain details of any containers
# attached to the network.
network = __salt__['docker.inspect_network'](network_id=network['Id'])
log.trace('Details of \'{0}\' network: {1}'.format(name, network))
# For the IPAM and driver config options which can be passed, check that if they are passed, they match the
# current configuration.
original_config = {}
new_config = {}
if driver and driver != network['Driver']:
new_config['driver'] = driver
original_config['driver'] = network['Driver']
if driver_opts and driver_opts != network['Options']:
new_config['driver_opts'] = driver_opts
original_config['driver_opts'] = network['Options']
# Multiple IPAM configs is probably not that common so for now we'll only worry about the simple case where
# there's a single IPAM config. If there's more than one (or none at all) then we'll bail out.
if len(network['IPAM']['Config']) != 1:
ret['comment'] = ('docker_network.present does only supports Docker networks with a single IPAM config,'
'network \'{0}\' has {1}'.format(name, len(network['IPAM']['Config'])))
return ret
ipam = network['IPAM']['Config'][0]
if gateway and gateway != ipam['Gateway']:
new_config['gateway'] = gateway
original_config['gateway'] = ipam['Gateway']
if subnet and subnet != ipam['Subnet']:
new_config['subnet'] = subnet
original_config['subnet'] = ipam['Subnet']
if ip_range:
# IPRange isn't always configured so check it's even set before attempting to compare it.
if 'IPRange' in ipam and ip_range != ipam['IPRange']:
new_config['ip_range'] = ip_range
original_config['ip_range'] = ipam['IPRange']
elif 'IPRange' not in ipam:
new_config['ip_range'] = ip_range
original_config['ip_range'] = ''
if new_config != original_config:
log.debug('New config is different to current;\nnew: {0}\ncurrent: {1}'.format(new_config, original_config))
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Network {0} will be recreated with new config'.format(name)
return ret
remove_result = _remove_network(name, network['Containers'])
if not remove_result['result']:
return remove_result
# We've removed the network, so there are now no containers attached to it.
if network['Containers']:
containers_disconnected = network['Containers']
network['Containers'] = []
try:
__salt__['docker.create_network'](
name,
driver=driver,
driver_opts=driver_opts,
gateway=gateway,
ip_range=ip_range,
subnet=subnet)
except Exception as exc:
ret['comment'] = ('Failed to replace network \'{0}\': {1}'
.format(name, exc))
return ret
ret['changes']['updated'] = {name: {'old': original_config, 'new': new_config}}
ret['comment'] = 'Network \'{0}\' was replaced with updated config'.format(name)
# If the network does not yet exist, we create it
else:
log.debug('The network \'{0}\' will be created'.format(name))
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('The network \'{0}\' will be created'.format(name))
return ret
try:
ret['changes']['created'] = __salt__['docker.create_network'](
name,
driver=driver,
driver_opts=driver_opts,
gateway=gateway,
ip_range=ip_range,
subnet=subnet)
except Exception as exc:
ret['comment'] = ('Failed to create network \'{0}\': {1}'
.format(name, exc))
return ret
# Finally, figure out the list of containers which should now be connected.
containers_to_connect = {}
# If no containers were specified in the state but we have disconnected some in the process of recreating the
# network, we should reconnect those containers.
if containers is None and containers_disconnected:
containers_to_connect = containers_disconnected
# If containers were specified in the state, regardless of what we've disconnected, we should now just connect
# the containers specified.
elif containers:
for container in containers:
containers_to_connect[container['Id']] = container
if network is None:
network = {'Containers': {}}
# At this point, if all the containers we want connected are already connected to the network, we can set our
# result and finish.
if all(c in network['Containers'] for c in containers_to_connect):
ret['result'] = True
return ret
# If we've not exited by this point it's because we have containers which we need to connect to the network.
result = True
reconnected_containers = []
connected_containers = []
for container_id, container in six.iteritems(containers_to_connect):
if container_id not in network['Containers']:
try:
connect_result = __salt__['docker.connect_container_to_network'](container_id, name)
log.trace(
'docker.connect_container_to_network({0}, {1}) result: {2}'.
format(container, name, connect_result)
)
# If this container was one we disconnected earlier, add it to the reconnected list.
if container_id in containers_disconnected:
reconnected_containers.append(container['Name'])
# Otherwise add it to the connected list.
else:
connected_containers.append(container['Name'])
except Exception as exc:
ret['comment'] = ('Failed to connect container \'{0}\' to network \'{1}\' {2}'.format(
container['Name'], name, exc))
result = False
# If we populated any of our container lists then add them to our list of changes.
if connected_containers:
ret['changes']['connected'] = connected_containers
if reconnected_containers:
ret['changes']['reconnected'] = reconnected_containers
# Figure out if we removed any containers as a result of replacing the network and then not re-connecting the
# containers, because they weren't specified in the state.
disconnected_containers = []
for container_id, container in six.iteritems(containers_disconnected):
if container_id not in containers_to_connect:
disconnected_containers.append(container['Name'])
if disconnected_containers:
ret['changes']['disconnected'] = disconnected_containers
ret['result'] = result
return ret
def absent(name, driver=None):
'''
Ensure that a network is absent.
name
Name of the network
Usage Examples:
.. code-block:: yaml
network_foo:
docker_network.absent
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
networks = __salt__['docker.networks'](names=[name])
log.trace(
'docker_network.absent: current networks: {0}'.format(networks)
)
# networks will contain all Docker networks which partially match 'name'.
# We need to loop through to find the matching network, if there is one.
network = None
if networks:
for network_iter in networks:
if network_iter['Name'] == name:
network = network_iter
break
if network is None:
ret['result'] = True
ret['comment'] = 'Network \'{0}\' already absent'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('The network \'{0}\' will be removed'.format(name))
return ret
return _remove_network(network=name, containers=networks[0]['Containers'])
def _remove_network(network, containers=None):
'''
Remove network, removing any specified containers from it beforehand
'''
ret = {'name': network,
'changes': {},
'result': False,
'comment': ''}
if containers is None:
containers = []
for container in containers:
try:
ret['changes']['disconnected'] = __salt__['docker.disconnect_container_from_network'](container, network)
except Exception as exc:
ret['comment'] = ('Failed to disconnect container \'{0}\' from network \'{1}\' {2}'.format(
container, network, exc))
try:
ret['changes']['removed'] = __salt__['docker.remove_network'](network)
ret['result'] = True
except Exception as exc:
ret['comment'] = ('Failed to remove network \'{0}\': {1}'
.format(network, exc))
return ret
|
en
| 0.870484
|
# -*- coding: utf-8 -*- Management of Docker networks .. versionadded:: 2017.7.0 :depends: docker_ Python module .. note:: Older releases of the Python bindings for Docker were called docker-py_ in PyPI. All releases of docker_, and releases of docker-py_ >= 1.6.0 are supported. These python bindings can easily be installed using :py:func:`pip.install <salt.modules.pip.install>`: .. code-block:: bash salt myminion pip.install docker To upgrade from docker-py_ to docker_, you must first uninstall docker-py_, and then install docker_: .. code-block:: bash salt myminion pip.uninstall docker-py salt myminion pip.install docker .. _docker: https://pypi.python.org/pypi/docker .. _docker-py: https://pypi.python.org/pypi/docker-py These states were moved from the :mod:`docker <salt.states.docker>` state module (formerly called **dockerng**) in the 2017.7.0 release. # Import salt libs # Enable proper logging # pylint: disable=invalid-name # Define the module's virtual name Only load if the docker execution module is available Ensure that a network is present. name Name of the network driver Type of driver for that network. driver_opts Options for the network driver. gateway IPv4 or IPv6 gateway for the master subnet ip_range Allocate container IP from a sub-range within the subnet containers: List of container names that should be part of this network subnet: Subnet in CIDR format that represents a network segment Usage Examples: .. code-block:: yaml network_foo: docker_network.present .. code-block:: yaml network_bar: docker_network.present - name: bar - driver_opts: - com.docker.network.driver.mtu: "1450" - containers: - cont1 - cont2 .. 
code-block:: yaml network_baz: docker_network.present - name: baz - driver_opts: - parent: eth0 - gateway: "172.20.0.1" - ip_range: "172.20.0.128/25" - subnet: "172.20.0.0/24" # If any containers are specified, get details of each one, we need the Id and Name fields later # networks will contain all Docker networks which partially match 'name'. # We need to loop through to find the matching network, if there is one. # We might disconnect containers in the process of recreating the network, we'll need to keep track these containers # so we can reconnect them later. # If the network already exists # Set the comment now to say that it already exists, if we need to recreate the network with new config we'll # update the comment later. # Update network details with result from network inspect, which will contain details of any containers # attached to the network. # For the IPAM and driver config options which can be passed, check that if they are passed, they match the # current configuration. # Multiple IPAM configs is probably not that common so for now we'll only worry about the simple case where # there's a single IPAM config. If there's more than one (or none at all) then we'll bail out. # IPRange isn't always configured so check it's even set before attempting to compare it. # We've removed the network, so there are now no containers attached to it. # If the network does not yet exist, we create it # Finally, figure out the list of containers which should now be connected. # If no containers were specified in the state but we have disconnected some in the process of recreating the # network, we should reconnect those containers. # If containers were specified in the state, regardless of what we've disconnected, we should now just connect # the containers specified. # At this point, if all the containers we want connected are already connected to the network, we can set our # result and finish. 
# If we've not exited by this point it's because we have containers which we need to connect to the network. # If this container was one we disconnected earlier, add it to the reconnected list. # Otherwise add it to the connected list. # If we populated any of our container lists then add them to our list of changes. # Figure out if we removed any containers as a result of replacing the network and then not re-connecting the # containers, because they weren't specified in the state. Ensure that a network is absent. name Name of the network Usage Examples: .. code-block:: yaml network_foo: docker_network.absent # networks will contain all Docker networks which partially match 'name'. # We need to loop through to find the matching network, if there is one. Remove network, removing any specified containers from it beforehand
| 2.140018
| 2
|
tests/decorators.py
|
AFriemann/simple_tools
| 1
|
6629493
|
<reponame>AFriemann/simple_tools<filename>tests/decorators.py
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
.. module:: TODO
:platform: Unix
:synopsis: TODO.
.. moduleauthor:: <NAME> <EMAIL>
"""
import unittest
import time
from simple_tools.decorators.errors import not_implemented
from simple_tools.decorators.time import timeout
from simple_tools.exceptions import TimeoutException
class TimeDecoratorTestCase(unittest.TestCase):
    """Tests for simple_tools.decorators.time."""

    def test_timeout_times_out_function(self):
        """A task that runs longer than its limit raises TimeoutException."""
        @timeout(2)
        def long_running_task():
            time.sleep(3)

        self.assertRaises(TimeoutException, long_running_task)
class ErrorDecoratorTestCase(unittest.TestCase):
    """Tests for simple_tools.decorators.errors."""

    def test_not_implemented_decorator(self):
        """Calling a @not_implemented function raises NotImplementedError."""
        @not_implemented
        def stub():
            pass

        self.assertRaises(NotImplementedError, stub)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 fenc=utf-8
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
.. module:: TODO
:platform: Unix
:synopsis: TODO.
.. moduleauthor:: <NAME> <EMAIL>
"""
import unittest
import time
from simple_tools.decorators.errors import not_implemented
from simple_tools.decorators.time import timeout
from simple_tools.exceptions import TimeoutException
class TimeDecoratorTestCase(unittest.TestCase):
def test_timeout_times_out_function(self):
@timeout(2)
def task():
time.sleep(3)
with self.assertRaises(TimeoutException):
task()
class ErrorDecoratorTestCase(unittest.TestCase):
def test_not_implemented_decorator(self):
@not_implemented
def foo():
pass
with self.assertRaises(NotImplementedError):
foo()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 fenc=utf-8
|
en
| 0.144085
|
#! /usr/bin/env python3 # -*- coding: utf-8 -*- .. module:: TODO :platform: Unix :synopsis: TODO. .. moduleauthor:: <NAME> <EMAIL> # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 fenc=utf-8
| 2.43755
| 2
|
net_sphere.py
|
ntubiolin/InsightFace_Pytorch
| 0
|
6629494
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn import Parameter
import math
def myphi(x, m):
    """Approximate cos(m*x) with a truncated Taylor series.

    Fallback for AngleLinear when phiflag is False (the closed-form
    Chebyshev table is used otherwise).

    Args:
        x: angle in radians (float or tensor).
        m: angular-margin multiplier.

    Returns:
        Taylor approximation of cos(m*x); accurate for small |m*x|.
    """
    x = x * m
    # cos(x) = 1 - x^2/2! + x^4/4! - x^6/6! + x^8/8! - x^10/10! + ...
    # BUG FIX: the original final term was x**9/9!; the cosine series
    # contains only even powers, so the correct term is x**10/10!.
    return 1 - x**2/math.factorial(2) + x**4/math.factorial(4) \
        - x**6/math.factorial(6) + x**8/math.factorial(8) \
        - x**10/math.factorial(10)
class AngleLinear(nn.Module):
    """SphereFace angular-margin linear layer (A-Softmax logits).

    Maps a feature batch to per-class scores, producing both the plain
    cosine logits and margin-adjusted "phi" logits in which the angle
    between feature and class weight is multiplied by ``m``.

    Args:
        in_features: feature dimension F of the input.
        out_features: number of classes.
        m: integer angular-margin multiplier (0..5 supported by the
            closed-form table ``mlambda``).
        phiflag: if True, use the exact Chebyshev expansion of
            cos(m*theta); otherwise use the Taylor fallback ``myphi``.
    """
    def __init__(self, in_features, out_features, m = 4, phiflag=True):
        super(AngleLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Class-weight matrix, shape (F, Classnum).
        self.kernel = Parameter(torch.Tensor(in_features,out_features))
        # Random init, then renormalize each column toward unit L2 norm.
        self.kernel.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
        self.phiflag = phiflag
        self.m = m
        # mlambda[m](cos t) == cos(m*t): Chebyshev polynomials T_m.
        self.mlambda = [
            lambda x: x**0,
            lambda x: x**1,
            lambda x: 2*x**2-1,
            lambda x: 4*x**3-3*x,
            lambda x: 8*x**4-8*x**2+1,
            lambda x: 16*x**5-20*x**3+5*x
            ]
    def forward(self, input):
        """Return (cos_theta, phi_theta), each of shape (B, Classnum)."""
        x = input # size=(B,F) F is feature len
        w = self.kernel # size=(F,Classnum) F=in_features Classnum=out_features
        # Re-normalize weight columns so each class vector has unit norm.
        ww = w.renorm(2,1,1e-5).mul(1e5)
        xlen = x.pow(2).sum(1).pow(0.5) # size=B
        wlen = ww.pow(2).sum(0).pow(0.5) # size=Classnum
        cos_theta = x.mm(ww) # size=(B,Classnum)
        cos_theta = cos_theta / xlen.view(-1,1) / wlen.view(1,-1)
        cos_theta = cos_theta.clamp(-1,1)
        if self.phiflag:
            # Exact cos(m*theta) via the polynomial table.
            cos_m_theta = self.mlambda[self.m](cos_theta)
            theta = Variable(cos_theta.data.acos())
            # k picks the monotonic branch so phi decreases with theta:
            # phi(theta) = (-1)^k * cos(m*theta) - 2k on [k*pi/m, (k+1)*pi/m].
            k = (self.m*theta/3.14159265).floor()
            n_one = k*0.0 - 1
            phi_theta = (n_one**k) * cos_m_theta - 2*k
        else:
            # Taylor-series fallback for cos(m*theta), clamped to a sane range.
            theta = cos_theta.acos()
            phi_theta = myphi(theta,self.m)
            phi_theta = phi_theta.clamp(-1*self.m,1)
        # Scale both logits back by the feature norm |x|.
        cos_theta = cos_theta * xlen.view(-1,1)
        phi_theta = phi_theta * xlen.view(-1,1)
        output = (cos_theta,phi_theta)
        return output # tuple of two (B,Classnum) tensors
class AngleLoss(nn.Module):
    """A-Softmax (SphereFace) loss with focal-style modulation.

    Consumes the (cos_theta, phi_theta) pair emitted by AngleLinear and
    anneals from plain softmax toward the margin logits as training
    proceeds (controlled by ``self.lamb``).

    Args:
        gamma: focal-loss exponent; 0 reduces to plain cross-entropy.
    """
    def __init__(self, gamma=0):
        super(AngleLoss, self).__init__()
        self.gamma = gamma
        self.it = 0  # forward-call counter driving the annealing schedule
        self.LambdaMin = 5.0  # floor of the annealing schedule
        self.LambdaMax = 1500.0  # starting value of the schedule
        self.lamb = 1500.0  # current blend weight (large => mostly cos logits)
    def forward(self, input, target):
        """Return the scalar loss for a batch.

        Args:
            input: tuple (cos_theta, phi_theta), each of shape (B, Classnum).
            target: class indices, shape (B,) or (B, 1).
        """
        self.it += 1
        cos_theta,phi_theta = input
        target = target.view(-1,1) #size=(B,1)
        # Build a one-hot mask of the target-class positions.
        index = cos_theta.data * 0.0 #size=(B,Classnum)
        try:
            index.scatter_(1,target.data.view(-1,1),1)
        except:
            # NOTE(review): bare except silently tolerates any scatter
            # failure and only logs shapes; consider narrowing it.
            print('>>> In AngleLoss')
            print(index.size())
            print(target.data.view(-1, 1).size())
        index = index.byte()
        index = Variable(index)
        # Annealing: lamb decays 1500 -> 5 as iterations accumulate.
        self.lamb = max(self.LambdaMin,self.LambdaMax/(1+0.1*self.it ))
        output = cos_theta * 1.0 #size=(B,Classnum)
        # Blend the target-class logit between cos_theta and phi_theta:
        # output = (lamb*cos + phi) / (1 + lamb) at target positions.
        output[index] -= cos_theta[index]*(1.0+0)/(1+self.lamb)
        output[index] += phi_theta[index]*(1.0+0)/(1+self.lamb)
        logpt = F.log_softmax(output, dim=1)
        logpt = logpt.gather(1,target)
        logpt = logpt.view(-1)
        pt = Variable(logpt.data.exp())
        # Focal modulation (1 - p)^gamma; gamma == 0 gives plain NLL.
        loss = -1 * (1-pt)**self.gamma * logpt
        loss = loss.mean()
        return loss
class sphere20a(nn.Module):
    """20-layer SphereFace CNN backbone for 112x96 face crops.

    Four stages of stride-2 entry convs followed by identity-added conv
    pairs reduce B*3*112*96 down to B*512*7*6; a 1x1 conv then projects
    to 32 channels and the result is flattened into the returned
    feature vector.

    Args:
        classnum: number of identity classes (kept for the commented-out
            AngleLinear head; unused in the current forward pass).
        returnGrid: if True, forward() also returns the pre-flatten
            B*32*7*6 feature map.
    """
    def __init__(self,classnum=10574, returnGrid=False):
        super(sphere20a, self).__init__()
        self.classnum = classnum
        self.returnGrid = returnGrid
        #input = B*3*112*96
        self.conv1_1 = nn.Conv2d(3,64,3,2,1) #=>B*64*56*48
        self.relu1_1 = nn.PReLU(64)
        self.conv1_2 = nn.Conv2d(64,64,3,1,1)
        self.relu1_2 = nn.PReLU(64)
        self.conv1_3 = nn.Conv2d(64,64,3,1,1)
        self.relu1_3 = nn.PReLU(64)
        self.conv2_1 = nn.Conv2d(64,128,3,2,1) #=>B*128*28*24
        self.relu2_1 = nn.PReLU(128)
        self.conv2_2 = nn.Conv2d(128,128,3,1,1)
        self.relu2_2 = nn.PReLU(128)
        self.conv2_3 = nn.Conv2d(128,128,3,1,1)
        self.relu2_3 = nn.PReLU(128)
        self.conv2_4 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
        self.relu2_4 = nn.PReLU(128)
        self.conv2_5 = nn.Conv2d(128,128,3,1,1)
        self.relu2_5 = nn.PReLU(128)
        self.conv3_1 = nn.Conv2d(128,256,3,2,1) #=>B*256*14*12
        self.relu3_1 = nn.PReLU(256)
        self.conv3_2 = nn.Conv2d(256,256,3,1,1)
        self.relu3_2 = nn.PReLU(256)
        self.conv3_3 = nn.Conv2d(256,256,3,1,1)
        self.relu3_3 = nn.PReLU(256)
        self.conv3_4 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
        self.relu3_4 = nn.PReLU(256)
        self.conv3_5 = nn.Conv2d(256,256,3,1,1)
        self.relu3_5 = nn.PReLU(256)
        self.conv3_6 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
        self.relu3_6 = nn.PReLU(256)
        self.conv3_7 = nn.Conv2d(256, 256, 3, 1, 1)
        self.relu3_7 = nn.PReLU(256)
        self.conv3_8 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
        self.relu3_8 = nn.PReLU(256)
        self.conv3_9 = nn.Conv2d(256, 256, 3, 1, 1)
        self.relu3_9 = nn.PReLU(256)
        self.conv4_1 = nn.Conv2d(256, 512, 3, 2, 1) # =>B*512*7*6
        self.relu4_1 = nn.PReLU(512)
        self.conv4_2 = nn.Conv2d(512, 512, 3, 1, 1)
        self.relu4_2 = nn.PReLU(512)
        self.conv4_3 = nn.Conv2d(512, 512, 3, 1, 1)
        self.relu4_3 = nn.PReLU(512)
        # NOTE(review): fc5 is constructed but never used in forward();
        # the flatten output is taken directly from the 1x1 conv.
        self.fc5 = nn.Linear(512*7*6, 512)
        # self.fc6 = AngleLinear(512, self.classnum)
        self.conv1x1 = nn.Sequential(nn.Conv2d(512, 32, (1, 1), 1, 0),
                                     nn.BatchNorm2d(32),
                                     nn.PReLU(32))
    def forward(self, x):
        """Return the flattened feature (and the B*32*7*6 grid if requested)."""
        # Stage 1: stride-2 entry conv plus one residual pair.
        x = self.relu1_1(self.conv1_1(x))
        x = x + self.relu1_3(self.conv1_3(self.relu1_2(self.conv1_2(x))))
        # Stage 2: stride-2 entry conv plus two residual pairs.
        x = self.relu2_1(self.conv2_1(x))
        x = x + self.relu2_3(self.conv2_3(self.relu2_2(self.conv2_2(x))))
        x = x + self.relu2_5(self.conv2_5(self.relu2_4(self.conv2_4(x))))
        # Stage 3: stride-2 entry conv plus four residual pairs.
        x = self.relu3_1(self.conv3_1(x))
        x = x + self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(x))))
        x = x + self.relu3_5(self.conv3_5(self.relu3_4(self.conv3_4(x))))
        x = x + self.relu3_7(self.conv3_7(self.relu3_6(self.conv3_6(x))))
        x = x + self.relu3_9(self.conv3_9(self.relu3_8(self.conv3_8(x))))
        # Stage 4: stride-2 entry conv plus one residual pair.
        x = self.relu4_1(self.conv4_1(x))
        x = x + self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(x))))
        # Project 512 channels down to 32 with a 1x1 conv.
        x = self.conv1x1(x)
        grid_feat = x
        x = x.flatten(1)
        # x = self.fc5(x)
        if self.returnGrid:
            return x, grid_feat
        # x = self.fc6(x)
        return x
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn import Parameter
import math
def myphi(x,m):
x = x * m
return 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6) + \
x**8/math.factorial(8) - x**9/math.factorial(9)
class AngleLinear(nn.Module):
def __init__(self, in_features, out_features, m = 4, phiflag=True):
super(AngleLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.kernel = Parameter(torch.Tensor(in_features,out_features))
self.kernel.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
self.phiflag = phiflag
self.m = m
self.mlambda = [
lambda x: x**0,
lambda x: x**1,
lambda x: 2*x**2-1,
lambda x: 4*x**3-3*x,
lambda x: 8*x**4-8*x**2+1,
lambda x: 16*x**5-20*x**3+5*x
]
def forward(self, input):
x = input # size=(B,F) F is feature len
w = self.kernel # size=(F,Classnum) F=in_features Classnum=out_features
ww = w.renorm(2,1,1e-5).mul(1e5)
xlen = x.pow(2).sum(1).pow(0.5) # size=B
wlen = ww.pow(2).sum(0).pow(0.5) # size=Classnum
cos_theta = x.mm(ww) # size=(B,Classnum)
cos_theta = cos_theta / xlen.view(-1,1) / wlen.view(1,-1)
cos_theta = cos_theta.clamp(-1,1)
if self.phiflag:
cos_m_theta = self.mlambda[self.m](cos_theta)
theta = Variable(cos_theta.data.acos())
k = (self.m*theta/3.14159265).floor()
n_one = k*0.0 - 1
phi_theta = (n_one**k) * cos_m_theta - 2*k
else:
theta = cos_theta.acos()
phi_theta = myphi(theta,self.m)
phi_theta = phi_theta.clamp(-1*self.m,1)
cos_theta = cos_theta * xlen.view(-1,1)
phi_theta = phi_theta * xlen.view(-1,1)
output = (cos_theta,phi_theta)
return output # size=(B,Classnum,2)
class AngleLoss(nn.Module):
def __init__(self, gamma=0):
super(AngleLoss, self).__init__()
self.gamma = gamma
self.it = 0
self.LambdaMin = 5.0
self.LambdaMax = 1500.0
self.lamb = 1500.0
def forward(self, input, target):
self.it += 1
cos_theta,phi_theta = input
target = target.view(-1,1) #size=(B,1)
index = cos_theta.data * 0.0 #size=(B,Classnum)
try:
index.scatter_(1,target.data.view(-1,1),1)
except:
print('>>> In AngleLoss')
print(index.size())
print(target.data.view(-1, 1).size())
index = index.byte()
index = Variable(index)
self.lamb = max(self.LambdaMin,self.LambdaMax/(1+0.1*self.it ))
output = cos_theta * 1.0 #size=(B,Classnum)
output[index] -= cos_theta[index]*(1.0+0)/(1+self.lamb)
output[index] += phi_theta[index]*(1.0+0)/(1+self.lamb)
logpt = F.log_softmax(output, dim=1)
logpt = logpt.gather(1,target)
logpt = logpt.view(-1)
pt = Variable(logpt.data.exp())
loss = -1 * (1-pt)**self.gamma * logpt
loss = loss.mean()
return loss
class sphere20a(nn.Module):
def __init__(self,classnum=10574, returnGrid=False):
super(sphere20a, self).__init__()
self.classnum = classnum
self.returnGrid = returnGrid
#input = B*3*112*96
self.conv1_1 = nn.Conv2d(3,64,3,2,1) #=>B*64*56*48
self.relu1_1 = nn.PReLU(64)
self.conv1_2 = nn.Conv2d(64,64,3,1,1)
self.relu1_2 = nn.PReLU(64)
self.conv1_3 = nn.Conv2d(64,64,3,1,1)
self.relu1_3 = nn.PReLU(64)
self.conv2_1 = nn.Conv2d(64,128,3,2,1) #=>B*128*28*24
self.relu2_1 = nn.PReLU(128)
self.conv2_2 = nn.Conv2d(128,128,3,1,1)
self.relu2_2 = nn.PReLU(128)
self.conv2_3 = nn.Conv2d(128,128,3,1,1)
self.relu2_3 = nn.PReLU(128)
self.conv2_4 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_4 = nn.PReLU(128)
self.conv2_5 = nn.Conv2d(128,128,3,1,1)
self.relu2_5 = nn.PReLU(128)
self.conv3_1 = nn.Conv2d(128,256,3,2,1) #=>B*256*14*12
self.relu3_1 = nn.PReLU(256)
self.conv3_2 = nn.Conv2d(256,256,3,1,1)
self.relu3_2 = nn.PReLU(256)
self.conv3_3 = nn.Conv2d(256,256,3,1,1)
self.relu3_3 = nn.PReLU(256)
self.conv3_4 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_4 = nn.PReLU(256)
self.conv3_5 = nn.Conv2d(256,256,3,1,1)
self.relu3_5 = nn.PReLU(256)
self.conv3_6 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_6 = nn.PReLU(256)
self.conv3_7 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_7 = nn.PReLU(256)
self.conv3_8 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_8 = nn.PReLU(256)
self.conv3_9 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_9 = nn.PReLU(256)
self.conv4_1 = nn.Conv2d(256, 512, 3, 2, 1) # =>B*512*7*6
self.relu4_1 = nn.PReLU(512)
self.conv4_2 = nn.Conv2d(512, 512, 3, 1, 1)
self.relu4_2 = nn.PReLU(512)
self.conv4_3 = nn.Conv2d(512, 512, 3, 1, 1)
self.relu4_3 = nn.PReLU(512)
self.fc5 = nn.Linear(512*7*6, 512)
# self.fc6 = AngleLinear(512, self.classnum)
self.conv1x1 = nn.Sequential(nn.Conv2d(512, 32, (1, 1), 1, 0),
nn.BatchNorm2d(32),
nn.PReLU(32))
def forward(self, x):
x = self.relu1_1(self.conv1_1(x))
x = x + self.relu1_3(self.conv1_3(self.relu1_2(self.conv1_2(x))))
x = self.relu2_1(self.conv2_1(x))
x = x + self.relu2_3(self.conv2_3(self.relu2_2(self.conv2_2(x))))
x = x + self.relu2_5(self.conv2_5(self.relu2_4(self.conv2_4(x))))
x = self.relu3_1(self.conv3_1(x))
x = x + self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(x))))
x = x + self.relu3_5(self.conv3_5(self.relu3_4(self.conv3_4(x))))
x = x + self.relu3_7(self.conv3_7(self.relu3_6(self.conv3_6(x))))
x = x + self.relu3_9(self.conv3_9(self.relu3_8(self.conv3_8(x))))
x = self.relu4_1(self.conv4_1(x))
x = x + self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(x))))
# pass the feature into 1x1 conv
x = self.conv1x1(x)
grid_feat = x
x = x.flatten(1)
# x = self.fc5(x)
if self.returnGrid:
return x, grid_feat
# x = self.fc6(x)
return x
|
en
| 0.266771
|
# size=(B,F) F is feature len # size=(F,Classnum) F=in_features Classnum=out_features # size=B # size=Classnum # size=(B,Classnum) # size=(B,Classnum,2) #size=(B,1) #size=(B,Classnum) #size=(B,Classnum) #input = B*3*112*96 #=>B*64*56*48 #=>B*128*28*24 #=>B*128*28*24 #=>B*256*14*12 #=>B*256*14*12 # =>B*256*14*12 # =>B*256*14*12 # =>B*512*7*6 # self.fc6 = AngleLinear(512, self.classnum) # pass the feature into 1x1 conv # x = self.fc5(x) # x = self.fc6(x)
| 2.421421
| 2
|
finrl_meta/data_processors/processor_binance.py
|
ihopethiswillfi/FinRL-Meta
| 0
|
6629495
|
<filename>finrl_meta/data_processors/processor_binance.py
import pandas as pd
import requests
import json
from datetime import datetime,timedelta
from talib.abstract import MACD, RSI, CCI, DX
import numpy as np
class BinanceProcessor():
    """Download and prepare Binance kline (candlestick) data.

    Follows the FinRL data-processor interface: download_data ->
    clean_data -> add_technical_indicator -> df_to_array.
    """
    def __init__(self):
        # Public Binance REST endpoint for candlestick data.
        self.url = "https://api.binance.com/api/v3/klines"
    #main functions
    def download_data(self, ticker_list, start_date, end_date,
                      time_interval):
        """Fetch klines for each ticker and concatenate into one DataFrame.

        Args:
            ticker_list: iterable of Binance symbols, e.g. ['BTCUSDT'].
            start_date / end_date: 'YYYY-MM-DD' strings.
            time_interval: Binance interval string, e.g. '1m', '1d'.

        Returns:
            DataFrame with OHLCV rows plus a 'tic' column.
        """
        # NOTE(review): strptime yields naive datetimes, so timestamps are
        # interpreted in the local timezone — confirm UTC is not required.
        startTime = datetime.strptime(start_date, '%Y-%m-%d')
        endTime = datetime.strptime(end_date, '%Y-%m-%d')
        self.start_time = self.stringify_dates(startTime)
        self.end_time = self.stringify_dates(endTime)
        self.interval = time_interval
        self.limit = 1440  # max rows requested per API call
        df_list =[]
        for i in ticker_list:
            hist_data = self.dataframe_with_limit(symbol=i)
            # Drop the last (possibly incomplete) candle.
            df = hist_data.iloc[:-1]
            df = df.dropna()
            df['tic'] = i
            df_list.append(df)
        final_df = pd.concat(df_list, axis=0, ignore_index=True)
        return final_df
    def clean_data(self, df):
        """Drop rows containing NaN values."""
        df = df.dropna()
        return df
    def add_technical_indicator(self, df, tech_indicator_list):
        """Append MACD, RSI, CCI and DX columns per ticker via TA-Lib.

        The ``tech_indicator_list`` argument is currently ignored; the
        fixed default set is used instead (as the printed notice says).
        """
        print('Adding self-defined technical indicators is NOT supported yet.')
        print('Use default: MACD, RSI, CCI, DX.')
        self.tech_indicator_list = ['open', 'high', 'low', 'close', 'volume',
                                    'macd', 'macd_signal', 'macd_hist',
                                    'rsi', 'cci', 'dx']
        df_list = []
        for i in df.tic.unique():
            tic_df = df.loc[df.tic == i].copy()
            tic_df['macd'], tic_df['macd_signal'], tic_df['macd_hist'] = MACD(tic_df['close'], fastperiod=12, slowperiod=26, signalperiod=9)
            tic_df['rsi'] = RSI(tic_df['close'], timeperiod=14)
            tic_df['cci'] = CCI(tic_df['high'], tic_df['low'], tic_df['close'], timeperiod=14)
            tic_df['dx'] = DX(tic_df['high'], tic_df['low'], tic_df['close'], timeperiod=14)
            df_list.append(tic_df)
        final_df = pd.concat(df_list, axis=0, ignore_index=True)
        return final_df
    def add_turbulence(self, df):
        """Placeholder: turbulence index is not implemented for crypto."""
        print('Turbulence not supported yet. Return original DataFrame.')
        return df
    def add_vix(self, df):
        """Placeholder: VIX does not apply to cryptocurrencies."""
        print('VIX is not applicable for cryptocurrencies. Return original DataFrame')
        return df
    def df_to_array(self, df, tech_indicator_list, if_vix):
        """Convert the long DataFrame into (price, tech, turbulence) arrays.

        Columns are stacked per ticker: price_array is (T, n_tickers),
        tech_array is (T, n_tickers * n_indicators).  ``if_vix`` is
        accepted for interface compatibility but unused.
        """
        unique_ticker = df.tic.unique()
        if_first_time = True
        for tic in unique_ticker:
            if if_first_time:
                price_array = df[df.tic==tic][['close']].values
                #price_ary = df[df.tic==tic]['close'].values
                tech_array = df[df.tic==tic][tech_indicator_list].values
                if_first_time = False
            else:
                price_array = np.hstack([price_array, df[df.tic==tic][['close']].values])
                # NOTE(review): this branch uses self.tech_indicator_list while
                # the first branch uses the tech_indicator_list parameter; if
                # they differ, the stacked columns will be inconsistent.
                tech_array = np.hstack([tech_array, df[df.tic==tic][self.tech_indicator_list].values])
        assert price_array.shape[0] == tech_array.shape[0]
        return price_array, tech_array, np.array([])
    # helper functions
    def stringify_dates(self, date:datetime):
        """Convert a datetime to the millisecond-epoch string Binance expects."""
        return str(int(date.timestamp()*1000))
    def get_binance_bars(self, last_datetime, symbol):
        """Fetch one page (up to self.limit rows) of klines for ``symbol``.

        Returns a typed OHLCV DataFrame, or None when the API returns no
        rows (used by dataframe_with_limit as the pagination stop signal).
        """
        req_params = {"symbol": symbol, 'interval': self.interval,
                      'startTime': last_datetime, 'endTime': self.end_time, 'limit': self.limit}
        # For debugging purposes, uncomment these lines and if they throw an error
        # then you may have an error in req_params
        # r = requests.get(self.url, params=req_params)
        # print(r.text)
        df = pd.DataFrame(json.loads(requests.get(self.url, params=req_params).text))
        if (len(df.index) == 0):
            return None
        # Keep only open-time + OHLCV out of the 12 kline fields.
        df = df.iloc[:,0:6]
        df.columns = ['datetime','open','high','low','close','volume']
        df.open = df.open.astype("float")
        df.high = df.high.astype("float")
        df.low = df.low.astype("float")
        df.close = df.close.astype("float")
        df.volume = df.volume.astype("float")
        # No stock split and dividend announcement, hence close is same as adjusted close
        df['adj_close'] = df['close']
        df['datetime'] = [datetime.fromtimestamp(
            x / 1000.0) for x in df.datetime
        ]
        df.index = [x for x in range(len(df))]
        return df
    def dataframe_with_limit(self, symbol):
        """Page through the API until exhausted and return the merged frame."""
        df_list = []
        last_datetime = self.start_time
        while True:
            new_df = self.get_binance_bars(last_datetime, symbol)
            if new_df is None:
                break
            df_list.append(new_df)
            # Advance the cursor one day past the newest candle received.
            last_datetime = max(new_df.datetime) + timedelta(days=1)
            last_datetime = self.stringify_dates(last_datetime)
        final_df = pd.concat(df_list)
        date_value = [x.strftime('%Y-%m-%d %H:%M:%S') for x in final_df['datetime']]
        final_df.insert(0,'time',date_value)
        final_df.drop('datetime',inplace=True,axis=1)
        return final_df
|
<filename>finrl_meta/data_processors/processor_binance.py
import pandas as pd
import requests
import json
from datetime import datetime,timedelta
from talib.abstract import MACD, RSI, CCI, DX
import numpy as np
class BinanceProcessor():
def __init__(self):
self.url = "https://api.binance.com/api/v3/klines"
#main functions
def download_data(self, ticker_list, start_date, end_date,
time_interval):
startTime = datetime.strptime(start_date, '%Y-%m-%d')
endTime = datetime.strptime(end_date, '%Y-%m-%d')
self.start_time = self.stringify_dates(startTime)
self.end_time = self.stringify_dates(endTime)
self.interval = time_interval
self.limit = 1440
df_list =[]
for i in ticker_list:
hist_data = self.dataframe_with_limit(symbol=i)
df = hist_data.iloc[:-1]
df = df.dropna()
df['tic'] = i
df_list.append(df)
final_df = pd.concat(df_list, axis=0, ignore_index=True)
return final_df
def clean_data(self, df):
df = df.dropna()
return df
def add_technical_indicator(self, df, tech_indicator_list):
print('Adding self-defined technical indicators is NOT supported yet.')
print('Use default: MACD, RSI, CCI, DX.')
self.tech_indicator_list = ['open', 'high', 'low', 'close', 'volume',
'macd', 'macd_signal', 'macd_hist',
'rsi', 'cci', 'dx']
df_list = []
for i in df.tic.unique():
tic_df = df.loc[df.tic == i].copy()
tic_df['macd'], tic_df['macd_signal'], tic_df['macd_hist'] = MACD(tic_df['close'], fastperiod=12, slowperiod=26, signalperiod=9)
tic_df['rsi'] = RSI(tic_df['close'], timeperiod=14)
tic_df['cci'] = CCI(tic_df['high'], tic_df['low'], tic_df['close'], timeperiod=14)
tic_df['dx'] = DX(tic_df['high'], tic_df['low'], tic_df['close'], timeperiod=14)
df_list.append(tic_df)
final_df = pd.concat(df_list, axis=0, ignore_index=True)
return final_df
def add_turbulence(self, df):
print('Turbulence not supported yet. Return original DataFrame.')
return df
def add_vix(self, df):
print('VIX is not applicable for cryptocurrencies. Return original DataFrame')
return df
def df_to_array(self, df, tech_indicator_list, if_vix):
unique_ticker = df.tic.unique()
if_first_time = True
for tic in unique_ticker:
if if_first_time:
price_array = df[df.tic==tic][['close']].values
#price_ary = df[df.tic==tic]['close'].values
tech_array = df[df.tic==tic][tech_indicator_list].values
if_first_time = False
else:
price_array = np.hstack([price_array, df[df.tic==tic][['close']].values])
tech_array = np.hstack([tech_array, df[df.tic==tic][self.tech_indicator_list].values])
assert price_array.shape[0] == tech_array.shape[0]
return price_array, tech_array, np.array([])
# helper functions
def stringify_dates(self, date:datetime):
return str(int(date.timestamp()*1000))
def get_binance_bars(self, last_datetime, symbol):
req_params = {"symbol": symbol, 'interval': self.interval,
'startTime': last_datetime, 'endTime': self.end_time, 'limit': self.limit}
# For debugging purposes, uncomment these lines and if they throw an error
# then you may have an error in req_params
# r = requests.get(self.url, params=req_params)
# print(r.text)
df = pd.DataFrame(json.loads(requests.get(self.url, params=req_params).text))
if (len(df.index) == 0):
return None
df = df.iloc[:,0:6]
df.columns = ['datetime','open','high','low','close','volume']
df.open = df.open.astype("float")
df.high = df.high.astype("float")
df.low = df.low.astype("float")
df.close = df.close.astype("float")
df.volume = df.volume.astype("float")
# No stock split and dividend announcement, hence close is same as adjusted close
df['adj_close'] = df['close']
df['datetime'] = [datetime.fromtimestamp(
x / 1000.0) for x in df.datetime
]
df.index = [x for x in range(len(df))]
return df
def dataframe_with_limit(self, symbol):
df_list = []
last_datetime = self.start_time
while True:
new_df = self.get_binance_bars(last_datetime, symbol)
if new_df is None:
break
df_list.append(new_df)
last_datetime = max(new_df.datetime) + timedelta(days=1)
last_datetime = self.stringify_dates(last_datetime)
final_df = pd.concat(df_list)
date_value = [x.strftime('%Y-%m-%d %H:%M:%S') for x in final_df['datetime']]
final_df.insert(0,'time',date_value)
final_df.drop('datetime',inplace=True,axis=1)
return final_df
|
en
| 0.739992
|
#main functions #price_ary = df[df.tic==tic]['close'].values # helper functions # For debugging purposes, uncomment these lines and if they throw an error # then you may have an error in req_params # r = requests.get(self.url, params=req_params) # print(r.text) # No stock split and dividend announcement, hence close is same as adjusted close
| 2.726784
| 3
|
Unsupervised Machine Learning Algorithms/PCA.py
|
mikema2019/Machine-learning-algorithms
| 0
|
6629496
|
<reponame>mikema2019/Machine-learning-algorithms
import numpy as np
from sklearn import decomposition
from sklearn.datasets import load_iris
class PCA():
    """Principal component analysis via eigendecomposition of X^T X.

    Mirrors the sklearn.decomposition.PCA workflow: fit with train(),
    project with transform().
    """

    def __init__(self, n_component):
        # Number of principal components kept by transform().
        self.n_component = n_component

    def train(self, X_train):
        """Fit the model on X_train of shape (n_samples, n_features).

        Stores eigenvalues/eigenvectors of the centered scatter matrix
        sorted by decreasing eigenvalue in self.value / self.vector, and
        prints the explained-variance ratio of each component.
        """
        mean = np.mean(X_train, axis=0, keepdims=True)
        X_centered = X_train - mean
        value, vector = np.linalg.eig(X_centered.T.dot(X_centered))
        idx = value.argsort()[::-1]  # descending eigenvalue order
        self.value = value[idx]
        self.vector = vector[:, idx]
        summ = np.sum(value)
        for i in idx:
            print('pricipal component: %s, variance ratio: %s' % (format(i+1,'2.0f'),format(value[i]/summ,'4.2f')))

    def transform(self, X_train):
        """Project X_train onto the first n_component principal axes.

        Returns an array of shape (n_samples, n_component).
        """
        mean = np.mean(X_train, axis=0, keepdims=True)
        X_centered = X_train - mean
        D = self.vector[:, :self.n_component]
        # The original also built an unused reconstruction (X_recon) on
        # every call; that dead code has been removed.
        return np.transpose(D.T.dot(X_centered.T))
if __name__=='__main__':
    # Sanity check: compare this PCA with sklearn's on the iris dataset.
    iris=load_iris()
    X_train=iris.data
    #
    # print(X_train)
    model=PCA(n_component=4)
    model.train(X_train)
    X_reduced1=model.transform(X_train)
    model=decomposition.PCA(n_components=4)
    model.fit_transform(X_train)
    X_reduced2=model.transform(X_train)
    # NOTE(review): eigenvector signs are arbitrary, so individual columns
    # may legitimately differ by a factor of -1 between implementations.
    print(X_reduced1-X_reduced2)
|
import numpy as np
from sklearn import decomposition
from sklearn.datasets import load_iris
class PCA():
#C=np.transpose(D.T.dot(X_train.T))
#X=C.dot(D.T)
def __init__(self,n_component):
self.n_component=n_component
def train(self,X_train):
train_no,feature_no=X_train.shape
mean=np.mean(X_train,axis=0,keepdims=True)
X_reduced=X_train-mean
value,vector=np.linalg.eig(X_reduced.T.dot(X_reduced))
idx=value.argsort()[::-1]
self.value=value[idx]
self.vector=vector[:,idx]
summ=np.sum(value)
for i in idx:
print('pricipal component: %s, variance ratio: %s' % (format(i+1,'2.0f'),format(value[i]/summ,'4.2f')))
def transform(self,X_train):
mean=np.mean(X_train,axis=0,keepdims=True)
X_reduced=X_train-mean
D=self.vector[:,:self.n_component]
X_decomposed=np.transpose(D.T.dot(X_reduced.T))
X_recon=np.matmul(np.transpose(D.T.dot(X_reduced.T)),D.T)+mean
return X_decomposed
if __name__=='__main__':
iris=load_iris()
X_train=iris.data
#
# print(X_train)
model=PCA(n_component=4)
model.train(X_train)
X_reduced1=model.transform(X_train)
model=decomposition.PCA(n_components=4)
model.fit_transform(X_train)
X_reduced2=model.transform(X_train)
print(X_reduced1-X_reduced2)
|
en
| 0.393237
|
#C=np.transpose(D.T.dot(X_train.T)) #X=C.dot(D.T) # # print(X_train)
| 2.837742
| 3
|
src/yapcad/geometry.py
|
rdevaul/yapCAD
| 18
|
6629497
|
<filename>src/yapcad/geometry.py
## yapCAD geometry-generating superclass
## =====================================
## subclasses of Geometry implement the geom() method, which produce
## geometry lists or computational geometry primitives, as described
## by the functions in geom.py
## subclasses of SampleGeometry implelement geom() and sample()
## methods. The geom() method is guaranteed to produce a geometry
## list, and the sample() method allows for parametric sampling, which
## is guaranteed to "make sense" in the interval 0.0 <= u <= 1.0.
## Unless there is a good reason otherwise, subclasses of
## SampleGeometry should guarantee C0 continuity*
## * For fractal elements, or other complex forms where C0 continuity
## is less meaningful, SampleGeometry might still be approprite.
## However, if you have diconnected line and arc segments, with a
## classically-definalble gap (or the possibility thereof) then
## implement Geometry instead
import copy
from yapcad.geom import *
class Geometry:
    """Generalized computational geometry wrapper.

    Holds a single geometry element (point, line, arc, poly, geomlist,
    or another Geometry) in self._elem and hands back deep copies via
    geom() so callers cannot mutate internal state.
    """
    def __repr__(self):
        return 'geometry base class wrapper for: {}'.format(vstr(self._elem))
    def __init__(self, a=False):
        """Wrap ``a`` if given; raise ValueError for unsupported values.

        Args:
            a: optional element to wrap.  False (the default) creates an
                empty wrapper.
        """
        self._update = True  # dirty flag: recompute internals before reads
        self._elem = []
        if a:
            if ispoint(a) or isline(a) or isarc(a) or ispoly(a) \
               or isgeomlist(a) or isinstance(a, Geometry):
                self._elem = [deepcopy(a)]
            else:
                raise ValueError('bad argument to Geometry class constructor: {}'.format(a))
    def _updateInternals(self):
        # Hook for subclasses that cache derived state; no-op here.
        return
    def geom(self):
        """Return a deep copy of the wrapped geometry list."""
        # BUG FIX: previously read ``self.update`` (an attribute that is
        # never set, so every call raised AttributeError); the dirty
        # flag set in __init__ is ``self._update``.
        if self._update:
            self._updateInternals()
        return deepcopy(self._elem)
class SampleGeometry(Geometry):
    """Geometry that supports parametric sampling over 0.0 <= u <= 1.0."""

    def __init__(self, a=False):
        """Defer all element handling to the Geometry base class."""
        super().__init__(a)

    def sample(self, u):
        """Return the geometry evaluated at parameter ``u``."""
        if self._update:
            self._updateInternals()
        return sample(self.geom(), u)

    def __repr__(self):
        wrapped = vstr(self._elem)
        return 'sampleable geometry base class wrapper for: {}'.format(wrapped)
class IntersectGeometry(SampleGeometry):
    """Sampleable geometry that also supports XY-plane intersection."""

    def __init__(self, a=False):
        """Defer all element handling to SampleGeometry."""
        super().__init__(a)

    def intersectXY(self, g, inside=True, params=False):
        """Intersect ``g`` with this geometry in the XY plane."""
        if self._update:
            self._updateInternals()
        return intersectXY(g, self.geom(), inside, params)

    def __repr__(self):
        wrapped = vstr(self._elem)
        return 'intersectable geometry base class wrapper for: {}'.format(wrapped)
|
<filename>src/yapcad/geometry.py
## yapCAD geometry-generating superclass
## =====================================
## subclasses of Geometry implement the geom() method, which produce
## geometry lists or computational geometry primitives, as described
## by the functions in geom.py
## subclasses of SampleGeometry implelement geom() and sample()
## methods. The geom() method is guaranteed to produce a geometry
## list, and the sample() method allows for parametric sampling, which
## is guaranteed to "make sense" in the interval 0.0 <= u <= 1.0.
## Unless there is a good reason otherwise, subclasses of
## SampleGeometry should guarantee C0 continuity*
## * For fractal elements, or other complex forms where C0 continuity
## is less meaningful, SampleGeometry might still be approprite.
## However, if you have diconnected line and arc segments, with a
## classically-definalble gap (or the possibility thereof) then
## implement Geometry instead
import copy
from yapcad.geom import *
class Geometry:
""" generalized computational geometry class """
def __repr__(self):
return 'geometry base class wrapper for: {}'.format(vstr(self._elem))
def __init__(self,a=False):
self._update=True
self._elem=[]
if a:
if ispoint(a) or isline (a) or isarc(a) or ispoly(a) \
or isgeomlist(a) or isinstance(a,Geometry):
self._elem=[ deepcopy(a) ]
else:
raise ValueError('bad argument to Geometry class constructor: {}'.format(a))
def _updateInternals(self):
return
def geom(self):
if self.update:
self._updateInternals()
return deepcopy(self._elem)
class SampleGeometry(Geometry):
""" generalized sampleable geometry class"""
def __init__(self,a=False):
super().__init__(a)
def __repr__(self):
return 'sampleable geometry base class wrapper for: {}'.format(vstr(self._elem))
def sample(self,u):
if self._update:
self._updateInternals()
return sample(self.geom(),u)
class IntersectGeometry(SampleGeometry):
""" generalized intersectable geometry class"""
def __init__(self,a=False):
super().__init__(a)
def __repr__(self):
return 'intersectable geometry base class wrapper for: {}'.format(vstr(self._elem))
def intersectXY(self,g,inside=True,params=False):
if self._update:
self._updateInternals()
return intersectXY(g,self.geom(),inside,params)
|
en
| 0.799155
|
## yapCAD geometry-generating superclass ## ===================================== ## subclasses of Geometry implement the geom() method, which produce ## geometry lists or computational geometry primitives, as described ## by the functions in geom.py ## subclasses of SampleGeometry implelement geom() and sample() ## methods. The geom() method is guaranteed to produce a geometry ## list, and the sample() method allows for parametric sampling, which ## is guaranteed to "make sense" in the interval 0.0 <= u <= 1.0. ## Unless there is a good reason otherwise, subclasses of ## SampleGeometry should guarantee C0 continuity* ## * For fractal elements, or other complex forms where C0 continuity ## is less meaningful, SampleGeometry might still be approprite. ## However, if you have diconnected line and arc segments, with a ## classically-definalble gap (or the possibility thereof) then ## implement Geometry instead generalized computational geometry class generalized sampleable geometry class generalized intersectable geometry class
| 2.714719
| 3
|
parellel_computing/tmp.py
|
fly2rain/try_git_pycharm
| 0
|
6629498
|
import threading
import time
# Demo: two threads increment a shared counter under a lock, so the
# final total is exactly 2 * 1_000_000 (no lost updates).
number = 0
lock = threading.Lock()
def plus(lk):
    global number # declare that `number` refers to the module-level global
    lk.acquire() # take the lock so the whole increment loop runs exclusively
    for _ in range(1000000): # a large loop so a data race would be observable
        number += 1
    print("子线程%s运算结束后,number = %s" % (threading.current_thread().getName(), number))
    lk.release() # release the lock so the other thread may run its loop
if __name__ == '__main__':
    for i in range(2): # two worker threads are enough to expose dirty reads without the lock
        t = threading.Thread(target=plus, args=(lock,)) # the lock object must be passed to plus as an argument
        t.start()
    time.sleep(2) # wait 2 seconds so both workers finish before reading the total
    print("主线程执行完毕后,number = ", number)
|
import threading
import time
number = 0
lock = threading.Lock()
def plus(lk):
global number # global声明此处的number是外面的全局变量number
lk.acquire() # 开始加锁
for _ in range(1000000): # 进行一个大数级别的循环加一运算
number += 1
print("子线程%s运算结束后,number = %s" % (threading.current_thread().getName(), number))
lk.release() # 释放锁,让别的线程也可以访问number
if __name__ == '__main__':
for i in range(2): # 用2个子线程,就可以观察到脏数据
t = threading.Thread(target=plus, args=(lock,)) # 🔥🔥需要把锁当做参数传递给plus函数,注意传递对象🔥🔥🔥
t.start()
time.sleep(2) # 等待2秒,确保2个子线程都已经结束运算。
print("主线程执行完毕后,number = ", number)
|
zh
| 0.829273
|
# global声明此处的number是外面的全局变量number # 开始加锁 # 进行一个大数级别的循环加一运算 # 释放锁,让别的线程也可以访问number # 用2个子线程,就可以观察到脏数据 # 🔥🔥需要把锁当做参数传递给plus函数,注意传递对象🔥🔥🔥 # 等待2秒,确保2个子线程都已经结束运算。
| 3.599379
| 4
|
Turtle/Turtles_Race_game_en.py
|
sagarbabalsure/pythonCodes
| 65
|
6629499
|
#Turtle Race Game :
#added by Heba-Ahmad
#This is a turtle game, that turtles will race against each other
#the code of the game with readable comments:
from turtle import *
from random import randint
# screen setup:
wn = Screen()
wn.bgcolor("green yellow")
wn.title("Turtles Race")
wn.setup(width = 500, height= 500)
# a dedicated turtle that only writes the title onto the screen
race = Turtle()
race.color("black")
race.shape("turtle")
race.up()
race.goto(-80,180)
race.down()
race.write("Turtles Race", True, align= "left", font=("comic sans ms", 20, "italic"))
# default (anonymous) turtle draws the track
speed('fastest')
penup()
goto(-140, 100)
# draw a race track: 15 numbered vertical lane markers
for step in range (15):
    write(step, align= 'center')
    right(90)
    forward(10)
    pendown()
    # the track lines are dashed instead of solid (for solid: use forward(150)
    # instead of this loop and change the backward value to 160)
    for i in range(10):
        forward(15)
        penup()
        forward(5)
        pendown()
    penup()
    backward(210)
    left(90)
    forward(20)
# the five racing turtles, one lane each
racer1 = Turtle()
racer1.color('red')
racer1.shape('turtle')
racer1.penup()
racer1.goto(-160, 60)
racer1.pendown()
# make each turtle do a 360 degree twirl once it reaches the starting line
for turn in range(36):
    racer1.right(10)
racer2 = Turtle()
racer2.color('white')
racer2.shape('turtle')
racer2.penup()
racer2.goto(-160, 30)
racer2.pendown()
# a full turn: left 10 degrees, 36 times
for turn in range(36):
    racer2.left(10)
racer3 = Turtle()
racer3.color('yellow')
racer3.shape('turtle')
racer3.penup()
racer3.goto(-160, 0)
racer3.pendown()
for turn in range(36):
    racer3.right(10)
racer4 = Turtle()
racer4.color('purple')
racer4.shape('turtle')
racer4.penup()
racer4.goto(-160, -30)
racer4.pendown()
# a full turn: right 5 degrees, 72 times
for turn in range(72):
    racer4.right(5)
racer5 = Turtle()
racer5.color('blue')
racer5.shape('turtle')
racer5.penup()
racer5.goto(-160, -60)
racer5.pendown()
for turn in range(72):
    racer5.right(5)
# race: each turtle advances a random number of steps per round
for turn in range(100):
    racer1.forward(randint(1, 5))
    racer2.forward(randint(1, 5))
    racer3.forward(randint(1, 5))
    racer4.forward(randint(1, 5))
    racer5.forward(randint(1, 5))
wn.exitonclick()
|
#Turtle Race Game :
#added by Heba-Ahmad
#This is a turtle game, that turtles will race against each other
#the code of the game with readable comments:
from turtle import *
from random import randint
#screen setup:
wn = Screen()
wn.bgcolor("green yellow")
wn.title("Turtles Race")
wn.setup(width = 500, height= 500)
#to write on the screen
race = Turtle()
race.color("black")
race.shape("turtle")
race.up()
race.goto(-80,180)
race.down()
race.write("Turtles Race", True, align= "left", font=("comic sans ms", 20, "italic"))
#turtle objects setup:
speed('fastest')
penup()
goto(-140, 100)
#a race track
for step in range (15):
write(step, align= 'center')
right(90)
forward(10)
pendown()
#the track lines dashed instead of solid (for solid: add forward(150) instead of the loop and change backward value to 160)
for i in range(10):
forward(15)
penup()
forward(5)
pendown()
penup()
backward(210)
left(90)
forward(20)
#Racing turtles
racer1 = Turtle()
racer1.color('red')
racer1.shape('turtle')
racer1.penup()
racer1.goto(-160, 60)
racer1.pendown()
#make each turtle do a 360 degree twirl after they get to the starting line
for turn in range(36):
racer1.right(10)
racer2 = Turtle()
racer2.color('white')
racer2.shape('turtle')
racer2.penup()
racer2.goto(-160, 30)
racer2.pendown()
#A full turn right 10 degrees 36 times
for turn in range(36):
racer2.left(10)
racer3 = Turtle()
racer3.color('yellow')
racer3.shape('turtle')
racer3.penup()
racer3.goto(-160, 0)
racer3.pendown()
for turn in range(36):
racer3.right(10)
racer4 = Turtle()
racer4.color('purple')
racer4.shape('turtle')
racer4.penup()
racer4.goto(-160, -30)
racer4.pendown()
#A full turn right 5 degrees 72 times
for turn in range(72):
racer4.right(5)
racer5 = Turtle()
racer5.color('blue')
racer5.shape('turtle')
racer5.penup()
racer5.goto(-160, -60)
racer5.pendown()
for turn in range(72):
racer5.right(5)
#make the turtle race by moving a random number of steps at a time
for turn in range(100):
racer1.forward(randint(1, 5))
racer2.forward(randint(1, 5))
racer3.forward(randint(1, 5))
racer4.forward(randint(1, 5))
racer5.forward(randint(1, 5))
wn.exitonclick()
|
en
| 0.87842
|
#Turtle Race Game : #added by Heba-Ahmad #This is a turtle game, that turtles will race against each other #the code of the game with readable comments: #screen setup: #to write on the screen #turtle objects setup: #a race track #the track lines dashed instead of solid (for solid: add forward(150) instead of the loop and change backward value to 160) #Racing turtles #make each turtle do a 360 degree twirl after they get to the starting line #A full turn right 10 degrees 36 times #A full turn right 5 degrees 72 times #make the turtle race by moving a random number of steps at a time
| 4.443892
| 4
|
ABC043/D.py
|
shimomura314/AtcoderCodes
| 0
|
6629500
|
s = list(input())
n = len(s)
for i in range(n-1):
if s[i] == s[i+1]:
print(i+1, i+2)
exit()
for i in range(n-2):
if s[i] == s[i+2]:
print(i+1, i+3)
exit()
print(-1, -1)
|
s = list(input())
n = len(s)
for i in range(n-1):
if s[i] == s[i+1]:
print(i+1, i+2)
exit()
for i in range(n-2):
if s[i] == s[i+2]:
print(i+1, i+3)
exit()
print(-1, -1)
|
none
| 1
| 3.347275
| 3
|
|
benchmark/lue/benchmark/weak_scaling/post_process.py
|
computationalgeography/lue
| 2
|
6629501
|
<reponame>computationalgeography/lue
# -*- encoding: utf8 -*-
from .weak_scaling_experiment import WeakScalingExperiment
from .. import dataset
from .. import job
from .. import plot
from .. import util
import lue.data_model as ldm
import dateutil.parser
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def post_process_raw_results(
lue_dataset,
plot_pathname):
"""
Create plots and tables from raw benchmark results
"""
lue_meta_information = lue_dataset.benchmark.meta_information
worker_type = lue_meta_information.worker_type.value[:][0]
lue_measurement = lue_dataset.benchmark.measurement
count = lue_measurement.duration.value.shape[1]
# The time point at which the experiment was performed is the epoch
# of the time domain used to store the durations
lue_clock = lue_measurement.time_domain.clock
assert lue_clock.nr_units == 1
time_point_units = lue_clock.unit
lue_epoch = lue_clock.epoch
assert lue_epoch.kind == ldm.Epoch.Kind.common_era
assert lue_epoch.calendar == ldm.Calendar.gregorian
time_point = dateutil.parser.isoparse(lue_epoch.origin)
# String containing time point in local time zone and conventions
# time_point = time_point.astimezone(tzlocal.get_localzone()).strftime("%c")
time_point = time_point.strftime("%c")
nr_workers = lue_measurement.nr_workers.value[:]
# Results are sorted by time not by nr_workers. All values to be
# plotted need to be sorted, by nr_workers.
sort_idxs = np.argsort(nr_workers)
nr_workers = nr_workers[sort_idxs]
assert util.is_monotonically_increasing(nr_workers), nr_workers
assert nr_workers[0] == 1, nr_workers
lue_scaling = lue_dataset.benchmark.scaling
def annotate_plot(
axis,
y_label):
axis.set_xlabel(u"workers ({})".format(worker_type))
axis.set_xticks(nr_workers)
axis.set_ylabel(y_label)
axis.grid()
def plot_duration(
axis):
if count == 1:
duration = lue_measurement.duration.value[:][sort_idxs]
y_label = u"duration ({})".format(time_point_units)
plot_actual = lambda data: axis.plot(
nr_workers, data,
linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
else:
duration = lue_scaling.mean_duration.value[:][sort_idxs]
error = lue_scaling.std_duration.value[:][sort_idxs]
y_label= u"duration ({}) ± stddev (count={})".format(
time_point_units, count)
plot_actual = lambda data: axis.errorbar(
x=nr_workers, y=data, yerr=error,
linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
serial_duration = duration[0] * nr_workers
axis.plot(
nr_workers, serial_duration, linewidth=plot.default_linewidth,
color=plot.serial_color)
linear_duration = \
np.array([duration[0] for n in range(len(nr_workers))])
axis.plot(
nr_workers, linear_duration, linewidth=plot.default_linewidth,
color=plot.linear_color)
plot_actual(duration)
annotate_plot(axis, y_label)
def plot_relative_efficiency(
axis):
if count == 1:
relative_efficiency = \
lue_scaling.relative_efficiency.value[:][sort_idxs]
plot_actual = lambda data: axis.plot(
nr_workers, data, linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
else:
relative_efficiency = \
lue_scaling.mean_relative_efficiency.value[:][sort_idxs]
error = lue_scaling.std_relative_efficiency.value[:][sort_idxs]
plot_actual = lambda data: axis.errorbar(
x=nr_workers, y=data, yerr=error,
linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
serial_relative_efficiency = relative_efficiency[0] / nr_workers
axis.plot(
nr_workers, serial_relative_efficiency, linewidth=plot.default_linewidth,
color=plot.serial_color)
linear_relative_efficiency = \
np.array([relative_efficiency[0] for n in range(len(nr_workers))])
axis.plot(
nr_workers, linear_relative_efficiency, linewidth=plot.default_linewidth,
color=plot.linear_color)
plot_actual(relative_efficiency)
y_label= u"relative efficiency (%)"
annotate_plot(axis, y_label)
def plot_lups(
axis):
if count == 1:
lups = lue_scaling.lups.value[:][sort_idxs]
plot_actual = lambda data: axis.plot(
nr_workers, data, linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
else:
lups = lue_scaling.mean_lups.value[:][sort_idxs]
error = lue_scaling.std_lups.value[:][sort_idxs]
plot_actual = lambda data: axis.errorbar(
x=nr_workers, y=data, yerr=error,
linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
serial_lups = \
np.array([lups[0] for n in range(len(nr_workers))])
axis.plot(
nr_workers, serial_lups, linewidth=plot.default_linewidth,
color=plot.serial_color)
linear_lups = lups[0] * nr_workers
axis.plot(
nr_workers, linear_lups, linewidth=plot.default_linewidth,
color=plot.linear_color)
plot_actual(lups)
y_label= u"throughput (LUPS)"
annotate_plot(axis, y_label)
nr_plot_rows = 2
nr_plot_cols = 2
plot_width = 8 # Inches...
plot_height = 6 # Inches...
figure, axes = plt.subplots(
nrows=nr_plot_rows, ncols=nr_plot_cols,
figsize=(nr_plot_cols * plot_width, nr_plot_rows * plot_height),
squeeze=False, sharex=False,
) # Inches...
plot_duration(axes[0][0])
plot_relative_efficiency(axes[0][1])
plot_lups(axes[1][0])
axes[1, 1].axis("off")
figure.legend(labels=["linear", "serial", "actual"])
name = lue_meta_information.name.value[:][0]
system_name = lue_meta_information.system_name.value[:][0]
scenario_name = lue_meta_information.scenario_name.value[:][0]
array_shape_per_worker = lue_meta_information.array_shape.value[0]
partition_shape = lue_meta_information.partition_shape.value[0]
figure.suptitle(
"{}, {}, {}\n"
"Weak scaling experiment on {} array per worker and {} partitions ({})"
.format(
name,
system_name,
time_point,
"x".join([str(extent) for extent in array_shape_per_worker]),
"x".join([str(extent) for extent in partition_shape]),
scenario_name,
)
)
plt.savefig(plot_pathname, bbox_inches="tight")
def post_process_results(
results_prefix):
"""
Post-process the results of executing the benchmark script generated
by the generate_script function.
"""
lue_dataset = job.open_scaling_lue_dataset(results_prefix, "r")
cluster, benchmark, experiment = dataset.read_benchmark_settings(
lue_dataset, WeakScalingExperiment)
util.create_dot_graph(
lue_dataset.pathname,
experiment.result_pathname(
cluster.name, benchmark.scenario_name, "graph", "pdf"))
plot_pathname = experiment.result_pathname(
cluster.name, benchmark.scenario_name, "plot", "pdf")
post_process_raw_results(lue_dataset, plot_pathname)
|
# -*- encoding: utf8 -*-
from .weak_scaling_experiment import WeakScalingExperiment
from .. import dataset
from .. import job
from .. import plot
from .. import util
import lue.data_model as ldm
import dateutil.parser
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def post_process_raw_results(
lue_dataset,
plot_pathname):
"""
Create plots and tables from raw benchmark results
"""
lue_meta_information = lue_dataset.benchmark.meta_information
worker_type = lue_meta_information.worker_type.value[:][0]
lue_measurement = lue_dataset.benchmark.measurement
count = lue_measurement.duration.value.shape[1]
# The time point at which the experiment was performed is the epoch
# of the time domain used to store the durations
lue_clock = lue_measurement.time_domain.clock
assert lue_clock.nr_units == 1
time_point_units = lue_clock.unit
lue_epoch = lue_clock.epoch
assert lue_epoch.kind == ldm.Epoch.Kind.common_era
assert lue_epoch.calendar == ldm.Calendar.gregorian
time_point = dateutil.parser.isoparse(lue_epoch.origin)
# String containing time point in local time zone and conventions
# time_point = time_point.astimezone(tzlocal.get_localzone()).strftime("%c")
time_point = time_point.strftime("%c")
nr_workers = lue_measurement.nr_workers.value[:]
# Results are sorted by time not by nr_workers. All values to be
# plotted need to be sorted, by nr_workers.
sort_idxs = np.argsort(nr_workers)
nr_workers = nr_workers[sort_idxs]
assert util.is_monotonically_increasing(nr_workers), nr_workers
assert nr_workers[0] == 1, nr_workers
lue_scaling = lue_dataset.benchmark.scaling
def annotate_plot(
axis,
y_label):
axis.set_xlabel(u"workers ({})".format(worker_type))
axis.set_xticks(nr_workers)
axis.set_ylabel(y_label)
axis.grid()
def plot_duration(
axis):
if count == 1:
duration = lue_measurement.duration.value[:][sort_idxs]
y_label = u"duration ({})".format(time_point_units)
plot_actual = lambda data: axis.plot(
nr_workers, data,
linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
else:
duration = lue_scaling.mean_duration.value[:][sort_idxs]
error = lue_scaling.std_duration.value[:][sort_idxs]
y_label= u"duration ({}) ± stddev (count={})".format(
time_point_units, count)
plot_actual = lambda data: axis.errorbar(
x=nr_workers, y=data, yerr=error,
linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
serial_duration = duration[0] * nr_workers
axis.plot(
nr_workers, serial_duration, linewidth=plot.default_linewidth,
color=plot.serial_color)
linear_duration = \
np.array([duration[0] for n in range(len(nr_workers))])
axis.plot(
nr_workers, linear_duration, linewidth=plot.default_linewidth,
color=plot.linear_color)
plot_actual(duration)
annotate_plot(axis, y_label)
def plot_relative_efficiency(
axis):
if count == 1:
relative_efficiency = \
lue_scaling.relative_efficiency.value[:][sort_idxs]
plot_actual = lambda data: axis.plot(
nr_workers, data, linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
else:
relative_efficiency = \
lue_scaling.mean_relative_efficiency.value[:][sort_idxs]
error = lue_scaling.std_relative_efficiency.value[:][sort_idxs]
plot_actual = lambda data: axis.errorbar(
x=nr_workers, y=data, yerr=error,
linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
serial_relative_efficiency = relative_efficiency[0] / nr_workers
axis.plot(
nr_workers, serial_relative_efficiency, linewidth=plot.default_linewidth,
color=plot.serial_color)
linear_relative_efficiency = \
np.array([relative_efficiency[0] for n in range(len(nr_workers))])
axis.plot(
nr_workers, linear_relative_efficiency, linewidth=plot.default_linewidth,
color=plot.linear_color)
plot_actual(relative_efficiency)
y_label= u"relative efficiency (%)"
annotate_plot(axis, y_label)
def plot_lups(
axis):
if count == 1:
lups = lue_scaling.lups.value[:][sort_idxs]
plot_actual = lambda data: axis.plot(
nr_workers, data, linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
else:
lups = lue_scaling.mean_lups.value[:][sort_idxs]
error = lue_scaling.std_lups.value[:][sort_idxs]
plot_actual = lambda data: axis.errorbar(
x=nr_workers, y=data, yerr=error,
linewidth=plot.default_linewidth,
color=plot.actual_color, marker="o")
serial_lups = \
np.array([lups[0] for n in range(len(nr_workers))])
axis.plot(
nr_workers, serial_lups, linewidth=plot.default_linewidth,
color=plot.serial_color)
linear_lups = lups[0] * nr_workers
axis.plot(
nr_workers, linear_lups, linewidth=plot.default_linewidth,
color=plot.linear_color)
plot_actual(lups)
y_label= u"throughput (LUPS)"
annotate_plot(axis, y_label)
nr_plot_rows = 2
nr_plot_cols = 2
plot_width = 8 # Inches...
plot_height = 6 # Inches...
figure, axes = plt.subplots(
nrows=nr_plot_rows, ncols=nr_plot_cols,
figsize=(nr_plot_cols * plot_width, nr_plot_rows * plot_height),
squeeze=False, sharex=False,
) # Inches...
plot_duration(axes[0][0])
plot_relative_efficiency(axes[0][1])
plot_lups(axes[1][0])
axes[1, 1].axis("off")
figure.legend(labels=["linear", "serial", "actual"])
name = lue_meta_information.name.value[:][0]
system_name = lue_meta_information.system_name.value[:][0]
scenario_name = lue_meta_information.scenario_name.value[:][0]
array_shape_per_worker = lue_meta_information.array_shape.value[0]
partition_shape = lue_meta_information.partition_shape.value[0]
figure.suptitle(
"{}, {}, {}\n"
"Weak scaling experiment on {} array per worker and {} partitions ({})"
.format(
name,
system_name,
time_point,
"x".join([str(extent) for extent in array_shape_per_worker]),
"x".join([str(extent) for extent in partition_shape]),
scenario_name,
)
)
plt.savefig(plot_pathname, bbox_inches="tight")
def post_process_results(
results_prefix):
"""
Post-process the results of executing the benchmark script generated
by the generate_script function.
"""
lue_dataset = job.open_scaling_lue_dataset(results_prefix, "r")
cluster, benchmark, experiment = dataset.read_benchmark_settings(
lue_dataset, WeakScalingExperiment)
util.create_dot_graph(
lue_dataset.pathname,
experiment.result_pathname(
cluster.name, benchmark.scenario_name, "graph", "pdf"))
plot_pathname = experiment.result_pathname(
cluster.name, benchmark.scenario_name, "plot", "pdf")
post_process_raw_results(lue_dataset, plot_pathname)
|
en
| 0.83946
|
# -*- encoding: utf8 -*- Create plots and tables from raw benchmark results # The time point at which the experiment was performed is the epoch # of the time domain used to store the durations # String containing time point in local time zone and conventions # time_point = time_point.astimezone(tzlocal.get_localzone()).strftime("%c") # Results are sorted by time not by nr_workers. All values to be # plotted need to be sorted, by nr_workers. # Inches... # Inches... # Inches... Post-process the results of executing the benchmark script generated by the generate_script function.
| 2.181467
| 2
|
homeassistant/components/netatmo/media_source.py
|
JonahKr/core
| 0
|
6629502
|
"""Netatmo Media Source Implementation."""
import datetime as dt
import re
from typing import Optional, Tuple
from homeassistant.components.media_player.const import MEDIA_TYPE_VIDEO
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.components.media_source.const import MEDIA_MIME_TYPES
from homeassistant.components.media_source.error import Unresolvable
from homeassistant.components.media_source.models import (
BrowseMediaSource,
MediaSource,
MediaSourceItem,
PlayMedia,
)
from homeassistant.core import HomeAssistant, callback
from .const import DATA_CAMERAS, DATA_EVENTS, DOMAIN, MANUFACTURER
MIME_TYPE = "application/x-mpegURL"
async def async_get_media_source(hass: HomeAssistant):
"""Set up Netatmo media source."""
return NetatmoSource(hass)
class NetatmoSource(MediaSource):
"""Provide Netatmo camera recordings as media sources."""
name: str = MANUFACTURER
def __init__(self, hass: HomeAssistant):
"""Initialize Netatmo source."""
super().__init__(DOMAIN)
self.hass = hass
self.events = self.hass.data[DOMAIN][DATA_EVENTS]
async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
"""Resolve media to a url."""
_, camera_id, event_id = async_parse_identifier(item)
url = self.events[camera_id][event_id]["media_url"]
return PlayMedia(url, MIME_TYPE)
async def async_browse_media(
self, item: MediaSourceItem, media_types: Tuple[str] = MEDIA_MIME_TYPES
) -> Optional[BrowseMediaSource]:
"""Return media."""
try:
source, camera_id, event_id = async_parse_identifier(item)
except Unresolvable as err:
raise BrowseError(str(err)) from err
return self._browse_media(source, camera_id, event_id)
def _browse_media(
self, source: str, camera_id: str, event_id: int
) -> Optional[BrowseMediaSource]:
"""Browse media."""
if camera_id and camera_id not in self.events:
raise BrowseError("Camera does not exist.")
if event_id and event_id not in self.events[camera_id]:
raise BrowseError("Event does not exist.")
return self._build_item_response(source, camera_id, event_id)
def _build_item_response(
self, source: str, camera_id: str, event_id: int = None
) -> Optional[BrowseMediaSource]:
if event_id and event_id in self.events[camera_id]:
created = dt.datetime.fromtimestamp(event_id)
thumbnail = self.events[camera_id][event_id].get("snapshot", {}).get("url")
message = remove_html_tags(self.events[camera_id][event_id]["message"])
title = f"{created} - {message}"
else:
title = self.hass.data[DOMAIN][DATA_CAMERAS].get(camera_id, MANUFACTURER)
thumbnail = None
if event_id:
path = f"{source}/{camera_id}/{event_id}"
else:
path = f"{source}/{camera_id}"
media = BrowseMediaSource(
domain=DOMAIN,
identifier=path,
media_content_type=MEDIA_TYPE_VIDEO,
title=title,
can_play=bool(
event_id and self.events[camera_id][event_id].get("media_url")
),
can_expand=event_id is None,
thumbnail=thumbnail,
)
if not media.can_play and not media.can_expand:
return None
if not media.can_expand:
return media
media.children = []
# Append first level children
if not camera_id:
for cid in self.events:
child = self._build_item_response(source, cid)
if child:
media.children.append(child)
else:
for eid in self.events[camera_id]:
child = self._build_item_response(source, camera_id, eid)
if child:
media.children.append(child)
return media
def remove_html_tags(text):
"""Remove html tags from string."""
clean = re.compile("<.*?>")
return re.sub(clean, "", text)
@callback
def async_parse_identifier(
item: MediaSourceItem,
) -> Tuple[str, str, Optional[int]]:
"""Parse identifier."""
if not item.identifier:
return "events", "", None
source, path = item.identifier.lstrip("/").split("/", 1)
if source != "events":
raise Unresolvable("Unknown source directory.")
if "/" in path:
camera_id, event_id = path.split("/", 1)
return source, camera_id, int(event_id)
return source, path, None
|
"""Netatmo Media Source Implementation."""
import datetime as dt
import re
from typing import Optional, Tuple
from homeassistant.components.media_player.const import MEDIA_TYPE_VIDEO
from homeassistant.components.media_player.errors import BrowseError
from homeassistant.components.media_source.const import MEDIA_MIME_TYPES
from homeassistant.components.media_source.error import Unresolvable
from homeassistant.components.media_source.models import (
BrowseMediaSource,
MediaSource,
MediaSourceItem,
PlayMedia,
)
from homeassistant.core import HomeAssistant, callback
from .const import DATA_CAMERAS, DATA_EVENTS, DOMAIN, MANUFACTURER
MIME_TYPE = "application/x-mpegURL"
async def async_get_media_source(hass: HomeAssistant):
"""Set up Netatmo media source."""
return NetatmoSource(hass)
class NetatmoSource(MediaSource):
"""Provide Netatmo camera recordings as media sources."""
name: str = MANUFACTURER
def __init__(self, hass: HomeAssistant):
"""Initialize Netatmo source."""
super().__init__(DOMAIN)
self.hass = hass
self.events = self.hass.data[DOMAIN][DATA_EVENTS]
async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
"""Resolve media to a url."""
_, camera_id, event_id = async_parse_identifier(item)
url = self.events[camera_id][event_id]["media_url"]
return PlayMedia(url, MIME_TYPE)
async def async_browse_media(
self, item: MediaSourceItem, media_types: Tuple[str] = MEDIA_MIME_TYPES
) -> Optional[BrowseMediaSource]:
"""Return media."""
try:
source, camera_id, event_id = async_parse_identifier(item)
except Unresolvable as err:
raise BrowseError(str(err)) from err
return self._browse_media(source, camera_id, event_id)
def _browse_media(
self, source: str, camera_id: str, event_id: int
) -> Optional[BrowseMediaSource]:
"""Browse media."""
if camera_id and camera_id not in self.events:
raise BrowseError("Camera does not exist.")
if event_id and event_id not in self.events[camera_id]:
raise BrowseError("Event does not exist.")
return self._build_item_response(source, camera_id, event_id)
def _build_item_response(
self, source: str, camera_id: str, event_id: int = None
) -> Optional[BrowseMediaSource]:
if event_id and event_id in self.events[camera_id]:
created = dt.datetime.fromtimestamp(event_id)
thumbnail = self.events[camera_id][event_id].get("snapshot", {}).get("url")
message = remove_html_tags(self.events[camera_id][event_id]["message"])
title = f"{created} - {message}"
else:
title = self.hass.data[DOMAIN][DATA_CAMERAS].get(camera_id, MANUFACTURER)
thumbnail = None
if event_id:
path = f"{source}/{camera_id}/{event_id}"
else:
path = f"{source}/{camera_id}"
media = BrowseMediaSource(
domain=DOMAIN,
identifier=path,
media_content_type=MEDIA_TYPE_VIDEO,
title=title,
can_play=bool(
event_id and self.events[camera_id][event_id].get("media_url")
),
can_expand=event_id is None,
thumbnail=thumbnail,
)
if not media.can_play and not media.can_expand:
return None
if not media.can_expand:
return media
media.children = []
# Append first level children
if not camera_id:
for cid in self.events:
child = self._build_item_response(source, cid)
if child:
media.children.append(child)
else:
for eid in self.events[camera_id]:
child = self._build_item_response(source, camera_id, eid)
if child:
media.children.append(child)
return media
def remove_html_tags(text):
"""Remove html tags from string."""
clean = re.compile("<.*?>")
return re.sub(clean, "", text)
@callback
def async_parse_identifier(
item: MediaSourceItem,
) -> Tuple[str, str, Optional[int]]:
"""Parse identifier."""
if not item.identifier:
return "events", "", None
source, path = item.identifier.lstrip("/").split("/", 1)
if source != "events":
raise Unresolvable("Unknown source directory.")
if "/" in path:
camera_id, event_id = path.split("/", 1)
return source, camera_id, int(event_id)
return source, path, None
|
en
| 0.690068
|
Netatmo Media Source Implementation. Set up Netatmo media source. Provide Netatmo camera recordings as media sources. Initialize Netatmo source. Resolve media to a url. Return media. Browse media. # Append first level children Remove html tags from string. Parse identifier.
| 2.208966
| 2
|
vistrails/packages/vtk/pythonclass.py
|
remram44/VisTrails-mybinder
| 1
|
6629503
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: <EMAIL>
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from itertools import izip
from vistrails.core.debug import format_exc
from vistrails.core.modules.vistrails_module import Module, ModuleError
from vistrails.core.modules.config import CIPort, COPort, ModuleSettings
from .common import convert_input, convert_output, get_input_spec, get_output_spec
class BaseClassModule(Module):
""" Wraps a python class as a vistrails Module using a ClassSpec
setter methods are used as inputs and getter methods as outputs
"""
_settings = ModuleSettings(abstract=True)
_get_input_spec = classmethod(get_input_spec)
_get_output_spec = classmethod(get_output_spec)
def call_set_method(self, instance, port, params):
# convert params
params = convert_input(params, self.input_specs[port.name].signature)
if isinstance(params, tuple):
params = list(params)
elif not isinstance(params, list):
params = [params]
method_name = port.method_name
if port.method_type == 'OnOff':
# This converts OnOff ports to XOn(), XOff() calls
method_name = method_name + ('On' if params[0] else 'Off')
params = []
elif port.method_type == 'nullary':
# Call X() only if boolean is true
if params[0]:
params = []
else:
return
elif port.method_type == 'SetXToY':
# Append enum name to function name and delete params
method_name += params[0]
params = []
prepend_params = port.get_prepend_params()
# print "SETTING", method_name, prepend_params + params, instance.vtkInstance.__class__.__name__
method = getattr(instance, method_name)
try:
method(*(prepend_params + params))
except Exception, e:
raise
def call_get_method(self, instance, port):
# print "GETTING", port.method_name, port.get_prepend_params(), instance.vtkInstance.__class__.__name__
method = getattr(instance, port.method_name)
try:
value = method(*(port.get_prepend_params()))
# convert params
return convert_output(value, self.output_specs[port.name].signature)
except Exception, e:
raise
def call_inputs(self, instance):
# compute input methods and connections
# We need to preserve the order of the inputs
methods = self.is_method.values()
methods.sort()
methods_to_call = []
for value in methods:
(_, port) = value
conn = self.is_method.inverse[value]
p = conn()
# Convert to correct port depth
depth = conn.depth()
while depth < self._get_input_spec(port).depth:
p = [p]
depth += 1
methods_to_call.append([port, p])
connections_to_call = []
for (function, connector_list) in self.inputPorts.iteritems():
paramList = self.force_get_input_list(function)
for p,connector in izip(paramList, connector_list):
# Don't call method
if connector in self.is_method:
continue
depth = connector.depth()
while depth < connector.spec.depth:
p = [p]
depth += 1
connections_to_call.append([function, p])
# Compute methods from visible ports last
#In the case of a vtkRenderer,
# we need to call the methods after the
#input ports are set.
if self._module_spec.methods_last:
to_call = connections_to_call + methods_to_call
else:
to_call = methods_to_call + connections_to_call
for port_name, params in to_call:
port = self._get_input_spec(port_name)
# Call method once for each item in depth1 lists
if port.depth == 0:
params = [params]
for ps in params:
self.call_set_method(instance, port, ps)
def call_outputs(self, instance):
outputs_list = self.output_specs_order
if 'self' in outputs_list:
outputs_list.remove('self')
if 'Instance' in outputs_list:
outputs_list.remove('Instance')
for port_name in outputs_list:
if not port_name in self.outputPorts:
# not connected
continue
port = self._get_output_spec(port_name)
result = self.call_get_method(instance, port)
self.set_output(port_name, result)
def compute(self):
    """Instantiate the wrapped class, feed its inputs, publish outputs.

    Lifecycle: construct the instance from the spec's ``code_ref``, wire
    the optional progress-callback and tempfile hooks, apply all setter
    methods (``call_inputs``), run the optional compute method, read all
    getter methods (``call_outputs``), expose the raw instance on the
    'Instance' port, then run the optional cleanup method.
    """
    spec = self._module_spec
    # First create the instance
    # TODO: How to handle parameters to instance
    instance = getattr(self._lib, spec.code_ref)()
    # Optional callback used for progress reporting
    if spec.callback:
        def callback(c):
            self.logging.update_progress(self, c)
        getattr(instance, spec.callback)(callback)
    # Optional function for creating temporary files
    if spec.tempfile:
        getattr(instance, spec.tempfile)(self.interpreter.filePool.create_file)
    # call input methods on instance
    self.call_inputs(instance)
    # optional compute method
    if spec.compute:
        getattr(instance, spec.compute)()
    # Publish getter values.  call_outputs itself skips the special
    # 'self'/'Instance' ports, so the old dead ``outputs = {}`` and the
    # unguarded in-place ``output_specs_order.remove('self')`` (which
    # mutated shared spec state and raised ValueError on a second
    # execution of the same module) have been removed.
    self.call_outputs(instance)
    self.set_output('Instance', instance)
    # optional cleanup method
    if spec.cleanup:
        getattr(instance, spec.cleanup)()
def gen_class_module(spec, lib, klasses, **module_settings):
    """Build a VisTrails Module subclass from a python class specification.

    Parameters
    ----------
    spec : ClassSpec
        A class to module specification.
    lib : module
        Library whose ``spec.code_ref`` attribute is the wrapped class.
    klasses : dict
        Registry of generated module classes; consulted to resolve the
        superclass and updated in place with the new class.
    """
    module_settings.update(spec.get_module_settings())
    settings = ModuleSettings(**module_settings)
    # Translate the input/output specifications into VisTrails port objects.
    in_ports = [
        CIPort(s.name, s.get_port_type(), **s.get_port_attrs())
        for s in spec.input_port_specs
    ]
    # The raw wrapped instance is always exposed as the first output port.
    out_ports = [COPort('Instance', spec.module_name)]
    out_ports.extend(
        COPort(s.name, s.get_port_type(), **s.get_port_attrs())
        for s in spec.output_port_specs
    )
    # Name -> spec lookup tables used by _get_input_spec/_get_output_spec.
    in_table = dict((s.name, s) for s in spec.input_port_specs)
    out_table = dict((s.name, s) for s in spec.output_port_specs)
    namespace = {
        '__module__': __name__,
        '_settings': settings,
        '__doc__': spec.docstring,
        '__name__': spec.name or spec.module_name,
        '_input_ports': in_ports,
        '_output_ports': out_ports,
        '_input_spec_table': in_table,
        '_output_spec_table': out_table,
        '_module_spec': spec,
        'is_cacheable': lambda self: spec.cacheable,
        '_lib': lib,
    }
    base = klasses.get(spec.superklass, BaseClassModule)
    module_klass = type(str(spec.module_name), (base,), namespace)
    klasses[spec.module_name] = module_klass
    return module_klass
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: <EMAIL>
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from itertools import izip
from vistrails.core.debug import format_exc
from vistrails.core.modules.vistrails_module import Module, ModuleError
from vistrails.core.modules.config import CIPort, COPort, ModuleSettings
from .common import convert_input, convert_output, get_input_spec, get_output_spec
class BaseClassModule(Module):
    """Wraps a python class as a vistrails Module using a ClassSpec.

    Setter methods of the wrapped class are exposed as input ports and
    getter methods as output ports.  Concrete subclasses are generated at
    runtime by ``gen_class_module``, which fills in ``_module_spec``, the
    port spec tables and the wrapped library.
    """
    _settings = ModuleSettings(abstract=True)
    # Port-name -> spec lookups; implementations are shared via common.py.
    _get_input_spec = classmethod(get_input_spec)
    _get_output_spec = classmethod(get_output_spec)
    def call_set_method(self, instance, port, params):
        """Invoke one setter on *instance* for input *port*.

        ``port.method_type`` selects how the VisTrails value is turned
        into a method call (plain setter, On/Off toggle, nullary call,
        or SetXToY enum-style setter).
        """
        # convert params
        params = convert_input(params, self.input_specs[port.name].signature)
        if isinstance(params, tuple):
            params = list(params)
        elif not isinstance(params, list):
            params = [params]
        method_name = port.method_name
        if port.method_type == 'OnOff':
            # This converts OnOff ports to XOn(), XOff() calls
            method_name = method_name + ('On' if params[0] else 'Off')
            params = []
        elif port.method_type == 'nullary':
            # Call X() only if boolean is true
            if params[0]:
                params = []
            else:
                return
        elif port.method_type == 'SetXToY':
            # Append enum name to function name and delete params
            method_name += params[0]
            params = []
        prepend_params = port.get_prepend_params()
        # print "SETTING", method_name, prepend_params + params, instance.vtkInstance.__class__.__name__
        method = getattr(instance, method_name)
        try:
            method(*(prepend_params + params))
        # NOTE(review): this handler re-raises unchanged; it appears to be a
        # debugging/breakpoint hook only — consider removing.
        except Exception, e:
            raise
    def call_get_method(self, instance, port):
        """Invoke one getter on *instance* and convert its return value
        to the output *port*'s signature."""
        # print "GETTING", port.method_name, port.get_prepend_params(), instance.vtkInstance.__class__.__name__
        method = getattr(instance, port.method_name)
        try:
            value = method(*(port.get_prepend_params()))
            # convert params
            return convert_output(value, self.output_specs[port.name].signature)
        # NOTE(review): re-raises unchanged; debugging hook only.
        except Exception, e:
            raise
    def call_inputs(self, instance):
        """Apply all input values (method values and connections) to
        *instance*, honoring the spec's ``methods_last`` ordering."""
        # compute input methods and connections
        # We need to preserve the order of the inputs
        methods = self.is_method.values()
        methods.sort()
        methods_to_call = []
        for value in methods:
            (_, port) = value
            conn = self.is_method.inverse[value]
            p = conn()
            # Wrap the value in nested lists until it matches the depth
            # declared by the input port spec.
            depth = conn.depth()
            while depth < self._get_input_spec(port).depth:
                p = [p]
                depth += 1
            methods_to_call.append([port, p])
        connections_to_call = []
        for (function, connector_list) in self.inputPorts.iteritems():
            paramList = self.force_get_input_list(function)
            for p,connector in izip(paramList, connector_list):
                # Method-style connectors were already collected above.
                if connector in self.is_method:
                    continue
                depth = connector.depth()
                while depth < connector.spec.depth:
                    p = [p]
                    depth += 1
                connections_to_call.append([function, p])
        # Compute methods from visible ports last.  In the case of a
        # vtkRenderer, we need to call the methods after the input ports
        # are set.
        if self._module_spec.methods_last:
            to_call = connections_to_call + methods_to_call
        else:
            to_call = methods_to_call + connections_to_call
        for port_name, params in to_call:
            port = self._get_input_spec(port_name)
            # Depth-0 ports carry a single value; wrap it so the loop
            # below makes exactly one setter call per item.
            if port.depth == 0:
                params = [params]
            for ps in params:
                self.call_set_method(instance, port, ps)
    def call_outputs(self, instance):
        """Read connected getters and publish their values.

        NOTE(review): ``outputs_list`` aliases ``self.output_specs_order``,
        so the remove() calls below mutate shared spec state in place; a
        later execution of the same module depends on the membership
        guards.  Consider iterating over a copy instead.
        """
        outputs_list = self.output_specs_order
        if 'self' in outputs_list:
            outputs_list.remove('self')
        if 'Instance' in outputs_list:
            outputs_list.remove('Instance')
        for port_name in outputs_list:
            if not port_name in self.outputPorts:
                # not connected; skip the getter call
                continue
            port = self._get_output_spec(port_name)
            result = self.call_get_method(instance, port)
            self.set_output(port_name, result)
    def compute(self):
        """Instantiate the wrapped class, apply inputs, emit outputs."""
        spec = self._module_spec
        # First create the instance
        # TODO: How to handle parameters to instance
        instance = getattr(self._lib, spec.code_ref)()
        # Optional callback used for progress reporting
        if spec.callback:
            def callback(c):
                self.logging.update_progress(self, c)
            getattr(instance, spec.callback)(callback)
        # Optional function for creating temporary files
        if spec.tempfile:
            getattr(instance, spec.tempfile)(self.interpreter.filePool.create_file)
        # call input methods on instance
        self.call_inputs(instance)
        # optional compute method
        if spec.compute:
            getattr(instance, spec.compute)()
        # NOTE(review): ``outputs`` is never used, and the unguarded
        # remove() mutates the shared output_specs_order list in place.
        outputs = {}
        outputs_list = self.output_specs_order
        outputs_list.remove('self') # self is automatically set by base Module
        # Get outputs
        self.call_outputs(instance)
        self.set_output('Instance', instance)
        # optional cleanup method
        if spec.cleanup:
            getattr(instance, spec.cleanup)()
def gen_class_module(spec, lib, klasses, **module_settings):
"""Create a module from a python class specification
Parameters
----------
spec : ClassSpec
A class to module specification
"""
module_settings.update(spec.get_module_settings())
_settings = ModuleSettings(**module_settings)
# convert input/output specs into VT port objects
input_ports = [CIPort(ispec.name, ispec.get_port_type(), **ispec.get_port_attrs())
for ispec in spec.input_port_specs]
output_ports = [COPort(ospec.name, ospec.get_port_type(), **ospec.get_port_attrs())
for ospec in spec.output_port_specs]
output_ports.insert(0, COPort('Instance', spec.module_name)) # Adds instance output port
_input_spec_table = {}
for ps in spec.input_port_specs:
_input_spec_table[ps.name] = ps
_output_spec_table = {}
for ps in spec.output_port_specs:
_output_spec_table[ps.name] = ps
d = {'__module__': __name__,
'_settings': _settings,
'__doc__': spec.docstring,
'__name__': spec.name or spec.module_name,
'_input_ports': input_ports,
'_output_ports': output_ports,
'_input_spec_table': _input_spec_table,
'_output_spec_table': _output_spec_table,
'_module_spec': spec,
'is_cacheable': lambda self:spec.cacheable,
'_lib': lib}
superklass = klasses.get(spec.superklass, BaseClassModule)
new_klass = type(str(spec.module_name), (superklass,), d)
klasses[spec.module_name] = new_klass
return new_klass
|
en
| 0.613688
|
############################################################################### ## ## Copyright (C) 2014-2016, New York University. ## Copyright (C) 2011-2014, NYU-Poly. ## Copyright (C) 2006-2011, University of Utah. ## All rights reserved. ## Contact: <EMAIL> ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the New York University nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." 
## ############################################################################### Wraps a python class as a vistrails Module using a ClassSpec setter methods are used as inputs and getter methods as outputs # convert params # This converts OnOff ports to XOn(), XOff() calls # Call X() only if boolean is true # Append enum name to function name and delete params # print "SETTING", method_name, prepend_params + params, instance.vtkInstance.__class__.__name__ # print "GETTING", port.method_name, port.get_prepend_params(), instance.vtkInstance.__class__.__name__ # convert params # compute input methods and connections # We need to preserve the order of the inputs # Convert to correct port depth # Don't call method # Compute methods from visible ports last #In the case of a vtkRenderer, # we need to call the methods after the #input ports are set. # Call method once for each item in depth1 lists # not connected # First create the instance # TODO: How to handle parameters to instance # Optional callback used for progress reporting # Optional function for creating temporary files # call input methods on instance # optional compute method # convert outputs to dict # self is automatically set by base Module # Get outputs # optional cleanup method Create a module from a python class specification Parameters ---------- spec : ClassSpec A class to module specification # convert input/output specs into VT port objects # Adds instance output port
| 0.96853
| 1
|
metaflow/datastore/__init__.py
|
savingoyal/metaflow
| 2
|
6629504
|
<reponame>savingoyal/metaflow
from .inputs import Inputs
from .flow_datastore import FlowDataStore
from .datastore_set import TaskDataStoreSet
from .local_storage import LocalStorage
from .s3_storage import S3Storage
DATASTORES = {"local": LocalStorage, "s3": S3Storage}
|
from .inputs import Inputs
from .flow_datastore import FlowDataStore
from .datastore_set import TaskDataStoreSet
from .local_storage import LocalStorage
from .s3_storage import S3Storage
DATASTORES = {"local": LocalStorage, "s3": S3Storage}
|
none
| 1
| 1.24912
| 1
|
|
utils/build_scripts/gdb-filter.py
|
victor001P/oxen-core
| 128
|
6629505
|
<reponame>victor001P/oxen-core
def exit_handler(event):
    """Record the inferior's exit code in ``exit.out.txt``.

    Defaults to 1 when the event carries no ``exit_code`` attribute.
    """
    code = getattr(event, "exit_code", 1)
    with open("exit.out.txt", 'w') as out:
        out.write("{}".format(code))
def gdb_execmany(*cmds):
    """Execute each of *cmds* in sequence via ``gdb.execute``."""
    for command in cmds:
        gdb.execute(command)
def crash_handler(event):
    """On a fatal signal, dump full backtraces of every thread to
    ``crash.out.txt`` and quit gdb.

    Non-signal stop events are ignored.
    """
    if not isinstance(event, gdb.SignalEvent):
        return
    # Redirect gdb's logging to the crash file, then capture a full
    # backtrace of every thread.
    gdb_execmany(
        "set logging file {}".format("crash.out.txt"),
        "set logging on",
        "set logging redirect on",
        "thread apply all bt full",
    )
    # Terminate the debugging session.
    gdb.execute("q")
# Register event handlers: capture backtraces on crashes and record the
# inferior's exit code when it terminates.
gdb.events.stop.connect(crash_handler)
gdb.events.exited.connect(exit_handler)
# Batch-friendly settings: no confirmation prompts, no pagination, no
# per-thread creation/exit chatter in the output.
gdb_execmany("set confirm off", "set pagination off", "set print thread-events off")
# Run the inferior to completion, then quit gdb.
gdb_execmany("r", "q")
|
def exit_handler (event):
"""
write exit code of the program running in gdb to a file called exit.out.txt
"""
code = 1
if hasattr(event, "exit_code"):
code = event.exit_code
with open("exit.out.txt", 'w') as f:
f.write("{}".format(code))
def gdb_execmany(*cmds):
"""
run multiple gdb commands
"""
for cmd in cmds:
gdb.execute(cmd)
def crash_handler (event):
"""
handle a crash from the program running in gdb
"""
if isinstance(event, gdb.SignalEvent):
log_file_name = "crash.out.txt"
# poop out log file for stack trace of all threads
gdb_execmany("set logging file {}".format(log_file_name), "set logging on", "set logging redirect on", "thread apply all bt full")
# quit gdb
gdb.execute("q")
# set up event handlers to catch shit
gdb.events.stop.connect(crash_handler)
gdb.events.exited.connect(exit_handler)
# run settings setup
gdb_execmany("set confirm off", "set pagination off", "set print thread-events off")
# run program and exit
gdb_execmany("r", "q")
|
en
| 0.915944
|
write exit code of the program running in gdb to a file called exit.out.txt run multiple gdb commands handle a crash from the program running in gdb # poop out log file for stack trace of all threads # quit gdb # set up event handlers to catch shit # run settings setup # run program and exit
| 3.027805
| 3
|
apple/internal/macos_rules.bzl
|
cclauss/rules_apple
| 2
|
6629506
|
<reponame>cclauss/rules_apple
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental implementation of macOS rules."""
load(
"@build_bazel_rules_apple//apple/bundling:product_support.bzl",
"apple_product_type",
)
load(
"@build_bazel_rules_apple//apple/internal:partials.bzl",
"partials",
)
load(
"@build_bazel_rules_apple//apple/internal:processor.bzl",
"processor",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"MacosApplicationBundleInfo",
"MacosBundleBundleInfo",
"MacosExtensionBundleInfo",
)
def macos_application_impl(ctx):
    """Experimental implementation of macos_application.

    Builds a macOS .app bundle from the single target in `deps` (which
    must provide AppleExecutableBinary), embedding the targets listed in
    `extensions` and bundling resources, clang runtime and Swift dylibs,
    and — when supplied — the provisioning profile.

    Returns DefaultInfo (output files plus a stub executable),
    MacosApplicationBundleInfo, and all providers propagated by the
    partial processor.
    """
    # TODO(kaipi): Replace the debug_outputs_provider with the provider returned from the linking
    # action, when available.
    # TODO(kaipi): Extract this into a common location to be reused and refactored later when we
    # add linking support directly into the rule.
    binary_target = ctx.attr.deps[0]
    binary_artifact = binary_target[apple_common.AppleExecutableBinary].binary
    debug_outputs_provider = binary_target[apple_common.AppleDebugOutputs]
    bundle_id = ctx.attr.bundle_id
    # Each partial contributes one aspect of the bundle (Info.plist data,
    # binary placement, dylibs, resources, ...); the processor runs them all.
    processor_partials = [
        partials.apple_bundle_info_partial(bundle_id = bundle_id),
        partials.binary_partial(binary_artifact = binary_artifact),
        partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
        partials.debug_symbols_partial(
            debug_dependencies = ctx.attr.extensions,
            debug_outputs_provider = debug_outputs_provider,
        ),
        partials.embedded_bundles_partial(
            bundle_embedded_bundles = True,
            embeddable_targets = ctx.attr.extensions,
        ),
        partials.macos_additional_contents_partial(),
        partials.resources_partial(
            bundle_id = bundle_id,
            # NOTE(review): presumably validates each embedded extension's
            # bundle against the app — confirm in resources_partial.
            bundle_verification_targets = [struct(target = ext) for ext in ctx.attr.extensions],
            plist_attrs = ["infoplists"],
            top_level_attrs = [
                "app_icons",
                "strings",
            ],
        ),
        partials.swift_dylibs_partial(
            binary_artifact = binary_artifact,
            dependency_targets = ctx.attr.extensions,
            bundle_dylibs = True,
        ),
    ]
    # Only add the signing profile partial when a profile was supplied.
    if ctx.file.provisioning_profile:
        processor_partials.append(
            partials.provisioning_profile_partial(profile_artifact = ctx.file.provisioning_profile),
        )
    processor_result = processor.process(ctx, processor_partials)
    # TODO(kaipi): Add support for `bazel run` for macos_application.
    # Stub script so the rule can be marked executable; running it only
    # echoes "Unimplemented".
    executable = ctx.actions.declare_file(ctx.label.name)
    ctx.actions.write(
        executable,
        "#!/bin/bash\necho Unimplemented",
        is_executable = True,
    )
    return [
        DefaultInfo(
            executable = executable,
            files = processor_result.output_files,
        ),
        MacosApplicationBundleInfo(),
    ] + processor_result.providers
def macos_bundle_impl(ctx):
    """Experimental implementation of macos_bundle.

    Builds a loadable macOS bundle (or, for the kernel_extension product
    type, a KEXT) from the single target in `deps`.  Returns DefaultInfo,
    MacosBundleBundleInfo, and all providers propagated by the partial
    processor.
    """
    # TODO(kaipi): Replace the debug_outputs_provider with the provider returned from the linking
    # action, when available.
    # TODO(kaipi): Extract this into a common location to be reused and refactored later when we
    # add linking support directly into the rule.
    binary_target = ctx.attr.deps[0]
    binary_provider_type = apple_common.AppleLoadableBundleBinary
    # Kernel extensions on macOS have a mach header file type of MH_KEXT_BUNDLE.
    # The -kext linker flag is used to produce these binaries.
    # No userspace technologies can be linked into a KEXT.
    # Using an "executable" to get around the extra userspace linker flags
    # that are added to a "loadable_bundle".
    if ctx.attr.product_type == apple_product_type.kernel_extension:
        binary_provider_type = apple_common.AppleExecutableBinary
    binary_artifact = binary_target[binary_provider_type].binary
    bundle_id = ctx.attr.bundle_id
    # Partials assembling the bundle; see processor.process below.
    processor_partials = [
        partials.apple_bundle_info_partial(bundle_id = bundle_id),
        partials.binary_partial(binary_artifact = binary_artifact),
        partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
        partials.debug_symbols_partial(
            debug_outputs_provider = binary_target[apple_common.AppleDebugOutputs],
        ),
        partials.embedded_bundles_partial(
            plugins = [ctx.outputs.archive],
        ),
        partials.macos_additional_contents_partial(),
        partials.resources_partial(
            bundle_id = bundle_id,
            plist_attrs = ["infoplists"],
            top_level_attrs = [
                "app_icons",
                "strings",
            ],
        ),
        partials.swift_dylibs_partial(
            binary_artifact = binary_artifact,
        ),
    ]
    # Only add the signing profile partial when a profile was supplied.
    if ctx.file.provisioning_profile:
        processor_partials.append(
            partials.provisioning_profile_partial(profile_artifact = ctx.file.provisioning_profile),
        )
    processor_result = processor.process(ctx, processor_partials)
    return [
        DefaultInfo(
            files = processor_result.output_files,
        ),
        MacosBundleBundleInfo(),
    ] + processor_result.providers
def macos_extension_impl(ctx):
    """Experimental implementation of macos_extension.

    Builds a macOS app-extension bundle from the single target in `deps`
    (which must provide AppleExecutableBinary).  Returns DefaultInfo,
    MacosExtensionBundleInfo, and all providers propagated by the
    partial processor.
    """
    # TODO(kaipi): Replace the debug_outputs_provider with the provider returned from the linking
    # action, when available.
    # TODO(kaipi): Extract this into a common location to be reused and refactored later when we
    # add linking support directly into the rule.
    binary_target = ctx.attr.deps[0]
    binary_artifact = binary_target[apple_common.AppleExecutableBinary].binary
    bundle_id = ctx.attr.bundle_id
    # Partials assembling the bundle; see processor.process below.
    processor_partials = [
        partials.apple_bundle_info_partial(bundle_id = bundle_id),
        partials.binary_partial(binary_artifact = binary_artifact),
        partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
        partials.debug_symbols_partial(
            debug_outputs_provider = binary_target[apple_common.AppleDebugOutputs],
        ),
        partials.embedded_bundles_partial(plugins = [ctx.outputs.archive]),
        partials.macos_additional_contents_partial(),
        partials.resources_partial(
            bundle_id = bundle_id,
            plist_attrs = ["infoplists"],
            top_level_attrs = [
                "app_icons",
                "strings",
            ],
        ),
        partials.swift_dylibs_partial(
            binary_artifact = binary_artifact,
        ),
    ]
    # Only add the signing profile partial when a profile was supplied.
    if ctx.file.provisioning_profile:
        processor_partials.append(
            partials.provisioning_profile_partial(profile_artifact = ctx.file.provisioning_profile),
        )
    processor_result = processor.process(ctx, processor_partials)
    return [
        DefaultInfo(
            files = processor_result.output_files,
        ),
        MacosExtensionBundleInfo(),
    ] + processor_result.providers
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental implementation of macOS rules."""
load(
"@build_bazel_rules_apple//apple/bundling:product_support.bzl",
"apple_product_type",
)
load(
"@build_bazel_rules_apple//apple/internal:partials.bzl",
"partials",
)
load(
"@build_bazel_rules_apple//apple/internal:processor.bzl",
"processor",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"MacosApplicationBundleInfo",
"MacosBundleBundleInfo",
"MacosExtensionBundleInfo",
)
def macos_application_impl(ctx):
"""Experimental implementation of macos_application."""
# TODO(kaipi): Replace the debug_outputs_provider with the provider returned from the linking
# action, when available.
# TODO(kaipi): Extract this into a common location to be reused and refactored later when we
# add linking support directly into the rule.
binary_target = ctx.attr.deps[0]
binary_artifact = binary_target[apple_common.AppleExecutableBinary].binary
debug_outputs_provider = binary_target[apple_common.AppleDebugOutputs]
bundle_id = ctx.attr.bundle_id
processor_partials = [
partials.apple_bundle_info_partial(bundle_id = bundle_id),
partials.binary_partial(binary_artifact = binary_artifact),
partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
partials.debug_symbols_partial(
debug_dependencies = ctx.attr.extensions,
debug_outputs_provider = debug_outputs_provider,
),
partials.embedded_bundles_partial(
bundle_embedded_bundles = True,
embeddable_targets = ctx.attr.extensions,
),
partials.macos_additional_contents_partial(),
partials.resources_partial(
bundle_id = bundle_id,
bundle_verification_targets = [struct(target = ext) for ext in ctx.attr.extensions],
plist_attrs = ["infoplists"],
top_level_attrs = [
"app_icons",
"strings",
],
),
partials.swift_dylibs_partial(
binary_artifact = binary_artifact,
dependency_targets = ctx.attr.extensions,
bundle_dylibs = True,
),
]
if ctx.file.provisioning_profile:
processor_partials.append(
partials.provisioning_profile_partial(profile_artifact = ctx.file.provisioning_profile),
)
processor_result = processor.process(ctx, processor_partials)
# TODO(kaipi): Add support for `bazel run` for macos_application.
executable = ctx.actions.declare_file(ctx.label.name)
ctx.actions.write(
executable,
"#!/bin/bash\necho Unimplemented",
is_executable = True,
)
return [
DefaultInfo(
executable = executable,
files = processor_result.output_files,
),
MacosApplicationBundleInfo(),
] + processor_result.providers
def macos_bundle_impl(ctx):
"""Experimental implementation of macos_bundle."""
# TODO(kaipi): Replace the debug_outputs_provider with the provider returned from the linking
# action, when available.
# TODO(kaipi): Extract this into a common location to be reused and refactored later when we
# add linking support directly into the rule.
binary_target = ctx.attr.deps[0]
binary_provider_type = apple_common.AppleLoadableBundleBinary
# Kernel extensions on macOS have a mach header file type of MH_KEXT_BUNDLE.
# The -kext linker flag is used to produce these binaries.
# No userspace technologies can be linked into a KEXT.
# Using an "executable" to get around the extra userspace linker flags
# that are added to a "loadable_bundle".
if ctx.attr.product_type == apple_product_type.kernel_extension:
binary_provider_type = apple_common.AppleExecutableBinary
binary_artifact = binary_target[binary_provider_type].binary
bundle_id = ctx.attr.bundle_id
processor_partials = [
partials.apple_bundle_info_partial(bundle_id = bundle_id),
partials.binary_partial(binary_artifact = binary_artifact),
partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
partials.debug_symbols_partial(
debug_outputs_provider = binary_target[apple_common.AppleDebugOutputs],
),
partials.embedded_bundles_partial(
plugins = [ctx.outputs.archive],
),
partials.macos_additional_contents_partial(),
partials.resources_partial(
bundle_id = bundle_id,
plist_attrs = ["infoplists"],
top_level_attrs = [
"app_icons",
"strings",
],
),
partials.swift_dylibs_partial(
binary_artifact = binary_artifact,
),
]
if ctx.file.provisioning_profile:
processor_partials.append(
partials.provisioning_profile_partial(profile_artifact = ctx.file.provisioning_profile),
)
processor_result = processor.process(ctx, processor_partials)
return [
DefaultInfo(
files = processor_result.output_files,
),
MacosBundleBundleInfo(),
] + processor_result.providers
def macos_extension_impl(ctx):
"""Experimental implementation of macos_extension."""
# TODO(kaipi): Replace the debug_outputs_provider with the provider returned from the linking
# action, when available.
# TODO(kaipi): Extract this into a common location to be reused and refactored later when we
# add linking support directly into the rule.
binary_target = ctx.attr.deps[0]
binary_artifact = binary_target[apple_common.AppleExecutableBinary].binary
bundle_id = ctx.attr.bundle_id
processor_partials = [
partials.apple_bundle_info_partial(bundle_id = bundle_id),
partials.binary_partial(binary_artifact = binary_artifact),
partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
partials.debug_symbols_partial(
debug_outputs_provider = binary_target[apple_common.AppleDebugOutputs],
),
partials.embedded_bundles_partial(plugins = [ctx.outputs.archive]),
partials.macos_additional_contents_partial(),
partials.resources_partial(
bundle_id = bundle_id,
plist_attrs = ["infoplists"],
top_level_attrs = [
"app_icons",
"strings",
],
),
partials.swift_dylibs_partial(
binary_artifact = binary_artifact,
),
]
if ctx.file.provisioning_profile:
processor_partials.append(
partials.provisioning_profile_partial(profile_artifact = ctx.file.provisioning_profile),
)
processor_result = processor.process(ctx, processor_partials)
return [
DefaultInfo(
files = processor_result.output_files,
),
MacosExtensionBundleInfo(),
] + processor_result.providers
|
en
| 0.793364
|
# Copyright 2018 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Experimental implementation of macOS rules. Experimental implementation of macos_application. # TODO(kaipi): Replace the debug_outputs_provider with the provider returned from the linking # action, when available. # TODO(kaipi): Extract this into a common location to be reused and refactored later when we # add linking support directly into the rule. # TODO(kaipi): Add support for `bazel run` for macos_application. Experimental implementation of macos_bundle. # TODO(kaipi): Replace the debug_outputs_provider with the provider returned from the linking # action, when available. # TODO(kaipi): Extract this into a common location to be reused and refactored later when we # add linking support directly into the rule. # Kernel extensions on macOS have a mach header file type of MH_KEXT_BUNDLE. # The -kext linker flag is used to produce these binaries. # No userspace technologies can be linked into a KEXT. # Using an "executable" to get around the extra userspace linker flags # that are added to a "loadable_bundle". Experimental implementation of macos_extension. # TODO(kaipi): Replace the debug_outputs_provider with the provider returned from the linking # action, when available. # TODO(kaipi): Extract this into a common location to be reused and refactored later when we # add linking support directly into the rule.
| 1.729052
| 2
|
setup.py
|
mizukasai/shap
| 0
|
6629507
|
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
# to publish use:
# > python setup.py sdist upload
# which depends on ~/.pypirc
# Extend the default build_ext class to bootstrap numpy installation
# that are needed to build C extensions.
# see https://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py
class build_ext(_build_ext):
    """build_ext that defers the numpy import until build time.

    numpy is listed in ``setup_requires``, so it may only become
    importable after setuptools has fetched it; importing it here rather
    than at module import time lets setup.py load without numpy
    installed (see the StackOverflow link above).
    """
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Clear the __NUMPY_SETUP__ flag so that ``import numpy`` yields
        # the real package rather than numpy's own setup-time stub.
        # __builtins__ may be a dict or a module depending on how this
        # module is executed, hence the two code paths.
        if isinstance(__builtins__, dict):
            __builtins__["__NUMPY_SETUP__"] = False
        else:
            setattr(__builtins__, "__NUMPY_SETUP__", False)
        import numpy
        print("numpy.get_include()", numpy.get_include())
        # Make numpy's C headers visible when compiling shap._cext.
        self.include_dirs.append(numpy.get_include())
def run_setup(with_binary=True, test_xgboost=True, test_lightgbm=True):
    """Run setuptools.setup() for the shap package.

    Args:
        with_binary: Build the C extension (needed for sklearn tree models).
        test_xgboost: Include xgboost in the test requirements.
        test_lightgbm: Include lightgbm in the test requirements.
    """
    ext_modules = []
    if with_binary:
        ext_modules.append(
            Extension('shap._cext', sources=['shap/_cext.cc'])
        )

    # Read with an explicit encoding so a non-ASCII README does not break
    # installation on platforms with a narrow default encoding.
    with open("README.md", "r", encoding="utf-8") as fh:
        long_description = fh.read()

    # The optional boosters are only needed to run the test suite.
    tests_require = ['nose']
    if test_xgboost:
        tests_require.append('xgboost')
    if test_lightgbm:
        tests_require.append('lightgbm')

    setup(
        name='shap',
        version='0.19.5',
        description='A unified approach to explain the output of any machine learning model.',
        long_description=long_description,
        long_description_content_type="text/markdown",
        url='http://github.com/slundberg/shap',
        author='<NAME>',
        author_email='<EMAIL>',
        license='MIT',
        packages=['shap', 'shap.explainers'],
        cmdclass={'build_ext': build_ext},
        setup_requires=['numpy'],
        # Dropped the stray empty-string entry that was previously appended
        # to this list; an empty requirement is invalid.
        install_requires=['numpy', 'scipy', 'iml>=0.6.1', 'scikit-learn',
                          'matplotlib', 'pandas', 'tqdm'],
        test_suite='nose.collector',
        tests_require=tests_require,
        ext_modules=ext_modules,
        zip_safe=False
    )
def try_run_setup(**kwargs):
    """Fail gracefully when various install steps don't work.

    Retries run_setup with progressively fewer optional features each time
    a recognizable failure occurs (xgboost, lightgbm, then the C binary).
    """
    try:
        run_setup(**kwargs)
    except Exception as e:
        print(str(e))
        msg = str(e).lower()
        if "xgboost" in msg:
            kwargs["test_xgboost"] = False
            print("Couldn't install XGBoost for testing!")
            try_run_setup(**kwargs)
        elif "lightgbm" in msg:
            kwargs["test_lightgbm"] = False
            print("Couldn't install LightGBM for testing!")
            try_run_setup(**kwargs)
        elif kwargs.get("with_binary"):
            # .get: avoid a KeyError when the caller never passed with_binary.
            kwargs["with_binary"] = False
            print("WARNING: The C extension could not be compiled, sklearn tree models not supported.")
            try_run_setup(**kwargs)
        else:
            print("ERROR: Failed to build!")
# we seem to need this import guard for appveyor
if __name__ == "__main__":
    # Start with everything enabled; try_run_setup retries with features
    # disabled one by one if installation fails.
    try_run_setup(with_binary=True, test_xgboost=True, test_lightgbm=True)
|
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
# to publish use:
# > python setup.py sdist upload
# which depends on ~/.pypirc
# Extend the default build_ext class to bootstrap numpy installation
# that are needed to build C extensions.
# see https://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
if isinstance(__builtins__, dict):
__builtins__["__NUMPY_SETUP__"] = False
else:
setattr(__builtins__, "__NUMPY_SETUP__", False)
import numpy
print("numpy.get_include()", numpy.get_include())
self.include_dirs.append(numpy.get_include())
def run_setup(with_binary=True, test_xgboost=True, test_lightgbm=True):
ext_modules = []
if with_binary:
ext_modules.append(
Extension('shap._cext', sources=['shap/_cext.cc'])
)
with open("README.md", "r") as fh:
long_description = fh.read()
if test_xgboost and test_lightgbm:
tests_require = ['nose', 'xgboost', 'lightgbm']
elif test_xgboost:
tests_require = ['nose', 'xgboost']
elif test_lightgbm:
tests_require = ['nose', 'lightgbm']
else:
tests_require = ['nose']
setup(
name='shap',
version='0.19.5',
description='A unified approach to explain the output of any machine learning model.',
long_description=long_description,
long_description_content_type="text/markdown",
url='http://github.com/slundberg/shap',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['shap', 'shap.explainers'],
cmdclass={'build_ext': build_ext},
setup_requires=['numpy'],
install_requires=['numpy', 'scipy', 'iml>=0.6.1', 'scikit-learn', 'matplotlib', 'pandas', 'tqdm', ''],
test_suite='nose.collector',
tests_require=tests_require,
ext_modules=ext_modules,
zip_safe=False
)
def try_run_setup(**kwargs):
""" Fails gracefully when various install steps don't work.
"""
try:
run_setup(**kwargs)
except Exception as e:
print(str(e))
if "xgboost" in str(e).lower():
kwargs["test_xgboost"] = False
print("Couldn't install XGBoost for testing!")
try_run_setup(**kwargs)
elif "lightgbm" in str(e).lower():
kwargs["test_lightgbm"] = False
print("Couldn't install LightGBM for testing!")
try_run_setup(**kwargs)
elif kwargs["with_binary"]:
kwargs["with_binary"] = False
print("WARNING: The C extension could not be compiled, sklearn tree models not supported.")
try_run_setup(**kwargs)
else:
print("ERROR: Failed to build!")
# we seem to need this import guard for appveyor
if __name__ == "__main__":
try_run_setup(with_binary=True, test_xgboost=True, test_lightgbm=True)
|
en
| 0.857019
|
# to publish use: # > python setup.py sdist upload # which depends on ~/.pypirc # Extend the default build_ext class to bootstrap numpy installation # that are needed to build C extensions. # see https://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py Fails gracefully when various install steps don't work. # we seem to need this import guard for appveyor
| 1.936514
| 2
|
modules/say_module.py
|
BigFlubba/Reco-PC-Server
| 1
|
6629508
|
<reponame>BigFlubba/Reco-PC-Server<gh_stars>1-10
# Module: say
# Description: Uses powershell and a TTS engine to make your computer say something
# Usage: !say "Something to say"
# Dependencies: time, os
import os, asyncio, configs
from lib.reco_embeds import recoEmbeds as rm
async def say(ctx, txt, noti=True):
    """Speak *txt* aloud through the host's TTS engine.

    Args:
        ctx: Discord command context used for feedback messages.
        txt: Text to speak.
        noti: When True (Windows path only), echo a confirmation embed first.
    """
    if configs.operating_sys == "Windows":
        if noti:
            await rm.msg(ctx, "**Saying**: " + txt)
        # Security: escape single quotes for PowerShell's single-quoted
        # string ('' is a literal ') so arbitrary text cannot break out of
        # the Speak() argument and run extra commands.
        safe = txt.replace("'", "''")
        os.system(
            "powershell Add-Type -AssemblyName System.Speech; $synth = New-Object -TypeName System.Speech.Synthesis.SpeechSynthesizer; $synth.Speak('" + safe + "')")
    elif configs.operating_sys == "Linux":
        await rm.msg(ctx, "Saying: " + txt)
        import shlex  # local import: only needed on the Linux path
        # Security: quote the whole argument instead of interpolating it
        # into a double-quoted shell string.
        os.system('spd-say {}'.format(shlex.quote(txt)))
    else:
        await ctx.send("Can't use TTS")
    await asyncio.sleep(3)
|
# Module: say
# Description: Uses powershell and a TTS engine to make your computer say something
# Usage: !say "Something to say"
# Dependencies: time, os
import os, asyncio, configs
from lib.reco_embeds import recoEmbeds as rm
async def say(ctx, txt,noti=True):
if configs.operating_sys == "Windows":
if noti:
await rm.msg(ctx,"**Saying**: " + txt)
os.system(
"powershell Add-Type -AssemblyName System.Speech; $synth = New-Object -TypeName System.Speech.Synthesis.SpeechSynthesizer; $synth.Speak('" + txt + "')")
elif configs.operating_sys == "Linux":
await rm.msg(ctx,"Saying: " + txt)
os.system('spd-say "{}"'.format(txt))
else:
await ctx.send("Can't use TTS")
await asyncio.sleep(3)
|
en
| 0.830721
|
# Module: say # Description: Uses powershell and a TTS engine to make your computer say something # Usage: !say "Something to say" # Dependencies: time, os
| 2.959354
| 3
|
sovtokenfees/sovtokenfees/test/helpers/test/test_helper_request.py
|
burdettadam/token-plugin
| 9
|
6629509
|
<reponame>burdettadam/token-plugin
import pytest
from plenum.common.constants import (ROLE, TARGET_NYM, TRUSTEE, TRUSTEE_STRING,
VERKEY, NYM)
from plenum.common.txn_util import get_payload_data
from sovtoken.constants import ADDRESS, AMOUNT
def get_nym_details(helpers, dest):
    """Return the stored NYM record for *dest* via the node's NYM handler."""
    handler = helpers.node.nym_handler
    return handler.get_nym_details(handler.state, dest, False)
@pytest.mark.helper_test
class TestNym:
    def test_nym_request_with_defaults(self, helpers):
        """A default NYM request creates a non-empty ledger record."""
        txn = helpers.general.do_nym()
        payload = get_payload_data(txn)
        details = get_nym_details(helpers, payload[TARGET_NYM])
        assert details != {}

    def test_nym_changes_role(self, helpers):
        """Re-sending a NYM with an empty role clears the TRUSTEE role."""
        txn = helpers.general.do_nym(role=TRUSTEE_STRING)
        payload = get_payload_data(txn)
        dest = payload[TARGET_NYM]
        verkey = payload[VERKEY]
        assert get_nym_details(helpers, dest)[ROLE] == TRUSTEE
        helpers.general.do_nym(dest=dest, verkey=verkey, role='')
        assert not get_nym_details(helpers, dest)[ROLE]
|
import pytest
from plenum.common.constants import (ROLE, TARGET_NYM, TRUSTEE, TRUSTEE_STRING,
VERKEY, NYM)
from plenum.common.txn_util import get_payload_data
from sovtoken.constants import ADDRESS, AMOUNT
def get_nym_details(helpers, dest):
nym_req_handler = helpers.node.nym_handler
return nym_req_handler.get_nym_details(nym_req_handler.state, dest, False)
@pytest.mark.helper_test
class TestNym:
def test_nym_request_with_defaults(self, helpers):
result = helpers.general.do_nym()
data = get_payload_data(result)
dest = data[TARGET_NYM]
nym = get_nym_details(helpers, dest)
assert nym != {}
def test_nym_changes_role(self, helpers):
result = helpers.general.do_nym(role=TRUSTEE_STRING)
data = get_payload_data(result)
dest = data[TARGET_NYM]
verkey = data[VERKEY]
nym = get_nym_details(helpers, dest)
assert nym[ROLE] == TRUSTEE
helpers.general.do_nym(dest=dest, verkey=verkey, role='')
nym = get_nym_details(helpers, dest)
assert not nym[ROLE]
|
none
| 1
| 1.993155
| 2
|
|
project1/urls.py
|
iogolla/My_first_django_project
| 0
|
6629510
|
from django.contrib import admin
# `include` was previously imported twice; keep a single merged import.
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static

import project1.views as views

# URL routes for the project1 app: the site root renders the index view.
urlpatterns = [
    path('', views.index, name='index'),
]
|
from django.urls import include
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from django.urls import include
import project1.views as views
urlpatterns = [
path('', views.index, name='index'),
]
|
none
| 1
| 1.481402
| 1
|
|
First course/1st semester/matrix (2).py
|
tekcellat/University
| 0
|
6629511
|
# Read the element count for a 2x2 matrix and print its determinant.
# Fixed: the original called map(int(input(...))) — map() with a single
# argument is a TypeError, and comparing a map object with an int would
# also fail.  The correct forms are int(input(...)) for a single number
# and map(int, input(...).split()) for a space-separated row.
b = int(input('Введите количество элементов в матрице'))
if b < 4:
    print('dont E matric')
elif b == 4:
    # Each row holds two space-separated integers.
    x1, y1 = map(int, input('1ya stroka').split())
    x2, y2 = map(int, input('2ya stroka').split())
    # Determinant of [[x1, y1], [x2, y2]].
    D = x1 * y2 - x2 * y1
    print('Opred matrix', D)
|
b = map(int(input('Введите количество элементов в матрице')))
if b <4:
print('dont E matric')
elif b == 4:
x1,y1 = map(int(input('1ya stroka').split( )))
x2,y2 = map(int(input('2ya stroka').split( )))
D=x1*y2-x2*y1
print('Opred matrix',D)
|
none
| 1
| 3.437756
| 3
|
|
rainbow/runners/airflow/dag/rainbow_dags.py
|
liorsav/rainbow-1
| 0
|
6629512
|
<gh_stars>0
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime, timedelta
import yaml
from airflow import DAG
from airflow.models import Variable
from rainbow.core.util import class_util
from rainbow.core.util import files_util
from rainbow.runners.airflow.model.task import Task
from rainbow.runners.airflow.tasks.defaults.job_end import JobEndTask
from rainbow.runners.airflow.tasks.defaults.job_start import JobStartTask
__DEPENDS_ON_PAST = 'depends_on_past'
def register_dags(configs_path):
    """
    Registers pipelines in rainbow yml files found in given path (recursively) as airflow DAGs.

    Each pipeline entry becomes one DAG whose dag_id is the pipeline name.
    The DAG object is also injected into this module's globals() so that
    Airflow's DagBag discovers it when importing this file.
    """
    config_files = files_util.find_config_files(configs_path)
    dags = []
    for config_file in config_files:
        print(f'Registering DAG for file: {config_file}')
        with open(config_file) as stream:
            config = yaml.safe_load(stream)
            for pipeline in config['pipelines']:
                pipeline_name = pipeline['pipeline']
                # Start from a copy of the raw pipeline mapping, then force
                # start_date to a midnight datetime and default
                # depends_on_past to False when the key is absent.
                default_args = {k: v for k, v in pipeline.items()}
                override_args = {
                    'start_date': datetime.combine(pipeline['start_date'], datetime.min.time()),
                    __DEPENDS_ON_PAST: default_args[__DEPENDS_ON_PAST] if __DEPENDS_ON_PAST in default_args else False,
                }
                default_args.update(override_args)
                dag = DAG(
                    dag_id=pipeline_name,
                    default_args=default_args,
                    dagrun_timeout=timedelta(minutes=pipeline['timeout_minutes']),
                    catchup=False
                )
                job_start_task = JobStartTask(dag, pipeline_name, None, pipeline, 'all_success')
                parent = job_start_task.apply_task_to_dag()
                # NOTE(review): 'always_run' is read from the file-level
                # config, not from the individual pipeline — confirm that
                # is intended.
                trigger_rule = 'all_success'
                if 'always_run' in config and config['always_run']:
                    trigger_rule = 'all_done'
                # Chain the pipeline's tasks linearly; each task becomes the
                # parent of the next.
                for task in pipeline['tasks']:
                    task_type = task['type']
                    task_instance = get_task_class(task_type)(
                        dag, pipeline['pipeline'], parent if parent else None, task, trigger_rule
                    )
                    parent = task_instance.apply_task_to_dag()
                # The end task uses 'all_done' so it also closes out failed runs.
                job_end_task = JobEndTask(dag, pipeline_name, parent, pipeline, 'all_done')
                job_end_task.apply_task_to_dag()
                print(f'{pipeline_name}: {dag.tasks}')
                # Expose the DAG at module level for Airflow discovery.
                globals()[pipeline_name] = dag
                dags.append(dag)
    return dags
print(f'Loading task implementations..')
# TODO: add configuration for user tasks package
task_package = 'rainbow/runners/airflow/tasks'
user_task_package = 'TODO: user_tasks_package'
# Map of task type name -> Task subclass, built by scanning the packages
# for subclasses of Task.
task_classes = class_util.find_subclasses_in_packages([task_package, user_task_package], Task)
print(f'Finished loading task implementations: {task_classes}')
def get_task_class(task_type):
    # Raises KeyError for an unknown task type.
    return task_classes[task_type]
# Module import side effect: build every DAG from the configured directory.
register_dags(Variable.get('rainbows_dir'))
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime, timedelta
import yaml
from airflow import DAG
from airflow.models import Variable
from rainbow.core.util import class_util
from rainbow.core.util import files_util
from rainbow.runners.airflow.model.task import Task
from rainbow.runners.airflow.tasks.defaults.job_end import JobEndTask
from rainbow.runners.airflow.tasks.defaults.job_start import JobStartTask
__DEPENDS_ON_PAST = 'depends_on_past'
def register_dags(configs_path):
"""
Registers pipelines in rainbow yml files found in given path (recursively) as airflow DAGs.
"""
config_files = files_util.find_config_files(configs_path)
dags = []
for config_file in config_files:
print(f'Registering DAG for file: {config_file}')
with open(config_file) as stream:
config = yaml.safe_load(stream)
for pipeline in config['pipelines']:
pipeline_name = pipeline['pipeline']
default_args = {k: v for k, v in pipeline.items()}
override_args = {
'start_date': datetime.combine(pipeline['start_date'], datetime.min.time()),
__DEPENDS_ON_PAST: default_args[__DEPENDS_ON_PAST] if __DEPENDS_ON_PAST in default_args else False,
}
default_args.update(override_args)
dag = DAG(
dag_id=pipeline_name,
default_args=default_args,
dagrun_timeout=timedelta(minutes=pipeline['timeout_minutes']),
catchup=False
)
job_start_task = JobStartTask(dag, pipeline_name, None, pipeline, 'all_success')
parent = job_start_task.apply_task_to_dag()
trigger_rule = 'all_success'
if 'always_run' in config and config['always_run']:
trigger_rule = 'all_done'
for task in pipeline['tasks']:
task_type = task['type']
task_instance = get_task_class(task_type)(
dag, pipeline['pipeline'], parent if parent else None, task, trigger_rule
)
parent = task_instance.apply_task_to_dag()
job_end_task = JobEndTask(dag, pipeline_name, parent, pipeline, 'all_done')
job_end_task.apply_task_to_dag()
print(f'{pipeline_name}: {dag.tasks}')
globals()[pipeline_name] = dag
dags.append(dag)
return dags
print(f'Loading task implementations..')
# TODO: add configuration for user tasks package
task_package = 'rainbow/runners/airflow/tasks'
user_task_package = 'TODO: user_tasks_package'
task_classes = class_util.find_subclasses_in_packages([task_package, user_task_package], Task)
print(f'Finished loading task implementations: {task_classes}')
def get_task_class(task_type):
return task_classes[task_type]
register_dags(Variable.get('rainbows_dir'))
|
en
| 0.858686
|
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. Registers pipelines in rainbow yml files found in given path (recursively) as airflow DAGs. # TODO: add configuration for user tasks package
| 2.023278
| 2
|
numismatics/coinmarketcap.py
|
wade-crys/crys-backend
| 0
|
6629513
|
# Fetch CoinMarketCap listings plus per-coin metadata and write three JSON
# snapshots: coins.json, coins_metadata.json, coins_full.json (merged).
from config.api_keys import COINMARKETCAP_API_KEY
import requests
import json
# NOTE(review): the API key below is the literal '<KEY>' placeholder and
# COINMARKETCAP_API_KEY is imported but never used — presumably the key was
# redacted; substitute it back before running.
latest_url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest?CMC_PRO_API_KEY=<KEY>'
r = requests.get(latest_url)
coins_response = json.loads(r.text)
coins = coins_response['data']
with open('coins.json', 'w') as f:
    json.dump(coins, f, indent=4, sort_keys=True)
ids = [coin['id'] for coin in coins]
metadata_url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/info?CMC_PRO_API_KEY=<KEY>'
# One metadata request for all coin ids, comma-joined into the query string.
r = requests.get(metadata_url + '&id=' + ",".join(str(elt) for elt in ids) + '&aux=urls,logo,description,tags,platform,date_added,notice,status')
metadata_response = json.loads(r.text)
coins_metadata = metadata_response['data']
with open('coins_metadata.json', 'w') as f:
    json.dump(coins_metadata, f, indent=4, sort_keys=True)
# join info together
#
# with open('coins.json', 'r') as coins_file, open('coins_metadata.json', 'r') as coins_metadata_file:
#     coins = json.load(coins_file)
#     coins_metadata = json.load(coins_metadata_file)
# NOTE(review): assert is stripped under python -O; raise explicitly if this
# consistency check matters in production.
assert(len(coins) == len(coins_metadata))
# Merge each listing record into its metadata record, keyed by coin id
# (metadata keys are string ids, listing ids are ints).
for id, coin_metadata in coins_metadata.items():
    # find coin by id
    coin_extras = [coin for coin in coins if coin['id'] == int(id)][0]
    coin_metadata.update(coin_extras)
    coins_metadata[id] = coin_metadata
with open('coins_full.json', 'w') as f:
    json.dump(coins_metadata, f, indent=4, sort_keys=True)
|
from config.api_keys import COINMARKETCAP_API_KEY
import requests
import json
latest_url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest?CMC_PRO_API_KEY=<KEY>'
r = requests.get(latest_url)
coins_response = json.loads(r.text)
coins = coins_response['data']
with open('coins.json', 'w') as f:
json.dump(coins, f, indent=4, sort_keys=True)
ids = [coin['id'] for coin in coins]
metadata_url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/info?CMC_PRO_API_KEY=<KEY>'
r = requests.get(metadata_url + '&id=' + ",".join(str(elt) for elt in ids) + '&aux=urls,logo,description,tags,platform,date_added,notice,status')
metadata_response = json.loads(r.text)
coins_metadata = metadata_response['data']
with open('coins_metadata.json', 'w') as f:
json.dump(coins_metadata, f, indent=4, sort_keys=True)
# join info together
#
# with open('coins.json', 'r') as coins_file, open('coins_metadata.json', 'r') as coins_metadata_file:
# coins = json.load(coins_file)
# coins_metadata = json.load(coins_metadata_file)
assert(len(coins) == len(coins_metadata))
for id, coin_metadata in coins_metadata.items():
# find coin by id
coin_extras = [coin for coin in coins if coin['id'] == int(id)][0]
coin_metadata.update(coin_extras)
coins_metadata[id] = coin_metadata
with open('coins_full.json', 'w') as f:
json.dump(coins_metadata, f, indent=4, sort_keys=True)
|
en
| 0.365979
|
# join info together # # with open('coins.json', 'r') as coins_file, open('coins_metadata.json', 'r') as coins_metadata_file: # coins = json.load(coins_file) # coins_metadata = json.load(coins_metadata_file) # find coin by id
| 2.578327
| 3
|
extentions.py
|
FoxyTheKing/cats-blender-plugin
| 0
|
6629514
|
<gh_stars>0
from .tools import common as Common
from .tools import atlas as Atlas
from .tools import eyetracking as Eyetracking
from .tools import rootbone as Rootbone
from .tools import settings as Settings
from .tools import importer as Importer
from bpy.types import Scene, Material
from bpy.props import BoolProperty, EnumProperty, FloatProperty, IntProperty, CollectionProperty
def register():
Scene.armature = EnumProperty(
name='Armature',
description='Select the armature which will be used by Cats',
items=Common.get_armature_list,
update=Common.update_material_list
)
Scene.zip_content = EnumProperty(
name='Zip Content',
description='Select the model you want to import',
items=Importer.get_zip_content
)
Scene.keep_upper_chest = BoolProperty(
name='Keep Upper Chest',
description="VrChat now partially supports the Upper Chest bone, so deleting it is no longer necessary."
"\n\nWARNING: Currently this breaks Eye Tracking, so don't check this if you want Eye Tracking",
default=False
)
Scene.combine_mats = BoolProperty(
name='Combine Same Materials',
description="Combines similar materials into one, reducing draw calls.\n\n"
'Your avatar should visibly look the same after this operation.\n'
'This is a very important step for optimizing your avatar.\n'
'If you have problems with this, uncheck this option and tell us!\n',
default=True
)
Scene.remove_zero_weight = BoolProperty(
name='Remove Zero Weight Bones',
description="Cleans up the bones hierarchy, deleting all bones that don't directly affect any vertices.\n"
'Uncheck this if bones you want to keep got deleted',
default=True
)
Scene.keep_end_bones = BoolProperty(
name='Keep End Bones',
description="Saves end bones from deletion."
'\n\nThis can improve skirt movement for dynamic bones, but increases the bone count.'
'\nThis can also fix issues with crumbled finger bones in Unity.'
'\nMake sure to always uncheck "Add Leaf Bones" when exporting or use the CATS export button',
default=False
)
Scene.join_meshes = BoolProperty(
name='Join Meshes',
description='Joins all meshes of this model together.'
'\nIt also:'
'\n - Applies all transformations'
'\n - Repairs broken armature modifiers'
'\n - Applies all decimation and mirror modifiers'
'\n - Merges UV maps correctly'
'\n'
'\nINFO: You should always join your meshes',
default=True
)
Scene.connect_bones = BoolProperty(
name='Connect Bones',
description="This connects all bones to their child bone if they have exactly one child bone.\n"
"This will not change how the bones function in any way, it just improves the aesthetic of the armature",
default=True
)
Scene.fix_materials = BoolProperty(
name='Fix Materials',
description="This will apply some VRChat related fixes to materials",
default=True
)
Scene.use_google_only = BoolProperty(
name='Use Old Translations (not recommended)',
description="Ignores the internal dictionary and only uses the Google Translator for shape key translations."
"\n"
'\nThis will result in slower translation speed and worse translations, but the translations will be like in CATS version 0.9.0 and older.'
"\nOnly use this if you have animations which rely on the old translations and you don't want to convert them to the new ones",
default=False
)
Scene.show_more_options = BoolProperty(
name='Show More Options',
description="Shows more model options",
default=False
)
Scene.merge_mode = EnumProperty(
name="Merge Mode",
description="Mode",
items=[
("ARMATURE", "Merge Armatures", "Here you can merge two armatures together."),
("MESH", "Attach Mesh", "Here you can attach a mesh to an armature.")
]
)
Scene.merge_armature_into = EnumProperty(
name='Base Armature',
description='Select the armature into which the other armature will be merged\n',
items=Common.get_armature_list
)
Scene.merge_armature = EnumProperty(
name='Merge Armature',
description='Select the armature which will be merged into the selected armature above\n',
items=Common.get_armature_merge_list
)
Scene.attach_to_bone = EnumProperty(
name='Attach to Bone',
description='Select the bone to which the armature will be attached to\n',
items=Common.get_bones_merge
)
Scene.attach_mesh = EnumProperty(
name='Attach Mesh',
description='Select the mesh which will be attached to the selected bone in the selected armature\n',
items=Common.get_top_meshes
)
Scene.merge_same_bones = BoolProperty(
name='Merge All Bones',
description='Merges all bones together that have the same name instead of only the base bones (Hips, Spine, etc).'
'\nYou will have to make sure that all the bones you want to merge have the same name.'
'\n'
"\nIf this is checked, you won't need to fix the model with CATS beforehand but it is still advised to do so."
"\nIf this is unchecked, CATS will only merge the base bones (Hips, Spine, etc)."
"\n"
"\nThis can have unintended side effects, so check your model afterwards!"
"\n",
default=False
)
Scene.apply_transforms = BoolProperty(
name='Apply Transforms',
description='Check this if both armatures and meshes are already at their correct positions.'
'\nThis will cause them to stay exactly like they are when merging',
default=False
)
Scene.merge_armatures_join_meshes = BoolProperty(
name='Join Meshes',
description='This will join all meshes.'
'\nNot checking this will always apply transforms',
default=True
)
# Decimation
Scene.decimation_mode = EnumProperty(
name="Decimation Mode",
description="Decimation Mode",
items=[
("SAFE", "Safe", 'Decent results - no shape key loss\n'
'\n'
"This will only decimate meshes with no shape keys.\n"
"The results are decent and you won't lose any shape keys.\n"
'Eye Tracking and Lip Syncing will be fully preserved.'),
("HALF", "Half", 'Good results - minimal shape key loss\n'
"\n"
"This will only decimate meshes with less than 4 shape keys as those are often not used.\n"
'The results are better but you will lose the shape keys in some meshes.\n'
'Eye Tracking and Lip Syncing should still work.'),
("FULL", "Full", 'Best results - full shape key loss\n'
'\n'
"This will decimate your whole model deleting all shape keys in the process.\n"
'This will give the best results but you will lose the ability to add blinking and Lip Syncing.\n'
'Eye Tracking will still work if you disable Eye Blinking.'),
("CUSTOM", "Custom", 'Custom results - custom shape key loss\n'
'\n'
"This will let you choose which meshes and shape keys should not be decimated.\n")
],
default='HALF'
)
Scene.selection_mode = EnumProperty(
name="Selection Mode",
description="Selection Mode",
items=[
("SHAPES", "Shape Keys", 'Select all the shape keys you want to preserve here.'),
("MESHES", "Meshes", "Select all the meshes you don't want to decimate here.")
]
)
Scene.add_shape_key = EnumProperty(
name='Shape',
description='The shape key you want to keep',
items=Common.get_shapekeys_decimation
)
Scene.add_mesh = EnumProperty(
name='Mesh',
description='The mesh you want to leave untouched by the decimation',
items=Common.get_meshes_decimation
)
Scene.decimate_fingers = BoolProperty(
name="Save Fingers",
description="Check this if you don't want to decimate your fingers!\n"
"Results will be worse but there will be no issues with finger movement.\n"
"This is probably only useful if you have a VR headset.\n"
"\n"
"This operation requires the finger bones to be named specifically:\n"
"Thumb(0-2)_(L/R)\n"
"IndexFinger(1-3)_(L/R)\n"
"MiddleFinger(1-3)_(L/R)\n"
"RingFinger(1-3)_(L/R)\n"
"LittleFinger(1-3)_(L/R)"
)
Scene.decimate_hands = BoolProperty(
name="Save Hands",
description="Check this if you don't want to decimate your full hands!\n"
"Results will be worse but there will be no issues with hand movement.\n"
"This is probably only useful if you have a VR headset.\n"
"\n"
"This operation requires the finger and hand bones to be named specifically:\n"
"Left/Right wrist\n"
"Thumb(0-2)_(L/R)\n"
"IndexFinger(1-3)_(L/R)\n"
"MiddleFinger(1-3)_(L/R)\n"
"RingFinger(1-3)_(L/R)\n"
"LittleFinger(1-3)_(L/R)"
)
Scene.max_tris = IntProperty(
name='Tris',
description="The target amount of tris after decimation",
default=70000,
min=1,
max=200000
)
# Eye Tracking
Scene.eye_mode = EnumProperty(
name="Eye Mode",
description="Mode",
items=[
("CREATION", "Creation", "Here you can create eye tracking."),
("TESTING", "Testing", "Here you can test how eye tracking will look ingame.")
],
update=Eyetracking.stop_testing
)
Scene.mesh_name_eye = EnumProperty(
name='Mesh',
description='The mesh with the eyes vertex groups',
items=Common.get_meshes
)
Scene.head = EnumProperty(
name='Head',
description='The head bone containing the eye bones',
items=Common.get_bones_head
)
Scene.eye_left = EnumProperty(
name='Left Eye',
description='The models left eye bone',
items=Common.get_bones_eye_l
)
Scene.eye_right = EnumProperty(
name='Right Eye',
description='The models right eye bone',
items=Common.get_bones_eye_r
)
Scene.wink_left = EnumProperty(
name='Blink Left',
description='The shape key containing a blink with the left eye',
items=Common.get_shapekeys_eye_blink_l
)
Scene.wink_right = EnumProperty(
name='Blink Right',
description='The shape key containing a blink with the right eye',
items=Common.get_shapekeys_eye_blink_r
)
Scene.lowerlid_left = EnumProperty(
name='Lowerlid Left',
description='The shape key containing a slightly raised left lower lid.\n'
'Can be set to "Basis" to disable lower lid movement',
items=Common.get_shapekeys_eye_low_l
)
Scene.lowerlid_right = EnumProperty(
name='Lowerlid Right',
description='The shape key containing a slightly raised right lower lid.\n'
'Can be set to "Basis" to disable lower lid movement',
items=Common.get_shapekeys_eye_low_r
)
Scene.disable_eye_movement = BoolProperty(
name='Disable Eye Movement',
description='IMPORTANT: Do your decimation first if you check this!\n'
'\n'
'Disables eye movement. Useful if you only want blinking.\n'
'This creates eye bones with no movement bound to them.\n'
'You still have to assign "LeftEye" and "RightEye" to the eyes in Unity',
subtype='DISTANCE'
)
Scene.disable_eye_blinking = BoolProperty(
name='Disable Eye Blinking',
description='Disables eye blinking. Useful if you only want eye movement.\n'
'This will create the necessary shape keys but leaves them empty',
subtype='NONE'
)
Scene.eye_distance = FloatProperty(
name='Eye Movement Range',
description='Higher = more eye movement\n'
'Lower = less eye movement\n'
'Warning: Too little or too much range can glitch the eyes.\n'
'Test your results in the "Eye Testing"-Tab!\n',
default=0.8,
min=0.0,
max=2.0,
step=1.0,
precision=2,
subtype='FACTOR'
)
Scene.eye_rotation_x = IntProperty(
name='Up - Down',
description='Rotate the eye bones on the vertical axis',
default=0,
min=-19,
max=25,
step=1,
subtype='FACTOR',
update=Eyetracking.set_rotation
)
Scene.eye_rotation_y = IntProperty(
name='Left - Right',
description='Rotate the eye bones on the horizontal axis.'
'\nThis is from your own point of view',
default=0,
min=-19,
max=19,
step=1,
subtype='FACTOR',
update=Eyetracking.set_rotation
)
Scene.iris_height = IntProperty(
name='Iris Height',
description='Moves the iris away from the eye ball',
default=0,
min=0,
max=100,
step=1,
subtype='FACTOR'
)
Scene.eye_blink_shape = FloatProperty(
name='Blink Strength',
description='Test the blinking of the eye',
default=1.0,
min=0.0,
max=1.0,
step=1.0,
precision=2,
subtype='FACTOR'
)
Scene.eye_lowerlid_shape = FloatProperty(
name='Lowerlid Strength',
description='Test the lowerlid blinking of the eye',
default=1.0,
min=0.0,
max=1.0,
step=1.0,
precision=2,
subtype='FACTOR'
)
# Visemes
Scene.mesh_name_viseme = EnumProperty(
name='Mesh',
description='The mesh with the mouth shape keys',
items=Common.get_meshes
)
Scene.mouth_a = EnumProperty(
name='Viseme AA',
description='Shape key containing mouth movement that looks like someone is saying "aa".\nDo not put empty shape keys like "Basis" in here',
items=Common.get_shapekeys_mouth_ah,
)
Scene.mouth_o = EnumProperty(
name='Viseme OH',
description='Shape key containing mouth movement that looks like someone is saying "oh".\nDo not put empty shape keys like "Basis" in here',
items=Common.get_shapekeys_mouth_oh,
)
Scene.mouth_ch = EnumProperty(
name='Viseme CH',
description='Shape key containing mouth movement that looks like someone is saying "ch". Opened lips and clenched teeth.\nDo not put empty shape keys like "Basis" in here',
items=Common.get_shapekeys_mouth_ch,
)
Scene.shape_intensity = FloatProperty(
name='Shape Key Mix Intensity',
description='Controls the strength in the creation of the shape keys. Lower for less mouth movement strength',
default=1.0,
min=0.0,
max=10.0,
step=0.1,
precision=2,
subtype='FACTOR'
)
# Bone Parenting
Scene.root_bone = EnumProperty(
name='To Parent',
description='List of bones that look like they could be parented together to a root bone',
items=Rootbone.get_parent_root_bones,
)
# Optimize
Scene.optimize_mode = EnumProperty(
name="Optimize Mode",
description="Mode",
items=[
("ATLAS", "Atlas", "Allows you to make a texture atlas."),
("MATERIAL", "Material", "Some various options on material manipulation."),
("BONEMERGING", "Bone Merging", "Allows child bones to be merged into their parents."),
]
)
# Atlas
# Material.add_to_atlas = BoolProperty(
# description='Add this material to the atlas',
# default=False
# )
# Scene.material_list_index = IntProperty(
# default=0
# )
# Scene.material_list = CollectionProperty(
# type=Atlas.MaterialsGroup
# )
# Scene.clear_materials = BoolProperty(
# description='Clear materials checkbox',
# default=True
# )
# Bone Merging
Scene.merge_ratio = FloatProperty(
name='Merge Ratio',
description='Higher = more bones will be merged\n'
'Lower = less bones will be merged\n',
default=50,
min=1,
max=100,
step=1,
precision=0,
subtype='PERCENTAGE'
)
Scene.merge_mesh = EnumProperty(
name='Mesh',
description='The mesh with the bones vertex groups',
items=Common.get_meshes
)
Scene.merge_bone = EnumProperty(
name='To Merge',
description='List of bones that look like they could be merged together to reduce overall bones',
items=Rootbone.get_parent_root_bones,
)
# Settings
Scene.embed_textures = BoolProperty(
name='Embed Textures on Export',
description='Enable this to embed the texture files into the FBX file upon export.'
'\nUnity will automatically extract these textures and put them into a separate folder.'
'\nThis might not work for everyone and it increases the file size of the exported FBX file',
default=False,
update=Settings.update_settings
)
Scene.use_custom_mmd_tools = BoolProperty(
name='Use Custom mmd_tools',
description='Enable this to use your own version of mmd_tools. This will disable the internal cats mmd_tools',
default=False,
update=Settings.update_settings
)
Scene.debug_translations = BoolProperty(
name='Debug Google Translations',
description='Tests the Google Translations and prints the Google response in case of error',
default=False
)
# Scene.disable_vrchat_features = BoolProperty(
# name='Disable VRChat Only Features',
# description='This will disable features which are solely used for VRChat.'
# '\nThe following will be disabled:'
# '\n- Eye Tracking'
# '\n- Visemes',
# default=False,
# update=Settings.update_settings
# )
# Copy Protection - obsolete
# Scene.protection_mode = EnumProperty(
# name="Randomization Level",
# description="Randomization Level",
# items=[
# ("FULL", "Full", "This will randomize every vertex of your model and it will be completely unusable for thieves.\n"
# 'However this method might cause problems with the Outline option from Cubed shader.\n'
# 'If you have any issues ingame try again with option "Partial".'),
# ("PARTIAL", "Partial", 'Use this if you experience issues ingame with the Full option!\n'
# '\n'
# "This will only randomize a number of vertices and therefore will have a few unprotected areas,\n"
# "but it's still unusable to thieves as a whole.\n"
# 'This method however reduces the glitches that can occur ingame by a lot.')
# ],
# default='FULL'
# )
|
from .tools import common as Common
from .tools import atlas as Atlas
from .tools import eyetracking as Eyetracking
from .tools import rootbone as Rootbone
from .tools import settings as Settings
from .tools import importer as Importer
from bpy.types import Scene, Material
from bpy.props import BoolProperty, EnumProperty, FloatProperty, IntProperty, CollectionProperty
def register():
    """Attach all CATS plugin properties to bpy.types.Scene.

    Called on add-on registration. Every property below becomes available
    as ``context.scene.<name>`` and backs one UI control in the CATS panel.
    EnumProperty ``items`` callbacks live in tools.common / tools.rootbone /
    tools.importer; ``update`` callbacks react to user changes.
    """
    # Model / armature selection
    Scene.armature = EnumProperty(
        name='Armature',
        description='Select the armature which will be used by Cats',
        items=Common.get_armature_list,
        update=Common.update_material_list
    )
    Scene.zip_content = EnumProperty(
        name='Zip Content',
        description='Select the model you want to import',
        items=Importer.get_zip_content
    )
    # Fix Model options
    Scene.keep_upper_chest = BoolProperty(
        name='Keep Upper Chest',
        description="VrChat now partially supports the Upper Chest bone, so deleting it is no longer necessary."
                    "\n\nWARNING: Currently this breaks Eye Tracking, so don't check this if you want Eye Tracking",
        default=False
    )
    Scene.combine_mats = BoolProperty(
        name='Combine Same Materials',
        description="Combines similar materials into one, reducing draw calls.\n\n"
                    'Your avatar should visibly look the same after this operation.\n'
                    'This is a very important step for optimizing your avatar.\n'
                    'If you have problems with this, uncheck this option and tell us!\n',
        default=True
    )
    Scene.remove_zero_weight = BoolProperty(
        name='Remove Zero Weight Bones',
        description="Cleans up the bones hierarchy, deleting all bones that don't directly affect any vertices.\n"
                    'Uncheck this if bones you want to keep got deleted',
        default=True
    )
    Scene.keep_end_bones = BoolProperty(
        name='Keep End Bones',
        description="Saves end bones from deletion."
                    '\n\nThis can improve skirt movement for dynamic bones, but increases the bone count.'
                    '\nThis can also fix issues with crumbled finger bones in Unity.'
                    '\nMake sure to always uncheck "Add Leaf Bones" when exporting or use the CATS export button',
        default=False
    )
    Scene.join_meshes = BoolProperty(
        name='Join Meshes',
        description='Joins all meshes of this model together.'
                    '\nIt also:'
                    '\n  - Applies all transformations'
                    '\n  - Repairs broken armature modifiers'
                    '\n  - Applies all decimation and mirror modifiers'
                    '\n  - Merges UV maps correctly'
                    '\n'
                    '\nINFO: You should always join your meshes',
        default=True
    )
    Scene.connect_bones = BoolProperty(
        name='Connect Bones',
        description="This connects all bones to their child bone if they have exactly one child bone.\n"
                    "This will not change how the bones function in any way, it just improves the aesthetic of the armature",
        default=True
    )
    Scene.fix_materials = BoolProperty(
        name='Fix Materials',
        description="This will apply some VRChat related fixes to materials",
        default=True
    )
    Scene.use_google_only = BoolProperty(
        name='Use Old Translations (not recommended)',
        description="Ignores the internal dictionary and only uses the Google Translator for shape key translations."
                    "\n"
                    '\nThis will result in slower translation speed and worse translations, but the translations will be like in CATS version 0.9.0 and older.'
                    "\nOnly use this if you have animations which rely on the old translations and you don't want to convert them to the new ones",
        default=False
    )
    Scene.show_more_options = BoolProperty(
        name='Show More Options',
        description="Shows more model options",
        default=False
    )
    # Custom Model Creation: merging armatures / attaching meshes
    Scene.merge_mode = EnumProperty(
        name="Merge Mode",
        description="Mode",
        items=[
            ("ARMATURE", "Merge Armatures", "Here you can merge two armatures together."),
            ("MESH", "Attach Mesh", "Here you can attach a mesh to an armature.")
        ]
    )
    Scene.merge_armature_into = EnumProperty(
        name='Base Armature',
        description='Select the armature into which the other armature will be merged\n',
        items=Common.get_armature_list
    )
    Scene.merge_armature = EnumProperty(
        name='Merge Armature',
        description='Select the armature which will be merged into the selected armature above\n',
        items=Common.get_armature_merge_list
    )
    Scene.attach_to_bone = EnumProperty(
        name='Attach to Bone',
        description='Select the bone to which the armature will be attached to\n',
        items=Common.get_bones_merge
    )
    Scene.attach_mesh = EnumProperty(
        name='Attach Mesh',
        description='Select the mesh which will be attached to the selected bone in the selected armature\n',
        items=Common.get_top_meshes
    )
    Scene.merge_same_bones = BoolProperty(
        name='Merge All Bones',
        description='Merges all bones together that have the same name instead of only the base bones (Hips, Spine, etc).'
                    '\nYou will have to make sure that all the bones you want to merge have the same name.'
                    '\n'
                    "\nIf this is checked, you won't need to fix the model with CATS beforehand but it is still advised to do so."
                    "\nIf this is unchecked, CATS will only merge the base bones (Hips, Spine, etc)."
                    "\n"
                    "\nThis can have unintended side effects, so check your model afterwards!"
                    "\n",
        default=False
    )
    Scene.apply_transforms = BoolProperty(
        name='Apply Transforms',
        description='Check this if both armatures and meshes are already at their correct positions.'
                    '\nThis will cause them to stay exactly like they are when merging',
        default=False
    )
    Scene.merge_armatures_join_meshes = BoolProperty(
        name='Join Meshes',
        description='This will join all meshes.'
                    '\nNot checking this will always apply transforms',
        default=True
    )
    # Decimation
    Scene.decimation_mode = EnumProperty(
        name="Decimation Mode",
        description="Decimation Mode",
        items=[
            ("SAFE", "Safe", 'Decent results - no shape key loss\n'
                             '\n'
                             "This will only decimate meshes with no shape keys.\n"
                             "The results are decent and you won't lose any shape keys.\n"
                             'Eye Tracking and Lip Syncing will be fully preserved.'),
            ("HALF", "Half", 'Good results - minimal shape key loss\n'
                             "\n"
                             "This will only decimate meshes with less than 4 shape keys as those are often not used.\n"
                             'The results are better but you will lose the shape keys in some meshes.\n'
                             'Eye Tracking and Lip Syncing should still work.'),
            ("FULL", "Full", 'Best results - full shape key loss\n'
                             '\n'
                             "This will decimate your whole model deleting all shape keys in the process.\n"
                             'This will give the best results but you will lose the ability to add blinking and Lip Syncing.\n'
                             'Eye Tracking will still work if you disable Eye Blinking.'),
            ("CUSTOM", "Custom", 'Custom results - custom shape key loss\n'
                                 '\n'
                                 "This will let you choose which meshes and shape keys should not be decimated.\n")
        ],
        default='HALF'
    )
    Scene.selection_mode = EnumProperty(
        name="Selection Mode",
        description="Selection Mode",
        items=[
            ("SHAPES", "Shape Keys", 'Select all the shape keys you want to preserve here.'),
            ("MESHES", "Meshes", "Select all the meshes you don't want to decimate here.")
        ]
    )
    Scene.add_shape_key = EnumProperty(
        name='Shape',
        description='The shape key you want to keep',
        items=Common.get_shapekeys_decimation
    )
    Scene.add_mesh = EnumProperty(
        name='Mesh',
        description='The mesh you want to leave untouched by the decimation',
        items=Common.get_meshes_decimation
    )
    Scene.decimate_fingers = BoolProperty(
        name="Save Fingers",
        description="Check this if you don't want to decimate your fingers!\n"
                    "Results will be worse but there will be no issues with finger movement.\n"
                    "This is probably only useful if you have a VR headset.\n"
                    "\n"
                    "This operation requires the finger bones to be named specifically:\n"
                    "Thumb(0-2)_(L/R)\n"
                    "IndexFinger(1-3)_(L/R)\n"
                    "MiddleFinger(1-3)_(L/R)\n"
                    "RingFinger(1-3)_(L/R)\n"
                    "LittleFinger(1-3)_(L/R)"
    )
    Scene.decimate_hands = BoolProperty(
        name="Save Hands",
        description="Check this if you don't want to decimate your full hands!\n"
                    "Results will be worse but there will be no issues with hand movement.\n"
                    "This is probably only useful if you have a VR headset.\n"
                    "\n"
                    "This operation requires the finger and hand bones to be named specifically:\n"
                    "Left/Right wrist\n"
                    "Thumb(0-2)_(L/R)\n"
                    "IndexFinger(1-3)_(L/R)\n"
                    "MiddleFinger(1-3)_(L/R)\n"
                    "RingFinger(1-3)_(L/R)\n"
                    "LittleFinger(1-3)_(L/R)"
    )
    Scene.max_tris = IntProperty(
        name='Tris',
        description="The target amount of tris after decimation",
        default=70000,
        min=1,
        max=200000
    )
    # Eye Tracking
    Scene.eye_mode = EnumProperty(
        name="Eye Mode",
        description="Mode",
        items=[
            ("CREATION", "Creation", "Here you can create eye tracking."),
            ("TESTING", "Testing", "Here you can test how eye tracking will look ingame.")
        ],
        update=Eyetracking.stop_testing
    )
    Scene.mesh_name_eye = EnumProperty(
        name='Mesh',
        description='The mesh with the eyes vertex groups',
        items=Common.get_meshes
    )
    Scene.head = EnumProperty(
        name='Head',
        description='The head bone containing the eye bones',
        items=Common.get_bones_head
    )
    Scene.eye_left = EnumProperty(
        name='Left Eye',
        description='The models left eye bone',
        items=Common.get_bones_eye_l
    )
    Scene.eye_right = EnumProperty(
        name='Right Eye',
        description='The models right eye bone',
        items=Common.get_bones_eye_r
    )
    Scene.wink_left = EnumProperty(
        name='Blink Left',
        description='The shape key containing a blink with the left eye',
        items=Common.get_shapekeys_eye_blink_l
    )
    Scene.wink_right = EnumProperty(
        name='Blink Right',
        description='The shape key containing a blink with the right eye',
        items=Common.get_shapekeys_eye_blink_r
    )
    Scene.lowerlid_left = EnumProperty(
        name='Lowerlid Left',
        description='The shape key containing a slightly raised left lower lid.\n'
                    'Can be set to "Basis" to disable lower lid movement',
        items=Common.get_shapekeys_eye_low_l
    )
    Scene.lowerlid_right = EnumProperty(
        name='Lowerlid Right',
        description='The shape key containing a slightly raised right lower lid.\n'
                    'Can be set to "Basis" to disable lower lid movement',
        items=Common.get_shapekeys_eye_low_r
    )
    Scene.disable_eye_movement = BoolProperty(
        name='Disable Eye Movement',
        description='IMPORTANT: Do your decimation first if you check this!\n'
                    '\n'
                    'Disables eye movement. Useful if you only want blinking.\n'
                    'This creates eye bones with no movement bound to them.\n'
                    'You still have to assign "LeftEye" and "RightEye" to the eyes in Unity',
        subtype='DISTANCE'
    )
    Scene.disable_eye_blinking = BoolProperty(
        name='Disable Eye Blinking',
        description='Disables eye blinking. Useful if you only want eye movement.\n'
                    'This will create the necessary shape keys but leaves them empty',
        subtype='NONE'
    )
    Scene.eye_distance = FloatProperty(
        name='Eye Movement Range',
        description='Higher = more eye movement\n'
                    'Lower = less eye movement\n'
                    'Warning: Too little or too much range can glitch the eyes.\n'
                    'Test your results in the "Eye Testing"-Tab!\n',
        default=0.8,
        min=0.0,
        max=2.0,
        step=1.0,
        precision=2,
        subtype='FACTOR'
    )
    Scene.eye_rotation_x = IntProperty(
        name='Up - Down',
        description='Rotate the eye bones on the vertical axis',
        default=0,
        min=-19,
        max=25,
        step=1,
        subtype='FACTOR',
        update=Eyetracking.set_rotation
    )
    Scene.eye_rotation_y = IntProperty(
        name='Left - Right',
        description='Rotate the eye bones on the horizontal axis.'
                    '\nThis is from your own point of view',
        default=0,
        min=-19,
        max=19,
        step=1,
        subtype='FACTOR',
        update=Eyetracking.set_rotation
    )
    Scene.iris_height = IntProperty(
        name='Iris Height',
        description='Moves the iris away from the eye ball',
        default=0,
        min=0,
        max=100,
        step=1,
        subtype='FACTOR'
    )
    Scene.eye_blink_shape = FloatProperty(
        name='Blink Strength',
        description='Test the blinking of the eye',
        default=1.0,
        min=0.0,
        max=1.0,
        step=1.0,
        precision=2,
        subtype='FACTOR'
    )
    Scene.eye_lowerlid_shape = FloatProperty(
        name='Lowerlid Strength',
        description='Test the lowerlid blinking of the eye',
        default=1.0,
        min=0.0,
        max=1.0,
        step=1.0,
        precision=2,
        subtype='FACTOR'
    )
    # Visemes
    Scene.mesh_name_viseme = EnumProperty(
        name='Mesh',
        description='The mesh with the mouth shape keys',
        items=Common.get_meshes
    )
    Scene.mouth_a = EnumProperty(
        name='Viseme AA',
        description='Shape key containing mouth movement that looks like someone is saying "aa".\nDo not put empty shape keys like "Basis" in here',
        items=Common.get_shapekeys_mouth_ah,
    )
    Scene.mouth_o = EnumProperty(
        name='Viseme OH',
        description='Shape key containing mouth movement that looks like someone is saying "oh".\nDo not put empty shape keys like "Basis" in here',
        items=Common.get_shapekeys_mouth_oh,
    )
    Scene.mouth_ch = EnumProperty(
        name='Viseme CH',
        description='Shape key containing mouth movement that looks like someone is saying "ch". Opened lips and clenched teeth.\nDo not put empty shape keys like "Basis" in here',
        items=Common.get_shapekeys_mouth_ch,
    )
    Scene.shape_intensity = FloatProperty(
        name='Shape Key Mix Intensity',
        description='Controls the strength in the creation of the shape keys. Lower for less mouth movement strength',
        default=1.0,
        min=0.0,
        max=10.0,
        step=0.1,
        precision=2,
        subtype='FACTOR'
    )
    # Bone Parenting
    Scene.root_bone = EnumProperty(
        name='To Parent',
        description='List of bones that look like they could be parented together to a root bone',
        items=Rootbone.get_parent_root_bones,
    )
    # Optimize
    Scene.optimize_mode = EnumProperty(
        name="Optimize Mode",
        description="Mode",
        items=[
            ("ATLAS", "Atlas", "Allows you to make a texture atlas."),
            ("MATERIAL", "Material", "Some various options on material manipulation."),
            ("BONEMERGING", "Bone Merging", "Allows child bones to be merged into their parents."),
        ]
    )
    # Atlas
    # Material.add_to_atlas = BoolProperty(
    #     description='Add this material to the atlas',
    #     default=False
    # )
    # Scene.material_list_index = IntProperty(
    #     default=0
    # )
    # Scene.material_list = CollectionProperty(
    #     type=Atlas.MaterialsGroup
    # )
    # Scene.clear_materials = BoolProperty(
    #     description='Clear materials checkbox',
    #     default=True
    # )
    # Bone Merging
    Scene.merge_ratio = FloatProperty(
        name='Merge Ratio',
        description='Higher = more bones will be merged\n'
                    'Lower = less bones will be merged\n',
        default=50,
        min=1,
        max=100,
        step=1,
        precision=0,
        subtype='PERCENTAGE'
    )
    Scene.merge_mesh = EnumProperty(
        name='Mesh',
        description='The mesh with the bones vertex groups',
        items=Common.get_meshes
    )
    Scene.merge_bone = EnumProperty(
        name='To Merge',
        description='List of bones that look like they could be merged together to reduce overall bones',
        items=Rootbone.get_parent_root_bones,
    )
    # Settings
    Scene.embed_textures = BoolProperty(
        name='Embed Textures on Export',
        description='Enable this to embed the texture files into the FBX file upon export.'
                    '\nUnity will automatically extract these textures and put them into a separate folder.'
                    '\nThis might not work for everyone and it increases the file size of the exported FBX file',
        default=False,
        update=Settings.update_settings
    )
    Scene.use_custom_mmd_tools = BoolProperty(
        name='Use Custom mmd_tools',
        description='Enable this to use your own version of mmd_tools. This will disable the internal cats mmd_tools',
        default=False,
        update=Settings.update_settings
    )
    Scene.debug_translations = BoolProperty(
        name='Debug Google Translations',
        description='Tests the Google Translations and prints the Google response in case of error',
        default=False
    )
    # Scene.disable_vrchat_features = BoolProperty(
    #     name='Disable VRChat Only Features',
    #     description='This will disable features which are solely used for VRChat.'
    #                 '\nThe following will be disabled:'
    #                 '\n- Eye Tracking'
    #                 '\n- Visemes',
    #     default=False,
    #     update=Settings.update_settings
    # )
    # Copy Protection - obsolete
    # Scene.protection_mode = EnumProperty(
    #     name="Randomization Level",
    #     description="Randomization Level",
    #     items=[
    #         ("FULL", "Full", "This will randomize every vertex of your model and it will be completely unusable for thieves.\n"
    #                          'However this method might cause problems with the Outline option from Cubed shader.\n'
    #                          'If you have any issues ingame try again with option "Partial".'),
    #         ("PARTIAL", "Partial", 'Use this if you experience issues ingame with the Full option!\n'
    #                                '\n'
    #                                "This will only randomize a number of vertices and therefore will have a few unprotected areas,\n"
    #                                "but it's still unusable to thieves as a whole.\n"
    #                                'This method however reduces the glitches that can occur ingame by a lot.')
    #     ],
    #     default='FULL'
    # )
|
en
| 0.662556
|
# Decimation # Eye Tracking # Visemes # Bone Parenting # Optimize # Atlas # Material.add_to_atlas = BoolProperty( # description='Add this material to the atlas', # default=False # ) # Scene.material_list_index = IntProperty( # default=0 # ) # Scene.material_list = CollectionProperty( # type=Atlas.MaterialsGroup # ) # Scene.clear_materials = BoolProperty( # description='Clear materials checkbox', # default=True # ) # Bone Merging # Settings # Scene.disable_vrchat_features = BoolProperty( # name='Disable VRChat Only Features', # description='This will disable features which are solely used for VRChat.' # '\nThe following will be disabled:' # '\n- Eye Tracking' # '\n- Visemes', # default=False, # update=Settings.update_settings # ) # Copy Protection - obsolete # Scene.protection_mode = EnumProperty( # name="Randomization Level", # description="Randomization Level", # items=[ # ("FULL", "Full", "This will randomize every vertex of your model and it will be completely unusable for thieves.\n" # 'However this method might cause problems with the Outline option from Cubed shader.\n' # 'If you have any issues ingame try again with option "Partial".'), # ("PARTIAL", "Partial", 'Use this if you experience issues ingame with the Full option!\n' # '\n' # "This will only randomize a number of vertices and therefore will have a few unprotected areas,\n" # "but it's still unusable to thieves as a whole.\n" # 'This method however reduces the glitches that can occur ingame by a lot.') # ], # default='FULL' # )
| 1.747107
| 2
|
game/ghdialogue/ghdview.py
|
marblexu/gearhead-caramel
| 0
|
6629515
|
<reponame>marblexu/gearhead-caramel
import pbge
import pygame
from pbge import my_state,draw_text,default_border,anim_delay
import gears
class ConvoVisualizer(object):
    """Draws the conversation UI (portrait, dialogue text, speaker name,
    reaction emoji) and supplies the response menu.

    Used by the conversation system; it exposes a "text" attribute plus
    "render" and "get_menu" methods.
    """
    TEXT_AREA = pbge.frects.Frect(0,-125,350,150)
    MENU_AREA = pbge.frects.Frect(0,50,350,102)
    NAME_AREA = pbge.frects.Frect(25,-185,300,35)
    REACT_AREA = pbge.frects.Frect(290,-185,35,35)
    PORTRAIT_AREA = pbge.frects.Frect(-370,-300,400,600)
    PILOT_AREA = pbge.frects.Frect(-350,-250,100,100)
    def __init__(self,npc,camp,pc=None):
        # npc may be a mecha or similar container: converse with its pilot
        # but use the root object's portrait as the main image.
        pilot = npc.get_pilot()
        npc = npc.get_root()
        self.npc = pilot
        if hasattr(npc, "get_portrait"):
            self.npc_sprite = npc.get_portrait()
        else:
            self.npc_sprite = None
        # Show a separate pilot portrait only when the pilot differs from
        # the root object (i.e. the NPC is piloting something).
        if pilot is not npc and hasattr(pilot, "get_portrait"):
            self.pilot_sprite = pilot.get_portrait()
        else:
            self.pilot_sprite = None
        self.npc_desc = self.npc.get_text_desc(camp)
        self.camp = camp
        self.bottom_sprite = pbge.image.Image('sys_wintermocha_convoborder.png',32,200)
        self.react_sprite = pbge.image.Image('sys_reaction_emoji.png',35,35)
        self.text = ''
        # BUGFIX: self.pc must always be defined; render() tests
        # "if self.pc:" unconditionally, which raised AttributeError
        # whenever the visualizer was constructed with pc=None.
        if pc:
            self.pc = pc.get_pilot()
        else:
            self.pc = None
    def get_portrait_area(self):
        """Return the rect where the NPC portrait is drawn.

        Anchors the sprite near the bottom-center-left of the screen; falls
        back to the fixed PORTRAIT_AREA when there is no portrait sprite.
        """
        if self.npc_sprite:
            mydest = self.npc_sprite.get_rect(0)
            mydest.midbottom = (my_state.screen.get_width()//2-170,my_state.screen.get_height()//2+300)
        else:
            return self.PORTRAIT_AREA.get_rect()
        return mydest
    def render(self,draw_menu_rect=True):
        """Draw one frame of the conversation UI."""
        if my_state.view:
            my_state.view()
        self.bottom_sprite.tile(pygame.Rect(0,my_state.screen.get_height()//2+100,my_state.screen.get_width(),200))
        if self.npc_sprite:
            self.npc_sprite.render(self.get_portrait_area())
        if self.pilot_sprite:
            default_border.render(self.PILOT_AREA.get_rect())
            self.pilot_sprite.render(self.PILOT_AREA.get_rect(),1)
        text_rect = self.TEXT_AREA.get_rect()
        default_border.render(text_rect)
        draw_text(my_state.medium_font,self.text,text_rect)
        if draw_menu_rect:
            default_border.render(self.MENU_AREA.get_rect())
        name_rect = self.NAME_AREA.get_rect()
        default_border.render(name_rect)
        draw_text(my_state.big_font,str(self.npc),name_rect,color=pbge.WHITE,justify=0)
        name_rect.y += my_state.big_font.get_linesize()
        draw_text(my_state.small_font,self.npc_desc,name_rect,color=pbge.GREY,justify=0)
        if self.pc:
            # Map the reaction score onto the emoji frame index
            # (presumably score is roughly -99..+99 -> frames 0..4; confirm).
            react_level = ( self.npc.get_reaction_score(self.pc,self.camp) + 99 )//40
            self.react_sprite.render(self.REACT_AREA.get_rect(),react_level)
    def rollout(self):
        """Intro animation: slide the portrait in from the left while the
        bottom border sweeps in from the right edge."""
        bx = my_state.screen.get_width()
        t = 0
        myrect = self.PORTRAIT_AREA.get_rect()
        myrect.x = -400
        while (myrect.x < self.get_portrait_area().x):
            if my_state.view:
                my_state.view()
            self.bottom_sprite.tile(pygame.Rect(max(0,bx-t*75),my_state.screen.get_height()//2+100,my_state.screen.get_width(),200))
            if self.npc_sprite:
                self.npc_sprite.render(myrect)
            my_state.do_flip()
            myrect.x += 25
            anim_delay()
            t += 1
    def get_menu(self):
        """Return the response menu positioned in MENU_AREA; it redraws the
        conversation via render() while open."""
        return pbge.rpgmenu.Menu(self.MENU_AREA.dx,self.MENU_AREA.dy,self.MENU_AREA.w,self.MENU_AREA.h,border=None,predraw=self.render,font=my_state.medium_font)
|
import pbge
import pygame
from pbge import my_state,draw_text,default_border,anim_delay
import gears
class ConvoVisualizer(object):
    """Draws the conversation UI (portrait, dialogue text, speaker name,
    reaction emoji) and supplies the response menu.

    Used by the conversation system; it exposes a "text" attribute plus
    "render" and "get_menu" methods.
    """
    TEXT_AREA = pbge.frects.Frect(0,-125,350,150)
    MENU_AREA = pbge.frects.Frect(0,50,350,102)
    NAME_AREA = pbge.frects.Frect(25,-185,300,35)
    REACT_AREA = pbge.frects.Frect(290,-185,35,35)
    PORTRAIT_AREA = pbge.frects.Frect(-370,-300,400,600)
    PILOT_AREA = pbge.frects.Frect(-350,-250,100,100)
    def __init__(self,npc,camp,pc=None):
        # npc may be a mecha or similar container: converse with its pilot
        # but use the root object's portrait as the main image.
        pilot = npc.get_pilot()
        npc = npc.get_root()
        self.npc = pilot
        if hasattr(npc, "get_portrait"):
            self.npc_sprite = npc.get_portrait()
        else:
            self.npc_sprite = None
        # Show a separate pilot portrait only when the pilot differs from
        # the root object (i.e. the NPC is piloting something).
        if pilot is not npc and hasattr(pilot, "get_portrait"):
            self.pilot_sprite = pilot.get_portrait()
        else:
            self.pilot_sprite = None
        self.npc_desc = self.npc.get_text_desc(camp)
        self.camp = camp
        self.bottom_sprite = pbge.image.Image('sys_wintermocha_convoborder.png',32,200)
        self.react_sprite = pbge.image.Image('sys_reaction_emoji.png',35,35)
        self.text = ''
        # BUGFIX: self.pc must always be defined; render() tests
        # "if self.pc:" unconditionally, which raised AttributeError
        # whenever the visualizer was constructed with pc=None.
        if pc:
            self.pc = pc.get_pilot()
        else:
            self.pc = None
    def get_portrait_area(self):
        """Return the rect where the NPC portrait is drawn.

        Anchors the sprite near the bottom-center-left of the screen; falls
        back to the fixed PORTRAIT_AREA when there is no portrait sprite.
        """
        if self.npc_sprite:
            mydest = self.npc_sprite.get_rect(0)
            mydest.midbottom = (my_state.screen.get_width()//2-170,my_state.screen.get_height()//2+300)
        else:
            return self.PORTRAIT_AREA.get_rect()
        return mydest
    def render(self,draw_menu_rect=True):
        """Draw one frame of the conversation UI."""
        if my_state.view:
            my_state.view()
        self.bottom_sprite.tile(pygame.Rect(0,my_state.screen.get_height()//2+100,my_state.screen.get_width(),200))
        if self.npc_sprite:
            self.npc_sprite.render(self.get_portrait_area())
        if self.pilot_sprite:
            default_border.render(self.PILOT_AREA.get_rect())
            self.pilot_sprite.render(self.PILOT_AREA.get_rect(),1)
        text_rect = self.TEXT_AREA.get_rect()
        default_border.render(text_rect)
        draw_text(my_state.medium_font,self.text,text_rect)
        if draw_menu_rect:
            default_border.render(self.MENU_AREA.get_rect())
        name_rect = self.NAME_AREA.get_rect()
        default_border.render(name_rect)
        draw_text(my_state.big_font,str(self.npc),name_rect,color=pbge.WHITE,justify=0)
        name_rect.y += my_state.big_font.get_linesize()
        draw_text(my_state.small_font,self.npc_desc,name_rect,color=pbge.GREY,justify=0)
        if self.pc:
            # Map the reaction score onto the emoji frame index
            # (presumably score is roughly -99..+99 -> frames 0..4; confirm).
            react_level = ( self.npc.get_reaction_score(self.pc,self.camp) + 99 )//40
            self.react_sprite.render(self.REACT_AREA.get_rect(),react_level)
    def rollout(self):
        """Intro animation: slide the portrait in from the left while the
        bottom border sweeps in from the right edge."""
        bx = my_state.screen.get_width()
        t = 0
        myrect = self.PORTRAIT_AREA.get_rect()
        myrect.x = -400
        while (myrect.x < self.get_portrait_area().x):
            if my_state.view:
                my_state.view()
            self.bottom_sprite.tile(pygame.Rect(max(0,bx-t*75),my_state.screen.get_height()//2+100,my_state.screen.get_width(),200))
            if self.npc_sprite:
                self.npc_sprite.render(myrect)
            my_state.do_flip()
            myrect.x += 25
            anim_delay()
            t += 1
    def get_menu(self):
        """Return the response menu positioned in MENU_AREA; it redraws the
        conversation via render() while open."""
        return pbge.rpgmenu.Menu(self.MENU_AREA.dx,self.MENU_AREA.dy,self.MENU_AREA.w,self.MENU_AREA.h,border=None,predraw=self.render,font=my_state.medium_font)
|
en
| 0.913531
|
# The visualizer is a class used by the conversation when conversing. # It has a "text" property and "render", "get_menu" methods.
| 2.801221
| 3
|
core/typecheck/typing.py
|
xieguo/sublime_db
| 1
|
6629516
|
<gh_stars>1-10
# Runtime stand-in for typing.TYPE_CHECKING: always False outside a checker.
TYPE_CHECKING = False
class Any:
    """Stub for typing.Any."""
    pass
class _GetAttr(type):
def __getitem__(self, x):
return self
class Generic(metaclass=_GetAttr):
    """Stub for typing.Generic; subscription (Generic[T]) returns the class."""
    pass
class Generator(Generic):
    """Stub for typing.Generator."""
    pass
class Callable(Generic):
    """Stub for typing.Callable."""
    pass
class List(Generic):
    """Stub for typing.List."""
    pass
class Optional(Generic):
    """Stub for typing.Optional."""
    pass
class Tuple(Generic):
    """Stub for typing.Tuple."""
    pass
class Union(Generic):
    """Stub for typing.Union."""
    pass
class Dict(Generic):
    """Stub for typing.Dict."""
    pass
class Set(Generic):
    """Stub for typing.Set."""
    pass
class Sequence(Generic):
    """Stub for typing.Sequence."""
    pass
class NamedTuple(Generic):
    """Stub for typing.NamedTuple."""
    pass
class TypeVar:
    """Stub for typing.TypeVar; the name is accepted and discarded."""
    def __init__(self, name: str) -> None:
        pass
|
# Runtime stand-in for typing.TYPE_CHECKING: always False outside a checker.
TYPE_CHECKING = False
class Any:
    """Stub for typing.Any."""
    pass
class _GetAttr(type):
def __getitem__(self, x):
return self
class Generic(metaclass=_GetAttr):
    """Stub for typing.Generic; subscription (Generic[T]) returns the class."""
    pass
class Generator(Generic):
    """Stub for typing.Generator."""
    pass
class Callable(Generic):
    """Stub for typing.Callable."""
    pass
class List(Generic):
    """Stub for typing.List."""
    pass
class Optional(Generic):
    """Stub for typing.Optional."""
    pass
class Tuple(Generic):
    """Stub for typing.Tuple."""
    pass
class Union(Generic):
    """Stub for typing.Union."""
    pass
class Dict(Generic):
    """Stub for typing.Dict."""
    pass
class Set(Generic):
    """Stub for typing.Set."""
    pass
class Sequence(Generic):
    """Stub for typing.Sequence."""
    pass
class NamedTuple(Generic):
    """Stub for typing.NamedTuple."""
    pass
class TypeVar:
    """Stub for typing.TypeVar; the name is accepted and discarded."""
    def __init__(self, name: str) -> None:
        pass
|
none
| 1
| 2.822477
| 3
|
|
utils/info_md.py
|
marzoul/prjxray
| 11
|
6629517
|
<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import argparse
import hashlib
import os
import parse as format_parser
import subprocess
import sys
"""Module for generating the Info.md file found in the database directory."""
# Markdown template for the top of Info.md; main() fills in the current
# date and git commit information.
info_md_header = """
# Details
Last updated on {human_date} ({iso8601_date}).
Created using [Project X-Ray](https://github.com/SymbiFlow/prjxray) version [{commit_hash_short}](https://github.com/SymbiFlow/prjxray/commit/{commit_hash_long}).
Latest commit was;
```
{commit_latest}
```
"""
# Per-part section template: links the settings script (with its sha256)
# and introduces the per-file checksum list.
info_md_section = """
## Database for [{part_line}]({part_line}/)
### Settings
Created using following [settings/{part_line}.sh (sha256: {settings_sha256})](https://github.com/SymbiFlow/prjxray/blob/{commit_hash_long}/settings/{part_line}.sh)
```shell
{settings_contents}
```
### [Results]({part_line}/)
Results have checksums;
"""
# One bullet line per database file: checksum plus a relative link.
info_md_file = " * [`{file_sha256} ./{file_short_path}`](./{file_short_path})\n"
def sha256(s):
    """Return the hex-encoded SHA-256 digest of the bytes *s*."""
    return hashlib.sha256(s).hexdigest()
def sha256_file(p):
    """Return the hex SHA-256 digest of the file at path *p*.

    Uses a context manager so the handle is closed deterministically
    (the original left the file object to be reclaimed by the GC).
    """
    with open(p, 'rb') as f:
        return sha256(f.read())
def run(c):
    """Run shell command *c* and return its stdout as stripped text.

    NOTE(review): shell=True is acceptable here because every command is a
    developer-authored constant, never user input.
    """
    return subprocess.check_output(c, shell=True).decode('utf-8').strip()
def main(argv):
    """Regenerate database/Info.md: README + commit info + per-part checksums.

    Reads database/README.md, the git log (or the previously written commit
    block when --keep is given), and every file under database/<part>/,
    then rewrites database/Info.md. Returns 0 on success (process exit code).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--keep',
        default=False,
        action="store_true",
        help="""\
Keep the existing commit information.
""")
    args = parser.parse_args()

    info_md_filename = os.path.join('database', 'Info.md')
    assert os.path.exists(info_md_filename), info_md_filename

    info_md = []
    # Use context managers throughout so file handles are closed promptly
    # (the original leaked the README and settings handles).
    with open('database/README.md') as f:
        info_md.append(f.read())

    v = {}
    v['human_date'] = run('TZ=UTC date')
    v['iso8601_date'] = run('TZ=UTC date --iso-8601=seconds')
    if not args.keep:
        v['commit_latest'] = run('git log -1')
        v['commit_hash_short'] = run('git log -1 --pretty=%h')
        v['commit_hash_long'] = run('git log -1 --pretty=%H')
    else:
        # --keep: recover the previously written commit details from the
        # existing Info.md instead of querying git.
        with open(info_md_filename) as f:
            result = format_parser.parse(
                '{before}' + info_md_header + '{after}', f.read())
        assert result
        assert result['human_date']
        assert result['iso8601_date']
        v['commit_latest'] = result['commit_latest']
        v['commit_hash_short'] = result['commit_hash_short']
        v['commit_hash_long'] = result['commit_hash_long']
    info_md.append(info_md_header.format(**v))

    for part_line in sorted(os.listdir('database')):
        if part_line.startswith('.'):
            continue
        part_path = os.path.join('database', part_line)
        if not os.path.isdir(part_path):
            continue
        # NOTE: the original also built an unused sorted os.listdir() list
        # here; that dead code has been removed.

        settings_path = os.path.join('settings', part_line + '.sh')
        with open(settings_path, 'rb') as f:
            settings_raw = f.read()

        w = {}
        w['commit_hash_long'] = v['commit_hash_long']
        w['part_line'] = part_line
        w['settings_contents'] = settings_raw.decode('utf-8')
        w['settings_sha256'] = sha256(settings_raw)
        info_md.append(info_md_section.format(**w))

        # Collect every file under this part's directory, recursively.
        files = []
        for dirpath, dirnames, filenames in os.walk(part_path):
            for f in filenames:
                files.append(os.path.join(dirpath, f))
        files.sort()

        for p in files:
            x = {}
            # ('file_real_path' was assigned in the original but never used
            # by the info_md_file template; removed as dead code.)
            x['file_short_path'] = os.path.join(
                part_line, os.path.relpath(p, part_path))
            x['file_sha256'] = sha256_file(p)
            info_md.append(info_md_file.format(**x))

    with open(info_md_filename, 'w') as f:
        f.write("".join(info_md))
    return 0
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import argparse
import hashlib
import os
import parse as format_parser
import subprocess
import sys
"""Module for generating the Info.md file found in the database directory."""
info_md_header = """
# Details
Last updated on {human_date} ({iso8601_date}).
Created using [Project X-Ray](https://github.com/SymbiFlow/prjxray) version [{commit_hash_short}](https://github.com/SymbiFlow/prjxray/commit/{commit_hash_long}).
Latest commit was;
```
{commit_latest}
```
"""
info_md_section = """
## Database for [{part_line}]({part_line}/)
### Settings
Created using following [settings/{part_line}.sh (sha256: {settings_sha256})](https://github.com/SymbiFlow/prjxray/blob/{commit_hash_long}/settings/{part_line}.sh)
```shell
{settings_contents}
```
### [Results]({part_line}/)
Results have checksums;
"""
info_md_file = " * [`{file_sha256} ./{file_short_path}`](./{file_short_path})\n"
def sha256(s):
    """Return the hex SHA-256 digest of the byte string *s*."""
    return hashlib.sha256(s).hexdigest()
def sha256_file(p):
    """Return the hex SHA-256 digest of the file at path *p*.

    Uses a context manager so the file handle is closed deterministically
    (the original left it to the garbage collector).
    """
    with open(p, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()
def run(c):
    """Run shell command *c*; return its stdout decoded as UTF-8 and stripped."""
    stdout_bytes = subprocess.check_output(c, shell=True)
    text = stdout_bytes.decode('utf-8')
    return text.strip()
def main(argv):
    """Regenerate database/Info.md from the current database/ contents.

    Reads database/README.md, appends commit/date metadata and a per-part
    section listing every database file with its sha256 checksum, then
    rewrites database/Info.md in place.  Must be run from the repository
    root (all paths are relative).  *argv* is accepted by convention but
    argparse reads sys.argv itself.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--keep',
        default=False,
        action="store_true",
        help="""\
Keep the existing commit information.
""")
    args = parser.parse_args()
    info_md_filename = os.path.join('database', 'Info.md')
    # The file must already exist; this script only refreshes it.
    assert os.path.exists(info_md_filename), info_md_filename
    info_md = []
    info_md.append(open('database/README.md').read())
    v = {}
    v['human_date'] = run('TZ=UTC date')
    v['iso8601_date'] = run('TZ=UTC date --iso-8601=seconds')
    if not args.keep:
        # Fresh run: take commit metadata from the current git checkout.
        v['commit_latest'] = run('git log -1')
        v['commit_hash_short'] = run('git log -1 --pretty=%h')
        v['commit_hash_long'] = run('git log -1 --pretty=%H')
    else:
        # --keep: recover the previously recorded commit metadata by
        # reverse-parsing the existing Info.md against the header template.
        with open(info_md_filename) as f:
            result = format_parser.parse(
                '{before}' + info_md_header + '{after}', f.read())
        assert result
        assert result['human_date']
        assert result['iso8601_date']
        v['commit_latest'] = result['commit_latest']
        v['commit_hash_short'] = result['commit_hash_short']
        v['commit_hash_long'] = result['commit_hash_long']
    info_md.append(info_md_header.format(**v))
    # One section per part directory under database/; hidden entries and
    # plain files are skipped.
    for part_line in sorted(os.listdir('database')):
        if part_line.startswith('.'):
            continue
        part_path = os.path.join('database', part_line)
        if not os.path.isdir(part_path):
            continue
        # NOTE(review): this listing is unused -- it is rebuilt from
        # os.walk() below; looks like leftover code.
        files = list(os.listdir(part_path))
        files.sort()
        settings_path = os.path.join('settings', part_line + '.sh')
        settings_raw = open(settings_path, 'rb').read()
        w = {}
        w['commit_hash_long'] = v['commit_hash_long']
        w['part_line'] = part_line
        w['settings_contents'] = settings_raw.decode('utf-8')
        w['settings_sha256'] = sha256(settings_raw)
        info_md.append(info_md_section.format(**w))
        # Walk the part directory recursively and checksum every file.
        files = []
        for dirpath, dirnames, filenames in os.walk(part_path):
            for f in filenames:
                files.append(os.path.join(dirpath, f))
        files.sort()
        for p in files:
            x = {}
            x['file_real_path'] = './' + p
            x['file_short_path'] = os.path.join(
                part_line, os.path.relpath(p, part_path))
            x['file_sha256'] = sha256_file(p)
            info_md.append(info_md_file.format(**x))
    with open(info_md_filename, 'w') as f:
        f.write("".join(info_md))
    return 0
# Script entry point: regenerate database/Info.md and exit with its status.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
en
| 0.730122
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright (C) 2017-2020 The Project X-Ray Authors. # # Use of this source code is governed by a ISC-style # license that can be found in the LICENSE file or at # https://opensource.org/licenses/ISC # # SPDX-License-Identifier: ISC Module for generating the Info.md file found in the database directory. # Details Last updated on {human_date} ({iso8601_date}). Created using [Project X-Ray](https://github.com/SymbiFlow/prjxray) version [{commit_hash_short}](https://github.com/SymbiFlow/prjxray/commit/{commit_hash_long}). Latest commit was; ``` {commit_latest} ``` ## Database for [{part_line}]({part_line}/) ### Settings Created using following [settings/{part_line}.sh (sha256: {settings_sha256})](https://github.com/SymbiFlow/prjxray/blob/{commit_hash_long}/settings/{part_line}.sh) ```shell {settings_contents} ``` ### [Results]({part_line}/) Results have checksums; \ Keep the existing commit information.
| 2.273039
| 2
|
ex086b.py
|
lucaspereirag/pythonProject
| 0
|
6629518
|
<filename>ex086b.py
# 3x3 integer matrix: read nine values from the user, then print the grid.
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
# Fill the matrix cell by cell from keyboard input.
for linha in range(0, 3):
    for coluna in range(0, 3):
        matriz[linha][coluna] = (int(input(f'Número [{linha}][{coluna}]: ')))
# Print the matrix, one row per line, each value centred in 5 columns.
for linha in range(0, 3):
    for coluna in range(0, 3):
        print(f'[{matriz[linha][coluna]:^5}]',end='')
    print()
|
<filename>ex086b.py
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for linha in range(0, 3):
for coluna in range(0, 3):
matriz[linha][coluna] = (int(input(f'Número [{linha}][{coluna}]: ')))
for linha in range(0, 3):
for coluna in range(0, 3):
print(f'[{matriz[linha][coluna]:^5}]',end='')
print()
|
none
| 1
| 3.65849
| 4
|
|
download pdfs from url inside a pdf/download_pdf_link.py
|
zeserrado-marques/Basic-stuff
| 1
|
6629519
|
<gh_stars>1-10
import requests

# Download a Springer book PDF and save it under books_springer/.
url = 'https://link.springer.com/content/pdf/10.1007%2F978-3-030-25943-3.pdf'
myfile = requests.get(url)
# Fail loudly on HTTP errors instead of silently saving an error page.
myfile.raise_for_status()
# Context manager guarantees the file is flushed and closed.
with open('books_springer/test.pdf', 'wb') as f:
    f.write(myfile.content)
|
import requests
url = 'https://link.springer.com/content/pdf/10.1007%2F978-3-030-25943-3.pdf'
myfile = requests.get(url)
open('books_springer/test.pdf', 'wb').write(myfile.content)
|
none
| 1
| 2.476255
| 2
|
|
trainer/trainer.py
|
esp32wrangler/Self-Driving-Car
| 1
|
6629520
|
from keras.layers import Dense, GlobalAveragePooling2D, Reshape, Dropout, Conv2D, Activation
from keras.models import Model
import numpy as np
import cv2
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.mobilenet import preprocess_input
from keras.layers import Input, Lambda
import tensorflow as tf
from keras.applications.mobilenet import MobileNet
from keras import backend as K
# I couldn't get the freeze_graph.py script working, so I took this approach to generate the pb file from
# https://stackoverflow.com/questions/45466020/how-to-export-keras-h5-to-tensorflow-pb
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.

    Creates a new computation graph where variable nodes are replaced by
    constants taking their current value in the session. The new graph will be
    pruned so subgraphs that are not necessary to compute the requested
    outputs are removed.  (Relies on TF1-style graph APIs:
    tf.global_variables / tf.graph_util.convert_variables_to_constants.)

    @param session The TensorFlow session to be frozen.
    @param keep_var_names A list of variable names that should not be frozen,
                          or None to freeze all the variables in the graph.
    @param output_names Names of the relevant graph outputs.
    @param clear_devices Remove the device directives from the graph for better portability.
    @return The frozen graph definition.
    """
    graph = session.graph
    with graph.as_default():
        # Freeze every global variable except those explicitly kept.
        freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        output_names = output_names or []
        # Also list the variables as outputs so they survive graph pruning.
        output_names += [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            # Strip device pinning so the graph loads on any machine.
            for node in input_graph_def.node:
                node.device = ""
        frozen_graph = tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names, freeze_var_names)
        return frozen_graph
# Option to include the "none" state (class "3") or exclude it.
CLASSES_TO_TRAIN=["0", "1", "2", "3"]
#CLASSES_TO_TRAIN=["0", "1", "2"]
NUMBER_OF_CLASSES=len(CLASSES_TO_TRAIN)
EPOCHS=8
freeze_flag = False  # With frozen layers the results were much worse, so I let Keras retrain the whole model
weights_flag = 'imagenet'  # 'imagenet' or None
preprocess_flag = True  # Should be true for ImageNet pre-trained typically
# This is the largest supported size for the Keras application
input_size = (224,224,3)
# we have to remove the top to change the number of classes
mobilenet = MobileNet(input_shape=input_size, include_top=False,
                      weights=weights_flag)
if freeze_flag == True:
    for layer in mobilenet.layers:
        layer.trainable = False
# Input placeholder matching MobileNet's 224x224x3 input.
input_ph = Input(shape=(224,224,3), name = "input_tensor")
inp = mobilenet(input_ph)
# Add back the missing top (based on the mobilenet.py in Keras)
gap = GlobalAveragePooling2D() (inp)
gap = Reshape((1,1,1024)) (gap)
gap = Dropout(1e-3) (gap)
gap = Conv2D (NUMBER_OF_CLASSES, (1,1), padding='same', name='conv_preds') (gap)
gap = Activation ('softmax', name='act_softmax') (gap)
predictions = Reshape ((NUMBER_OF_CLASSES,), name='result_tensor') (gap)
model = Model(inputs=input_ph, outputs=predictions)
# Compile the model
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Check the summary of this new model to confirm the architecture
model.summary()
BATCH_SIZE = 32
# Training augmentation: small rotations/shifts/shear/zoom plus horizontal
# flips; validation data only receives the MobileNet preprocessing.
datagen_train = ImageDataGenerator(preprocessing_function=preprocess_input,
                                   rotation_range = 5, width_shift_range = 0.2,
                                   height_shift_range = 0.2, shear_range=0.2,
                                   zoom_range=0.2, horizontal_flip = True)
datagen_valid = ImageDataGenerator(preprocessing_function=preprocess_input)
train_gen = datagen_train.flow_from_directory(directory="data/training",
                                              target_size=(224, 224),
                                              color_mode="rgb",
                                              class_mode="categorical",
                                              classes=CLASSES_TO_TRAIN,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True
                                              )
valid_gen = datagen_valid.flow_from_directory(directory="data/validation",
                                              target_size=(224, 224),
                                              color_mode="rgb",
                                              class_mode="categorical",
                                              classes=CLASSES_TO_TRAIN,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True
                                              )
model.fit_generator(train_gen,
                    steps_per_epoch = train_gen.samples // BATCH_SIZE,
                    validation_data = valid_gen,
                    validation_steps = valid_gen.samples // BATCH_SIZE,
                    epochs = EPOCHS, verbose=1)
print(model.output.op.name)
# Freeze the trained model and export it as a TensorFlow .pb graph.
frozen_graph = freeze_session(K.get_session(),
                              output_names=[out.op.name for out in model.outputs])
tf.train.write_graph(frozen_graph, "model", "my_model" + str(NUMBER_OF_CLASSES) + ".pb", as_text=False)
|
from keras.layers import Dense, GlobalAveragePooling2D, Reshape, Dropout, Conv2D, Activation
from keras.models import Model
import numpy as np
import cv2
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.mobilenet import preprocess_input
from keras.layers import Input, Lambda
import tensorflow as tf
from keras.applications.mobilenet import MobileNet
from keras import backend as K
# I couldn't get the freeze_graph.py script working, so I took this approach to generate the pb file from
# https://stackoverflow.com/questions/45466020/how-to-export-keras-h5-to-tensorflow-pb
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
"""
Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
@param session The TensorFlow session to be frozen.
@param keep_var_names A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
@param output_names Names of the relevant graph outputs.
@param clear_devices Remove the device directives from the graph for better portability.
@return The frozen graph definition.
"""
graph = session.graph
with graph.as_default():
freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = tf.graph_util.convert_variables_to_constants(
session, input_graph_def, output_names, freeze_var_names)
return frozen_graph
#option to include "none" state or exclude it
CLASSES_TO_TRAIN=["0", "1", "2", "3"]
#CLASSES_TO_TRAIN=["0", "1", "2"]
NUMBER_OF_CLASSES=len(CLASSES_TO_TRAIN)
EPOCHS=8
freeze_flag = False # With frozen layers the results were much worse, so I let Keras retrain the whole model
weights_flag = 'imagenet' # 'imagenet' or None
preprocess_flag = True # Should be true for ImageNet pre-trained typically
# This is the largest supported size for the Keras application
input_size = (224,224,3)
# we have to remove the top to change the number of classes
mobilenet = MobileNet(input_shape=input_size, include_top=False,
weights=weights_flag)
if freeze_flag == True:
for layer in mobilenet.layers:
layer.trainable = False
# Makes the input placeholder layer 32x32x3 for CIFAR-10
input_ph = Input(shape=(224,224,3), name = "input_tensor")
inp = mobilenet(input_ph)
# Add back the missing top (based on the mobilenet.py in Keras)
gap = GlobalAveragePooling2D() (inp)
gap = Reshape((1,1,1024)) (gap)
gap = Dropout(1e-3) (gap)
gap = Conv2D (NUMBER_OF_CLASSES, (1,1), padding='same', name='conv_preds') (gap)
gap = Activation ('softmax', name='act_softmax') (gap)
predictions = Reshape ((NUMBER_OF_CLASSES,), name='result_tensor') (gap)
model = Model(inputs=input_ph, outputs=predictions)
# Compile the model
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Check the summary of this new model to confirm the architecture
model.summary()
BATCH_SIZE = 32
datagen_train = ImageDataGenerator(preprocessing_function=preprocess_input,
rotation_range = 5, width_shift_range = 0.2,
height_shift_range = 0.2, shear_range=0.2,
zoom_range=0.2, horizontal_flip = True)
datagen_valid = ImageDataGenerator(preprocessing_function=preprocess_input)
train_gen = datagen_train.flow_from_directory(directory="data/training",
target_size=(224, 224),
color_mode="rgb",
class_mode="categorical",
classes=CLASSES_TO_TRAIN,
batch_size=BATCH_SIZE,
shuffle=True
)
valid_gen = datagen_valid.flow_from_directory(directory="data/validation",
target_size=(224, 224),
color_mode="rgb",
class_mode="categorical",
classes=CLASSES_TO_TRAIN,
batch_size=BATCH_SIZE,
shuffle=True
)
model.fit_generator(train_gen,
steps_per_epoch = train_gen.samples // BATCH_SIZE,
validation_data = valid_gen,
validation_steps = valid_gen.samples // BATCH_SIZE,
epochs = EPOCHS, verbose=1)
print(model.output.op.name)
frozen_graph = freeze_session(K.get_session(),
output_names=[out.op.name for out in model.outputs])
tf.train.write_graph(frozen_graph, "model", "my_model" + str(NUMBER_OF_CLASSES) + ".pb", as_text=False)
|
en
| 0.852986
|
# I couldn't get the freeze_graph.py script working, so I took this approach to generate the pb file from # https://stackoverflow.com/questions/45466020/how-to-export-keras-h5-to-tensorflow-pb Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
@param session The TensorFlow session to be frozen.
@param keep_var_names A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
@param output_names Names of the relevant graph outputs.
@param clear_devices Remove the device directives from the graph for better portability.
@return The frozen graph definition. #option to include "none" state or exclude it #CLASSES_TO_TRAIN=["0", "1", "2"] # With frozen layers the results were much worse, so I let Keras retrain the whole model # 'imagenet' or None # Should be true for ImageNet pre-trained typically # This is the largest supported size for the Keras application # we have to remove the top to change the number of classes # Makes the input placeholder layer 32x32x3 for CIFAR-10 # Add back the missing top (based on the mobilenet.py in Keras) # Compile the model # Check the summary of this new model to confirm the architecture
| 2.804895
| 3
|
plug/spider_views.py
|
zx273983653/vulscan
| 582
|
6629521
|
#coding=utf-8
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
#导入公共函数库
from appscan.function import *
#导入数据库模型
from plug.models import spider,spider_conf
#爬虫首页
def index(request):
    """Render the spider (crawler) landing page.

    Shows previous crawl results, the last-used search form values and the
    list of available POCs for the vulnerability-scan dropdown.
    """
    # Previously harvested URLs.
    tables = spider.objects.all()
    # Last saved form configuration; fall back to empty fields on first use.
    form_conf = {}
    db_conf = spider_conf.objects.all()
    if db_conf:
        form_conf = spider_conf.objects.all()[0]
    else:
        form_conf['keyword']=''
        form_conf['exec_sousuo']=''
        form_conf['page_sousuo']=''
        form_conf['quanzhong_vaule']=''
    # POC list rendered into the scan selector.
    data = poc_list.objects.all()
    return render(request,'spider.html',{'tables':tables,'form_conf':form_conf,'data':data,})
#爬虫表单处理
def spider_action(request):
    """Handle the crawl form: run the Baidu search crawl and report progress.

    On POST, persists the submitted form configuration, then crawls the
    requested number of Baidu result pages via run_spider(), updating the
    module-global ``spider_jindu`` progress percentage as it goes.  Returns
    HttpResponse(1) when the crawl finishes; renders the form page otherwise.
    """
    if request.POST:
        keyword = request.POST['keyword']                   # search keyword
        exec_sousuo = request.POST['exec_sousuo']           # search-engine operator (e.g. inurl:)
        page_sousuo = int(request.POST['page_sousuo'])      # number of result pages to crawl
        quanzhong_vaule = request.POST['quanzhong_vaule']   # minimum aizhan Baidu rank
        chongfu_check = request.POST['chongfu_check']       # de-duplication flag (was read twice)
        vul_id = request.POST['vulid']                      # POC id used for scanning
        # Persist the form configuration (single-row table: clear then insert).
        spider_conf.objects.all().delete()
        spider_conf.objects.create(keyword=keyword,exec_sousuo=exec_sousuo,page_sousuo=page_sousuo,quanzhong_vaule=quanzhong_vaule)
        word = keyword + " " + exec_sousuo  # combined search query
        # Crawl progress (percentage) shared with get_jindu() via a global.
        global spider_jindu
        for i in xrange(1,page_sousuo):
            spider_jindu = int((float(i+1)/float(page_sousuo))*100)
            run_spider(word,i,quanzhong_vaule,chongfu_check,vul_id)
        else:
            # for/else: runs once the crawl loop completes; 1 signals success
            # to the AJAX caller.
            return HttpResponse(1)
    else:
        return render(request,'spider.html',)
#爬虫ajax进度
def get_jindu(request):
    """AJAX endpoint: return the current crawl progress percentage."""
    try:
        return HttpResponse(spider_jindu)
    except:
        # spider_jindu only exists once a crawl has started; report 0 until then.
        return HttpResponse("0")
#搜索关键词 和 搜索页数
def run_spider(word = 'inurl:.php?id',page = 1 ,quanzhong = 1,chongfu = 1,vul_id=1):
    """Crawl one Baidu result page for *word* and store qualifying URLs.

    For each result link: skip already-stored domains, require an aizhan
    Baidu rank of at least *quanzhong*, scan with POC *vul_id*, and save
    hits to the ``spider`` table.  *chongfu* (the de-dup flag) is accepted
    but not used inside this function.
    """
    baseUrl = 'http://www.baidu.com/s'
    data = "?wd="+word+"&pn="+str(page-1)+"0"+"&tn=baidurt&ie=utf-8&bsst=1"
    # Fetch the Baidu result page.
    try:
        html = requests.get(url=baseUrl+data,headers = headers,verify=False).content
    except:
        # NOTE(review): on failure ``html`` stays undefined and the BS()
        # call below raises NameError -- confirm intended behaviour.
        pass
    # Parse the result markup.
    soup = BS(html,'lxml')
    td = soup.find_all(class_='f')
    # Walk the Baidu snapshot result entries.
    for t in td:
        link_url = t.h3.a['href']
        # Only process domains not already stored.
        if repeated(link_url):
            # Require the aizhan Baidu rank to meet the threshold.
            rank = int(get_rank(link_url))
            print link_url,"========================================================",rank
            if rank >= int(quanzhong):
                # Keep the URL only when the POC scan reports a vulnerability.
                if scan_poc(link_url,vul_id):
                    spider.objects.create(url=link_url,aizhan=rank,vulid=vul_id)
                else:
                    # No vulnerability found.
                    pass
            else:
                # Rank below threshold: discard.
                pass
        else:
            # Duplicate domain: ignore.
            pass
#判断数据库中是否存在该域名
def repeated(url):
    """Return True when *url*'s domain is NOT yet in the spider table.

    (Despite the name, True means the URL is new and should be processed;
    False means it is a duplicate or could not be parsed.)
    """
    try:
        # Extract 'http://host/path.php?' -- everything up to the first '?'.
        domain = re.findall(r"^http://.*?\?{1}",url,re.I)[0]
    except:
        # No query string (or malformed URL): treat as not-new and skip.
        return False
    # Regex-match stored URLs against this prefix.
    result = spider.objects.filter(url__iregex="^"+domain)
    if len(result) > 0:
        return False  # already stored
    else:
        return True
#获取爱站百度权重
def get_rank(url):
    """Look up *url*'s Baidu rank on baidurank.aizhan.com.

    Returns the rank digit as a string on success, or 0 when the site
    cannot be fetched/parsed.
    """
    try:
        baseUrl = "http://baidurank.aizhan.com/"
        # Reduce the URL to its bare hostname.
        siteurl = re.findall(r"^http://.*?/{1}",url,re.I)[0].replace('http://','').replace('/','')
        html = requests.get(baseUrl + siteurl).content
        soup = BS(html,'lxml')
        # The rank digit is embedded in an image filename such as '3.png'.
        div = soup.find_all('div',class_='ip')
        rank = str(div[0]).replace('\t','').replace('\n','').replace(' ','')
        results = re.findall(r"(\d)\.png",rank,re.I)[-1]
    except Exception ,e:
        print e,'get_rank error!'
        # Unreachable/unparsable site: treat as rank 0.
        return 0
    # Retry when the page parsed but yielded no rank.
    if results:
        return results
    else:
        print 'get rank error!',url
        # NOTE(review): the retry result is not returned, so this branch
        # yields None -- looks like a bug; should be `return get_rank(url)`.
        get_rank(url)
#下载结果集
def show_tables(request):
    """Download all harvested URLs as a plain-text attachment (urls.txt)."""
    tables = spider.objects.all()
    data =""
    for results in tables:
        data = data + str(results.url) + "\n"
    response = HttpResponse(data)
    response['Content-Type'] = 'application/octet-stream'
    response['Content-Disposition'] = 'attachment;filename="urls.txt"'
    return response
#清空结果集
def delete_tables(request):
    """Clear every crawl result and reset the progress counter; returns 1."""
    result_del = spider.objects.all().delete()
    global spider_jindu
    spider_jindu = 0
    # delete() reports the number of removed rows; 1 signals success to the
    # AJAX caller.
    return HttpResponse(1)
#删除 一个url记录
def delete_url(request,vid):
    """Delete the single crawl-result row with primary key *vid*; returns 1."""
    res = spider.objects.get(id=vid).delete()
    return HttpResponse(1)
"""
待解决问题:
1.权重配置 动态装入;== ok
2.一键清空数据库结果;== ok
3.通过 权重排序显示
4.发送到其他模块
5.解决前端bug问题
6.增加多线程扫描
"""
|
#coding=utf-8
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
#导入公共函数库
from appscan.function import *
#导入数据库模型
from plug.models import spider,spider_conf
#爬虫首页
def index(request):
#获取爬虫结果
tables = spider.objects.all()
#获取爬虫表单配置
form_conf = {}
db_conf = spider_conf.objects.all()
if db_conf:
form_conf = spider_conf.objects.all()[0]
else:
form_conf['keyword']=''
form_conf['exec_sousuo']=''
form_conf['page_sousuo']=''
form_conf['quanzhong_vaule']=''
#渲染poc列表
data = poc_list.objects.all()
return render(request,'spider.html',{'tables':tables,'form_conf':form_conf,'data':data,})
#爬虫表单处理
def spider_action(request):
if request.POST:
keyword = request.POST['keyword'] #搜索关键词
exec_sousuo = request.POST['exec_sousuo'] #搜索引擎命令
page_sousuo = int(request.POST['page_sousuo']) #搜索页数
quanzhong_vaule = request.POST['quanzhong_vaule'] #爱站权重
chongfu_check = request.POST['chongfu_check'] #是否去重
chongfu_check = request.POST['chongfu_check'] #是否去重
vul_id = request.POST['vulid'] #获取扫描的POC
#保存爬虫表单参数配置
spider_conf.objects.all().delete()
spider_conf.objects.create(keyword=keyword,exec_sousuo=exec_sousuo,page_sousuo=page_sousuo,quanzhong_vaule=quanzhong_vaule)
word = keyword + " " + exec_sousuo #汇总搜索关键词参数
#定义爬虫进度
global spider_jindu
#page_sousuo = 100
for i in xrange(1,page_sousuo):
spider_jindu = int((float(i+1)/float(page_sousuo))*100) #转化为整数百分比
#time.sleep(1) # 调试显示进度效果
#下面开始执行搜索操作
#print "run_spider++++++++",word,i,quanzhong_vaule,chongfu_check,vul_id,""
run_spider(word,i,quanzhong_vaule,chongfu_check,vul_id)
else:
#pass 结束后返回1,避免报错,结束 可以刷新页面显示结果 或者 清空重置页面表单
#spider_jindu=0
return HttpResponse(1)
#return HttpResponse(spider_jindu)
else:
return render(request,'spider.html',)
#爬虫ajax进度
def get_jindu(request):
try:
return HttpResponse(spider_jindu)
except:
#当没有进度时,返回 0
return HttpResponse("0")
#搜索关键词 和 搜索页数
def run_spider(word = 'inurl:.php?id',page = 1 ,quanzhong = 1,chongfu = 1,vul_id=1):
baseUrl = 'http://www.baidu.com/s'
data = "?wd="+word+"&pn="+str(page-1)+"0"+"&tn=baidurt&ie=utf-8&bsst=1"
#获取url信息
try:
html = requests.get(url=baseUrl+data,headers = headers,verify=False).content
#print html
except:
pass
#读取加载url
soup = BS(html,'lxml')
td = soup.find_all(class_='f')
#查找百度快照结果树
for t in td:
link_url = t.h3.a['href']
#核心调试用 print link_url
#判断是否重复域名
if repeated(link_url):
#判断权重是否大于quanzhong,大于quanzhong则写入文件
rank = int(get_rank(link_url))
print link_url,"========================================================",rank
if rank >= int(quanzhong):
#判断是否有漏洞
if scan_poc(link_url,vul_id):
#写入数据库
spider.objects.create(url=link_url,aizhan=rank,vulid=vul_id)
else:
#没有漏洞
pass
else:
#权重小于1,不写入
pass
else:
#重复了,不做处理
#print "url repeated :",link_url
pass
#判断数据库中是否存在该域名
def repeated(url):
try:
domain = re.findall(r"^http://.*?\?{1}",url,re.I)[0] #获取 http://domain.com/index.php?
except:
#print "get domain error!",url
return False
#正则匹配域名,判断数据库中是否存在
result = spider.objects.filter(url__iregex="^"+domain)
if len(result) > 0:
return False #重复
else:
return True
#获取爱站百度权重
def get_rank(url):
try:
baseUrl = "http://baidurank.aizhan.com/"
siteurl = re.findall(r"^http://.*?/{1}",url,re.I)[0].replace('http://','').replace('/','')
html = requests.get(baseUrl + siteurl).content
#解决延迟加载问题
#time.sleep(5)
#print baseUrl + siteurl,html
#exit()
soup = BS(html,'lxml')
#新的获取权重正则
div = soup.find_all('div',class_='ip')
rank = str(div[0]).replace('\t','').replace('\n','').replace(' ','')
results = re.findall(r"(\d)\.png",rank,re.I)[-1]
#print results
#div = soup.find('div',class_='mb10').find_all('td')
#rank = str(div[1]).replace('\t','').replace('\n','').replace(' ','')
#results = re.findall(r"(\d)\.gif",rank,re.I)[-1]
except Exception ,e:
print e,'get_rank error!'
#get_rank(url) #如果网站打不开就不要了,返回0
return 0
#判断是否获取到权重,未获取到重新获取
if results:
return results
else:
print 'get rank error!',url
get_rank(url)
#下载结果集
def show_tables(request):
tables = spider.objects.all()
data =""
for results in tables:
data = data + str(results.url) + "\n"#"<br>"
#print data
response = HttpResponse(data)
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment;filename="urls.txt"'
return response
#清空结果集
def delete_tables(request):
result_del = spider.objects.all().delete()
global spider_jindu
spider_jindu = 0
#删除会 返回 删除的条数
#print result_del
#删除成功后返回1
return HttpResponse(1)
#删除 一个url记录
def delete_url(request,vid):
res = spider.objects.get(id=vid).delete()
return HttpResponse(1)
"""
待解决问题:
1.权重配置 动态装入;== ok
2.一键清空数据库结果;== ok
3.通过 权重排序显示
4.发送到其他模块
5.解决前端bug问题
6.增加多线程扫描
"""
|
zh
| 0.799106
|
#coding=utf-8 #导入公共函数库 #导入数据库模型 #爬虫首页 #获取爬虫结果 #获取爬虫表单配置 #渲染poc列表 #爬虫表单处理 #搜索关键词 #搜索引擎命令 #搜索页数 #爱站权重 #是否去重 #是否去重 #获取扫描的POC #保存爬虫表单参数配置 #汇总搜索关键词参数 #定义爬虫进度 #page_sousuo = 100 #转化为整数百分比 #time.sleep(1) # 调试显示进度效果 #下面开始执行搜索操作 #print "run_spider++++++++",word,i,quanzhong_vaule,chongfu_check,vul_id,"" #pass 结束后返回1,避免报错,结束 可以刷新页面显示结果 或者 清空重置页面表单 #spider_jindu=0 #return HttpResponse(spider_jindu) #爬虫ajax进度 #当没有进度时,返回 0 #搜索关键词 和 搜索页数 #获取url信息 #print html #读取加载url #查找百度快照结果树 #核心调试用 print link_url #判断是否重复域名 #判断权重是否大于quanzhong,大于quanzhong则写入文件 #判断是否有漏洞 #写入数据库 #没有漏洞 #权重小于1,不写入 #重复了,不做处理 #print "url repeated :",link_url #判断数据库中是否存在该域名 #获取 http://domain.com/index.php? #print "get domain error!",url #正则匹配域名,判断数据库中是否存在 #重复 #获取爱站百度权重 #解决延迟加载问题 #time.sleep(5) #print baseUrl + siteurl,html #exit() #新的获取权重正则 #print results #div = soup.find('div',class_='mb10').find_all('td') #rank = str(div[1]).replace('\t','').replace('\n','').replace(' ','') #results = re.findall(r"(\d)\.gif",rank,re.I)[-1] #get_rank(url) #如果网站打不开就不要了,返回0 #判断是否获取到权重,未获取到重新获取 #下载结果集 #print data #清空结果集 #删除会 返回 删除的条数 #print result_del #删除成功后返回1 #删除 一个url记录 待解决问题: 1.权重配置 动态装入;== ok 2.一键清空数据库结果;== ok 3.通过 权重排序显示 4.发送到其他模块 5.解决前端bug问题 6.增加多线程扫描
| 2.043933
| 2
|
ML/WhatsApp/analysis.py
|
saneravi/ML_Stuff
| 209
|
6629522
|
#!/usr/bin/env python
"""Exploratory analysis of a whatsapp chat conversation."""
import datetime
import os
import re
# core modules
from collections import Counter
import matplotlib.pyplot as plt
# 3rd party modules (you might have to install them via pip)
import pandas as pd
def main(path):
    """Analyze a whatsapp chat file.

    Parses the export at *path*, derives response-time columns, prints
    summary statistics, writes plots into images/, runs the word/smiley
    analysis, and returns the resulting dataframe.
    """
    df = parse_file(path)
    df = augment_df(df)
    print_general_statistics(df)
    generate_visualizations(df)
    text_mining(df)
    return df
class Message:
    """A single WhatsApp message parsed from one raw chat-export line."""

    def __init__(self, line):
        """Parse *line*, formatted as 'dd/mm/YYYY, HH:MM - sender: text'."""
        self.line = line
        # First 17 characters carry the timestamp; characters 17-19 are ' - '.
        self.datetime = datetime.datetime.strptime(line[:17],
                                                   '%d/%m/%Y, %H:%M')
        body = line[20:]
        if ':' in body:
            sender, text = body.split(':', 1)
            self.sender = sender
            self.text = text.strip()
        else:
            # No 'sender:' prefix -> system notification.
            self.sender = 'SYSTEM'
            self.text = body

    def __str__(self):
        return 'Message({:%y-%m-%d}, {})'.format(self.datetime, self.text[:30])

    __repr__ = __str__
def parse_file(path):
    """Parse a WhatsApp chat file into a dataframe.

    Returns one row per message with columns: sender, text, nb_words
    (word-token count) and date (message timestamp).
    """
    with open(path) as f:
        data = f.read()
    messages = parse_into_messages(data)
    df = pd.DataFrame({'sender': [msg.sender for msg in messages],
                       'text': [msg.text for msg in messages],
                       'nb_words': [count_words(msg.text) for msg in messages],
                       'date': [msg.datetime for msg in messages]})
    return df
def augment_df(df):
    """Add response-time columns to the chat dataframe (in place).

    ``response_to`` is the previous message's timestamp, but only when the
    previous message came from a *different* sender (consecutive messages by
    the same person are not "responses").  ``response_seconds`` is the
    resulting delay in seconds (NaN where there is no response).
    """
    prev_sender = df['sender'].shift(1)
    prev_date = df['date'].shift(1)
    df['sender_before'] = prev_sender
    df['date_before'] = prev_date
    df['response_to'] = prev_date
    same_sender = prev_sender == df['sender']
    df.loc[same_sender, 'response_to'] = None
    df['response_time'] = df['date'] - df['response_to']
    df['response_seconds'] = df['response_time'].dt.total_seconds()
    return df
def parse_into_messages(data):
    """
    Split a raw chat export into a list of Message objects.

    Lines starting with a WhatsApp timestamp ("dd/mm/YYYY, HH:MM") begin a
    new message; any other line is a continuation of the previous message.

    Parameters
    ----------
    data : str
        Full contents of an exported WhatsApp chat file.

    Returns
    -------
    messages : List[Message]
    """
    tmp_msg = ''
    # Keep media placeholders as a single token for later analysis.
    data = data.replace('<Media omitted>', 'MEDIA_OMITTED')
    lines = data.split('\n')
    messages = []
    whatsapp_date_pattern = re.compile(r'\d{2}/\d{2}/\d{4}, \d{2}:\d{2}')
    for line in lines:
        if whatsapp_date_pattern.match(line):
            # A timestamp starts a new message: flush the one collected so far.
            if len(tmp_msg) > 17:
                messages.append(Message(tmp_msg))
            tmp_msg = line
        else:
            tmp_msg += '\n' + line
    # Bug fix: the final message used to be dropped because it was only
    # flushed when the *next* timestamp appeared.
    if len(tmp_msg) > 17 and whatsapp_date_pattern.match(tmp_msg):
        messages.append(Message(tmp_msg))
    return messages
def count_words(text):
    """Count the word tokens (runs of ``\\w`` characters) in *text*.

    Equivalent to nltk's ``RegexpTokenizer(r'\\w+')`` used previously, but
    implemented with the standard library to drop the heavy nltk runtime
    dependency.
    """
    return len(re.findall(r'\w+', text))
def generate_visualizations(df):
    """Generate many visualizations for a whatsapp chat df.

    Writes bar-chart PNGs (by hour, weekday/weekend, month, week) into the
    images/ directory via visualize(), plus a word cloud of the chat text.
    """
    visualize(df,
              grouping=(df['date'].dt.hour),
              column_name='date',
              title='Send+received messages by hour',
              xlabel='hour of the day (local time)',
              filename='messages_by_hour.png')
    # Split messages into weekday (Mon-Fri) and weekend (Sat/Sun) subsets.
    df_weekday = df[df['date'].dt.weekday.isin([0, 1, 2, 3, 4])]
    df_weekend = df[df['date'].dt.weekday.isin([5, 6])]
    text = ' '.join(df.text.tolist()).lower()
    create_wordcloud(text)
    visualize(df_weekday,
              grouping=(df_weekday['date'].dt.hour),
              column_name='date',
              title='Send+received messages by hour (weekday only)',
              xlabel='hour of the day (local time)',
              filename='messages_by_hour_weekday.png')
    visualize(df_weekend,
              grouping=(df_weekend['date'].dt.hour),
              column_name='date',
              title='Send+received messages by hour (weekend only)',
              xlabel='hour of the day (local time)',
              filename='messages_by_hour_weekend.png')
    visualize(df,
              grouping=(df['date'].dt.weekday),
              column_name='date',
              title='Send+received messages by weekday',
              xlabel='From Monday=0 to Sunday=6',
              filename='messages_by_weekday.png')
    # Restrict the per-month chart to the most recent 365 days.
    df2 = df[df['date'] > df.date.max() - datetime.timedelta(days=365)]
    visualize(df2,
              grouping=(df2['date'].dt.month),
              column_name='date',
              title='Send+received messages by month',
              filename='messages_by_month.png')
    visualize(df,
              grouping=[df['date'].dt.year, df['date'].dt.week],
              column_name='date',
              title='Send+received messages over time',
              filename='messages_over_time.png')
def visualize(df,
              grouping,
              column_name='start_date',
              color='#494949',
              title='',
              xlabel='',
              filename='image.png'):
    """
    Visualize a dataframe with a date column.

    Groups ``df[column_name]`` by *grouping*, plots the counts as a bar
    chart and saves it to images/<filename> (the images/ directory is
    assumed to exist -- TODO confirm).

    Parameters
    ----------
    df : Pandas dataframe
    grouping : grouping key(s) for Series.groupby (e.g. df['date'].dt.hour)
    column_name : str
        Column to visualize
    color : str
    title : str
    xlabel : str
    filename : str
    """
    plt.figure(figsize=(20, 10))
    ax = (df[column_name].groupby(by=grouping)
          .count()).plot(kind="bar", color=color, stacked=True)  # .unstack()
    ax.set_facecolor('#eeeeee')
    ax.set_xlabel(xlabel)
    ax.set_ylabel("count")
    ax.set_title(title)
    plt.savefig(os.path.join('images', filename))
def print_general_statistics(df):
    """Print summary statistics for the chat dataframe to stdout.

    Covers the overall message count and time span, per-sender message and
    word counts, per-sender message-length distributions, and
    median/mean/max response times (SYSTEM rows excluded where noted).
    """
    print('{nb_messages} messages exchanged between {start_date} and '
          '{end_date} ({time})'
          .format(nb_messages=len(df),
                  start_date=df['date'].min(),
                  end_date=df['date'].max(),
                  time=df['date'].max() - df['date'].min()))
    print(df['sender'].value_counts())
    print(df.groupby('sender').aggregate({'nb_words': sum}))
    print('Message length distribution:')
    senders = [sender for sender in df.sender.unique() if sender != 'SYSTEM']
    for sender in senders:
        df_sender = df[df.sender == sender]
        print(f'## {sender}')
        print('\tmin-words: {}'.format(df_sender['nb_words'].min()))
        print('\tmean-words: {:0.0f}'.format(df_sender['nb_words'].mean()))
        print('\tmax-words: {}'.format(df_sender['nb_words'].max()))
        print('\t---')
        print('\tmin-chars: {}'.format(df_sender['text'].str.len().min()))
        print('\t.25-chars: {}'.format(df_sender['text'].str.len().quantile(0.25)))
        print('\tmean-chars: {:0.0f}'.format(df_sender['text'].str.len().mean()))
        print('\t.95-chars: {}'.format(df_sender['text'].str.len().quantile(0.95)))
        print('\tmax-chars: {}'.format(df_sender['text'].str.len().max()))
    print('## After how many seconds do people react?')
    df_humans = df[df.sender != 'SYSTEM']
    print(df_humans.groupby('sender')
          .agg({'response_seconds': ['median', 'mean', 'max']}))
def text_mining(df):
    """Print the most common words (overall and per sender) and run the
    smiley analysis on human messages."""
    df_humans = df[df.sender != 'SYSTEM']
    corpus = ' '.join(df.text.tolist()).lower()
    overall_counter = Counter(corpus.split(' '))
    print('## Most common 30 words')
    print(overall_counter.most_common(30))
    senders = [sender for sender in df.sender.unique() if sender != 'SYSTEM']
    for sender in senders:
        print(f'## {sender}')
        df_sender = df[df.sender == sender]
        corpus = ' '.join(df_sender.text.tolist()).lower()
        print('### Most common 30 words')
        print(Counter(corpus.split(' ')).most_common(30))
    print('## Smiley Analysis')
    smiley_analysis(df_humans)
def smiley_analysis(df):
    """Print, per sender, how often each frequent smiley occurs."""
    senders = df.sender.unique()
    # One column per sender, one row per smiley (after the transpose).
    counts = {
        smiley: [sum(df[df.sender == sender].text.str.count(smiley))
                 for sender in senders]
        for smiley in find_common_smileys(df)
    }
    table = pd.DataFrame(counts, index=senders).T
    print(table)
def find_common_smileys(df, most_common=20):
    """
    Return the most frequent non-ASCII characters (smileys) in the chat.

    Parameters
    ----------
    df : Pandas dataframe with a ``text`` column
    most_common : int
        How many characters to return, ordered by descending frequency.

    Returns
    -------
    List[str]
    """
    text = ' '.join(df.text.tolist())
    # Characters with a code point above 1000 are treated as smileys/emoji.
    candidates = [char for char in text if ord(char) > 1000]
    counter = Counter(candidates)
    # Bug fix: the `most_common` parameter was previously ignored
    # (the count was hard-coded to 20).
    return [char for char, _count in counter.most_common(most_common)]
def create_wordcloud(text, output='wordcloud.png'):
    """Render a red-tinted word cloud of `text` into images/<output>.

    NOTE(review): stopwords are German — adjust for chats in other languages.
    """
    # Start with loading all necessary libraries
    import random
    # Create and generate a word cloud image:
    from nltk.corpus import stopwords
    from nltk.tokenize import word_tokenize
    from wordcloud import WordCloud
    def love_color_func(word, font_size, position, orientation, random_state=None,
                        **kwargs):
        # A random shade of red/pink for every word.
        val = random.randint(0, 200)
        return (250, val, val)
    stop_words = set(stopwords.words('german')).union({'dass', 'media_omitted'})
    word_tokens = word_tokenize(text)
    filtered_sentence = [w for w in word_tokens if w not in stop_words]
    text = ' '.join(filtered_sentence)
    wordcloud = WordCloud(width=800, height=1200,
                          # max_font_size=50,
                          # max_words=100,
                          background_color="white").generate(text)
    wordcloud.recolor(color_func=love_color_func, random_state=3)
    # Display the generated image:
    plt.figure(figsize=(20, 10))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    plt.savefig(os.path.join('images', output), dpi=300, bbox_inches='tight')
def is_valid_file(parser, arg):
    """
    Check if arg is a valid file that already exists on the file system.

    Parameters
    ----------
    parser : argparse object
    arg : str

    Returns
    -------
    arg : str
        Absolute path of the existing file.
    """
    absolute = os.path.abspath(arg)
    if os.path.exists(absolute):
        return absolute
    # parser.error() raises SystemExit, so no return is needed here.
    parser.error("The file %s does not exist!" % absolute)
def get_parser():
"""Get parser object for whatsapp analysis script."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--file",
dest="filename",
type=lambda x: is_valid_file(parser, x),
required=True,
help="Whatsapp file to analyze",
metavar="FILE")
return parser
if __name__ == '__main__':
    # Entry point: parse CLI arguments and analyze the given chat export.
    args = get_parser().parse_args()
    main(args.filename)
|
#!/usr/bin/env python
"""Exploratory analysis of a whatsapp chat conversation."""
import datetime
import os
import re
# core modules
from collections import Counter
import matplotlib.pyplot as plt
# 3rd party modules (you might have to install them via pip)
import pandas as pd
def main(path):
    """Analyze a whatsapp chat file."""
    # Parse the export into a dataframe, derive helper columns, then run
    # all reporting and visualization steps.
    df = parse_file(path)
    df = augment_df(df)
    print_general_statistics(df)
    generate_visualizations(df)
    text_mining(df)
    return df
class Message:
    """A single WhatsApp message with timestamp, sender and text."""
    def __init__(self, line):
        self.line = line
        # The first 17 characters hold the timestamp, e.g. "12/03/2019, 14:30".
        self.datetime = datetime.datetime.strptime(line[:17],
                                                   '%d/%m/%Y, %H:%M')
        body = line[20:]  # skip the " - " separator after the timestamp
        if ':' in body:
            sender, text = body.split(':', 1)
            self.sender = sender
            self.text = text.strip()
        else:
            # Lines without a "sender:" prefix are WhatsApp system messages.
            self.sender = 'SYSTEM'
            self.text = body
    def __str__(self):
        return 'Message({:%y-%m-%d}, {})'.format(self.datetime, self.text[:30])
    __repr__ = __str__
def parse_file(path):
    """Parse a WhatsApp chat file into a dataframe."""
    with open(path) as f:
        data = f.read()
    messages = parse_into_messages(data)
    # One row per message: sender, raw text, word count and timestamp.
    df = pd.DataFrame({'sender': [msg.sender for msg in messages],
                       'text': [msg.text for msg in messages],
                       'nb_words': [count_words(msg.text) for msg in messages],
                       'date': [msg.datetime for msg in messages]})
    return df
def augment_df(df):
    """Add previous-message and response-time helper columns to `df`."""
    df['sender_before'] = df['sender'].shift(1)
    df['date_before'] = df['date'].shift(1)
    # A message only counts as a response when the sender changed;
    # consecutive messages from the same sender get no response time.
    df['response_to'] = df['date_before'].where(
        df['sender_before'] != df['sender'])
    df['response_time'] = df['date'] - df['response_to']
    df['response_seconds'] = df['response_time'].dt.total_seconds()
    return df
def parse_into_messages(data):
    """
    Take on string which contains many messages and return list of messages.

    Parameters
    ----------
    data : str

    Returns
    -------
    messages : List[Message]
    """
    tmp_msg = ''
    data = data.replace('<Media omitted>', 'MEDIA_OMITTED')
    lines = data.split('\n')
    messages = []
    whatsapp_date_pattern = re.compile(r'\d{2}/\d{2}/\d{4}, \d{2}:\d{2}')
    for line in lines:
        if whatsapp_date_pattern.match(line):
            # A new timestamp starts a new message; flush the previous one.
            # A bare timestamp is exactly 17 characters, so anything longer
            # contains actual content.
            if len(tmp_msg) > 17:
                messages.append(Message(tmp_msg))
            tmp_msg = line
        else:
            # Continuation line of a multi-line message.
            tmp_msg += '\n' + line
    # Bug fix: the final message used to be dropped because it was only
    # flushed when the *next* timestamp line appeared.
    if len(tmp_msg) > 17:
        messages.append(Message(tmp_msg))
    return messages
def count_words(text):
    """Count the words in a text.

    A word is a maximal run of word characters (``\\w+``), which matches the
    behaviour of nltk's ``RegexpTokenizer(r'\\w+')`` without requiring nltk.
    """
    return len(re.findall(r'\w+', text))
def generate_visualizations(df):
    """Generate many visualizations for a whatsapp chat df."""
    visualize(df,
              grouping=(df['date'].dt.hour),
              column_name='date',
              title='Send+received messages by hour',
              xlabel='hour of the day (local time)',
              filename='messages_by_hour.png')
    # Weekday/weekend split (Monday=0 ... Sunday=6).
    df_weekday = df[df['date'].dt.weekday.isin([0, 1, 2, 3, 4])]
    df_weekend = df[df['date'].dt.weekday.isin([5, 6])]
    text = ' '.join(df.text.tolist()).lower()
    create_wordcloud(text)
    visualize(df_weekday,
              grouping=(df_weekday['date'].dt.hour),
              column_name='date',
              title='Send+received messages by hour (weekday only)',
              xlabel='hour of the day (local time)',
              filename='messages_by_hour_weekday.png')
    visualize(df_weekend,
              grouping=(df_weekend['date'].dt.hour),
              column_name='date',
              title='Send+received messages by hour (weekend only)',
              xlabel='hour of the day (local time)',
              filename='messages_by_hour_weekend.png')
    visualize(df,
              grouping=(df['date'].dt.weekday),
              column_name='date',
              title='Send+received messages by weekday',
              xlabel='From Monday=0 to Sunday=6',
              filename='messages_by_weekday.png')
    # Restrict the per-month chart to the last 365 days.
    df2 = df[df['date'] > df.date.max() - datetime.timedelta(days=365)]
    visualize(df2,
              grouping=(df2['date'].dt.month),
              column_name='date',
              title='Send+received messages by month',
              filename='messages_by_month.png')
    visualize(df,
              # Bug fix: Series.dt.week was deprecated and removed in
              # pandas 2.0; isocalendar().week is the supported equivalent.
              grouping=[df['date'].dt.year, df['date'].dt.isocalendar().week],
              column_name='date',
              title='Send+received messages over time',
              filename='messages_over_time.png')
def visualize(df,
              grouping,
              column_name='start_date',
              color='#494949',
              title='',
              xlabel='',
              filename='image.png'):
    """
    Plot message counts of a dataframe grouped by a date-derived key.

    Parameters
    ----------
    df : Pandas dataframe
    grouping : series or list of series
        Key(s) to group ``df[column_name]`` by, e.g. ``df['date'].dt.hour``
    column_name : str
        Column to visualize
    color : str
        Bar color
    title : str
    xlabel : str
    filename : str
        Output file name, written below the ``images`` directory
    """
    plt.figure(figsize=(20, 10))
    ax = (df[column_name].groupby(by=grouping)
          .count()).plot(kind="bar", color=color, stacked=True)  # .unstack()
    ax.set_facecolor('#eeeeee')
    ax.set_xlabel(xlabel)
    ax.set_ylabel("count")
    ax.set_title(title)
    os.makedirs('images', exist_ok=True)  # savefig fails if the dir is missing
    plt.savefig(os.path.join('images', filename))
    # Bug fix: close the figure; repeated calls otherwise accumulate open
    # figures and leak memory (matplotlib warns after 20 open figures).
    plt.close()
def print_general_statistics(df):
    """Print overall chat statistics: message volume, word counts, message
    length distribution and response times per sender."""
    print('{nb_messages} messages exchanged between {start_date} and '
          '{end_date} ({time})'
          .format(nb_messages=len(df),
                  start_date=df['date'].min(),
                  end_date=df['date'].max(),
                  time=df['date'].max() - df['date'].min()))
    print(df['sender'].value_counts())
    print(df.groupby('sender').aggregate({'nb_words': sum}))
    print('Message length distribution:')
    # 'SYSTEM' marks WhatsApp notifications, not a real chat participant.
    senders = [sender for sender in df.sender.unique() if sender != 'SYSTEM']
    for sender in senders:
        df_sender = df[df.sender == sender]
        print(f'## {sender}')
        print('\tmin-words: {}'.format(df_sender['nb_words'].min()))
        print('\tmean-words: {:0.0f}'.format(df_sender['nb_words'].mean()))
        print('\tmax-words: {}'.format(df_sender['nb_words'].max()))
        print('\t---')
        print('\tmin-chars: {}'.format(df_sender['text'].str.len().min()))
        print('\t.25-chars: {}'.format(df_sender['text'].str.len().quantile(0.25)))
        print('\tmean-chars: {:0.0f}'.format(df_sender['text'].str.len().mean()))
        print('\t.95-chars: {}'.format(df_sender['text'].str.len().quantile(0.95)))
        print('\tmax-chars: {}'.format(df_sender['text'].str.len().max()))
    print('## After how many seconds do people react?')
    df_humans = df[df.sender != 'SYSTEM']
    print(df_humans.groupby('sender')
          .agg({'response_seconds': ['median', 'mean', 'max']}))
def text_mining(df):
    """Print the most common words (overall and per sender) and run the
    smiley analysis on human messages."""
    df_humans = df[df.sender != 'SYSTEM']
    corpus = ' '.join(df.text.tolist()).lower()
    overall_counter = Counter(corpus.split(' '))
    print('## Most common 30 words')
    print(overall_counter.most_common(30))
    senders = [sender for sender in df.sender.unique() if sender != 'SYSTEM']
    for sender in senders:
        print(f'## {sender}')
        df_sender = df[df.sender == sender]
        corpus = ' '.join(df_sender.text.tolist()).lower()
        print('### Most common 30 words')
        print(Counter(corpus.split(' ')).most_common(30))
    print('## Smiley Analysis')
    smiley_analysis(df_humans)
def smiley_analysis(df):
    """Print, per sender, how often each frequent smiley occurs."""
    senders = df.sender.unique()
    # One column per sender, one row per smiley (after the transpose).
    counts = {
        smiley: [sum(df[df.sender == sender].text.str.count(smiley))
                 for sender in senders]
        for smiley in find_common_smileys(df)
    }
    table = pd.DataFrame(counts, index=senders).T
    print(table)
def find_common_smileys(df, most_common=20):
    """
    Return the most frequent non-ASCII characters (smileys) in the chat.

    Parameters
    ----------
    df : Pandas dataframe with a ``text`` column
    most_common : int
        How many characters to return, ordered by descending frequency.

    Returns
    -------
    List[str]
    """
    text = ' '.join(df.text.tolist())
    # Characters with a code point above 1000 are treated as smileys/emoji.
    candidates = [char for char in text if ord(char) > 1000]
    counter = Counter(candidates)
    # Bug fix: the `most_common` parameter was previously ignored
    # (the count was hard-coded to 20).
    return [char for char, _count in counter.most_common(most_common)]
def create_wordcloud(text, output='wordcloud.png'):
    """Render a red-tinted word cloud of `text` into images/<output>.

    NOTE(review): stopwords are German — adjust for chats in other languages.
    """
    # Start with loading all necessary libraries
    import random
    # Create and generate a word cloud image:
    from nltk.corpus import stopwords
    from nltk.tokenize import word_tokenize
    from wordcloud import WordCloud
    def love_color_func(word, font_size, position, orientation, random_state=None,
                        **kwargs):
        # A random shade of red/pink for every word.
        val = random.randint(0, 200)
        return (250, val, val)
    stop_words = set(stopwords.words('german')).union({'dass', 'media_omitted'})
    word_tokens = word_tokenize(text)
    filtered_sentence = [w for w in word_tokens if w not in stop_words]
    text = ' '.join(filtered_sentence)
    wordcloud = WordCloud(width=800, height=1200,
                          # max_font_size=50,
                          # max_words=100,
                          background_color="white").generate(text)
    wordcloud.recolor(color_func=love_color_func, random_state=3)
    # Display the generated image:
    plt.figure(figsize=(20, 10))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    plt.savefig(os.path.join('images', output), dpi=300, bbox_inches='tight')
def is_valid_file(parser, arg):
    """
    Check if arg is a valid file that already exists on the file system.

    Parameters
    ----------
    parser : argparse object
    arg : str

    Returns
    -------
    arg : str
        Absolute path of the existing file.
    """
    absolute = os.path.abspath(arg)
    if os.path.exists(absolute):
        return absolute
    # parser.error() raises SystemExit, so no return is needed here.
    parser.error("The file %s does not exist!" % absolute)
def get_parser():
"""Get parser object for whatsapp analysis script."""
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--file",
dest="filename",
type=lambda x: is_valid_file(parser, x),
required=True,
help="Whatsapp file to analyze",
metavar="FILE")
return parser
if __name__ == '__main__':
    # Entry point: parse CLI arguments and analyze the given chat export.
    args = get_parser().parse_args()
    main(args.filename)
|
en
| 0.526718
|
#!/usr/bin/env python Exploratory analysis of a whatsapp chat conversation. # core modules # 3rd party modules (you might have to install them via pip) Analyze a whatsapp chat file. A WhatsApp message object. Parse a WhatsApp chat file into a dataframe. Take on string which contains many messages and return list of message str. Parameters ---------- data : str Returns ------- messages : List[str] Count the words in a text. Generate many visualizations for a whatsapp chat df. # Weekend Visualize a dataframe with a date column. Parameters ---------- df : Pandas dataframe column_name : str Column to visualize color : str title : str # .unstack() # {sender}') # After how many seconds do people react?') # Most common 30 words') # {sender}') ## Most common 30 words') # Smiley Analysis') # Start with loading all necessary libraries # Create and generate a word cloud image: # max_font_size=50, # max_words=100, # Display the generated image: Check if arg is a valid file that already exists on the file system. Parameters ---------- parser : argparse object arg : str Returns ------- arg Get parser object for whatsapp analysis script.
| 3.275261
| 3
|
cloudkeeper_os/imagemanager.py
|
FranceGrilles/cloudkeeper-os
| 0
|
6629523
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2017-2018 CNRS and University of Strasbourg
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Image Manager
"""
import json
import os
from oslo_config import cfg
from oslo_log import log
from cloudkeeper_os import cloudkeeper_pb2
from cloudkeeper_os import constants
from cloudkeeper_os import openstack_client
from cloudkeeper_os import mapping
from cloudkeeper_os import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
IMAGE_ID_TAG = constants.IMAGE_ID_TAG
IMAGE_LIST_ID_TAG = constants.IMAGE_LIST_ID_TAG
APPLIANCE_INT_VALUES = constants.APPLIANCE_INT_VALUES
IMAGE_STATUS_TAG = constants.IMAGE_STATUS_TAG
class ApplianceManager(object):
    """A class for managing Appliance

    Adds, updates, marks for removal and cleans up appliance images
    stored in glance.
    """
    def __init__(self):
        self.mapping = mapping.Mapping()
    def add_appliance(self, appliance):
        """Add an appliance to glance

        Returns the id of the created glance image, or None on failure.
        """
        project_name = self.mapping.get_project_from_vo(appliance.vo)
        if not project_name:
            LOG.error("Cannot get a project name mapped to the "
                      "vo '%s'" % appliance.vo)
            return None
        domain_name = self.mapping.get_domain_from_project(project_name)
        glance = openstack_client.get_glance_client(project_name, domain_name)
        if not glance:
            LOG.error("Cannot get a glance client for the "
                      "project '%s'" % project_name)
            return None
        LOG.info('Adding appliance: ' + appliance.title)
        LOG.debug("Image access mode: "
                  "%s" % appliance.image.Mode.Name(appliance.image.mode))
        if appliance.image.Mode.Name(appliance.image.mode) == 'REMOTE':
            # Remote images are first downloaded to a temporary file.
            remote_image = True
            filename = utils.retrieve_image(appliance)
            if not filename:
                LOG.error("The appliance '%s' could not be retrieved from "
                          "Cloudkeeper" % appliance.identifier)
                return None
        else:
            filename = appliance.image.location
            remote_image = False
            if not filename:
                LOG.error("The image filename has not set been set "
                          "in the appliance '%s'." % appliance.identifier)
                return None
        image_format = appliance.image.Format.Name(appliance.image.format)
        try:
            image_data = open(filename, 'rb')
        except IOError as err:
            LOG.error("Cannot open image file: '%s'" % filename)
            LOG.exception(err)
            return None
        try:
            # The image payload is uploaded separately, so drop the field
            # before converting the appliance into glance image properties.
            appliance.ClearField('image')
            properties = utils.extract_appliance_properties(appliance)
            min_ram = utils.convert_ram(
                int(properties.get("APPLIANCE_RAM", "0")))
            properties[IMAGE_STATUS_TAG] = 'ACTIVE'
            LOG.debug(
                "Creating image '%s' (format: '%s', "
                "properties %s)" % (appliance.title,
                                    str.lower(image_format),
                                    properties)
            )
            glance_image = glance.images.create(
                name=appliance.title,
                disk_format=str.lower(image_format),
                container_format="bare",
                visibility=CONF.image_visibility,
                min_ram=min_ram
            )
            glance.images.upload(glance_image.id, image_data)
            glance.images.update(glance_image.id, **properties)
        finally:
            # Bug fix: close the file handle (and remove a downloaded
            # temporary file) even when a glance call raises; previously
            # both leaked on any upload/update failure.
            image_data.close()
            if remote_image:
                LOG.debug("Deleting retrieved image: '%s'" % (filename))
                os.unlink(filename)
        return glance_image.id
    def update_appliance(self, appliance):
        """Update an appliance stored in glance

        Marks the old release for removal and creates a new glance image.
        Returns the new image id, or None on failure.
        """
        LOG.info("Updating appliance '%s'" % appliance.identifier)
        image_list = self.mark_appliance_for_removal(appliance)
        if not image_list:
            LOG.error(
                "Could not mark appliance for removal. Appliance will "
                "not be updated"
            )
            return None
        LOG.debug("Old version of the '%s' appliance has been marked for "
                  "removal" % appliance.identifier)
        LOG.debug("Creating new release of the appliance")
        image_id = self.add_appliance(appliance)
        LOG.debug("The glance image '%s' has been created" % image_id)
        return image_id
    def mark_appliance_for_removal(self, appliance):
        """Mark an appliance in glance for removal

        Matching images are made private and tagged 'EOL'; they are
        physically deleted later by cleanup_appliances().
        """
        LOG.info("Marking appliance '%s' for removal" % appliance.identifier)
        project_name = self.mapping.get_project_from_vo(appliance.vo)
        if not project_name:
            LOG.error("Cannot get a project name mapped to the "
                      "VO '%s'" % appliance.vo)
            return None
        domain_name = self.mapping.get_domain_from_project(project_name)
        glance = openstack_client.get_glance_client(project_name, domain_name)
        if not glance:
            LOG.error("Cannot get a glance client for the "
                      "project '%s'" % project_name)
            return None
        glance_images = utils.find_images(glance, appliance.identifier,
                                          appliance.image_list_identifier)
        if not glance_images:
            LOG.error("Cannot mark image for removal: image not found")
            return None
        properties = {}
        properties[IMAGE_STATUS_TAG] = 'EOL'
        for image in glance_images:
            LOG.debug("Marking image for removal: '%s'" % image.id)
            glance.images.update(image.id, visibility='private', **properties)
        return True
    def cleanup_appliances(self):
        """Try to remove all appliances marked for removal

        Iterates over every mapped project and deletes images tagged 'EOL'.
        Failures are logged but do not abort the cleanup of other images.
        """
        LOG.info("Cleaning up appliances")
        for project_name in self.mapping.get_projects():
            domain_name = self.mapping.get_domain_from_project(project_name)
            glance = openstack_client.get_glance_client(
                project_name, domain_name
            )
            if not glance:
                LOG.error("Not authorized to manage images from the "
                          "project: %s" % project_name)
                continue
            try:
                img_generator = glance.images.list()
                image_list = list(img_generator)
            except Exception as err:
                LOG.error("Not authorized to retrieve the image list from "
                          "the project: %s" % project_name)
                LOG.exception(err)
                continue
            for image in image_list:
                if IMAGE_LIST_ID_TAG in image:
                    if (IMAGE_STATUS_TAG in image and
                            image[IMAGE_STATUS_TAG] == 'EOL'):
                        try:
                            LOG.debug("Deleting image '%s'" % image['id'])
                            glance.images.delete(image['id'])
                            LOG.debug(
                                "Image '%s' successfully "
                                "deleted" % image['id']
                            )
                        except Exception as err:
                            LOG.error(
                                "Cannot delete image '%s'" % image['id']
                            )
                            LOG.error(err)
class ImageListManager(object):
    """A class for managing image lists

    Keeps a cache mapping image-list identifiers to the glance images
    that belong to them, across all mapped projects.
    """
    def __init__(self):
        """Initialize the ImageListManager
        """
        # Cache: image_list_identifier -> list of glance images.
        self.appliances = {}
        self.mapping = mapping.Mapping()
    def update_image_list_identifiers(self):
        """Update the identifier list
        """
        appliances = {}
        for project_name in self.mapping.get_projects():
            LOG.debug(
                "Retrieving image list identifiers for "
                "project %s" % (project_name)
            )
            domain_name = self.mapping.get_domain_from_project(project_name)
            glance = openstack_client.get_glance_client(
                project_name, domain_name
            )
            if not glance:
                LOG.error("Not authorized to manage images from the "
                          "project: %s" % project_name)
                continue
            try:
                img_generator = glance.images.list()
                image_list = list(img_generator)
            except Exception as err:
                LOG.error("Not authorized to retrieve the image list from "
                          "the project: %s" % project_name)
                LOG.exception(err)
                continue
            for image in image_list:
                if IMAGE_LIST_ID_TAG in image:
                    # Images tagged 'EOL' are pending deletion and are
                    # excluded from the cache.
                    if ((IMAGE_STATUS_TAG not in image) or
                            (image[IMAGE_STATUS_TAG] != 'EOL')):
                        if image[IMAGE_LIST_ID_TAG] not in appliances:
                            appliances[image[IMAGE_LIST_ID_TAG]] = []
                        LOG.debug(
                            "Appending image with id %s to image list with "
                            "id %s" % (
                                image[IMAGE_ID_TAG],
                                image[IMAGE_LIST_ID_TAG]
                            )
                        )
                        appliances[image[IMAGE_LIST_ID_TAG]].append(image)
        self.appliances = appliances
    def get_appliances(self, image_list_identifier):
        """Return all appliances with a given image_list_identifier
        """
        self.update_image_list_identifiers()
        appliance_list = []
        LOG.debug(
            "Create appliance list for the image list "
            "%s" % image_list_identifier
        )
        for image in self.appliances[image_list_identifier]:
            LOG.debug("Building the property list for appliance %s (image: "
                      "%s)" % (image[IMAGE_ID_TAG], image.id))
            properties = {}
            # Rebuild each protobuf Appliance field from the corresponding
            # glance image property.
            for field in cloudkeeper_pb2.Appliance.DESCRIPTOR.fields_by_name:
                if field == 'identifier':
                    key = IMAGE_ID_TAG
                elif field == 'image_list_identifier':
                    key = IMAGE_LIST_ID_TAG
                else:
                    key = 'APPLIANCE_' + str.upper(field)
                if key in image:
                    if field in APPLIANCE_INT_VALUES:
                        properties[field] = int(image[key])
                    elif field == 'attributes':
                        # Attributes are stored JSON-encoded in glance.
                        properties[field] = json.loads(image[key])
                    else:
                        properties[field] = image[key]
            if 'attributes' not in properties:
                # Fallback: collect attribute-prefixed image properties.
                attributes = {}
                for key in image:
                    if key[:3] in constants.ATTRIBUTE_KEYS:
                        attributes[key] = image[key]
                properties['attributes'] = attributes
            appliance_list.append(
                cloudkeeper_pb2.Appliance(**properties)
            )
            LOG.debug('The property list contains: %s' % (properties))
        return appliance_list
    def remove_image_list(self, image_list_identifier):
        """Remove all images linked to an image_list_identifier
        """
        self.update_image_list_identifiers()
        if image_list_identifier not in self.appliances:
            # raise NotIdentifierFound exception
            LOG.error("No image with the image_list_identifier:"
                      "%s" % image_list_identifier)
            return None
        # All images of a list belong to the same VO, so the first image
        # is enough to resolve the project.
        vo_name = self.appliances[image_list_identifier][0]['APPLIANCE_VO']
        project_name = self.mapping.get_project_from_vo(vo_name)
        if not project_name:
            LOG.error("Cannot get the project name mapped the "
                      "VO '%s'" % vo_name)
            return None
        domain_name = self.mapping.get_domain_from_project(project_name)
        glance = openstack_client.get_glance_client(project_name, domain_name)
        if not glance:
            LOG.error("Cannot get a glance client for the "
                      "project '%s'" % project_name)
            return None
        LOG.debug("Deleting all images with the Image List Identifier: "
                  "'%s'" % image_list_identifier)
        for image in self.appliances[image_list_identifier]:
            LOG.info("Deleting image '%s'" % image['id'])
            glance.images.delete(image['id'])
        self.appliances.pop(image_list_identifier)
        return image_list_identifier
    def get_image_list_identifiers(self):
        """Return a list of identifiers
        """
        self.update_image_list_identifiers()
        image_list_identifiers = []
        for identifier in self.appliances:
            LOG.debug("Appending new image list identifier: '%s'" % identifier)
            image_list_identifiers.append(
                cloudkeeper_pb2.ImageListIdentifier(
                    image_list_identifier=identifier
                )
            )
        return image_list_identifiers
|
# -*- coding: utf-8 -*-
# Copyright 2017-2018 CNRS and University of Strasbourg
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Image Manager
"""
import json
import os
from oslo_config import cfg
from oslo_log import log
from cloudkeeper_os import cloudkeeper_pb2
from cloudkeeper_os import constants
from cloudkeeper_os import openstack_client
from cloudkeeper_os import mapping
from cloudkeeper_os import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
IMAGE_ID_TAG = constants.IMAGE_ID_TAG
IMAGE_LIST_ID_TAG = constants.IMAGE_LIST_ID_TAG
APPLIANCE_INT_VALUES = constants.APPLIANCE_INT_VALUES
IMAGE_STATUS_TAG = constants.IMAGE_STATUS_TAG
class ApplianceManager(object):
    """A class for managing Appliance

    Adds, updates, marks for removal and cleans up appliance images
    stored in glance.
    """
    def __init__(self):
        self.mapping = mapping.Mapping()
    def add_appliance(self, appliance):
        """Add an appliance to glance

        Returns the id of the created glance image, or None on failure.
        """
        project_name = self.mapping.get_project_from_vo(appliance.vo)
        if not project_name:
            LOG.error("Cannot get a project name mapped to the "
                      "vo '%s'" % appliance.vo)
            return None
        domain_name = self.mapping.get_domain_from_project(project_name)
        glance = openstack_client.get_glance_client(project_name, domain_name)
        if not glance:
            LOG.error("Cannot get a glance client for the "
                      "project '%s'" % project_name)
            return None
        LOG.info('Adding appliance: ' + appliance.title)
        LOG.debug("Image access mode: "
                  "%s" % appliance.image.Mode.Name(appliance.image.mode))
        if appliance.image.Mode.Name(appliance.image.mode) == 'REMOTE':
            # Remote images are first downloaded to a temporary file.
            remote_image = True
            filename = utils.retrieve_image(appliance)
            if not filename:
                LOG.error("The appliance '%s' could not be retrieved from "
                          "Cloudkeeper" % appliance.identifier)
                return None
        else:
            filename = appliance.image.location
            remote_image = False
            if not filename:
                LOG.error("The image filename has not set been set "
                          "in the appliance '%s'." % appliance.identifier)
                return None
        image_format = appliance.image.Format.Name(appliance.image.format)
        try:
            image_data = open(filename, 'rb')
        except IOError as err:
            LOG.error("Cannot open image file: '%s'" % filename)
            LOG.exception(err)
            return None
        try:
            # The image payload is uploaded separately, so drop the field
            # before converting the appliance into glance image properties.
            appliance.ClearField('image')
            properties = utils.extract_appliance_properties(appliance)
            min_ram = utils.convert_ram(
                int(properties.get("APPLIANCE_RAM", "0")))
            properties[IMAGE_STATUS_TAG] = 'ACTIVE'
            LOG.debug(
                "Creating image '%s' (format: '%s', "
                "properties %s)" % (appliance.title,
                                    str.lower(image_format),
                                    properties)
            )
            glance_image = glance.images.create(
                name=appliance.title,
                disk_format=str.lower(image_format),
                container_format="bare",
                visibility=CONF.image_visibility,
                min_ram=min_ram
            )
            glance.images.upload(glance_image.id, image_data)
            glance.images.update(glance_image.id, **properties)
        finally:
            # Bug fix: close the file handle (and remove a downloaded
            # temporary file) even when a glance call raises; previously
            # both leaked on any upload/update failure.
            image_data.close()
            if remote_image:
                LOG.debug("Deleting retrieved image: '%s'" % (filename))
                os.unlink(filename)
        return glance_image.id
    def update_appliance(self, appliance):
        """Update an appliance stored in glance

        Marks the old release for removal and creates a new glance image.
        Returns the new image id, or None on failure.
        """
        LOG.info("Updating appliance '%s'" % appliance.identifier)
        image_list = self.mark_appliance_for_removal(appliance)
        if not image_list:
            LOG.error(
                "Could not mark appliance for removal. Appliance will "
                "not be updated"
            )
            return None
        LOG.debug("Old version of the '%s' appliance has been marked for "
                  "removal" % appliance.identifier)
        LOG.debug("Creating new release of the appliance")
        image_id = self.add_appliance(appliance)
        LOG.debug("The glance image '%s' has been created" % image_id)
        return image_id
    def mark_appliance_for_removal(self, appliance):
        """Mark an appliance in glance for removal

        Matching images are made private and tagged 'EOL'; they are
        physically deleted later by cleanup_appliances().
        """
        LOG.info("Marking appliance '%s' for removal" % appliance.identifier)
        project_name = self.mapping.get_project_from_vo(appliance.vo)
        if not project_name:
            LOG.error("Cannot get a project name mapped to the "
                      "VO '%s'" % appliance.vo)
            return None
        domain_name = self.mapping.get_domain_from_project(project_name)
        glance = openstack_client.get_glance_client(project_name, domain_name)
        if not glance:
            LOG.error("Cannot get a glance client for the "
                      "project '%s'" % project_name)
            return None
        glance_images = utils.find_images(glance, appliance.identifier,
                                          appliance.image_list_identifier)
        if not glance_images:
            LOG.error("Cannot mark image for removal: image not found")
            return None
        properties = {}
        properties[IMAGE_STATUS_TAG] = 'EOL'
        for image in glance_images:
            LOG.debug("Marking image for removal: '%s'" % image.id)
            glance.images.update(image.id, visibility='private', **properties)
        return True
    def cleanup_appliances(self):
        """Try to remove all appliances marked for removal

        Iterates over every mapped project and deletes images tagged 'EOL'.
        Failures are logged but do not abort the cleanup of other images.
        """
        LOG.info("Cleaning up appliances")
        for project_name in self.mapping.get_projects():
            domain_name = self.mapping.get_domain_from_project(project_name)
            glance = openstack_client.get_glance_client(
                project_name, domain_name
            )
            if not glance:
                LOG.error("Not authorized to manage images from the "
                          "project: %s" % project_name)
                continue
            try:
                img_generator = glance.images.list()
                image_list = list(img_generator)
            except Exception as err:
                LOG.error("Not authorized to retrieve the image list from "
                          "the project: %s" % project_name)
                LOG.exception(err)
                continue
            for image in image_list:
                if IMAGE_LIST_ID_TAG in image:
                    if (IMAGE_STATUS_TAG in image and
                            image[IMAGE_STATUS_TAG] == 'EOL'):
                        try:
                            LOG.debug("Deleting image '%s'" % image['id'])
                            glance.images.delete(image['id'])
                            LOG.debug(
                                "Image '%s' successfully "
                                "deleted" % image['id']
                            )
                        except Exception as err:
                            LOG.error(
                                "Cannot delete image '%s'" % image['id']
                            )
                            LOG.error(err)
class ImageListManager(object):
"""A class for managing image lists
"""
def __init__(self):
"""Initialize the ImageListManager
"""
self.appliances = {}
self.mapping = mapping.Mapping()
def update_image_list_identifiers(self):
"""Update the identifier list
"""
appliances = {}
for project_name in self.mapping.get_projects():
LOG.debug(
"Retrieving image list identifiers for "
"project %s" % (project_name)
)
domain_name = self.mapping.get_domain_from_project(project_name)
glance = openstack_client.get_glance_client(
project_name, domain_name
)
if not glance:
LOG.error("Not authorized to manage images from the "
"project: %s" % project_name)
continue
try:
img_generator = glance.images.list()
image_list = list(img_generator)
except Exception as err:
LOG.error("Not authorized to retrieve the image list from "
"the project: %s" % project_name)
LOG.exception(err)
continue
for image in image_list:
if IMAGE_LIST_ID_TAG in image:
if ((IMAGE_STATUS_TAG not in image) or
(image[IMAGE_STATUS_TAG] != 'EOL')):
if image[IMAGE_LIST_ID_TAG] not in appliances:
appliances[image[IMAGE_LIST_ID_TAG]] = []
LOG.debug(
"Appending image with id %s to image list with "
"id %s" % (
image[IMAGE_ID_TAG],
image[IMAGE_LIST_ID_TAG]
)
)
appliances[image[IMAGE_LIST_ID_TAG]].append(image)
self.appliances = appliances
def get_appliances(self, image_list_identifier):
"""Return all appliances with a given image_list_identifier
"""
self.update_image_list_identifiers()
appliance_list = []
LOG.debug(
"Create appliance list for the image list "
"%s" % image_list_identifier
)
for image in self.appliances[image_list_identifier]:
LOG.debug("Building the property list for appliance %s (image: "
"%s)" % (image[IMAGE_ID_TAG], image.id))
properties = {}
for field in cloudkeeper_pb2.Appliance.DESCRIPTOR.fields_by_name:
if field == 'identifier':
key = IMAGE_ID_TAG
elif field == 'image_list_identifier':
key = IMAGE_LIST_ID_TAG
else:
key = 'APPLIANCE_' + str.upper(field)
if key in image:
if field in APPLIANCE_INT_VALUES:
properties[field] = int(image[key])
elif field == 'attributes':
properties[field] = json.loads(image[key])
else:
properties[field] = image[key]
if 'attributes' not in properties:
attributes = {}
for key in image:
if key[:3] in constants.ATTRIBUTE_KEYS:
attributes[key] = image[key]
properties['attributes'] = attributes
appliance_list.append(
cloudkeeper_pb2.Appliance(**properties)
)
LOG.debug('The property list contains: %s' % (properties))
return appliance_list
def remove_image_list(self, image_list_identifier):
"""Remove all images linked to an image_list_identifier
"""
self.update_image_list_identifiers()
if image_list_identifier not in self.appliances:
# raise NotIdentifierFound exception
LOG.error("No image with the image_list_identifier:"
"%s" % image_list_identifier)
return None
vo_name = self.appliances[image_list_identifier][0]['APPLIANCE_VO']
project_name = self.mapping.get_project_from_vo(vo_name)
if not project_name:
LOG.error("Cannot get the project name mapped the "
"VO '%s'" % vo_name)
return None
domain_name = self.mapping.get_domain_from_project(project_name)
glance = openstack_client.get_glance_client(project_name, domain_name)
if not glance:
LOG.error("Cannot get a glance client for the "
"project '%s'" % project_name)
return None
LOG.debug("Deleting all images with the Image List Identifier: "
"'%s'" % image_list_identifier)
for image in self.appliances[image_list_identifier]:
LOG.info("Deleting image '%s'" % image['id'])
glance.images.delete(image['id'])
self.appliances.pop(image_list_identifier)
return image_list_identifier
def get_image_list_identifiers(self):
    """Return the known image list identifiers as protobuf messages."""
    self.update_image_list_identifiers()
    identifiers = []
    for ident in self.appliances:
        LOG.debug("Appending new image list identifier: '%s'" % ident)
        identifiers.append(cloudkeeper_pb2.ImageListIdentifier(
            image_list_identifier=ident))
    return identifiers
|
en
| 0.779793
|
# -*- coding: utf-8 -*- # Copyright 2017-2018 CNRS and University of Strasbourg # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Image Manager A class for managing Appliance Add an appliance to glance Update an appliance stored in glance Mark an appliance in glance for removal Try to remove all appliances marked for removal A class for managing image lists Initialize the ImageListManager Update the identifier list Return all appliances with a given image_list_identifier Remove all images linked to an image_list_identifier # raise NotIdentifierFound exception Return a list of identifiers
| 1.935989
| 2
|
exercicios/exercicio103.py
|
NicoCassio/cursoemvideo-python
| 0
|
6629524
|
<reponame>NicoCassio/cursoemvideo-python
def ficha(nome='<desconhecido>', gols=0):
    """Print a player's scoring record on one formatted line.

    :param nome: player's name (defaults to '<desconhecido>')
    :param gols: number of goals scored (defaults to 0)
    """
    gols_txt = f"{gols} gols"
    # Name left-justified in 20 columns, goal count right-justified in 10.
    print(f'{nome:<20}{gols_txt:>10}')
# Main program
print('-' * 30)
n = str(input('Nome: '))
g = str(input('Gols: '))
# Non-numeric goal input falls back to 0 instead of raising ValueError.
g = int(g) if g.isnumeric() else 0
# A blank name lets ficha() use its '<desconhecido>' default.
if n.strip() == '':
    ficha(gols=g)
else:
    ficha(n, g)
print('-' * 30)
|
def ficha(nome='<desconhecido>', gols=0):
"""
-> Mostra o rendimento do jogador
:param nome: nome do jogador
:param gols: quantidade de gols do jogador
"""
print(f'{nome:<20}{f"{gols} gols":>10}')
# Programa principal
print('-' * 30)
n = str(input('Nome: '))
g = str(input('Gols: '))
g = int(g) if g.isnumeric() else 0
if n.strip() == '':
ficha(gols=g)
else:
ficha(n, g)
print('-' * 30)
|
pt
| 0.978845
|
-> Mostra o rendimento do jogador :param nome: nome do jogador :param gols: quantidade de gols do jogador # Programa principal
| 3.785573
| 4
|
sparse/coo/umath.py
|
nimroha/sparse
| 0
|
6629525
|
from itertools import product
import numpy as np
import scipy.sparse
import numba
from ..utils import isscalar, PositinalArgumentPartial, _zero_of_dtype
from ..compatibility import range, zip, zip_longest
def elemwise(func, *args, **kwargs):
    """
    Apply a function to any number of arguments.

    Parameters
    ----------
    func : Callable
        The function to apply. Must support broadcasting.
    args : tuple, optional
        The arguments to the function. Can be :obj:`SparseArray` objects
        or :obj:`scipy.sparse.spmatrix` objects.
    kwargs : dict, optional
        Any additional arguments to pass to the function.

    Returns
    -------
    COO
        The result of applying the function.

    Raises
    ------
    ValueError
        If the operation would result in a dense matrix, or if the operands
        don't have broadcastable shapes.

    See Also
    --------
    :obj:`numpy.ufunc` : A similar Numpy construct. Note that any :code:`ufunc` can be used
        as the :code:`func` input to this function.

    Notes
    -----
    Previously, operations with Numpy arrays were sometimes supported. Now,
    it is necessary to convert Numpy arrays to :obj:`COO` objects.
    """
    # Because we need to mutate args.
    from .core import COO
    from ..sparse_array import SparseArray

    args = list(args)
    # Positions and values of scalar operands, to be bound into `func` below.
    posargs = []
    pos = []
    for i, arg in enumerate(args):
        if isinstance(arg, scipy.sparse.spmatrix):
            # Normalize scipy sparse matrices to COO.
            args[i] = COO.from_scipy_sparse(arg)
        elif isscalar(arg) or (isinstance(arg, np.ndarray)
                               and not arg.shape):
            # Faster and more reliable to pass ()-shaped ndarrays as scalars.
            args[i] = np.asarray(arg)[()]
            pos.append(i)
            posargs.append(args[i])
        elif isinstance(arg, SparseArray) and not isinstance(arg, COO):
            args[i] = COO(arg)
        elif not isinstance(arg, COO):
            # Unknown operand type: let Python try the reflected operation.
            return NotImplemented

    # Filter out scalars as they are 'baked' into the function.
    func = PositinalArgumentPartial(func, pos, posargs)
    args = [arg for arg in args if not isscalar(arg)]

    if len(args) == 0:
        # All operands were scalars; evaluate directly.
        return func(**kwargs)

    return _elemwise_n_ary(func, *args, **kwargs)
@numba.jit(nopython=True)
def _match_arrays(a, b):  # pragma: no cover
    """
    Finds all indexes into a and b such that a[i] = b[j]. The outputs are sorted
    in lexographical order.

    Parameters
    ----------
    a, b : np.ndarray
        The input 1-D arrays to match. If matching of multiple fields is
        needed, use np.recarrays. These two arrays must be sorted.

    Returns
    -------
    a_idx, b_idx : np.ndarray
        The output indices of every possible pair of matching elements.
    """
    if len(a) == 0 or len(b) == 0:
        # Nothing can match against an empty array.
        return np.empty(0, dtype=np.uintp), np.empty(0, dtype=np.uintp)

    a_ind, b_ind = [], []
    nb = len(b)
    ib = 0
    # `match` marks where the most recent matching run starts in b, so a
    # duplicated value in a can rescan the same run of equal values in b.
    match = 0

    for ia, j in enumerate(a):
        if j == b[match]:
            # Same value as the last match: restart b's scan at that run.
            ib = match

        # Advance through b while its (sorted) values don't exceed j.
        while ib < nb and j >= b[ib]:
            if j == b[ib]:
                a_ind.append(ia)
                b_ind.append(ib)

                if b[match] < b[ib]:
                    # A new, larger value started matching; remember its start.
                    match = ib

            ib += 1

    return np.array(a_ind, dtype=np.uintp), np.array(b_ind, dtype=np.uintp)
def _elemwise_n_ary(func, *args, **kwargs):
    """
    Apply a function to any number of arguments with broadcasting.

    Parameters
    ----------
    func : Callable
        The function to apply to arguments. Must support broadcasting.
    args : list
        Input :obj:`COO` or :obj:`numpy.ndarray`s.
    kwargs : dict
        Additional arguments to pass to the function.

    Returns
    -------
    COO
        The output array.

    Raises
    ------
    ValueError
        If the input shapes aren't compatible or the result will be dense.
    """
    from .core import COO

    args = list(args)

    # One zero per operand's dtype; np.dtype(arg) reads the operand's
    # `dtype` attribute.
    args_zeros = tuple(_zero_of_dtype(np.dtype(arg)) for arg in args)

    # Probe func at all-zeros: a nonzero result would densify the output.
    func_value = func(*args_zeros, **kwargs)
    func_zero = _zero_of_dtype(func_value.dtype)
    if func_value != func_zero:
        raise ValueError("Performing this operation would produce "
                         "a dense result: %s" % str(func))

    data_list = []
    coords_list = []

    cache = {}
    # Enumerate every nonempty combination of which operands are nonzero;
    # each mask contributes its own coordinates/data.
    for mask in product([True, False], repeat=len(args)):
        if not any(mask):
            continue

        ci, di = _unmatch_coo(func, args, mask, cache, **kwargs)

        coords_list.extend(ci)
        data_list.extend(di)

    result_shape = _get_nary_broadcast_shape(*[arg.shape for arg in args])

    # Concatenate matches and mismatches
    data = np.concatenate(data_list) if len(data_list) else np.empty((0,), dtype=func_value.dtype)
    coords = np.concatenate(coords_list, axis=1) if len(coords_list) else \
        np.empty((0, len(result_shape)), dtype=np.min_scalar_type(max(result_shape) - 1))

    # Drop explicit zeros produced by func before building the result.
    nonzero = data != func_zero
    data = data[nonzero]
    coords = coords[:, nonzero]

    return COO(coords, data, shape=result_shape, has_duplicates=False)
def _match_coo(*args, **kwargs):
    """
    Matches the coordinates for any number of input :obj:`COO` arrays.
    Equivalent to "sparse" broadcasting for all arrays.

    Parameters
    ----------
    args : Tuple[COO]
        The input :obj:`COO` arrays.
    return_midx : bool
        Whether to return matched indices or matched arrays. Matching
        only supported for two arrays. ``False`` by default.
    cache : dict
        Cache of things already matched. No cache by default.

    Returns
    -------
    matched_idx : List[ndarray]
        The indices of matched elements in the original arrays. Only returned if
        ``return_midx`` is ``True``.
    matched_arrays : List[COO]
        The expanded, matched :obj:`COO` objects. Only returned if
        ``return_midx`` is ``False``.
    """
    from .core import COO
    from .common import linear_loc

    return_midx = kwargs.pop('return_midx', False)
    cache = kwargs.pop('cache', None)

    if kwargs:
        raise ValueError('Unknown kwargs %s' % kwargs.keys())

    if return_midx and (len(args) != 2 or cache is not None):
        raise NotImplementedError('Matching indices only supported for two args, and no cache.')

    # Fold each subsequent operand into the running match; all arrays in
    # `matched_arrays` share identical coordinates after each step.
    matched_arrays = [args[0]]
    cache_key = [id(args[0])]
    for arg2 in args[1:]:
        cache_key.append(id(arg2))
        key = tuple(cache_key)
        if cache is not None and key in cache:
            matched_arrays = cache[key]
            continue

        cargs = [matched_arrays[0], arg2]
        current_shape = _get_broadcast_shape(matched_arrays[0].shape, arg2.shape)
        params = [_get_broadcast_parameters(arg.shape, current_shape) for arg in cargs]
        # Dimensions shared (un-broadcast) by both operands.
        reduced_params = [all(p) for p in zip(*params)]
        reduced_shape = _get_reduced_shape(arg2.shape,
                                           reduced_params[-arg2.ndim:])

        reduced_coords = [_get_reduced_coords(arg.coords, reduced_params[-arg.ndim:])
                          for arg in cargs]

        # Linearize the shared dimensions, sort, then match the sorted arrays.
        linear = [linear_loc(rc, reduced_shape) for rc in reduced_coords]
        sorted_idx = [np.argsort(idx) for idx in linear]
        linear = [idx[s] for idx, s in zip(linear, sorted_idx)]
        matched_idx = _match_arrays(*linear)

        if return_midx:
            # Undo the sort so indices refer to the original element order.
            matched_idx = [sidx[midx] for sidx, midx in zip(sorted_idx, matched_idx)]
            return matched_idx

        coords = [arg.coords[:, s] for arg, s in zip(cargs, sorted_idx)]
        mcoords = [c[:, idx] for c, idx in zip(coords, matched_idx)]
        mcoords = _get_matching_coords(mcoords, params, current_shape)
        # Previously matched arrays all share coordinates, so the same
        # sort/match indices apply to each of their data arrays.
        mdata = [arg.data[sorted_idx[0]][matched_idx[0]] for arg in matched_arrays]
        mdata.append(arg2.data[sorted_idx[1]][matched_idx[1]])
        matched_arrays = [COO(mcoords, md, shape=current_shape) for md in mdata]

        if cache is not None:
            cache[key] = matched_arrays

    return matched_arrays
def _unmatch_coo(func, args, mask, cache, **kwargs):
    """
    Compute the contribution where the operands selected by ``mask`` are
    nonzero and all other operands are zero.

    First matches the coordinates of the nonzero operands, applies ``func``
    with zeros substituted for the masked-out operands, then filters out
    coordinates that also occur in any masked-out operand (those belong to
    a different mask combination).

    Parameters
    ----------
    func : Callable
        The function to compute matches
    args : tuple[COO]
        The input :obj:`COO` arrays.
    mask : tuple[bool]
        Specifies the inputs that are zero and the ones that are
        nonzero.
    kwargs: dict
        Extra keyword arguments to pass to func.

    Returns
    -------
    matched_coords : list[ndarray]
        The matched coordinates.
    matched_data : list[ndarray]
        The matched data.
    """
    from .core import COO

    matched_args = [a for a, m in zip(args, mask) if m]
    unmatched_args = [a for a, m in zip(args, mask) if not m]

    matched_arrays = _match_coo(*matched_args, cache=cache)

    # Bind a zero of the right dtype at each masked-out operand's position.
    pos = tuple(i for i, m in enumerate(mask) if not m)
    posargs = [_zero_of_dtype(arg.dtype) for arg, m in zip(args, mask) if not m]
    result_shape = _get_nary_broadcast_shape(*[arg.shape for arg in args])

    partial = PositinalArgumentPartial(func, pos, posargs)
    matched_func = partial(*[a.data for a in matched_arrays], **kwargs)

    # Keep only entries where func produced a nonzero value.
    unmatched_mask = matched_func != _zero_of_dtype(matched_func.dtype)

    if not unmatched_mask.any():
        return [], []

    func_data = matched_func[unmatched_mask]
    func_coords = matched_arrays[0].coords[:, unmatched_mask]
    func_array = COO(func_coords, func_data, shape=matched_arrays[0].shape).broadcast_to(result_shape)

    if all(mask):
        # No masked-out operands: nothing further to exclude.
        return [func_array.coords], [func_array.data]

    # BUGFIX: `np.bool` was a deprecated alias for the builtin `bool` and was
    # removed in NumPy 1.24; use the builtin directly.
    unmatched_mask = np.ones(func_array.nnz, dtype=bool)

    # Drop coordinates that also appear in a masked-out operand; those are
    # handled by the mask combination where that operand is marked nonzero.
    for arg in unmatched_args:
        matched_idx = _match_coo(func_array, arg, return_midx=True)[0]
        unmatched_mask[matched_idx] = False

    coords = np.asarray(func_array.coords[:, unmatched_mask], order='C')
    data = np.asarray(func_array.data[unmatched_mask], order='C')

    return [coords], [data]
def _get_nary_broadcast_shape(*shapes):
    """
    Broadcast any number of shapes to a result shape.

    Parameters
    ----------
    shapes : tuple[tuple[int]]
        The shapes to broadcast.

    Returns
    -------
    tuple[int]
        The output shape.

    Raises
    ------
    ValueError
        If the input shapes cannot be broadcast to a single shape.
    """
    result = ()
    try:
        # Fold each shape into the running result, starting from ().
        for s in shapes:
            result = _get_broadcast_shape(s, result)
    except ValueError:
        joined = ', '.join(str(s) for s in shapes)
        raise ValueError('operands could not be broadcast together with shapes %s'
                         % joined)
    return result
def _get_broadcast_shape(shape1, shape2, is_result=False):
"""
Get the overall broadcasted shape.
Parameters
----------
shape1, shape2 : tuple[int]
The input shapes to broadcast together.
is_result : bool
Whether or not shape2 is also the result shape.
Returns
-------
result_shape : tuple[int]
The overall shape of the result.
Raises
------
ValueError
If the two shapes cannot be broadcast together.
"""
# https://stackoverflow.com/a/47244284/774273
if not all((l1 == l2) or (l1 == 1) or ((l2 == 1) and not is_result) for l1, l2 in
zip(shape1[::-1], shape2[::-1])):
raise ValueError('operands could not be broadcast together with shapes %s, %s' %
(shape1, shape2))
result_shape = tuple(max(l1, l2) for l1, l2 in
zip_longest(shape1[::-1], shape2[::-1], fillvalue=1))[::-1]
return result_shape
def _get_broadcast_parameters(shape, broadcast_shape):
"""
Get the broadcast parameters.
Parameters
----------
shape : tuple[int]
The input shape.
broadcast_shape
The shape to broadcast to.
Returns
-------
params : list
A list containing None if the dimension isn't in the original array, False if
it needs to be broadcast, and True if it doesn't.
"""
params = [None if l1 is None else l1 == l2 for l1, l2
in zip_longest(shape[::-1], broadcast_shape[::-1], fillvalue=None)][::-1]
return params
def _get_reduced_coords(coords, params):
"""
Gets only those dimensions of the coordinates that don't need to be broadcast.
Parameters
----------
coords : np.ndarray
The coordinates to reduce.
params : list
The params from which to check which dimensions to get.
Returns
-------
reduced_coords : np.ndarray
The reduced coordinates.
"""
reduced_params = [bool(param) for param in params]
return coords[reduced_params]
def _get_reduced_shape(shape, params):
"""
Gets only those dimensions of the coordinates that don't need to be broadcast.
Parameters
----------
coords : np.ndarray
The coordinates to reduce.
params : list
The params from which to check which dimensions to get.
Returns
-------
reduced_coords : np.ndarray
The reduced coordinates.
"""
reduced_shape = tuple(l for l, p in zip(shape, params) if p)
return reduced_shape
def _get_expanded_coords_data(coords, data, params, broadcast_shape):
    """
    Expand coordinates/data to broadcast_shape. Does most of the heavy lifting for broadcast_to.
    Produces sorted output for sorted inputs.

    Parameters
    ----------
    coords : np.ndarray
        The coordinates to expand.
    data : np.ndarray
        The data corresponding to the coordinates.
    params : list
        The broadcast parameters.
    broadcast_shape : tuple[int]
        The shape to broadcast to.

    Returns
    -------
    expanded_coords : np.ndarray
        List of 1-D arrays. Each item in the list has one dimension of coordinates.
    expanded_data : np.ndarray
        The data corresponding to expanded_coords.
    """
    # `first_dim` is the first non-broadcast dimension; its axis in the
    # cartesian product indexes the original nonzero elements.
    first_dim = -1
    expand_shapes = []
    for d, p, l in zip(range(len(broadcast_shape)), params, broadcast_shape):
        if p and first_dim == -1:
            # One axis enumerating the existing nonzeros (nnz of coords).
            expand_shapes.append(coords.shape[1])
            first_dim = d

        if not p:
            # Broadcast dimension: every index 0..l-1 must be generated.
            expand_shapes.append(l)

    all_idx = _cartesian_product(*(np.arange(d, dtype=np.min_scalar_type(d - 1)) for d in expand_shapes))
    # Smallest integer dtype able to hold every output coordinate.
    dt = np.result_type(*(np.min_scalar_type(l - 1) for l in broadcast_shape))

    false_dim = 0
    dim = 0

    expanded_coords = np.empty((len(broadcast_shape), all_idx.shape[1]), dtype=dt)
    # Data is replicated once per combination of broadcast indices.
    expanded_data = data[all_idx[first_dim]]

    for d, p, l in zip(range(len(broadcast_shape)), params, broadcast_shape):
        if p:
            # Non-broadcast dimension: take the original coordinate row.
            expanded_coords[d] = coords[dim, all_idx[first_dim]]
        else:
            # Broadcast dimension: take the generated index axis; the
            # `(d > first_dim)` offset skips the nnz axis of the product.
            expanded_coords[d] = all_idx[false_dim + (d > first_dim)]
            false_dim += 1

        if p is not None:
            dim += 1

    return np.asarray(expanded_coords), np.asarray(expanded_data)
# (c) senderle
# Taken from https://stackoverflow.com/a/11146645/774273
# License: https://creativecommons.org/licenses/by-sa/3.0/
def _cartesian_product(*arrays):
"""
Get the cartesian product of a number of arrays.
Parameters
----------
arrays : Tuple[np.ndarray]
The arrays to get a cartesian product of. Always sorted with respect
to the original array.
Returns
-------
out : np.ndarray
The overall cartesian product of all the input arrays.
"""
broadcastable = np.ix_(*arrays)
broadcasted = np.broadcast_arrays(*broadcastable)
rows, cols = np.prod(broadcasted[0].shape), len(broadcasted)
dtype = np.result_type(*arrays)
out = np.empty(rows * cols, dtype=dtype)
start, end = 0, rows
for a in broadcasted:
out[start:end] = a.reshape(-1)
start, end = end, end + rows
return out.reshape(cols, rows)
def _get_matching_coords(coords, params, shape):
    """
    Get the matching coords across a number of broadcast operands.

    Parameters
    ----------
    coords : list[numpy.ndarray]
        The input coordinates.
    params : list[Union[bool, None]]
        The broadcast parameters.

    Returns
    -------
    numpy.ndarray
        The broadcasted coordinates
    """
    matching_coords = []
    # Per-operand counter of how many of its own dimensions have been
    # consumed so far (None params don't exist in that operand).
    dims = np.zeros(len(coords), dtype=np.uint8)

    # p_all holds each operand's parameter for one output dimension.
    for p_all in zip(*params):
        for i, p in enumerate(p_all):
            if p:
                # First operand that genuinely owns this dimension supplies
                # its coordinate row.
                matching_coords.append(coords[i][dims[i]])
                break
        else:
            # No operand owns the dimension.
            # NOTE(review): `coords[dims[0]]` indexes the *list* of operand
            # coords by a dimension counter, unlike the `coords[i][dims[i]]`
            # branch above — looks like it may intend `coords[0][dims[0]]`;
            # verify against callers.
            matching_coords.append(coords[dims[0]])

        for i, p in enumerate(p_all):
            if p is not None:
                dims[i] += 1

    # Smallest integer dtype able to hold every coordinate of `shape`.
    dtype = np.min_scalar_type(max(shape) - 1)

    return np.asarray(matching_coords, dtype=dtype)
def broadcast_to(x, shape):
    """
    Performs the equivalent of :obj:`numpy.broadcast_to` for :obj:`COO`. Note that
    this function returns a new array instead of a view.

    Parameters
    ----------
    shape : tuple[int]
        The shape to broadcast the data to.

    Returns
    -------
    COO
        The broadcasted sparse array.

    Raises
    ------
    ValueError
        If the operand cannot be broadcast to the given shape.

    See also
    --------
    :obj:`numpy.broadcast_to` : NumPy equivalent function
    """
    from .core import COO

    if x.shape == shape:
        # Already the requested shape: broadcasting is a no-op.
        return x

    target_shape = _get_broadcast_shape(x.shape, shape, is_result=True)
    bparams = _get_broadcast_parameters(x.shape, target_shape)
    new_coords, new_data = _get_expanded_coords_data(x.coords, x.data,
                                                     bparams, target_shape)

    return COO(new_coords, new_data, shape=target_shape, has_duplicates=False,
               sorted=True)
|
from itertools import product
import numpy as np
import scipy.sparse
import numba
from ..utils import isscalar, PositinalArgumentPartial, _zero_of_dtype
from ..compatibility import range, zip, zip_longest
def elemwise(func, *args, **kwargs):
"""
Apply a function to any number of arguments.
Parameters
----------
func : Callable
The function to apply. Must support broadcasting.
args : tuple, optional
The arguments to the function. Can be :obj:`SparseArray` objects
or :obj:`scipy.sparse.spmatrix` objects.
kwargs : dict, optional
Any additional arguments to pass to the function.
Returns
-------
COO
The result of applying the function.
Raises
------
ValueError
If the operation would result in a dense matrix, or if the operands
don't have broadcastable shapes.
See Also
--------
:obj:`numpy.ufunc` : A similar Numpy construct. Note that any :code:`ufunc` can be used
as the :code:`func` input to this function.
Notes
-----
Previously, operations with Numpy arrays were sometimes supported. Now,
it is necessary to convert Numpy arrays to :obj:`COO` objects.
"""
# Because we need to mutate args.
from .core import COO
from ..sparse_array import SparseArray
args = list(args)
posargs = []
pos = []
for i, arg in enumerate(args):
if isinstance(arg, scipy.sparse.spmatrix):
args[i] = COO.from_scipy_sparse(arg)
elif isscalar(arg) or (isinstance(arg, np.ndarray)
and not arg.shape):
# Faster and more reliable to pass ()-shaped ndarrays as scalars.
args[i] = np.asarray(arg)[()]
pos.append(i)
posargs.append(args[i])
elif isinstance(arg, SparseArray) and not isinstance(arg, COO):
args[i] = COO(arg)
elif not isinstance(arg, COO):
return NotImplemented
# Filter out scalars as they are 'baked' into the function.
func = PositinalArgumentPartial(func, pos, posargs)
args = [arg for arg in args if not isscalar(arg)]
if len(args) == 0:
return func(**kwargs)
return _elemwise_n_ary(func, *args, **kwargs)
@numba.jit(nopython=True)
def _match_arrays(a, b): # pragma: no cover
"""
Finds all indexes into a and b such that a[i] = b[j]. The outputs are sorted
in lexographical order.
Parameters
----------
a, b : np.ndarray
The input 1-D arrays to match. If matching of multiple fields is
needed, use np.recarrays. These two arrays must be sorted.
Returns
-------
a_idx, b_idx : np.ndarray
The output indices of every possible pair of matching elements.
"""
if len(a) == 0 or len(b) == 0:
return np.empty(0, dtype=np.uintp), np.empty(0, dtype=np.uintp)
a_ind, b_ind = [], []
nb = len(b)
ib = 0
match = 0
for ia, j in enumerate(a):
if j == b[match]:
ib = match
while ib < nb and j >= b[ib]:
if j == b[ib]:
a_ind.append(ia)
b_ind.append(ib)
if b[match] < b[ib]:
match = ib
ib += 1
return np.array(a_ind, dtype=np.uintp), np.array(b_ind, dtype=np.uintp)
def _elemwise_n_ary(func, *args, **kwargs):
"""
Apply a function to any number of arguments with broadcasting.
Parameters
----------
func : Callable
The function to apply to arguments. Must support broadcasting.
args : list
Input :obj:`COO` or :obj:`numpy.ndarray`s.
kwargs : dict
Additional arguments to pass to the function.
Returns
-------
COO
The output array.
Raises
------
ValueError
If the input shapes aren't compatible or the result will be dense.
"""
from .core import COO
args = list(args)
args_zeros = tuple(_zero_of_dtype(np.dtype(arg)) for arg in args)
func_value = func(*args_zeros, **kwargs)
func_zero = _zero_of_dtype(func_value.dtype)
if func_value != func_zero:
raise ValueError("Performing this operation would produce "
"a dense result: %s" % str(func))
data_list = []
coords_list = []
cache = {}
for mask in product([True, False], repeat=len(args)):
if not any(mask):
continue
ci, di = _unmatch_coo(func, args, mask, cache, **kwargs)
coords_list.extend(ci)
data_list.extend(di)
result_shape = _get_nary_broadcast_shape(*[arg.shape for arg in args])
# Concatenate matches and mismatches
data = np.concatenate(data_list) if len(data_list) else np.empty((0,), dtype=func_value.dtype)
coords = np.concatenate(coords_list, axis=1) if len(coords_list) else \
np.empty((0, len(result_shape)), dtype=np.min_scalar_type(max(result_shape) - 1))
nonzero = data != func_zero
data = data[nonzero]
coords = coords[:, nonzero]
return COO(coords, data, shape=result_shape, has_duplicates=False)
def _match_coo(*args, **kwargs):
"""
Matches the coordinates for any number of input :obj:`COO` arrays.
Equivalent to "sparse" broadcasting for all arrays.
Parameters
----------
args : Tuple[COO]
The input :obj:`COO` arrays.
return_midx : bool
Whether to return matched indices or matched arrays. Matching
only supported for two arrays. ``False`` by default.
cache : dict
Cache of things already matched. No cache by default.
Returns
-------
matched_idx : List[ndarray]
The indices of matched elements in the original arrays. Only returned if
``return_midx`` is ``True``.
matched_arrays : List[COO]
The expanded, matched :obj:`COO` objects. Only returned if
``return_midx`` is ``False``.
"""
from .core import COO
from .common import linear_loc
return_midx = kwargs.pop('return_midx', False)
cache = kwargs.pop('cache', None)
if kwargs:
raise ValueError('Unknown kwargs %s' % kwargs.keys())
if return_midx and (len(args) != 2 or cache is not None):
raise NotImplementedError('Matching indices only supported for two args, and no cache.')
matched_arrays = [args[0]]
cache_key = [id(args[0])]
for arg2 in args[1:]:
cache_key.append(id(arg2))
key = tuple(cache_key)
if cache is not None and key in cache:
matched_arrays = cache[key]
continue
cargs = [matched_arrays[0], arg2]
current_shape = _get_broadcast_shape(matched_arrays[0].shape, arg2.shape)
params = [_get_broadcast_parameters(arg.shape, current_shape) for arg in cargs]
reduced_params = [all(p) for p in zip(*params)]
reduced_shape = _get_reduced_shape(arg2.shape,
reduced_params[-arg2.ndim:])
reduced_coords = [_get_reduced_coords(arg.coords, reduced_params[-arg.ndim:])
for arg in cargs]
linear = [linear_loc(rc, reduced_shape) for rc in reduced_coords]
sorted_idx = [np.argsort(idx) for idx in linear]
linear = [idx[s] for idx, s in zip(linear, sorted_idx)]
matched_idx = _match_arrays(*linear)
if return_midx:
matched_idx = [sidx[midx] for sidx, midx in zip(sorted_idx, matched_idx)]
return matched_idx
coords = [arg.coords[:, s] for arg, s in zip(cargs, sorted_idx)]
mcoords = [c[:, idx] for c, idx in zip(coords, matched_idx)]
mcoords = _get_matching_coords(mcoords, params, current_shape)
mdata = [arg.data[sorted_idx[0]][matched_idx[0]] for arg in matched_arrays]
mdata.append(arg2.data[sorted_idx[1]][matched_idx[1]])
matched_arrays = [COO(mcoords, md, shape=current_shape) for md in mdata]
if cache is not None:
cache[key] = matched_arrays
return matched_arrays
def _unmatch_coo(func, args, mask, cache, **kwargs):
"""
Matches the coordinates for any number of input :obj:`COO` arrays.
First computes the matches, then filters out the non-matches.
Parameters
----------
func : Callable
The function to compute matches
args : tuple[COO]
The input :obj:`COO` arrays.
mask : tuple[bool]
Specifies the inputs that are zero and the ones that are
nonzero.
kwargs: dict
Extra keyword arguments to pass to func.
Returns
-------
matched_coords : list[ndarray]
The matched coordinates.
matched_data : list[ndarray]
The matched data.
"""
from .core import COO
matched_args = [a for a, m in zip(args, mask) if m]
unmatched_args = [a for a, m in zip(args, mask) if not m]
matched_arrays = _match_coo(*matched_args, cache=cache)
pos = tuple(i for i, m in enumerate(mask) if not m)
posargs = [_zero_of_dtype(arg.dtype) for arg, m in zip(args, mask) if not m]
result_shape = _get_nary_broadcast_shape(*[arg.shape for arg in args])
partial = PositinalArgumentPartial(func, pos, posargs)
matched_func = partial(*[a.data for a in matched_arrays], **kwargs)
unmatched_mask = matched_func != _zero_of_dtype(matched_func.dtype)
if not unmatched_mask.any():
return [], []
func_data = matched_func[unmatched_mask]
func_coords = matched_arrays[0].coords[:, unmatched_mask]
func_array = COO(func_coords, func_data, shape=matched_arrays[0].shape).broadcast_to(result_shape)
if all(mask):
return [func_array.coords], [func_array.data]
unmatched_mask = np.ones(func_array.nnz, dtype=np.bool)
for arg in unmatched_args:
matched_idx = _match_coo(func_array, arg, return_midx=True)[0]
unmatched_mask[matched_idx] = False
coords = np.asarray(func_array.coords[:, unmatched_mask], order='C')
data = np.asarray(func_array.data[unmatched_mask], order='C')
return [coords], [data]
def _get_nary_broadcast_shape(*shapes):
"""
Broadcast any number of shapes to a result shape.
Parameters
----------
shapes : tuple[tuple[int]]
The shapes to broadcast.
Returns
-------
tuple[int]
The output shape.
Raises
------
ValueError
If the input shapes cannot be broadcast to a single shape.
"""
result_shape = ()
for shape in shapes:
try:
result_shape = _get_broadcast_shape(shape, result_shape)
except ValueError:
shapes_str = ', '.join(str(shape) for shape in shapes)
raise ValueError('operands could not be broadcast together with shapes %s'
% shapes_str)
return result_shape
def _get_broadcast_shape(shape1, shape2, is_result=False):
"""
Get the overall broadcasted shape.
Parameters
----------
shape1, shape2 : tuple[int]
The input shapes to broadcast together.
is_result : bool
Whether or not shape2 is also the result shape.
Returns
-------
result_shape : tuple[int]
The overall shape of the result.
Raises
------
ValueError
If the two shapes cannot be broadcast together.
"""
# https://stackoverflow.com/a/47244284/774273
if not all((l1 == l2) or (l1 == 1) or ((l2 == 1) and not is_result) for l1, l2 in
zip(shape1[::-1], shape2[::-1])):
raise ValueError('operands could not be broadcast together with shapes %s, %s' %
(shape1, shape2))
result_shape = tuple(max(l1, l2) for l1, l2 in
zip_longest(shape1[::-1], shape2[::-1], fillvalue=1))[::-1]
return result_shape
def _get_broadcast_parameters(shape, broadcast_shape):
"""
Get the broadcast parameters.
Parameters
----------
shape : tuple[int]
The input shape.
broadcast_shape
The shape to broadcast to.
Returns
-------
params : list
A list containing None if the dimension isn't in the original array, False if
it needs to be broadcast, and True if it doesn't.
"""
params = [None if l1 is None else l1 == l2 for l1, l2
in zip_longest(shape[::-1], broadcast_shape[::-1], fillvalue=None)][::-1]
return params
def _get_reduced_coords(coords, params):
"""
Gets only those dimensions of the coordinates that don't need to be broadcast.
Parameters
----------
coords : np.ndarray
The coordinates to reduce.
params : list
The params from which to check which dimensions to get.
Returns
-------
reduced_coords : np.ndarray
The reduced coordinates.
"""
reduced_params = [bool(param) for param in params]
return coords[reduced_params]
def _get_reduced_shape(shape, params):
"""
Gets only those dimensions of the coordinates that don't need to be broadcast.
Parameters
----------
coords : np.ndarray
The coordinates to reduce.
params : list
The params from which to check which dimensions to get.
Returns
-------
reduced_coords : np.ndarray
The reduced coordinates.
"""
reduced_shape = tuple(l for l, p in zip(shape, params) if p)
return reduced_shape
def _get_expanded_coords_data(coords, data, params, broadcast_shape):
"""
Expand coordinates/data to broadcast_shape. Does most of the heavy lifting for broadcast_to.
Produces sorted output for sorted inputs.
Parameters
----------
coords : np.ndarray
The coordinates to expand.
data : np.ndarray
The data corresponding to the coordinates.
params : list
The broadcast parameters.
broadcast_shape : tuple[int]
The shape to broadcast to.
Returns
-------
expanded_coords : np.ndarray
List of 1-D arrays. Each item in the list has one dimension of coordinates.
expanded_data : np.ndarray
The data corresponding to expanded_coords.
"""
first_dim = -1
expand_shapes = []
for d, p, l in zip(range(len(broadcast_shape)), params, broadcast_shape):
if p and first_dim == -1:
expand_shapes.append(coords.shape[1])
first_dim = d
if not p:
expand_shapes.append(l)
all_idx = _cartesian_product(*(np.arange(d, dtype=np.min_scalar_type(d - 1)) for d in expand_shapes))
dt = np.result_type(*(np.min_scalar_type(l - 1) for l in broadcast_shape))
false_dim = 0
dim = 0
expanded_coords = np.empty((len(broadcast_shape), all_idx.shape[1]), dtype=dt)
expanded_data = data[all_idx[first_dim]]
for d, p, l in zip(range(len(broadcast_shape)), params, broadcast_shape):
if p:
expanded_coords[d] = coords[dim, all_idx[first_dim]]
else:
expanded_coords[d] = all_idx[false_dim + (d > first_dim)]
false_dim += 1
if p is not None:
dim += 1
return np.asarray(expanded_coords), np.asarray(expanded_data)
# (c) senderle
# Taken from https://stackoverflow.com/a/11146645/774273
# License: https://creativecommons.org/licenses/by-sa/3.0/
def _cartesian_product(*arrays):
"""
Get the cartesian product of a number of arrays.
Parameters
----------
arrays : Tuple[np.ndarray]
The arrays to get a cartesian product of. Always sorted with respect
to the original array.
Returns
-------
out : np.ndarray
The overall cartesian product of all the input arrays.
"""
broadcastable = np.ix_(*arrays)
broadcasted = np.broadcast_arrays(*broadcastable)
rows, cols = np.prod(broadcasted[0].shape), len(broadcasted)
dtype = np.result_type(*arrays)
out = np.empty(rows * cols, dtype=dtype)
start, end = 0, rows
for a in broadcasted:
out[start:end] = a.reshape(-1)
start, end = end, end + rows
return out.reshape(cols, rows)
def _get_matching_coords(coords, params, shape):
"""
Get the matching coords across a number of broadcast operands.
Parameters
----------
coords : list[numpy.ndarray]
The input coordinates.
params : list[Union[bool, none]]
The broadcast parameters.
Returns
-------
numpy.ndarray
The broacasted coordinates
"""
matching_coords = []
dims = np.zeros(len(coords), dtype=np.uint8)
for p_all in zip(*params):
for i, p in enumerate(p_all):
if p:
matching_coords.append(coords[i][dims[i]])
break
else:
matching_coords.append(coords[dims[0]])
for i, p in enumerate(p_all):
if p is not None:
dims[i] += 1
dtype = np.min_scalar_type(max(shape) - 1)
return np.asarray(matching_coords, dtype=dtype)
def broadcast_to(x, shape):
"""
Performs the equivalent of :obj:`numpy.broadcast_to` for :obj:`COO`. Note that
this function returns a new array instead of a view.
Parameters
----------
shape : tuple[int]
The shape to broadcast the data to.
Returns
-------
COO
The broadcasted sparse array.
Raises
------
ValueError
If the operand cannot be broadcast to the given shape.
See also
--------
:obj:`numpy.broadcast_to` : NumPy equivalent function
"""
from .core import COO
if shape == x.shape:
return x
result_shape = _get_broadcast_shape(x.shape, shape, is_result=True)
params = _get_broadcast_parameters(x.shape, result_shape)
coords, data = _get_expanded_coords_data(x.coords, x.data, params, result_shape)
return COO(coords, data, shape=result_shape, has_duplicates=False,
sorted=True)
|
en
| 0.678978
|
Apply a function to any number of arguments. Parameters ---------- func : Callable The function to apply. Must support broadcasting. args : tuple, optional The arguments to the function. Can be :obj:`SparseArray` objects or :obj:`scipy.sparse.spmatrix` objects. kwargs : dict, optional Any additional arguments to pass to the function. Returns ------- COO The result of applying the function. Raises ------ ValueError If the operation would result in a dense matrix, or if the operands don't have broadcastable shapes. See Also -------- :obj:`numpy.ufunc` : A similar Numpy construct. Note that any :code:`ufunc` can be used as the :code:`func` input to this function. Notes ----- Previously, operations with Numpy arrays were sometimes supported. Now, it is necessary to convert Numpy arrays to :obj:`COO` objects. # Because we need to mutate args. # Faster and more reliable to pass ()-shaped ndarrays as scalars. # Filter out scalars as they are 'baked' into the function. # pragma: no cover Finds all indexes into a and b such that a[i] = b[j]. The outputs are sorted in lexographical order. Parameters ---------- a, b : np.ndarray The input 1-D arrays to match. If matching of multiple fields is needed, use np.recarrays. These two arrays must be sorted. Returns ------- a_idx, b_idx : np.ndarray The output indices of every possible pair of matching elements. Apply a function to any number of arguments with broadcasting. Parameters ---------- func : Callable The function to apply to arguments. Must support broadcasting. args : list Input :obj:`COO` or :obj:`numpy.ndarray`s. kwargs : dict Additional arguments to pass to the function. Returns ------- COO The output array. Raises ------ ValueError If the input shapes aren't compatible or the result will be dense. # Concatenate matches and mismatches Matches the coordinates for any number of input :obj:`COO` arrays. Equivalent to "sparse" broadcasting for all arrays. Parameters ---------- args : Tuple[COO] The input :obj:`COO` arrays. 
return_midx : bool Whether to return matched indices or matched arrays. Matching only supported for two arrays. ``False`` by default. cache : dict Cache of things already matched. No cache by default. Returns ------- matched_idx : List[ndarray] The indices of matched elements in the original arrays. Only returned if ``return_midx`` is ``True``. matched_arrays : List[COO] The expanded, matched :obj:`COO` objects. Only returned if ``return_midx`` is ``False``. Matches the coordinates for any number of input :obj:`COO` arrays. First computes the matches, then filters out the non-matches. Parameters ---------- func : Callable The function to compute matches args : tuple[COO] The input :obj:`COO` arrays. mask : tuple[bool] Specifies the inputs that are zero and the ones that are nonzero. kwargs: dict Extra keyword arguments to pass to func. Returns ------- matched_coords : list[ndarray] The matched coordinates. matched_data : list[ndarray] The matched data. Broadcast any number of shapes to a result shape. Parameters ---------- shapes : tuple[tuple[int]] The shapes to broadcast. Returns ------- tuple[int] The output shape. Raises ------ ValueError If the input shapes cannot be broadcast to a single shape. Get the overall broadcasted shape. Parameters ---------- shape1, shape2 : tuple[int] The input shapes to broadcast together. is_result : bool Whether or not shape2 is also the result shape. Returns ------- result_shape : tuple[int] The overall shape of the result. Raises ------ ValueError If the two shapes cannot be broadcast together. # https://stackoverflow.com/a/47244284/774273 Get the broadcast parameters. Parameters ---------- shape : tuple[int] The input shape. broadcast_shape The shape to broadcast to. Returns ------- params : list A list containing None if the dimension isn't in the original array, False if it needs to be broadcast, and True if it doesn't. Gets only those dimensions of the coordinates that don't need to be broadcast. 
Parameters ---------- coords : np.ndarray The coordinates to reduce. params : list The params from which to check which dimensions to get. Returns ------- reduced_coords : np.ndarray The reduced coordinates. Gets only those dimensions of the coordinates that don't need to be broadcast. Parameters ---------- coords : np.ndarray The coordinates to reduce. params : list The params from which to check which dimensions to get. Returns ------- reduced_coords : np.ndarray The reduced coordinates. Expand coordinates/data to broadcast_shape. Does most of the heavy lifting for broadcast_to. Produces sorted output for sorted inputs. Parameters ---------- coords : np.ndarray The coordinates to expand. data : np.ndarray The data corresponding to the coordinates. params : list The broadcast parameters. broadcast_shape : tuple[int] The shape to broadcast to. Returns ------- expanded_coords : np.ndarray List of 1-D arrays. Each item in the list has one dimension of coordinates. expanded_data : np.ndarray The data corresponding to expanded_coords. # (c) senderle # Taken from https://stackoverflow.com/a/11146645/774273 # License: https://creativecommons.org/licenses/by-sa/3.0/ Get the cartesian product of a number of arrays. Parameters ---------- arrays : Tuple[np.ndarray] The arrays to get a cartesian product of. Always sorted with respect to the original array. Returns ------- out : np.ndarray The overall cartesian product of all the input arrays. Get the matching coords across a number of broadcast operands. Parameters ---------- coords : list[numpy.ndarray] The input coordinates. params : list[Union[bool, none]] The broadcast parameters. Returns ------- numpy.ndarray The broacasted coordinates Performs the equivalent of :obj:`numpy.broadcast_to` for :obj:`COO`. Note that this function returns a new array instead of a view. Parameters ---------- shape : tuple[int] The shape to broadcast the data to. Returns ------- COO The broadcasted sparse array. 
Raises ------ ValueError If the operand cannot be broadcast to the given shape. See also -------- :obj:`numpy.broadcast_to` : NumPy equivalent function
| 3.004773
| 3
|
src/sales/__init__.py
|
TechiStack/SalesProject
| 0
|
6629526
|
default_app_config = 'sales.apps.SalesConfig'
|
default_app_config = 'sales.apps.SalesConfig'
|
none
| 1
| 1.09936
| 1
|
|
scripts/resnet_sd/eval_model_c10.py
|
jielyugt/calibration
| 91
|
6629527
|
# Load in model weights and evaluate its goodness (ECE, MCE, error) also saves logits
import numpy as np
import collections
import pickle
from resnet_sd import resnet_sd_model
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.optimizers import SGD
from keras.datasets import cifar10
from keras.utils import np_utils
# Imports to get "utility" package
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath("utility") ) ) )
from utility.evaluation import evaluate_model
# Per channel mean and std normalization
def color_preprocessing(x_train, x_val, x_test):
x_train = x_train.astype('float32')
x_val = x_val.astype('float32')
x_test = x_test.astype('float32')
mean = np.mean(x_train, axis=(0,1,2)) # Per channel mean
std = np.std(x_train, axis=(0,1,2))
x_train = (x_train - mean) / std
x_val = (x_val - mean) / std
x_test = (x_test - mean) / std
return x_train, x_val, x_test
if __name__ == '__main__':
# constants
img_rows, img_cols = 32, 32
img_channels = 3
nb_epochs = 500
batch_size = 128
nb_classes = 10
seed = 333
weights_file = "../../models/resnet_110_SD_c10.hdf5"
# data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Data splitting (get additional 5k validation set)
# Sklearn to split
x_train45, x_val, y_train45, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=seed) # random_state = seed
x_train45, x_val, x_test = color_preprocessing(x_train45, x_val, x_test) # Mean per channel
y_train45 = np_utils.to_categorical(y_train45, nb_classes) # 1-hot vector
y_val = np_utils.to_categorical(y_val, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
# building and training net
model = resnet_sd_model(img_shape = (32,32), img_channels = 3,
layers = 110, nb_classes = nb_classes, verbose = True)
evaluate_model(model, weights_file, x_test, y_test, bins = 15, verbose = True,
pickle_file = "probs_resnet110_SD_c10", x_val = x_val, y_val = y_val)
|
# Load in model weights and evaluate its goodness (ECE, MCE, error) also saves logits
import numpy as np
import collections
import pickle
from resnet_sd import resnet_sd_model
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.optimizers import SGD
from keras.datasets import cifar10
from keras.utils import np_utils
# Imports to get "utility" package
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath("utility") ) ) )
from utility.evaluation import evaluate_model
# Per channel mean and std normalization
def color_preprocessing(x_train, x_val, x_test):
x_train = x_train.astype('float32')
x_val = x_val.astype('float32')
x_test = x_test.astype('float32')
mean = np.mean(x_train, axis=(0,1,2)) # Per channel mean
std = np.std(x_train, axis=(0,1,2))
x_train = (x_train - mean) / std
x_val = (x_val - mean) / std
x_test = (x_test - mean) / std
return x_train, x_val, x_test
if __name__ == '__main__':
# constants
img_rows, img_cols = 32, 32
img_channels = 3
nb_epochs = 500
batch_size = 128
nb_classes = 10
seed = 333
weights_file = "../../models/resnet_110_SD_c10.hdf5"
# data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Data splitting (get additional 5k validation set)
# Sklearn to split
x_train45, x_val, y_train45, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=seed) # random_state = seed
x_train45, x_val, x_test = color_preprocessing(x_train45, x_val, x_test) # Mean per channel
y_train45 = np_utils.to_categorical(y_train45, nb_classes) # 1-hot vector
y_val = np_utils.to_categorical(y_val, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
# building and training net
model = resnet_sd_model(img_shape = (32,32), img_channels = 3,
layers = 110, nb_classes = nb_classes, verbose = True)
evaluate_model(model, weights_file, x_test, y_test, bins = 15, verbose = True,
pickle_file = "probs_resnet110_SD_c10", x_val = x_val, y_val = y_val)
|
en
| 0.853697
|
# Load in model weights and evaluate its goodness (ECE, MCE, error) also saves logits # Imports to get "utility" package # Per channel mean and std normalization # Per channel mean # constants # data # Data splitting (get additional 5k validation set) # Sklearn to split # random_state = seed # Mean per channel # 1-hot vector # building and training net
| 2.623236
| 3
|
wmdadict/migrations/0018_auto_20170830_2143.py
|
antmont/wmda-stuff
| 1
|
6629528
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-30 11:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wmdadict', '0017_auto_20170830_1920'),
]
operations = [
migrations.RemoveField(
model_name='emdisfield',
name='field_type',
),
migrations.AlterField(
model_name='emdisfield',
name='emdis_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wmdadict.EmdisFieldType', verbose_name='field type'),
),
migrations.AlterField(
model_name='emdisfield',
name='field_length',
field=models.PositiveIntegerField(),
),
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-30 11:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wmdadict', '0017_auto_20170830_1920'),
]
operations = [
migrations.RemoveField(
model_name='emdisfield',
name='field_type',
),
migrations.AlterField(
model_name='emdisfield',
name='emdis_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wmdadict.EmdisFieldType', verbose_name='field type'),
),
migrations.AlterField(
model_name='emdisfield',
name='field_length',
field=models.PositiveIntegerField(),
),
]
|
en
| 0.712553
|
# -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2017-08-30 11:43
| 1.526342
| 2
|
__init__.py
|
vais-ral/SliceOPy
| 0
|
6629529
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 15 14:52:29 2018
@author: lhe39759
"""
from .DataSlice import DataSlice
from .NetSlice import NetSlice
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 15 14:52:29 2018
@author: lhe39759
"""
from .DataSlice import DataSlice
from .NetSlice import NetSlice
|
en
| 0.817018
|
# -*- coding: utf-8 -*- Created on Wed Aug 15 14:52:29 2018 @author: lhe39759
| 0.911863
| 1
|
chroma-manager/tests/integration/shared_storage_configuration/test_firewall_access.py
|
GarimaVishvakarma/intel-chroma
| 0
|
6629530
|
<filename>chroma-manager/tests/integration/shared_storage_configuration/test_firewall_access.py<gh_stars>0
from testconfig import config
from django.utils.unittest import skipIf
from tests.utils.remote_firewall_control import RemoteFirewallControl
from tests.integration.core.chroma_integration_testcase import ChromaIntegrationTestCase
from tests.integration.core.remote_operations import RealRemoteOperations
class TestFirewall(ChromaIntegrationTestCase):
GREP_NOTFOUND_RC = 1
def setUp(self):
super(TestFirewall, self).setUp()
self.remote_operations = RealRemoteOperations(self)
@skipIf(config.get('simulator'), "Can't be simulated")
def test_manager(self):
""" Test that the manager has the required selinux setting and firewall access rules installed"""
chroma_manager = config['chroma_managers'][0]
self.assertEqual('Enforcing\n',
self.remote_operations._ssh_address(chroma_manager['address'], 'getenforce').stdout)
# TODO: refactor reset_cluster/reset_chroma_manager_db so that previous
# state can be cleaned up without initializing the DB
# then we can do a before/after firewall state comparison where
# before and after are before chroma-config setup and after it
# XXX: this assumes there is only one manager
iml_port_proto_filter = [(80, 'tcp'), (443, 'tcp')]
if chroma_manager.get('ntp_server', "localhost") == "localhost":
iml_port_proto_filter.append((123, 'udp'))
iml_rules = self._process_ip_rules(chroma_manager, iml_port_proto_filter)
self.assertEqual(len(iml_rules), len(iml_port_proto_filter))
def _process_ip_rules(self, server, port_proto_filter=None):
"""
Retrieve matching rules or entire set from given server
:param server: target server we wish to retrieve rules from
:param port_proto_filter: optional list of port/proto pairs to look for
:return: RemoteFirewallControl.rules list of matching active firewall rules
"""
# process rules on remote firewall in current state
firewall = RemoteFirewallControl.create(server['address'], self.remote_operations._ssh_address_no_check)
firewall.process_rules()
if port_proto_filter:
# we want to match firewall rules stored in member list 'firewall.rules' with those supplied in
# port/proto tuples list 'port_proto_filter'. We also want to match rules with proto == 'all' (iptables).
rules = []
for rule in firewall.rules:
if (int(rule.port), rule.protocol) in port_proto_filter:
rules.append(rule)
elif rule.protocol == 'any' and int(rule.port) in [f[0] for f in port_proto_filter]:
rules.append(rule)
return rules
else:
return firewall.rules
@skipIf(config.get('simulator'), "Can't be simulated")
def test_agent(self):
"""
Test that when hosts are added and a filesytem is created, that all required firewall accesses are
installed
"""
servers = self.TEST_SERVERS[0:4]
host_addresses = [s['address'] for s in servers]
self.hosts = self.add_hosts(host_addresses)
self.configure_power_control(host_addresses)
volumes = self.wait_for_shared_volumes(4, 4)
mgt_volume = volumes[0]
mdt_volume = volumes[1]
ost1_volume = volumes[2]
ost2_volume = volumes[3]
self.set_volume_mounts(mgt_volume, self.hosts[0]['id'], self.hosts[1]['id'])
self.set_volume_mounts(mdt_volume, self.hosts[1]['id'], self.hosts[0]['id'])
self.set_volume_mounts(ost1_volume, self.hosts[2]['id'], self.hosts[3]['id'])
self.set_volume_mounts(ost2_volume, self.hosts[3]['id'], self.hosts[2]['id'])
self.filesystem_id = self.create_filesystem(self.hosts,
{'name': 'testfs',
'mgt': {'volume_id': mgt_volume['id']},
'mdts': [{
'volume_id': mdt_volume['id'],
'conf_params': {}}],
'osts': [{
'volume_id': ost1_volume['id'],
'conf_params': {}}, {
'volume_id': ost2_volume['id'],
'conf_params': {}}],
'conf_params': {}})
mcast_ports = {}
for server in servers:
self.assertNotEqual('Enforcing\n',
self.remote_operations._ssh_address(server['address'], 'getenforce').stdout)
mcast_port = self.remote_operations.get_corosync_port(server['fqdn'])
self.assertIsNotNone(mcast_port)
mcast_ports[server['address']] = mcast_port
matching_rules = self._process_ip_rules(server, [(mcast_port, 'udp'), (988, 'tcp')])
self.assertEqual(len(matching_rules), 2)
# tear it down and make sure firewall rules are cleaned up
self.graceful_teardown(self.chroma_manager)
for server in servers:
mcast_port = mcast_ports[server['address']]
matching_rules = self._process_ip_rules(server, [(mcast_port, 'udp')])
self.assertEqual(len(matching_rules), 0)
# retrieve command string compatible with this server target
firewall = RemoteFirewallControl.create(server['address'], self.remote_operations._ssh_address_no_check)
# test that the remote firewall configuration doesn't include rules to enable the mcast_port
self.remote_operations._ssh_address(server['address'],
firewall.remote_validate_persistent_rule_cmd(mcast_port),
expected_return_code=self.GREP_NOTFOUND_RC)
|
<filename>chroma-manager/tests/integration/shared_storage_configuration/test_firewall_access.py<gh_stars>0
from testconfig import config
from django.utils.unittest import skipIf
from tests.utils.remote_firewall_control import RemoteFirewallControl
from tests.integration.core.chroma_integration_testcase import ChromaIntegrationTestCase
from tests.integration.core.remote_operations import RealRemoteOperations
class TestFirewall(ChromaIntegrationTestCase):
GREP_NOTFOUND_RC = 1
def setUp(self):
super(TestFirewall, self).setUp()
self.remote_operations = RealRemoteOperations(self)
@skipIf(config.get('simulator'), "Can't be simulated")
def test_manager(self):
""" Test that the manager has the required selinux setting and firewall access rules installed"""
chroma_manager = config['chroma_managers'][0]
self.assertEqual('Enforcing\n',
self.remote_operations._ssh_address(chroma_manager['address'], 'getenforce').stdout)
# TODO: refactor reset_cluster/reset_chroma_manager_db so that previous
# state can be cleaned up without initializing the DB
# then we can do a before/after firewall state comparison where
# before and after are before chroma-config setup and after it
# XXX: this assumes there is only one manager
iml_port_proto_filter = [(80, 'tcp'), (443, 'tcp')]
if chroma_manager.get('ntp_server', "localhost") == "localhost":
iml_port_proto_filter.append((123, 'udp'))
iml_rules = self._process_ip_rules(chroma_manager, iml_port_proto_filter)
self.assertEqual(len(iml_rules), len(iml_port_proto_filter))
def _process_ip_rules(self, server, port_proto_filter=None):
"""
Retrieve matching rules or entire set from given server
:param server: target server we wish to retrieve rules from
:param port_proto_filter: optional list of port/proto pairs to look for
:return: RemoteFirewallControl.rules list of matching active firewall rules
"""
# process rules on remote firewall in current state
firewall = RemoteFirewallControl.create(server['address'], self.remote_operations._ssh_address_no_check)
firewall.process_rules()
if port_proto_filter:
# we want to match firewall rules stored in member list 'firewall.rules' with those supplied in
# port/proto tuples list 'port_proto_filter'. We also want to match rules with proto == 'all' (iptables).
rules = []
for rule in firewall.rules:
if (int(rule.port), rule.protocol) in port_proto_filter:
rules.append(rule)
elif rule.protocol == 'any' and int(rule.port) in [f[0] for f in port_proto_filter]:
rules.append(rule)
return rules
else:
return firewall.rules
@skipIf(config.get('simulator'), "Can't be simulated")
def test_agent(self):
"""
Test that when hosts are added and a filesytem is created, that all required firewall accesses are
installed
"""
servers = self.TEST_SERVERS[0:4]
host_addresses = [s['address'] for s in servers]
self.hosts = self.add_hosts(host_addresses)
self.configure_power_control(host_addresses)
volumes = self.wait_for_shared_volumes(4, 4)
mgt_volume = volumes[0]
mdt_volume = volumes[1]
ost1_volume = volumes[2]
ost2_volume = volumes[3]
self.set_volume_mounts(mgt_volume, self.hosts[0]['id'], self.hosts[1]['id'])
self.set_volume_mounts(mdt_volume, self.hosts[1]['id'], self.hosts[0]['id'])
self.set_volume_mounts(ost1_volume, self.hosts[2]['id'], self.hosts[3]['id'])
self.set_volume_mounts(ost2_volume, self.hosts[3]['id'], self.hosts[2]['id'])
self.filesystem_id = self.create_filesystem(self.hosts,
{'name': 'testfs',
'mgt': {'volume_id': mgt_volume['id']},
'mdts': [{
'volume_id': mdt_volume['id'],
'conf_params': {}}],
'osts': [{
'volume_id': ost1_volume['id'],
'conf_params': {}}, {
'volume_id': ost2_volume['id'],
'conf_params': {}}],
'conf_params': {}})
mcast_ports = {}
for server in servers:
self.assertNotEqual('Enforcing\n',
self.remote_operations._ssh_address(server['address'], 'getenforce').stdout)
mcast_port = self.remote_operations.get_corosync_port(server['fqdn'])
self.assertIsNotNone(mcast_port)
mcast_ports[server['address']] = mcast_port
matching_rules = self._process_ip_rules(server, [(mcast_port, 'udp'), (988, 'tcp')])
self.assertEqual(len(matching_rules), 2)
# tear it down and make sure firewall rules are cleaned up
self.graceful_teardown(self.chroma_manager)
for server in servers:
mcast_port = mcast_ports[server['address']]
matching_rules = self._process_ip_rules(server, [(mcast_port, 'udp')])
self.assertEqual(len(matching_rules), 0)
# retrieve command string compatible with this server target
firewall = RemoteFirewallControl.create(server['address'], self.remote_operations._ssh_address_no_check)
# test that the remote firewall configuration doesn't include rules to enable the mcast_port
self.remote_operations._ssh_address(server['address'],
firewall.remote_validate_persistent_rule_cmd(mcast_port),
expected_return_code=self.GREP_NOTFOUND_RC)
|
en
| 0.866497
|
Test that the manager has the required selinux setting and firewall access rules installed # TODO: refactor reset_cluster/reset_chroma_manager_db so that previous # state can be cleaned up without initializing the DB # then we can do a before/after firewall state comparison where # before and after are before chroma-config setup and after it # XXX: this assumes there is only one manager Retrieve matching rules or entire set from given server :param server: target server we wish to retrieve rules from :param port_proto_filter: optional list of port/proto pairs to look for :return: RemoteFirewallControl.rules list of matching active firewall rules # process rules on remote firewall in current state # we want to match firewall rules stored in member list 'firewall.rules' with those supplied in # port/proto tuples list 'port_proto_filter'. We also want to match rules with proto == 'all' (iptables). Test that when hosts are added and a filesytem is created, that all required firewall accesses are installed # tear it down and make sure firewall rules are cleaned up # retrieve command string compatible with this server target # test that the remote firewall configuration doesn't include rules to enable the mcast_port
| 1.967995
| 2
|
registration/urls.py
|
chris-huynh/via1-registration-website
| 0
|
6629531
|
from django.conf.urls import url, include
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^login/$', auth_views.login, name='login'),
url(r'^logout/$', auth_views.logout, {'next_page': '/registration/login'}, name='logout'),
url(r'^home/$', views.home, name='home'),
url(r'^profile/$', views.profile, name='profile'),
url(r'^profile/submit_profile', views.submit_profile, name='submit_profile'),
url(r'^profile/upload_picture', views.upload_picture, name='upload_picture'),
url(r'^profile/remove_picture', views.remove_picture, name='remove_picture'),
url(r'^profile/change_waiver_policy_agreement/$', views.change_waiver_policy_agreement, name='change_waiver_policy_agreement'),
url(r'^hotel/$', views.hotel, name='hotel'),
url(r'^hotel/create_hotel_room/$', views.create_hotel_room, name='create_hotel_room'),
url(r'^hotel/change_coed_preference/$', views.change_coed_preference, name='change_coed_preference'),
url(r'^hotel/disband_room/$', views.disband_room, name='disband_room'),
url(r'^hotel/join_room/$', views.join_room, name='join_room'),
url(r'^hotel/remove_roommate/$', views.remove_roommate, name='remove_roommate'),
url(r'^hotel/whole_room_save_roommates/$',views.whole_room_save_roommates, name='whole_room_save_roommates'),
url(r'^workshops/$', views.workshops, name='workshops'),
url(r'^workshops/choose_workshop/(?P<wid>[0-9]+)/$', views.choose_workshop, name='choose_workshop'),
url(r'^families/$', views.families, name='families'),
url(r'^families/(?P<fid>[0-9]+)/$', views.family, name='family'),
url(r'^register/$', views.register, name='register'),
url(r'^password_reset/$', auth_views.password_reset, name='password_reset'),
url(r'^password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', auth_views.password_reset_confirm, name='password_reset_confirm'),
url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', views.activate, name='activate'),
url(r'^account_activation_sent/$', TemplateView.as_view(template_name='registration/account_activation_sent.html')),
url(r'^account_activation_invalid/', TemplateView.as_view(template_name='registration/account_activation_invalid.html')),
url(r'^account_activation_resend/', TemplateView.as_view(template_name='registration/account_activation_resend.html'),
name='account_activation_resend'),
url(r'^resend_activation_email/$', views.resend_activation_email, name='resend_activation_email'),
url(r'^home/member_school_verification/$', views.member_school_verification_request, name='member_school_verification_request'),
url(r'^home/member_school_verification_approve/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[<KEY>z]{1,13}-[0-9A-Za-z]{1,20})/(?P<school>.*)/$',
views.member_school_verification_approve, name='member_school_verification_approve'),
url(r'^home/member_school_verification_deny/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[<KEY>1,13}-[0-9A-Za-z]{1,20})/(?P<school>.*)/$',
views.member_school_verification_deny, name='member_school_verification_deny'),
url(r'^member_school_verification_approved/$', TemplateView.as_view(template_name='registration/mem_school_verif_approved.html')),
url(r'^member_school_verification_denied/$', TemplateView.as_view(template_name='registration/mem_school_verif_denied.html')),
url(r'^member_school_verification_invalid/$', TemplateView.as_view(template_name='registration/mem_school_verif_invalid.html')),
url(r'^alumni_verification/$', views.alumni_verification_request, name='alumni_verification_request'),
url(r'^alumni_verification_approve/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.alumni_verification_approve, name='alumni_verification_approve'),
url(r'^alumni_verification_deny/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.alumni_verification_deny, name='alumni_verification_deny'),
url(r'^alumni_verification_approved/$', TemplateView.as_view(template_name='registration/alumni_verification_approved.html')),
url(r'^alumni_verification_denied/$', TemplateView.as_view(template_name='registration/alumni_verification_denied.html')),
url(r'^alumni_verification_invalid/$', TemplateView.as_view(template_name='registration/alumni_verification_invalid.html')),
url(r'^ajax/is_conference_full/$', views.is_conference_full, name='is_conference_full'),
url(r'^ajax/update_paid_attendee/$', views.update_paid_attendee, name='update_paid_attendee'),
url(r'^ajax/update_hotel_paid/$', views.update_hotel_paid, name='update_hotel_paid'),
url(r'^home/refund_request/$', views.refund_request, name='refund_request'),
url(r'^refund_request_complete/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/(?P<pp_email>.*)/$',
views.refund_request_complete, name='refund_request_complete'),
url(r'^home/refund_hotel_request/$', views.refund_hotel_request, name='refund_hotel_request'),
url(r'^refund_hotel_request_complete/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[<KEY>]{1,13}-[0-9A-Za-z]{1,20})/(?P<pp_email>.*)/$',
views.refund_hotel_request_complete, name='refund_hotel_request_complete'),
url(r'^refund_request_complete/$', TemplateView.as_view(template_name='registration/refund_request_complete.html')),
url(r'^home/registration_code/$', views.registration_code, name='registration_code'),
url(r'^home/reg_code_complete/$', login_required(TemplateView.as_view(template_name='registration/reg_code_complete.html'))),
url(r'^code_generator/$', views.code_generator, name='code_generator'),
url(r'^code_generator/generate_code/$', views.generate_code, name='generate_code'),
url(r'^code_generator/remove_code/$', views.remove_code, name='remove_code'),
url(r'^home/code_payment/$', login_required(TemplateView.as_view(template_name='registration/code_payment.html')), name='code_payment'),
url(r'home/update_code_attendee/$', views.update_code_attendee, name='update_code_attendee'),
]
|
from django.conf.urls import url, include
from django.contrib.auth import views as auth_views
from django.views.generic import TemplateView
from django.contrib.auth.decorators import login_required
from . import views
# URL routes for the registration app.
# Fixes in this revision:
#  - restored the '[0-9A-Za-z]' token character classes that had been
#    corrupted to '<KEY>' placeholders (pattern copied from the intact
#    sibling routes, e.g. password_reset_confirm);
#  - anchored 'home/update_code_attendee/' with '^' like every other route
#    (without it the pattern could match as a suffix of unrelated paths);
#  - terminated the two account-activation routes with '$' for consistency
#    with 'account_activation_sent/$'.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    # --- Authentication ---
    url(r'^login/$', auth_views.login, name='login'),
    url(r'^logout/$', auth_views.logout, {'next_page': '/registration/login'}, name='logout'),
    url(r'^home/$', views.home, name='home'),
    # --- Profile management ---
    url(r'^profile/$', views.profile, name='profile'),
    url(r'^profile/submit_profile', views.submit_profile, name='submit_profile'),
    url(r'^profile/upload_picture', views.upload_picture, name='upload_picture'),
    url(r'^profile/remove_picture', views.remove_picture, name='remove_picture'),
    url(r'^profile/change_waiver_policy_agreement/$', views.change_waiver_policy_agreement, name='change_waiver_policy_agreement'),
    # --- Hotel rooms ---
    url(r'^hotel/$', views.hotel, name='hotel'),
    url(r'^hotel/create_hotel_room/$', views.create_hotel_room, name='create_hotel_room'),
    url(r'^hotel/change_coed_preference/$', views.change_coed_preference, name='change_coed_preference'),
    url(r'^hotel/disband_room/$', views.disband_room, name='disband_room'),
    url(r'^hotel/join_room/$', views.join_room, name='join_room'),
    url(r'^hotel/remove_roommate/$', views.remove_roommate, name='remove_roommate'),
    url(r'^hotel/whole_room_save_roommates/$', views.whole_room_save_roommates, name='whole_room_save_roommates'),
    # --- Workshops and families ---
    url(r'^workshops/$', views.workshops, name='workshops'),
    url(r'^workshops/choose_workshop/(?P<wid>[0-9]+)/$', views.choose_workshop, name='choose_workshop'),
    url(r'^families/$', views.families, name='families'),
    url(r'^families/(?P<fid>[0-9]+)/$', views.family, name='family'),
    # --- Account creation, password reset, activation ---
    url(r'^register/$', views.register, name='register'),
    url(r'^password_reset/$', auth_views.password_reset, name='password_reset'),
    url(r'^password_reset/done/$', auth_views.password_reset_done, name='password_reset_done'),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', auth_views.password_reset_confirm, name='password_reset_confirm'),
    url(r'^reset/done/$', auth_views.password_reset_complete, name='password_reset_complete'),
    url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', views.activate, name='activate'),
    url(r'^account_activation_sent/$', TemplateView.as_view(template_name='registration/account_activation_sent.html')),
    url(r'^account_activation_invalid/$', TemplateView.as_view(template_name='registration/account_activation_invalid.html')),
    url(r'^account_activation_resend/$', TemplateView.as_view(template_name='registration/account_activation_resend.html'),
        name='account_activation_resend'),
    url(r'^resend_activation_email/$', views.resend_activation_email, name='resend_activation_email'),
    # --- Member-school verification ---
    url(r'^home/member_school_verification/$', views.member_school_verification_request, name='member_school_verification_request'),
    url(r'^home/member_school_verification_approve/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/(?P<school>.*)/$',
        views.member_school_verification_approve, name='member_school_verification_approve'),
    url(r'^home/member_school_verification_deny/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/(?P<school>.*)/$',
        views.member_school_verification_deny, name='member_school_verification_deny'),
    url(r'^member_school_verification_approved/$', TemplateView.as_view(template_name='registration/mem_school_verif_approved.html')),
    url(r'^member_school_verification_denied/$', TemplateView.as_view(template_name='registration/mem_school_verif_denied.html')),
    url(r'^member_school_verification_invalid/$', TemplateView.as_view(template_name='registration/mem_school_verif_invalid.html')),
    # --- Alumni verification ---
    url(r'^alumni_verification/$', views.alumni_verification_request, name='alumni_verification_request'),
    url(r'^alumni_verification_approve/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        views.alumni_verification_approve, name='alumni_verification_approve'),
    url(r'^alumni_verification_deny/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        views.alumni_verification_deny, name='alumni_verification_deny'),
    url(r'^alumni_verification_approved/$', TemplateView.as_view(template_name='registration/alumni_verification_approved.html')),
    url(r'^alumni_verification_denied/$', TemplateView.as_view(template_name='registration/alumni_verification_denied.html')),
    url(r'^alumni_verification_invalid/$', TemplateView.as_view(template_name='registration/alumni_verification_invalid.html')),
    # --- AJAX endpoints ---
    url(r'^ajax/is_conference_full/$', views.is_conference_full, name='is_conference_full'),
    url(r'^ajax/update_paid_attendee/$', views.update_paid_attendee, name='update_paid_attendee'),
    url(r'^ajax/update_hotel_paid/$', views.update_hotel_paid, name='update_hotel_paid'),
    # --- Refunds ---
    url(r'^home/refund_request/$', views.refund_request, name='refund_request'),
    url(r'^refund_request_complete/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/(?P<pp_email>.*)/$',
        views.refund_request_complete, name='refund_request_complete'),
    url(r'^home/refund_hotel_request/$', views.refund_hotel_request, name='refund_hotel_request'),
    url(r'^refund_hotel_request_complete/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/(?P<pp_email>.*)/$',
        views.refund_hotel_request_complete, name='refund_hotel_request_complete'),
    url(r'^refund_request_complete/$', TemplateView.as_view(template_name='registration/refund_request_complete.html')),
    # --- Registration codes ---
    url(r'^home/registration_code/$', views.registration_code, name='registration_code'),
    url(r'^home/reg_code_complete/$', login_required(TemplateView.as_view(template_name='registration/reg_code_complete.html'))),
    url(r'^code_generator/$', views.code_generator, name='code_generator'),
    url(r'^code_generator/generate_code/$', views.generate_code, name='generate_code'),
    url(r'^code_generator/remove_code/$', views.remove_code, name='remove_code'),
    url(r'^home/code_payment/$', login_required(TemplateView.as_view(template_name='registration/code_payment.html')), name='code_payment'),
    # '^' added: without the anchor this pattern could also match as a
    # suffix of unrelated URL paths.
    url(r'^home/update_code_attendee/$', views.update_code_attendee, name='update_code_attendee'),
]
|
none
| 1
| 1.874965
| 2
|
|
cfgov/v1/signals.py
|
thephillipsequation/cfgov-refresh
| 4
|
6629532
|
<filename>cfgov/v1/signals.py<gh_stars>1-10
from datetime import timedelta
from django.core.cache import caches
from django.utils import timezone
from wagtail.core.signals import page_published
def new_phi(user, expiration_days=90, locked_days=1):
    """Record *user*'s current (hashed) password as a PasswordHistoryItem.

    The entry is locked (the password may not be changed) until
    ``locked_days`` from now and expires ``expiration_days`` from now.
    Any pending temporary lockouts on the user are cleared.

    Fix: ``encrypted_password`` was a redacted ``<PASSWORD>`` placeholder;
    it must store ``user.password`` (already hashed by Django) so that a
    later save can detect whether the password actually changed.
    """
    now = timezone.now()
    locked_until = now + timedelta(days=locked_days)
    expires_at = now + timedelta(days=expiration_days)

    # Imported locally to avoid a circular import at module load time.
    from v1.models import PasswordHistoryItem

    password_history = PasswordHistoryItem(
        user=user,
        encrypted_password=user.password,
        locked_until=locked_until,
        expires_at=expires_at
    )
    password_history.save()
    user.temporarylockout_set.all().delete()
def user_save_callback(sender, **kwargs):
    """post_save handler keeping a user's password history up to date.

    Fix: the comparison against the latest history entry used a redacted
    ``<PASSWORD>`` placeholder; it must compare against
    ``current_password_history.encrypted_password`` (which is also the
    only use of that local variable).
    """
    user = kwargs['instance']

    if kwargs['created']:
        if user.is_superuser:
            # If a superuser was created, don't expire its password.
            new_phi(user, locked_days=0)
        else:
            # If a regular user was just created, force a new password to be
            # set right away by expiring the password and unlocking it.
            new_phi(user, locked_days=0, expiration_days=0)
    else:
        # Existing user: only record a new history item when the hashed
        # password actually differs from the most recent entry.
        current_password_history = user.passwordhistoryitem_set.latest()
        if user.password != current_password_history.encrypted_password:
            new_phi(user)
def invalidate_post_preview(sender, **kwargs):
    """Drop the cached post preview for a page when it is published."""
    page = kwargs['instance']
    preview_cache = caches['post_preview']
    preview_cache.delete(page.post_preview_cache_key)


page_published.connect(invalidate_post_preview)
|
<filename>cfgov/v1/signals.py<gh_stars>1-10
from datetime import timedelta
from django.core.cache import caches
from django.utils import timezone
from wagtail.core.signals import page_published
def new_phi(user, expiration_days=90, locked_days=1):
now = timezone.now()
locked_until = now + timedelta(days=locked_days)
expires_at = now + timedelta(days=expiration_days)
from v1.models import PasswordHistoryItem
password_history = PasswordHistoryItem(
user=user,
encrypted_password=<PASSWORD>,
locked_until=locked_until,
expires_at=expires_at
)
password_history.save()
user.temporarylockout_set.all().delete()
def user_save_callback(sender, **kwargs):
user = kwargs['instance']
if kwargs['created']:
if user.is_superuser:
# If a superuser was created, don't expire its password.
new_phi(user, locked_days=0)
else:
# If a regular user was just created, force a new password to be
# set right away by expiring the password and unlocking it.
new_phi(user, locked_days=0, expiration_days=0)
else:
current_password_history = user.passwordhistoryitem_set.latest()
if user.password != <PASSWORD>:
new_phi(user)
def invalidate_post_preview(sender, **kwargs):
instance = kwargs['instance']
caches['post_preview'].delete(instance.post_preview_cache_key)
page_published.connect(invalidate_post_preview)
|
en
| 0.980255
|
# If a superuser was created, don't expire its password. # If a regular user was just created, force a new password to be # set right away by expiring the password and unlocking it.
| 2.078817
| 2
|
OneDriveUploader/config.py
|
SimeoneVilardo/OneDriveUploader
| 1
|
6629533
|
# Configuration for the OneDrive uploader: loads environment overrides
# from a .env file and defines the OAuth2 client id and endpoints.
from os.path import join, dirname
from dotenv import load_dotenv, find_dotenv

# Path of the .env file sitting next to this module.
# NOTE(review): dotenv_path is computed but never used -- load_dotenv()
# is given find_dotenv(), which searches upward from the current working
# directory instead. Confirm which location is intended.
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(find_dotenv())

# Azure AD application (client) id used for the OAuth2 flow.
client = dict(id = 'e2767e53-fc10-405d-9f95-b7ab04fd2f13')

# Office 365 / Azure AD endpoints used by the OAuth2 authorization flow.
urls = dict(redirect = 'http://localhost:8081',
            discovery = 'https://api.office.com/discovery/',
            auth_server = 'https://login.microsoftonline.com/common/oauth2/authorize',
            auth_token = 'https://login.microsoftonline.com/common/oauth2/token')
from os.path import join, dirname
from dotenv import load_dotenv, find_dotenv
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(find_dotenv())
client = dict(id = 'e2767e53-fc10-405d-9f95-b7ab04fd2f13')
urls = dict(redirect = 'http://localhost:8081',
discovery = 'https://api.office.com/discovery/',
auth_server = 'https://login.microsoftonline.com/common/oauth2/authorize',
auth_token = 'https://login.microsoftonline.com/common/oauth2/token')
|
none
| 1
| 2.121132
| 2
|
|
Demo/pdist/rcslib.py
|
deadsnakes/python2.4
| 0
|
6629534
|
"""RCS interface module.
Defines the class RCS, which represents a directory with rcs version
files and (possibly) corresponding work files.
"""
import fnmatch
import os
import regsub
import string
import tempfile
class RCS:

    """RCS interface class (local filesystem version).

    An instance of this class represents a directory with rcs version
    files and (possible) corresponding work files.

    Methods provide access to most rcs operations such as
    checkin/checkout, access to the rcs metadata (revisions, logs,
    branches etc.) as well as some filesystem operations such as
    listing all rcs version files.

    NOTE(review): this class uses Python 2-only constructs
    (``raise Exc, value`` statements, the ``string`` module functions,
    ``regsub``) and will not run under Python 3.

    XXX BUGS / PROBLEMS

    - The instance always represents the current directory so it's not
      very useful to have more than one instance around simultaneously

    """

    # Characters allowed in work file names; also used to vet revision
    # strings before they are interpolated into shell commands.
    okchars = string.ascii_letters + string.digits + '-_=+'

    def __init__(self):
        """Constructor."""
        pass

    def __del__(self):
        """Destructor."""
        pass

    # --- Informational methods about a single file/revision ---

    def log(self, name_rev, otherflags = ''):
        """Return the full log text for NAME_REV as a string.

        Optional OTHERFLAGS are passed to rlog.

        """
        f = self._open(name_rev, 'rlog ' + otherflags)
        data = f.read()
        status = self._closepipe(f)
        if status:
            # Abnormal exit: append the ('exit'/'killed'/..., code) pair.
            data = data + "%s: %s" % status
        elif data[-1] == '\n':
            # Strip the single trailing newline of rlog's output.
            data = data[:-1]
        return data

    def head(self, name_rev):
        """Return the head revision for NAME_REV"""
        dict = self.info(name_rev)
        return dict['head']

    def info(self, name_rev):
        """Return a dictionary of info (from rlog -h) for NAME_REV

        The dictionary's keys are the keywords that rlog prints
        (e.g. 'head') and its values are the corresponding data
        (e.g. '1.3').

        XXX symbolic names and locks are not returned

        """
        f = self._open(name_rev, 'rlog -h')
        dict = {}
        while 1:
            line = f.readline()
            if not line: break
            if line[0] == '\t':
                # Tab-indented lines hold sub-entries of the previous key.
                # XXX could be a lock or symbolic name
                # Anything else?
                continue
            i = string.find(line, ':')
            if i > 0:
                key, value = line[:i], string.strip(line[i+1:])
                dict[key] = value
        status = self._closepipe(f)
        if status:
            raise IOError, status
        return dict

    # --- Methods that change files ---

    def lock(self, name_rev):
        """Set an rcs lock on NAME_REV."""
        name, rev = self.checkfile(name_rev)
        cmd = "rcs -l%s %s" % (rev, name)
        return self._system(cmd)

    def unlock(self, name_rev):
        """Clear an rcs lock on NAME_REV."""
        name, rev = self.checkfile(name_rev)
        cmd = "rcs -u%s %s" % (rev, name)
        return self._system(cmd)

    def checkout(self, name_rev, withlock=0, otherflags=""):
        """Check out NAME_REV to its work file.

        If optional WITHLOCK is set, check out locked, else unlocked.

        The optional OTHERFLAGS is passed to co without
        interpretation.

        Any output from co goes to directly to stdout.

        """
        name, rev = self.checkfile(name_rev)
        if withlock: lockflag = "-l"
        else: lockflag = "-u"
        cmd = 'co %s%s %s %s' % (lockflag, rev, otherflags, name)
        return self._system(cmd)

    def checkin(self, name_rev, message=None, otherflags=""):
        """Check in NAME_REV from its work file.

        The optional MESSAGE argument becomes the checkin message
        (default "<none>" if None); or the file description if this is
        a new file.

        The optional OTHERFLAGS argument is passed to ci without
        interpretation.

        Any output from ci goes to directly to stdout.

        """
        name, rev = self._unmangle(name_rev)
        new = not self.isvalid(name)
        if not message: message = "<none>"
        if message and message[-1] != '\n':
            message = message + '\n'
        lockflag = "-u"
        if new:
            # New file: ci reads the initial description from a file,
            # passed via -t<filename>.
            f = tempfile.NamedTemporaryFile()
            f.write(message)
            f.flush()
            cmd = 'ci %s%s -t%s %s %s' % \
                  (lockflag, rev, f.name, otherflags, name)
        else:
            # Backslash-escape characters that are special inside the
            # double-quoted -m"..." shell argument.
            message = regsub.gsub('\([\\"$`]\)', '\\\\\\1', message)
            cmd = 'ci %s%s -m"%s" %s %s' % \
                  (lockflag, rev, message, otherflags, name)
        return self._system(cmd)

    # --- Exported support methods ---

    def listfiles(self, pat = None):
        """Return a list of all version files matching optional PATTERN."""
        # Version files may live in the current directory or in RCS/.
        files = os.listdir(os.curdir)
        files = filter(self._isrcs, files)
        if os.path.isdir('RCS'):
            files2 = os.listdir('RCS')
            files2 = filter(self._isrcs, files2)
            files = files + files2
        files = map(self.realname, files)
        return self._filter(files, pat)

    def isvalid(self, name):
        """Test whether NAME has a version file associated."""
        namev = self.rcsname(name)
        return (os.path.isfile(namev) or
                os.path.isfile(os.path.join('RCS', namev)))

    def rcsname(self, name):
        """Return the pathname of the version file for NAME.

        The argument can be a work file name or a version file name.
        If the version file does not exist, the name of the version
        file that would be created by "ci" is returned.

        """
        if self._isrcs(name): namev = name
        else: namev = name + ',v'
        if os.path.isfile(namev): return namev
        namev = os.path.join('RCS', os.path.basename(namev))
        if os.path.isfile(namev): return namev
        if os.path.isdir('RCS'):
            # NOTE(review): namev was already joined with 'RCS' above, so
            # this produces 'RCS/RCS/<name>,v' -- confirm this is intended.
            return os.path.join('RCS', namev)
        else:
            return namev

    def realname(self, namev):
        """Return the pathname of the work file for NAME.

        The argument can be a work file name or a version file name.
        If the work file does not exist, the name of the work file
        that would be created by "co" is returned.

        """
        if self._isrcs(namev): name = namev[:-2]
        else: name = namev
        if os.path.isfile(name): return name
        name = os.path.basename(name)
        return name

    def islocked(self, name_rev):
        """Test whether FILE (which must have a version file) is locked.

        XXX This does not tell you which revision number is locked and
        ignores any revision you may pass in (by virtue of using rlog
        -L -R).

        """
        f = self._open(name_rev, 'rlog -L -R')
        line = f.readline()
        status = self._closepipe(f)
        if status:
            raise IOError, status
        if not line: return None
        if line[-1] == '\n':
            line = line[:-1]
        return self.realname(name_rev) == self.realname(line)

    def checkfile(self, name_rev):
        """Normalize NAME_REV into a (NAME, REV) tuple.

        Raise an exception if there is no corresponding version file.

        """
        name, rev = self._unmangle(name_rev)
        if not self.isvalid(name):
            raise os.error, 'not an rcs file %r' % (name,)
        return name, rev

    # --- Internal methods ---

    def _open(self, name_rev, cmd = 'co -p', rflag = '-r'):
        """INTERNAL: open a read pipe to NAME_REV using optional COMMAND.

        Optional FLAG is used to indicate the revision (default -r).

        Default COMMAND is "co -p".

        Return a file object connected by a pipe to the command's
        output.

        """
        name, rev = self.checkfile(name_rev)
        namev = self.rcsname(name)
        if rev:
            cmd = cmd + ' ' + rflag + rev
        return os.popen("%s %r" % (cmd, namev))

    def _unmangle(self, name_rev):
        """INTERNAL: Normalize NAME_REV argument to (NAME, REV) tuple.

        Raise an exception if NAME contains invalid characters.

        A NAME_REV argument is either NAME string (implying REV='') or
        a tuple of the form (NAME, REV).

        """
        if type(name_rev) == type(''):
            name_rev = name, rev = name_rev, ''
        else:
            name, rev = name_rev
            # Vet the revision string: it is later interpolated into a
            # shell command line.
            for c in rev:
                if c not in self.okchars:
                    raise ValueError, "bad char in rev"
        return name_rev

    def _closepipe(self, f):
        """INTERNAL: Close PIPE and print its exit status if nonzero."""
        sts = f.close()
        if not sts: return None
        # Decode the 16-bit wait status: high byte is the exit code,
        # low byte encodes signal/stop information.
        detail, reason = divmod(sts, 256)
        if reason == 0: return 'exit', detail # Exit status
        signal = reason&0x7F
        if signal == 0x7F:
            # Process was stopped; the stop signal is in the high byte.
            code = 'stopped'
            signal = detail
        else:
            code = 'killed'
        if reason&0x80:
            # Core-dump bit set by the kernel.
            code = code + '(coredump)'
        return code, signal

    def _system(self, cmd):
        """INTERNAL: run COMMAND in a subshell.

        Standard input for the command is taken from /dev/null.

        Raise IOError when the exit status is not zero.

        Return whatever the calling method should return; normally
        None.

        A derived class may override this method and redefine it to
        capture stdout/stderr of the command and return it.

        """
        cmd = cmd + " </dev/null"
        sts = os.system(cmd)
        if sts: raise IOError, "command exit status %d" % sts

    def _filter(self, files, pat = None):
        """INTERNAL: Return a sorted copy of the given list of FILES.

        If a second PATTERN argument is given, only files matching it
        are kept.  No check for valid filenames is made.

        """
        if pat:
            def keep(name, pat = pat):
                return fnmatch.fnmatch(name, pat)
            files = filter(keep, files)
        else:
            files = files[:]
        files.sort()
        return files

    def _remove(self, fn):
        """INTERNAL: remove FILE without complaints."""
        try:
            os.unlink(fn)
        except os.error:
            pass

    def _isrcs(self, name):
        """INTERNAL: Test whether NAME ends in ',v'."""
        return name[-2:] == ',v'
|
"""RCS interface module.
Defines the class RCS, which represents a directory with rcs version
files and (possibly) corresponding work files.
"""
import fnmatch
import os
import regsub
import string
import tempfile
class RCS:
"""RCS interface class (local filesystem version).
An instance of this class represents a directory with rcs version
files and (possible) corresponding work files.
Methods provide access to most rcs operations such as
checkin/checkout, access to the rcs metadata (revisions, logs,
branches etc.) as well as some filesystem operations such as
listing all rcs version files.
XXX BUGS / PROBLEMS
- The instance always represents the current directory so it's not
very useful to have more than one instance around simultaneously
"""
# Characters allowed in work file names
okchars = string.ascii_letters + string.digits + '-_=+'
def __init__(self):
"""Constructor."""
pass
def __del__(self):
"""Destructor."""
pass
# --- Informational methods about a single file/revision ---
def log(self, name_rev, otherflags = ''):
"""Return the full log text for NAME_REV as a string.
Optional OTHERFLAGS are passed to rlog.
"""
f = self._open(name_rev, 'rlog ' + otherflags)
data = f.read()
status = self._closepipe(f)
if status:
data = data + "%s: %s" % status
elif data[-1] == '\n':
data = data[:-1]
return data
def head(self, name_rev):
"""Return the head revision for NAME_REV"""
dict = self.info(name_rev)
return dict['head']
def info(self, name_rev):
"""Return a dictionary of info (from rlog -h) for NAME_REV
The dictionary's keys are the keywords that rlog prints
(e.g. 'head' and its values are the corresponding data
(e.g. '1.3').
XXX symbolic names and locks are not returned
"""
f = self._open(name_rev, 'rlog -h')
dict = {}
while 1:
line = f.readline()
if not line: break
if line[0] == '\t':
# XXX could be a lock or symbolic name
# Anything else?
continue
i = string.find(line, ':')
if i > 0:
key, value = line[:i], string.strip(line[i+1:])
dict[key] = value
status = self._closepipe(f)
if status:
raise IOError, status
return dict
# --- Methods that change files ---
def lock(self, name_rev):
"""Set an rcs lock on NAME_REV."""
name, rev = self.checkfile(name_rev)
cmd = "rcs -l%s %s" % (rev, name)
return self._system(cmd)
def unlock(self, name_rev):
"""Clear an rcs lock on NAME_REV."""
name, rev = self.checkfile(name_rev)
cmd = "rcs -u%s %s" % (rev, name)
return self._system(cmd)
def checkout(self, name_rev, withlock=0, otherflags=""):
"""Check out NAME_REV to its work file.
If optional WITHLOCK is set, check out locked, else unlocked.
The optional OTHERFLAGS is passed to co without
interpretation.
Any output from co goes to directly to stdout.
"""
name, rev = self.checkfile(name_rev)
if withlock: lockflag = "-l"
else: lockflag = "-u"
cmd = 'co %s%s %s %s' % (lockflag, rev, otherflags, name)
return self._system(cmd)
def checkin(self, name_rev, message=None, otherflags=""):
"""Check in NAME_REV from its work file.
The optional MESSAGE argument becomes the checkin message
(default "<none>" if None); or the file description if this is
a new file.
The optional OTHERFLAGS argument is passed to ci without
interpretation.
Any output from ci goes to directly to stdout.
"""
name, rev = self._unmangle(name_rev)
new = not self.isvalid(name)
if not message: message = "<none>"
if message and message[-1] != '\n':
message = message + '\n'
lockflag = "-u"
if new:
f = tempfile.NamedTemporaryFile()
f.write(message)
f.flush()
cmd = 'ci %s%s -t%s %s %s' % \
(lockflag, rev, f.name, otherflags, name)
else:
message = regsub.gsub('\([\\"$`]\)', '\\\\\\1', message)
cmd = 'ci %s%s -m"%s" %s %s' % \
(lockflag, rev, message, otherflags, name)
return self._system(cmd)
# --- Exported support methods ---
def listfiles(self, pat = None):
"""Return a list of all version files matching optional PATTERN."""
files = os.listdir(os.curdir)
files = filter(self._isrcs, files)
if os.path.isdir('RCS'):
files2 = os.listdir('RCS')
files2 = filter(self._isrcs, files2)
files = files + files2
files = map(self.realname, files)
return self._filter(files, pat)
def isvalid(self, name):
"""Test whether NAME has a version file associated."""
namev = self.rcsname(name)
return (os.path.isfile(namev) or
os.path.isfile(os.path.join('RCS', namev)))
def rcsname(self, name):
"""Return the pathname of the version file for NAME.
The argument can be a work file name or a version file name.
If the version file does not exist, the name of the version
file that would be created by "ci" is returned.
"""
if self._isrcs(name): namev = name
else: namev = name + ',v'
if os.path.isfile(namev): return namev
namev = os.path.join('RCS', os.path.basename(namev))
if os.path.isfile(namev): return namev
if os.path.isdir('RCS'):
return os.path.join('RCS', namev)
else:
return namev
def realname(self, namev):
"""Return the pathname of the work file for NAME.
The argument can be a work file name or a version file name.
If the work file does not exist, the name of the work file
that would be created by "co" is returned.
"""
if self._isrcs(namev): name = namev[:-2]
else: name = namev
if os.path.isfile(name): return name
name = os.path.basename(name)
return name
def islocked(self, name_rev):
"""Test whether FILE (which must have a version file) is locked.
XXX This does not tell you which revision number is locked and
ignores any revision you may pass in (by virtue of using rlog
-L -R).
"""
f = self._open(name_rev, 'rlog -L -R')
line = f.readline()
status = self._closepipe(f)
if status:
raise IOError, status
if not line: return None
if line[-1] == '\n':
line = line[:-1]
return self.realname(name_rev) == self.realname(line)
def checkfile(self, name_rev):
"""Normalize NAME_REV into a (NAME, REV) tuple.
Raise an exception if there is no corresponding version file.
"""
name, rev = self._unmangle(name_rev)
if not self.isvalid(name):
raise os.error, 'not an rcs file %r' % (name,)
return name, rev
# --- Internal methods ---
def _open(self, name_rev, cmd = 'co -p', rflag = '-r'):
"""INTERNAL: open a read pipe to NAME_REV using optional COMMAND.
Optional FLAG is used to indicate the revision (default -r).
Default COMMAND is "co -p".
Return a file object connected by a pipe to the command's
output.
"""
name, rev = self.checkfile(name_rev)
namev = self.rcsname(name)
if rev:
cmd = cmd + ' ' + rflag + rev
return os.popen("%s %r" % (cmd, namev))
def _unmangle(self, name_rev):
"""INTERNAL: Normalize NAME_REV argument to (NAME, REV) tuple.
Raise an exception if NAME contains invalid characters.
A NAME_REV argument is either NAME string (implying REV='') or
a tuple of the form (NAME, REV).
"""
if type(name_rev) == type(''):
name_rev = name, rev = name_rev, ''
else:
name, rev = name_rev
for c in rev:
if c not in self.okchars:
raise ValueError, "bad char in rev"
return name_rev
def _closepipe(self, f):
"""INTERNAL: Close PIPE and print its exit status if nonzero."""
sts = f.close()
if not sts: return None
detail, reason = divmod(sts, 256)
if reason == 0: return 'exit', detail # Exit status
signal = reason&0x7F
if signal == 0x7F:
code = 'stopped'
signal = detail
else:
code = 'killed'
if reason&0x80:
code = code + '(coredump)'
return code, signal
def _system(self, cmd):
"""INTERNAL: run COMMAND in a subshell.
Standard input for the command is taken from /dev/null.
Raise IOError when the exit status is not zero.
Return whatever the calling method should return; normally
None.
A derived class may override this method and redefine it to
capture stdout/stderr of the command and return it.
"""
cmd = cmd + " </dev/null"
sts = os.system(cmd)
if sts: raise IOError, "command exit status %d" % sts
def _filter(self, files, pat = None):
"""INTERNAL: Return a sorted copy of the given list of FILES.
If a second PATTERN argument is given, only files matching it
are kept. No check for valid filenames is made.
"""
if pat:
def keep(name, pat = pat):
return fnmatch.fnmatch(name, pat)
files = filter(keep, files)
else:
files = files[:]
files.sort()
return files
def _remove(self, fn):
"""INTERNAL: remove FILE without complaints."""
try:
os.unlink(fn)
except os.error:
pass
def _isrcs(self, name):
"""INTERNAL: Test whether NAME ends in ',v'."""
return name[-2:] == ',v'
|
en
| 0.84275
|
RCS interface module. Defines the class RCS, which represents a directory with rcs version files and (possibly) corresponding work files. RCS interface class (local filesystem version). An instance of this class represents a directory with rcs version files and (possible) corresponding work files. Methods provide access to most rcs operations such as checkin/checkout, access to the rcs metadata (revisions, logs, branches etc.) as well as some filesystem operations such as listing all rcs version files. XXX BUGS / PROBLEMS - The instance always represents the current directory so it's not very useful to have more than one instance around simultaneously # Characters allowed in work file names Constructor. Destructor. # --- Informational methods about a single file/revision --- Return the full log text for NAME_REV as a string. Optional OTHERFLAGS are passed to rlog. Return the head revision for NAME_REV Return a dictionary of info (from rlog -h) for NAME_REV The dictionary's keys are the keywords that rlog prints (e.g. 'head' and its values are the corresponding data (e.g. '1.3'). XXX symbolic names and locks are not returned # XXX could be a lock or symbolic name # Anything else? # --- Methods that change files --- Set an rcs lock on NAME_REV. Clear an rcs lock on NAME_REV. Check out NAME_REV to its work file. If optional WITHLOCK is set, check out locked, else unlocked. The optional OTHERFLAGS is passed to co without interpretation. Any output from co goes to directly to stdout. Check in NAME_REV from its work file. The optional MESSAGE argument becomes the checkin message (default "<none>" if None); or the file description if this is a new file. The optional OTHERFLAGS argument is passed to ci without interpretation. Any output from ci goes to directly to stdout. # --- Exported support methods --- Return a list of all version files matching optional PATTERN. Test whether NAME has a version file associated. Return the pathname of the version file for NAME. 
The argument can be a work file name or a version file name. If the version file does not exist, the name of the version file that would be created by "ci" is returned. Return the pathname of the work file for NAME. The argument can be a work file name or a version file name. If the work file does not exist, the name of the work file that would be created by "co" is returned. Test whether FILE (which must have a version file) is locked. XXX This does not tell you which revision number is locked and ignores any revision you may pass in (by virtue of using rlog -L -R). Normalize NAME_REV into a (NAME, REV) tuple. Raise an exception if there is no corresponding version file. # --- Internal methods --- INTERNAL: open a read pipe to NAME_REV using optional COMMAND. Optional FLAG is used to indicate the revision (default -r). Default COMMAND is "co -p". Return a file object connected by a pipe to the command's output. INTERNAL: Normalize NAME_REV argument to (NAME, REV) tuple. Raise an exception if NAME contains invalid characters. A NAME_REV argument is either NAME string (implying REV='') or a tuple of the form (NAME, REV). INTERNAL: Close PIPE and print its exit status if nonzero. # Exit status INTERNAL: run COMMAND in a subshell. Standard input for the command is taken from /dev/null. Raise IOError when the exit status is not zero. Return whatever the calling method should return; normally None. A derived class may override this method and redefine it to capture stdout/stderr of the command and return it. INTERNAL: Return a sorted copy of the given list of FILES. If a second PATTERN argument is given, only files matching it are kept. No check for valid filenames is made. INTERNAL: remove FILE without complaints. INTERNAL: Test whether NAME ends in ',v'.
| 2.78817
| 3
|
tests/sentry/utils/test_linksign.py
|
AlexWayfer/sentry
| 4
|
6629535
|
<filename>tests/sentry/utils/test_linksign.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.test.client import RequestFactory
from sentry.testutils import TestCase
from sentry.utils import linksign
class LinkSignTestCase(TestCase):
    def test_link_signing(self):
        """Round-trip a signed link and reject tampered variants."""
        rf = RequestFactory()

        url = linksign.generate_signed_link(self.user, 'sentry')
        assert url.startswith('http://')

        # A request for the exact signed path resolves back to the
        # originating user.
        req = rf.get('/' + url.split('/', 3)[-1])
        signed_user = linksign.process_signature(req)
        assert signed_user
        assert signed_user.id == self.user.id

        # Tampering with the path prefix invalidates the signature.
        req = rf.get('/what' + url.split('/', 3)[-1])
        signed_user = linksign.process_signature(req)
        assert signed_user is None

        # Appending garbage after the signature invalidates it too.
        req = rf.get('/' + url.split('/', 3)[-1] + 'garbage')
        signed_user = linksign.process_signature(req)
        assert signed_user is None

        # Serving the same path from a different server name must fail
        # (the signature is expected to be host-bound).
        rf.defaults['SERVER_NAME'] = 'something-else'
        req = rf.get('/' + url.split('/', 3)[-1])
        signed_user = linksign.process_signature(req)
        assert signed_user is None
|
<filename>tests/sentry/utils/test_linksign.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.test.client import RequestFactory
from sentry.testutils import TestCase
from sentry.utils import linksign
class LinkSignTestCase(TestCase):
def test_link_signing(self):
rf = RequestFactory()
url = linksign.generate_signed_link(self.user, 'sentry')
assert url.startswith('http://')
req = rf.get('/' + url.split('/', 3)[-1])
signed_user = linksign.process_signature(req)
assert signed_user
assert signed_user.id == self.user.id
req = rf.get('/what' + url.split('/', 3)[-1])
signed_user = linksign.process_signature(req)
assert signed_user is None
req = rf.get('/' + url.split('/', 3)[-1] + 'garbage')
signed_user = linksign.process_signature(req)
assert signed_user is None
rf.defaults['SERVER_NAME'] = 'something-else'
req = rf.get('/' + url.split('/', 3)[-1])
signed_user = linksign.process_signature(req)
assert signed_user is None
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.208433
| 2
|
svm-classifier.py
|
ManuelFrieder/FHNW_SentimentAnalysis
| 0
|
6629536
|
import pandas
import preprocessing as pp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn.pipeline import Pipeline
# define language (de/fr/en) or "" for any
language = ""
preprocessing = pp.PreProcessData(language)
# file paths
corpusFile = '/path/to/trainingData.csv'
testFile = '/path/to/testData.csv'
outputFile = '/path/to/destination/file_'+language+'.csv'
# read files =>check encoding and delimiter!
allTrainingData = pandas.read_csv(corpusFile, encoding='UTF-8', delimiter=';')
allTestData = pandas.read_csv(testFile, encoding='UTF-8', delimiter=';', index_col=0)
# language filter
trainingData = preprocessing.langSelection(allTrainingData,language)
testData = preprocessing.langSelection(allTestData,language)
#Generating the training and testing vectors
def prepareTrainingData(trainingData):
X = []
y = []
ppTrainingData=preprocessing.processData(trainingData)
for wordlist,sentiment,index in ppTrainingData:
y.append(0 if (sentiment=='negative') else 1)
X.append(' '.join(wordlist))
return X, y
# training
X_train, y_train = prepareTrainingData(trainingData)
vec = TfidfVectorizer(min_df=5, max_df=0.95, sublinear_tf = True, use_idf = True, ngram_range=(1, 2))
svm_clf =svm.LinearSVC(C=0.1)
vec_clf = Pipeline([('vectorizer', vec), ('pac', svm_clf)])
vec_clf.fit(X_train,y_train)
# classify
for index, row in testData.iterrows():
ppTestData = ' '.join(preprocessing.processComment(row.text))
testData.at[index,'classified'] = ('negative' if (vec_clf.predict([ppTestData])==0) else 'positive')
testData.to_csv(outputFile, sep=';', encoding='latin1')
|
import pandas
import preprocessing as pp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn.pipeline import Pipeline
# define language (de/fr/en) or "" for any
language = ""
preprocessing = pp.PreProcessData(language)
# file paths
corpusFile = '/path/to/trainingData.csv'
testFile = '/path/to/testData.csv'
outputFile = '/path/to/destination/file_'+language+'.csv'
# read files =>check encoding and delimiter!
allTrainingData = pandas.read_csv(corpusFile, encoding='UTF-8', delimiter=';')
allTestData = pandas.read_csv(testFile, encoding='UTF-8', delimiter=';', index_col=0)
# language filter
trainingData = preprocessing.langSelection(allTrainingData,language)
testData = preprocessing.langSelection(allTestData,language)
#Generating the training and testing vectors
def prepareTrainingData(trainingData):
X = []
y = []
ppTrainingData=preprocessing.processData(trainingData)
for wordlist,sentiment,index in ppTrainingData:
y.append(0 if (sentiment=='negative') else 1)
X.append(' '.join(wordlist))
return X, y
# training
X_train, y_train = prepareTrainingData(trainingData)
vec = TfidfVectorizer(min_df=5, max_df=0.95, sublinear_tf = True, use_idf = True, ngram_range=(1, 2))
svm_clf =svm.LinearSVC(C=0.1)
vec_clf = Pipeline([('vectorizer', vec), ('pac', svm_clf)])
vec_clf.fit(X_train,y_train)
# classify
for index, row in testData.iterrows():
ppTestData = ' '.join(preprocessing.processComment(row.text))
testData.at[index,'classified'] = ('negative' if (vec_clf.predict([ppTestData])==0) else 'positive')
testData.to_csv(outputFile, sep=';', encoding='latin1')
|
en
| 0.788325
|
# define language (de/fr/en) or "" for any # file paths # read files =>check encoding and delimiter! # language filter #Generating the training and testing vectors # training # classify
| 3.336659
| 3
|
src/electionguardtest/ballot_factory.py
|
erikschlegel/electionguard-python
| 0
|
6629537
|
<reponame>erikschlegel/electionguard-python<gh_stars>0
import os
from jsons import KEY_TRANSFORMER_SNAKECASE, loads
from random import Random
from typing import cast, TypeVar, Callable, List, Tuple
from hypothesis.strategies import (
composite,
booleans,
text,
uuids,
SearchStrategy,
)
from electionguard.ballot import (
PlaintextBallot,
PlaintextBallotContest,
PlaintextBallotSelection,
)
from electionguard.election import (
ContestDescription,
SelectionDescription,
InternalElectionDescription,
)
from electionguard.encrypt import selection_from
_T = TypeVar("_T")
_DrawType = Callable[[SearchStrategy[_T]], _T]
here = os.path.abspath(os.path.dirname(__file__))
class BallotFactory(object):
simple_ballot_filename = "ballot_in_simple.json"
simple_ballots_filename = "plaintext_ballots_simple.json"
def get_random_selection_from(
self,
description: SelectionDescription,
random_source: Random,
is_placeholder=False,
) -> PlaintextBallotSelection:
selected = bool(random_source.randint(0, 1))
return selection_from(description, is_placeholder, selected)
def get_random_contest_from(
self,
description: ContestDescription,
random: Random,
suppress_validity_check=False,
with_trues=False,
) -> PlaintextBallotContest:
"""
Get a randomly filled contest for the given description that
may be undervoted and may include explicitly false votes.
Since this is only used for testing, the random number generator
(`random`) must be provided to make this function deterministic.
"""
if not suppress_validity_check:
assert description.is_valid(), "the contest description must be valid"
selections: List[PlaintextBallotSelection] = list()
voted = 0
for selection_description in description.ballot_selections:
selection = self.get_random_selection_from(selection_description, random)
# the caller may force a true value
voted += selection.to_int()
if voted <= 1 and selection.to_int() and with_trues:
selections.append(selection)
continue
# Possibly append the true selection, indicating an undervote
if voted <= description.number_elected and bool(random.randint(0, 1)) == 1:
selections.append(selection)
# Possibly append the false selections as well, indicating some choices
# may be explicitly false
elif bool(random.randint(0, 1)) == 1:
selections.append(selection_from(selection_description))
return PlaintextBallotContest(description.object_id, selections)
def get_fake_ballot(
self,
election: InternalElectionDescription,
ballot_id: str = None,
with_trues=True,
) -> PlaintextBallot:
"""
Get a single Fake Ballot object that is manually constructed with default vaules
"""
if ballot_id is None:
ballot_id = "some-unique-ballot-id-123"
contests: List[PlaintextBallotContest] = []
for contest in election.get_contests_for(election.ballot_styles[0].object_id):
contests.append(
self.get_random_contest_from(contest, Random(), with_trues=with_trues)
)
fake_ballot = PlaintextBallot(
ballot_id, election.ballot_styles[0].object_id, contests
)
return fake_ballot
def get_simple_ballot_from_file(self) -> PlaintextBallot:
return self._get_ballot_from_file(self.simple_ballot_filename)
def get_simple_ballots_from_file(self) -> List[PlaintextBallot]:
return self._get_ballots_from_file(self.simple_ballots_filename)
def _get_ballot_from_file(self, filename: str) -> PlaintextBallot:
with open(os.path.join(here, "data", filename), "r") as subject:
data = subject.read()
target = PlaintextBallot.from_json(data)
return target
def _get_ballots_from_file(self, filename: str) -> List[PlaintextBallot]:
with open(os.path.join(here, "data", filename), "r") as subject:
data = subject.read()
target = cast(
List[PlaintextBallot],
loads(
data,
List[PlaintextBallot],
key_transformer=KEY_TRANSFORMER_SNAKECASE,
),
)
return target
@composite
def get_selection_well_formed(
draw: _DrawType, uuids=uuids(), bools=booleans(), text=text()
) -> Tuple[str, PlaintextBallotSelection]:
use_none = draw(bools)
if use_none:
extra_data = None
else:
extra_data = draw(text)
object_id = f"selection-{draw(uuids)}"
return (
object_id,
PlaintextBallotSelection(object_id, f"{draw(bools)}", draw(bools), extra_data),
)
@composite
def get_selection_poorly_formed(
draw: _DrawType, uuids=uuids(), bools=booleans(), text=text()
) -> Tuple[str, PlaintextBallotSelection]:
use_none = draw(bools)
if use_none:
extra_data = None
else:
extra_data = draw(text)
object_id = f"selection-{draw(uuids)}"
return (
object_id,
PlaintextBallotSelection(object_id, f"{draw(text)}", draw(bools), extra_data),
)
|
import os
from jsons import KEY_TRANSFORMER_SNAKECASE, loads
from random import Random
from typing import cast, TypeVar, Callable, List, Tuple
from hypothesis.strategies import (
composite,
booleans,
text,
uuids,
SearchStrategy,
)
from electionguard.ballot import (
PlaintextBallot,
PlaintextBallotContest,
PlaintextBallotSelection,
)
from electionguard.election import (
ContestDescription,
SelectionDescription,
InternalElectionDescription,
)
from electionguard.encrypt import selection_from
_T = TypeVar("_T")
_DrawType = Callable[[SearchStrategy[_T]], _T]
here = os.path.abspath(os.path.dirname(__file__))
class BallotFactory(object):
simple_ballot_filename = "ballot_in_simple.json"
simple_ballots_filename = "plaintext_ballots_simple.json"
def get_random_selection_from(
self,
description: SelectionDescription,
random_source: Random,
is_placeholder=False,
) -> PlaintextBallotSelection:
selected = bool(random_source.randint(0, 1))
return selection_from(description, is_placeholder, selected)
def get_random_contest_from(
self,
description: ContestDescription,
random: Random,
suppress_validity_check=False,
with_trues=False,
) -> PlaintextBallotContest:
"""
Get a randomly filled contest for the given description that
may be undervoted and may include explicitly false votes.
Since this is only used for testing, the random number generator
(`random`) must be provided to make this function deterministic.
"""
if not suppress_validity_check:
assert description.is_valid(), "the contest description must be valid"
selections: List[PlaintextBallotSelection] = list()
voted = 0
for selection_description in description.ballot_selections:
selection = self.get_random_selection_from(selection_description, random)
# the caller may force a true value
voted += selection.to_int()
if voted <= 1 and selection.to_int() and with_trues:
selections.append(selection)
continue
# Possibly append the true selection, indicating an undervote
if voted <= description.number_elected and bool(random.randint(0, 1)) == 1:
selections.append(selection)
# Possibly append the false selections as well, indicating some choices
# may be explicitly false
elif bool(random.randint(0, 1)) == 1:
selections.append(selection_from(selection_description))
return PlaintextBallotContest(description.object_id, selections)
def get_fake_ballot(
self,
election: InternalElectionDescription,
ballot_id: str = None,
with_trues=True,
) -> PlaintextBallot:
"""
Get a single Fake Ballot object that is manually constructed with default vaules
"""
if ballot_id is None:
ballot_id = "some-unique-ballot-id-123"
contests: List[PlaintextBallotContest] = []
for contest in election.get_contests_for(election.ballot_styles[0].object_id):
contests.append(
self.get_random_contest_from(contest, Random(), with_trues=with_trues)
)
fake_ballot = PlaintextBallot(
ballot_id, election.ballot_styles[0].object_id, contests
)
return fake_ballot
def get_simple_ballot_from_file(self) -> PlaintextBallot:
return self._get_ballot_from_file(self.simple_ballot_filename)
def get_simple_ballots_from_file(self) -> List[PlaintextBallot]:
return self._get_ballots_from_file(self.simple_ballots_filename)
def _get_ballot_from_file(self, filename: str) -> PlaintextBallot:
with open(os.path.join(here, "data", filename), "r") as subject:
data = subject.read()
target = PlaintextBallot.from_json(data)
return target
def _get_ballots_from_file(self, filename: str) -> List[PlaintextBallot]:
with open(os.path.join(here, "data", filename), "r") as subject:
data = subject.read()
target = cast(
List[PlaintextBallot],
loads(
data,
List[PlaintextBallot],
key_transformer=KEY_TRANSFORMER_SNAKECASE,
),
)
return target
@composite
def get_selection_well_formed(
draw: _DrawType, uuids=uuids(), bools=booleans(), text=text()
) -> Tuple[str, PlaintextBallotSelection]:
use_none = draw(bools)
if use_none:
extra_data = None
else:
extra_data = draw(text)
object_id = f"selection-{draw(uuids)}"
return (
object_id,
PlaintextBallotSelection(object_id, f"{draw(bools)}", draw(bools), extra_data),
)
@composite
def get_selection_poorly_formed(
draw: _DrawType, uuids=uuids(), bools=booleans(), text=text()
) -> Tuple[str, PlaintextBallotSelection]:
use_none = draw(bools)
if use_none:
extra_data = None
else:
extra_data = draw(text)
object_id = f"selection-{draw(uuids)}"
return (
object_id,
PlaintextBallotSelection(object_id, f"{draw(text)}", draw(bools), extra_data),
)
|
en
| 0.865317
|
Get a randomly filled contest for the given description that may be undervoted and may include explicitly false votes. Since this is only used for testing, the random number generator (`random`) must be provided to make this function deterministic. # the caller may force a true value # Possibly append the true selection, indicating an undervote # Possibly append the false selections as well, indicating some choices # may be explicitly false Get a single Fake Ballot object that is manually constructed with default vaules
| 2.644909
| 3
|
examples/estimator/classifier/LinearSVC/ruby/basics.py
|
mathewdgardner/sklearn-porter
| 1
|
6629538
|
# -*- coding: utf-8 -*-
from sklearn import svm
from sklearn.datasets import load_iris
from sklearn_porter import Porter
iris_data = load_iris()
X = iris_data.data
y = iris_data.target
clf = svm.LinearSVC(C=1., random_state=0)
clf.fit(X, y)
porter = Porter(clf, language='ruby')
output = porter.export()
print(output)
"""
class LinearSVC
def initialize (coefficients, intercepts)
@coefficients = coefficients
@intercepts = intercepts
end
def predict (features)
classVal = -1.0/0.0
classIdx = -1
for i in 0 ... @intercepts.length
prob = 0
for j in 0 ... @coefficients[i].length
prob += @coefficients[i][j] * features[j].to_f
end
if prob + @intercepts[i] > classVal
classVal = prob + @intercepts[i]
classIdx = i
end
end
return classIdx
end
end
if ARGV.length == 4
# Features:
features = ARGV.collect { |i| i.to_f }
# Parameters:
coefficients = [[0.18424209458473811, 0.45123000025163923, -0.80794587716737576, -0.45071660033253858], [0.052877455748516447, -0.89214995228605254, 0.40398084459610972, -0.9376821661447452], [-0.85070784319293802, -0.98670214922204336, 1.381010448739191, 1.8654095662423917]]
intercepts = [0.10956266406702335, 1.6636707776739579, -1.7096109416521363]
# Prediction:
clf = LinearSVC.new coefficients, intercepts
estimation = clf.predict features
puts estimation
end
"""
|
# -*- coding: utf-8 -*-
from sklearn import svm
from sklearn.datasets import load_iris
from sklearn_porter import Porter
iris_data = load_iris()
X = iris_data.data
y = iris_data.target
clf = svm.LinearSVC(C=1., random_state=0)
clf.fit(X, y)
porter = Porter(clf, language='ruby')
output = porter.export()
print(output)
"""
class LinearSVC
def initialize (coefficients, intercepts)
@coefficients = coefficients
@intercepts = intercepts
end
def predict (features)
classVal = -1.0/0.0
classIdx = -1
for i in 0 ... @intercepts.length
prob = 0
for j in 0 ... @coefficients[i].length
prob += @coefficients[i][j] * features[j].to_f
end
if prob + @intercepts[i] > classVal
classVal = prob + @intercepts[i]
classIdx = i
end
end
return classIdx
end
end
if ARGV.length == 4
# Features:
features = ARGV.collect { |i| i.to_f }
# Parameters:
coefficients = [[0.18424209458473811, 0.45123000025163923, -0.80794587716737576, -0.45071660033253858], [0.052877455748516447, -0.89214995228605254, 0.40398084459610972, -0.9376821661447452], [-0.85070784319293802, -0.98670214922204336, 1.381010448739191, 1.8654095662423917]]
intercepts = [0.10956266406702335, 1.6636707776739579, -1.7096109416521363]
# Prediction:
clf = LinearSVC.new coefficients, intercepts
estimation = clf.predict features
puts estimation
end
"""
|
en
| 0.510151
|
# -*- coding: utf-8 -*- class LinearSVC def initialize (coefficients, intercepts) @coefficients = coefficients @intercepts = intercepts end def predict (features) classVal = -1.0/0.0 classIdx = -1 for i in 0 ... @intercepts.length prob = 0 for j in 0 ... @coefficients[i].length prob += @coefficients[i][j] * features[j].to_f end if prob + @intercepts[i] > classVal classVal = prob + @intercepts[i] classIdx = i end end return classIdx end end if ARGV.length == 4 # Features: features = ARGV.collect { |i| i.to_f } # Parameters: coefficients = [[0.18424209458473811, 0.45123000025163923, -0.80794587716737576, -0.45071660033253858], [0.052877455748516447, -0.89214995228605254, 0.40398084459610972, -0.9376821661447452], [-0.85070784319293802, -0.98670214922204336, 1.381010448739191, 1.8654095662423917]] intercepts = [0.10956266406702335, 1.6636707776739579, -1.7096109416521363] # Prediction: clf = LinearSVC.new coefficients, intercepts estimation = clf.predict features puts estimation end
| 2.362404
| 2
|
modules/tests/inv/warehouse_search.py
|
sungkomp/sambro
| 1
|
6629539
|
<reponame>sungkomp/sambro
# -*- coding: utf-8 -*-
""" Sahana Eden Warehouse Search Module Automated Tests
@copyright: 2011-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
from tests.web2unittest import SeleniumUnitTest
import functools
def _kwsearch(instance, column, items, keyword):
for item in [instance.dt_data_item(i, column) for i in xrange(1, items + 1)]:
if not (keyword.strip().lower() in item.strip().lower()):
return False
return True
class SearchWarehouse(SeleniumUnitTest):
def setUp(self):
super(SeleniumUnitTest, self).setUp()
print "\n"
self.login(account="admin", nexturl="inv/warehouse/search?clear_opts=1")
def test_warehouse_01_search_name(self):
"""
@case: warehouse_01
@description: Search Warehouse - Simple Search
"""
w = current.s3db["inv_warehouse"]
key="na"
dbRowCount = current.db( (w.deleted != "T") & (w.name.like("%"+ key + "%")) ).count()
self.search(self.search.advanced_form,
True,
({
"id": "warehouse_search_simple",
"value": key
},), dbRowCount,
manual_check=functools.partial(_kwsearch, keyword=key, items=dbRowCount, column=2)
)
def test_warehouse_02_search_by_Organization(self):
"""
@case: warehouse_02
@description: Search Warehouse - Advanced Search by Organization
"""
w = current.s3db["inv_warehouse"]
o = current.s3db["org_organisation"]
key="Timor-Leste Red Cross Society (Cruz Vermelha de Timor-Leste)"
dbRowCount = current.db((w.deleted != "T") & (w.organisation_id == o.id) & (o.name == key)).count()
self.search(self.search.advanced_form,
True,
({
"name": "warehouse_search_org",
"label": key,
"value": True
},), dbRowCount,
manual_check=functools.partial(_kwsearch, keyword=key, items=dbRowCount, column=3)
)
def test_warehouse_03_search_by_District(self):
"""
@case: warehouse_03
@description: Search Warehouse - Advanced Search by District
"""
w = current.s3db["inv_warehouse"]
l = current.s3db["gis_location"]
key="Viqueque"
dbRowCount = current.db((w.deleted != "T") & (w.location_id == l.id) & (l.L2 == key)).count()
self.search(self.search.advanced_form,
True,
({
"name": "warehouse_search_location",
"label": key,
"value": True
},), dbRowCount,
manual_check=functools.partial(_kwsearch, keyword=key, items=dbRowCount, column=5)
)
|
# -*- coding: utf-8 -*-
""" Sahana Eden Warehouse Search Module Automated Tests
@copyright: 2011-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
from tests.web2unittest import SeleniumUnitTest
import functools
def _kwsearch(instance, column, items, keyword):
for item in [instance.dt_data_item(i, column) for i in xrange(1, items + 1)]:
if not (keyword.strip().lower() in item.strip().lower()):
return False
return True
class SearchWarehouse(SeleniumUnitTest):
def setUp(self):
super(SeleniumUnitTest, self).setUp()
print "\n"
self.login(account="admin", nexturl="inv/warehouse/search?clear_opts=1")
def test_warehouse_01_search_name(self):
"""
@case: warehouse_01
@description: Search Warehouse - Simple Search
"""
w = current.s3db["inv_warehouse"]
key="na"
dbRowCount = current.db( (w.deleted != "T") & (w.name.like("%"+ key + "%")) ).count()
self.search(self.search.advanced_form,
True,
({
"id": "warehouse_search_simple",
"value": key
},), dbRowCount,
manual_check=functools.partial(_kwsearch, keyword=key, items=dbRowCount, column=2)
)
def test_warehouse_02_search_by_Organization(self):
"""
@case: warehouse_02
@description: Search Warehouse - Advanced Search by Organization
"""
w = current.s3db["inv_warehouse"]
o = current.s3db["org_organisation"]
key="Timor-Leste Red Cross Society (Cruz Vermelha de Timor-Leste)"
dbRowCount = current.db((w.deleted != "T") & (w.organisation_id == o.id) & (o.name == key)).count()
self.search(self.search.advanced_form,
True,
({
"name": "warehouse_search_org",
"label": key,
"value": True
},), dbRowCount,
manual_check=functools.partial(_kwsearch, keyword=key, items=dbRowCount, column=3)
)
def test_warehouse_03_search_by_District(self):
"""
@case: warehouse_03
@description: Search Warehouse - Advanced Search by District
"""
w = current.s3db["inv_warehouse"]
l = current.s3db["gis_location"]
key="Viqueque"
dbRowCount = current.db((w.deleted != "T") & (w.location_id == l.id) & (l.L2 == key)).count()
self.search(self.search.advanced_form,
True,
({
"name": "warehouse_search_location",
"label": key,
"value": True
},), dbRowCount,
manual_check=functools.partial(_kwsearch, keyword=key, items=dbRowCount, column=5)
)
|
en
| 0.733034
|
# -*- coding: utf-8 -*- Sahana Eden Warehouse Search Module Automated Tests
@copyright: 2011-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE. @case: warehouse_01
@description: Search Warehouse - Simple Search @case: warehouse_02
@description: Search Warehouse - Advanced Search by Organization @case: warehouse_03
@description: Search Warehouse - Advanced Search by District
| 1.790652
| 2
|
root/scripts/setup/01_0_run_job.py
|
DragonCrafted87/docker-alpine-speedtest-mqtt-publisher
| 0
|
6629540
|
<reponame>DragonCrafted87/docker-alpine-speedtest-mqtt-publisher
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from datetime import datetime
from json import dumps as dump_to_json
from math import ceil as ceiling
# System Imports
from os import getenv
from pathlib import PurePath
from statistics import fmean as mean
from statistics import median
from time import sleep
from time import time
from paho.mqtt.client import MQTTv311
from paho.mqtt.publish import single as single_mqtt_message
from ping3 import ping
# Local Imports
from python_logger import create_logger
# 3rd Party
from requests import Session
from requests import get as requests_get
from requests import post as requests_post
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
LOGGER = create_logger(PurePath(__file__).stem)
SLEEP_BETWEEN_MEASURMENTS = 5
MEASUREMENT_SIZES = [
100000,
1000000,
10000000,
25000000,
50000000,
100000000,
250000000,
500000000,
1000000000,
]
CLOUDFLARE_ADAPTER = HTTPAdapter(max_retries=3)
SESSION = Session()
SESSION.mount("https://speed.cloudflare.com", CLOUDFLARE_ADAPTER)
MQTT_SERVER = getenv("MQTT_SERVER", "localhost")
MQTT_SERVER_PORT = int(getenv("MQTT_SERVER_PORT", "1883"))
MQTT_USERNAME = getenv("MQTT_USERNAME", None)
MQTT_PASSWORD = getenv("MQTT_PASSWORD", None)
AUTH_DICT = None
if MQTT_USERNAME and MQTT_PASSWORD:
AUTH_DICT = {"username": MQTT_USERNAME, "password": <PASSWORD>}
def download(bytes):
try:
start_time = time()
_ = requests_get(f"https://speed.cloudflare.com/__down?bytes={bytes}")
finish_time = time()
sleep(SLEEP_BETWEEN_MEASURMENTS)
duration = finish_time - start_time
measurement = (bytes / duration) / 100000
except ConnectionError:
measurement = 0
return measurement
def upload(bytes):
try:
upload_data = bytearray(bytes)
start_time = time()
_ = requests_post(f"https://speed.cloudflare.com/__up", data=upload_data)
finish_time = time()
sleep(SLEEP_BETWEEN_MEASURMENTS)
duration = finish_time - start_time
measurement = (bytes / duration) / 100000
except ConnectionError:
measurement = 0
return measurement
def run_speed_test(iterations_list, operation):
measurements = []
for index in range(len(iterations_list)):
size = MEASUREMENT_SIZES[index]
iterations = iterations_list[index]
for _ in range(iterations):
measurements.append(operation(size))
return measurements
def calculate_ping():
ping_count = int(getenv("PING_COUNT", "20"))
ping_measurements = []
for _ in range(ping_count):
value = None
while not value:
value = ping("cloudflare.com", unit="ms")
ping_measurements.append(value)
median_ping = median(ping_measurements)
ping_jitter = mean(
[
abs(ping_measurements[index] - ping_measurements[index - 1])
for index in range(1, len(ping_measurements))
]
)
return (median_ping, ping_jitter)
def calculate_percentile(data, percentile):
sorted_data = sorted(data)
n = len(sorted_data)
p = n * percentile / 100
if p.is_integer():
return_value = sorted_data[int(p)]
else:
p = int(p) - 1
return_value = (sorted_data[p] + sorted_data[p + 1]) / 2
return return_value
def calculate_download_percentile(percentile):
download_iterations = list(
map(int, getenv("DOWNLOAD_ITERATIONS", "10,8,6,4,2").split(","))
)
download_measurements = run_speed_test(download_iterations, download)
LOGGER.info(f"Download {download_measurements}")
return calculate_percentile(download_measurements, percentile)
def calculate_upload_percentile(percentile):
upload_iterations = list(
map(int, getenv("UPLOAD_ITERATIONS", "8,6,4,2").split(","))
)
upload_measurements = run_speed_test(upload_iterations, upload)
LOGGER.info(f"Upload {upload_measurements}")
return calculate_percentile(upload_measurements, percentile)
def send_mqtt_message(topic, payload_value):
LOGGER.info(f"MQTT {topic} payload {payload_value}")
single_mqtt_message(
topic,
payload=payload_value,
qos=0,
retain=True,
hostname=MQTT_SERVER,
port=MQTT_SERVER_PORT,
client_id="",
keepalive=60,
will=None,
auth=AUTH_DICT,
tls=None,
protocol=MQTTv311,
transport="tcp",
)
def main():
percentile = int(getenv("PERCENTILE", "90"))
median_ping, ping_jitter = calculate_ping()
download_percentile = calculate_download_percentile(percentile)
upload_percentile = calculate_upload_percentile(percentile)
LOGGER.info(f"Ping {median_ping}")
LOGGER.info(f"Jitter {ping_jitter}")
LOGGER.info(f"Download Percentile {download_percentile}")
LOGGER.info(f"Upload Percentile {upload_percentile}")
time_string_payload = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
json_payload = dump_to_json(
{
"median_ping": median_ping,
"ping_jitter": ping_jitter,
"download_mbps": download_percentile,
"upload_mbps": upload_percentile,
}
)
send_mqtt_message("speedtest", time_string_payload)
send_mqtt_message("speedtest/attributes", json_payload)
if __name__ == "__main__":
main()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from datetime import datetime
from json import dumps as dump_to_json
from math import ceil as ceiling
# System Imports
from os import getenv
from pathlib import PurePath
from statistics import fmean as mean
from statistics import median
from time import sleep
from time import time
from paho.mqtt.client import MQTTv311
from paho.mqtt.publish import single as single_mqtt_message
from ping3 import ping
# Local Imports
from python_logger import create_logger
# 3rd Party
from requests import Session
from requests import get as requests_get
from requests import post as requests_post
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
LOGGER = create_logger(PurePath(__file__).stem)
SLEEP_BETWEEN_MEASURMENTS = 5
MEASUREMENT_SIZES = [
100000,
1000000,
10000000,
25000000,
50000000,
100000000,
250000000,
500000000,
1000000000,
]
CLOUDFLARE_ADAPTER = HTTPAdapter(max_retries=3)
SESSION = Session()
SESSION.mount("https://speed.cloudflare.com", CLOUDFLARE_ADAPTER)
MQTT_SERVER = getenv("MQTT_SERVER", "localhost")
MQTT_SERVER_PORT = int(getenv("MQTT_SERVER_PORT", "1883"))
MQTT_USERNAME = getenv("MQTT_USERNAME", None)
MQTT_PASSWORD = getenv("MQTT_PASSWORD", None)
AUTH_DICT = None
if MQTT_USERNAME and MQTT_PASSWORD:
AUTH_DICT = {"username": MQTT_USERNAME, "password": <PASSWORD>}
def download(bytes):
try:
start_time = time()
_ = requests_get(f"https://speed.cloudflare.com/__down?bytes={bytes}")
finish_time = time()
sleep(SLEEP_BETWEEN_MEASURMENTS)
duration = finish_time - start_time
measurement = (bytes / duration) / 100000
except ConnectionError:
measurement = 0
return measurement
def upload(bytes):
    """Time one upload of *bytes* zero-filled bytes to the Cloudflare endpoint.

    Returns the measured throughput as (bytes / seconds) / 100000, or 0 when
    the connection fails.
    """
    try:
        upload_data = bytearray(bytes)
        start_time = time()
        # Use the module-level SESSION so the mounted retry adapter applies
        # (requests_post bypassed it); also dropped the pointless f-prefix on
        # a URL with no placeholders.
        _ = SESSION.post("https://speed.cloudflare.com/__up", data=upload_data)
        finish_time = time()
        sleep(SLEEP_BETWEEN_MEASURMENTS)
        duration = finish_time - start_time
        # NOTE(review): same unit caveat as download() -- /100000 is not Mbit/s.
        measurement = (bytes / duration) / 100000
    except ConnectionError:
        measurement = 0
    return measurement
def run_speed_test(iterations_list, operation):
    """Run *operation* once per repetition for each payload size.

    Each entry of *iterations_list* gives the repetition count for the
    corresponding entry of MEASUREMENT_SIZES; all measurements are returned
    in a single flat list.
    """
    measurements = []
    for index, repetitions in enumerate(iterations_list):
        payload_size = MEASUREMENT_SIZES[index]
        measurements.extend(operation(payload_size) for _ in range(repetitions))
    return measurements
def calculate_ping():
    """Measure latency to cloudflare.com and derive median ping and jitter.

    Returns:
        A ``(median_ping_ms, jitter_ms)`` tuple.
    """
    ping_count = int(getenv("PING_COUNT", "20"))
    ping_measurements = []
    for _ in range(ping_count):
        value = None
        # ping3.ping returns a falsy value on timeout; retry until a reply
        # arrives. NOTE(review): this loops forever if the host never
        # answers -- consider bounding the retries.
        while not value:
            value = ping("cloudflare.com", unit="ms")
        ping_measurements.append(value)
    median_ping = median(ping_measurements)
    # Jitter: mean absolute difference between consecutive ping samples.
    ping_jitter = mean(
        [
            abs(ping_measurements[index] - ping_measurements[index - 1])
            for index in range(1, len(ping_measurements))
        ]
    )
    return (median_ping, ping_jitter)
def calculate_percentile(data, percentile):
    """Return the *percentile*-th percentile of *data*.

    Integral ranks index directly into the sorted data; fractional ranks
    average the two neighbouring samples.

    Args:
        data: Non-empty iterable of numeric measurements.
        percentile: Percentile to compute, in the range 0-100.

    Returns:
        The percentile value.
    """
    sorted_data = sorted(data)
    n = len(sorted_data)
    p = n * percentile / 100
    if p.is_integer():
        # Clamp so percentile=100 (p == n) does not index past the end
        # (the original raised IndexError there).
        return_value = sorted_data[min(int(p), n - 1)]
    else:
        # Clamp so very low percentiles (p < 1) do not wrap around to the
        # last element via a negative index; also guard p + 1 for n == 1.
        p = max(int(p) - 1, 0)
        return_value = (sorted_data[p] + sorted_data[min(p + 1, n - 1)]) / 2
    return return_value
def calculate_download_percentile(percentile):
    """Measure download throughput and return its *percentile* value."""
    raw_iterations = getenv("DOWNLOAD_ITERATIONS", "10,8,6,4,2")
    download_iterations = [int(chunk) for chunk in raw_iterations.split(",")]
    download_measurements = run_speed_test(download_iterations, download)
    LOGGER.info(f"Download {download_measurements}")
    return calculate_percentile(download_measurements, percentile)
def calculate_upload_percentile(percentile):
    """Measure upload throughput and return its *percentile* value."""
    raw_iterations = getenv("UPLOAD_ITERATIONS", "8,6,4,2")
    upload_iterations = [int(chunk) for chunk in raw_iterations.split(",")]
    upload_measurements = run_speed_test(upload_iterations, upload)
    LOGGER.info(f"Upload {upload_measurements}")
    return calculate_percentile(upload_measurements, percentile)
def send_mqtt_message(topic, payload_value):
    """Publish *payload_value* to *topic* as a single retained MQTT message."""
    LOGGER.info(f"MQTT {topic} payload {payload_value}")
    single_mqtt_message(
        topic,
        payload=payload_value,
        qos=0,  # fire-and-forget delivery
        retain=True,  # broker keeps the last value for new subscribers
        hostname=MQTT_SERVER,
        port=MQTT_SERVER_PORT,
        client_id="",
        keepalive=60,
        will=None,
        auth=AUTH_DICT,  # None when no credentials are configured
        tls=None,
        protocol=MQTTv311,
        transport="tcp",
    )
def main():
    """Run one full measurement cycle and publish the results over MQTT."""
    percentile = int(getenv("PERCENTILE", "90"))
    median_ping, ping_jitter = calculate_ping()
    download_percentile = calculate_download_percentile(percentile)
    upload_percentile = calculate_upload_percentile(percentile)
    LOGGER.info(f"Ping {median_ping}")
    LOGGER.info(f"Jitter {ping_jitter}")
    LOGGER.info(f"Download Percentile {download_percentile}")
    LOGGER.info(f"Upload Percentile {upload_percentile}")
    # The state topic carries the measurement timestamp; the attributes topic
    # carries the numbers as JSON (presumably a Home Assistant sensor layout
    # -- confirm against the consumer).
    time_string_payload = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    json_payload = dump_to_json(
        {
            "median_ping": median_ping,
            "ping_jitter": ping_jitter,
            "download_mbps": download_percentile,
            "upload_mbps": upload_percentile,
        }
    )
    send_mqtt_message("speedtest", time_string_payload)
    send_mqtt_message("speedtest/attributes", json_payload)
if __name__ == "__main__":
main()
|
en
| 0.707773
|
#!/usr/bin/python3 # -*- coding: utf-8 -*- # System Imports # Local Imports # 3rd Party
| 2.301734
| 2
|
tests/sentry/tasks/test_store.py
|
vperron/sentry
| 0
|
6629541
|
<reponame>vperron/sentry<filename>tests/sentry/tasks/test_store.py
from __future__ import absolute_import
import mock
from sentry.plugins import Plugin2
from sentry.tasks.store import preprocess_event
from sentry.testutils import PluginTestCase
class BasicPreprocessorPlugin(Plugin2):
    """Minimal plugin exposing one preprocessor that strips the 'extra' key."""

    def get_event_preprocessors(self):
        def remove_extra(data):
            data.pop('extra')
            return data

        # Second entry is a preprocessor that returns None.
        return [remove_extra, lambda x: None]

    def is_enabled(self, project=None):
        return True
class PreprocessEventTest(PluginTestCase):
    # Register the plugin above so its preprocessors run during the test.
    plugin = BasicPreprocessorPlugin

    @mock.patch('sentry.tasks.store.save_event')
    def test_simple(self, mock_save_event):
        """preprocess_event should hand the event on to save_event."""
        project = self.create_project()
        data = {
            'project': project.id,
            'message': 'test',
            'extra': {'foo': 'bar'},
        }
        preprocess_event(data=data)
        # save_event is mocked out, so only the hand-off is asserted here.
        mock_save_event.delay.assert_called_once()
|
from __future__ import absolute_import
import mock
from sentry.plugins import Plugin2
from sentry.tasks.store import preprocess_event
from sentry.testutils import PluginTestCase
class BasicPreprocessorPlugin(Plugin2):
def get_event_preprocessors(self):
def remove_extra(data):
del data['extra']
return data
return [remove_extra, lambda x: None]
def is_enabled(self, project=None):
return True
class PreprocessEventTest(PluginTestCase):
plugin = BasicPreprocessorPlugin
@mock.patch('sentry.tasks.store.save_event')
def test_simple(self, mock_save_event):
project = self.create_project()
data = {
'project': project.id,
'message': 'test',
'extra': {'foo': 'bar'},
}
preprocess_event(data=data)
mock_save_event.delay.assert_called_once()
|
none
| 1
| 2.142446
| 2
|
|
recsys/classifiers/binning.py
|
krasch/smart-assistants
| 9
|
6629542
|
<gh_stars>1-10
# -*- coding: UTF-8 -*-
"""
This module contains functions for dealing with temporal bins in the TemporalClassifier.
"""
import numpy
def initialize_bins(start, end, width):
    """
    Generate a list of interval borders.
    @param start: The left border of the first interval.
    @param end: The left border of the last interval.
    @param width: The width of each interval.
    @return: The list of interval borders.
    """
    first_border = start + width
    stop = end + width
    return list(range(first_border, stop, width))
def smooth(x, window_len=9, window='hanning'):
    """
    Perform moving-window smoothing of some value array. This method is
    adapted from the numpy cookbook, http://www.scipy.org/Cookbook/SignalSmooth
    @param x: The numpy array to be smoothed.
    @param window_len: The size of the window.
    @param window: The type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
    @return: The smoothed value array.
    """
    # Never let the window exceed the data; the reflection padding below
    # reads window_len samples from each end.
    window_len = min(window_len, len(x) - 1)
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # Too small to smooth meaningfully; return the input unchanged.
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Reflect the signal at both ends so the convolution has context there.
    s = numpy.r_[2*x[0]-x[window_len-1::-1], x, 2*x[-1]-x[-1:-window_len:-1]]
    if window == 'flat':  # flat == plain moving average
        w = numpy.ones(window_len, 'd')
    else:
        # Look the window function up by name instead of eval()-ing a
        # constructed string -- same behavior, no code execution from data.
        w = getattr(numpy, window)(window_len)
    y = numpy.convolve(w/w.sum(), s, mode='same')
    # Trim the reflected padding back off.
    return y[window_len:-window_len+1]
|
# -*- coding: UTF-8 -*-
"""
This module contains functions for dealing with temporal bins in the TemporalClassifier.
"""
import numpy
def initialize_bins(start, end, width):
"""
Generate a list of interval borders.
@param start: The left border of the first interval.
@param end: The left border of the last interval.
@param width: The width of each interval.
@return: The list of interval borders.
"""
return list(range(start + width, end + width, width))
def smooth(x, window_len=9, window='hanning'):
    """
    Perform moving-window smoothing of some value array. This method is
    adapted from the numpy cookbook, http://www.scipy.org/Cookbook/SignalSmooth
    @param x: The numpy array to be smoothed.
    @param window_len: The size of the window.
    @param window: The type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
    @return: The smoothed value array.
    """
    # Never let the window exceed the data; the reflection padding below
    # reads window_len samples from each end.
    window_len = min(window_len, len(x) - 1)
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # Too small to smooth meaningfully; return the input unchanged.
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Reflect the signal at both ends so the convolution has context there.
    s = numpy.r_[2*x[0]-x[window_len-1::-1], x, 2*x[-1]-x[-1:-window_len:-1]]
    if window == 'flat':  # flat == plain moving average
        w = numpy.ones(window_len, 'd')
    else:
        # Look the window function up by name instead of eval()-ing a
        # constructed string -- same behavior, no code execution from data.
        w = getattr(numpy, window)(window_len)
    y = numpy.convolve(w/w.sum(), s, mode='same')
    # Trim the reflected padding back off.
    return y[window_len:-window_len+1]
|
en
| 0.562541
|
# -*- coding: UTF-8 -*- This module contains functions for dealing with temporal bins in the TemporalClassifier. Generate a list of interval borders. @param start: The left border of the first interval. @param end: The left border of the last interval. @param width: The width of each interval. @return: The list of interval borders. Perform moving-window smoothing of some value array. This method is copied from numpy cookbook from the numpy cookbook, http://www.scipy.org/Cookbook/SignalSmooth @param x: The numpy array to be smoothed. @param window_len: The size of the window. @param window: The type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' @return: The smoothed value array.
| 3.202703
| 3
|
tensorflow_datasets/image_classification/imagenet_r.py
|
Erik-Tran/datasets
| 1
|
6629543
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The ImageNet-R image classification dataset."""
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = r"""
@article{hendrycks2020many,
title={The Many Faces of Robustness: A Critical Analysis of Out-of-Distribution Generalization},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
journal={arXiv preprint arXiv:2006.16241},
year={2020}
}
"""
_DESCRIPTION = """
ImageNet-R is a set of images labelled with ImageNet labels that were obtained
by collecting art, cartoons, deviantart, graffiti, embroidery, graphics,
origami, paintings, patterns, plastic objects, plush objects, sculptures,
sketches, tattoos, toys, and video game renditions of ImageNet classes.
ImageNet-R has renditions of 200 ImageNet classes resulting in 30,000 images.
by collecting new data and keeping only those images that ResNet-50 models fail
to correctly classify. For more details please refer to the paper.
The label space is the same as that of ImageNet2012. Each example is
represented as a dictionary with the following keys:
* 'image': The image, a (H, W, 3)-tensor.
* 'label': An integer in the range [0, 1000).
* 'file_name': A unique sting identifying the example within the dataset.
"""
_IMAGENET_LABELS_FILENAME = r'image_classification/imagenet2012_labels.txt'
_IMAGENET_R_URL = r'https://people.eecs.berkeley.edu/~hendrycks/imagenet-r.tar'
class ImagenetR(tfds.core.GeneratorBasedBuilder):
  """ImageNet object renditions with ImageNet labels."""

  VERSION = tfds.core.Version('0.1.0')

  def _info(self):
    """Returns the dataset metadata (features, supervised keys, citation)."""
    # Reuse the ImageNet2012 label vocabulary so labels stay comparable.
    names_file = tfds.core.get_tfds_path(_IMAGENET_LABELS_FILENAME)
    return tfds.core.DatasetInfo(
        builder=self,
        # This is the description that will appear on the datasets page.
        description=_DESCRIPTION,
        # tfds.features.FeatureConnectors
        features=tfds.features.FeaturesDict({
            'image': tfds.features.Image(encoding_format='jpeg'),
            'label': tfds.features.ClassLabel(names_file=names_file),
            'file_name': tfds.features.Text(),
        }),
        # Used if as_supervised=True in builder.as_dataset.
        supervised_keys=('image', 'label'),
        # Homepage of the dataset for documentation.
        homepage='https://github.com/hendrycks/imagenet-r',
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Returns a SplitGenerator for the test set."""
    imagenet_r_root = os.path.join(
        dl_manager.download_and_extract(_IMAGENET_R_URL), 'imagenet-r')
    return [
        tfds.core.SplitGenerator(
            # The dataset provides only a test split.
            name=tfds.Split.TEST,
            # These kwargs will be passed to _generate_examples.
            gen_kwargs={'imagenet_r_root': imagenet_r_root},
        ),
    ]

  def _generate_examples(self, imagenet_r_root):
    """Yields the examples."""
    # The directory structure is `imagenet-r/imagenet_synset_id/filename.jpg`.
    for class_synset in tf.io.gfile.listdir(imagenet_r_root):
      class_dir = os.path.join(imagenet_r_root, class_synset)
      # Skip stray files at the top level; only synset directories matter.
      if not tf.io.gfile.isdir(class_dir):
        continue
      for image_filename in tf.io.gfile.listdir(class_dir):
        image_path = os.path.join(class_dir, image_filename)
        features = {
            'image': image_path,
            'label': class_synset,
            'file_name': image_path,
        }
        # The file path is unique, so it doubles as the example key.
        yield image_path, features
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The ImageNet-R image classification dataset."""
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = r"""
@article{hendrycks2020many,
title={The Many Faces of Robustness: A Critical Analysis of Out-of-Distribution Generalization},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
journal={arXiv preprint arXiv:2006.16241},
year={2020}
}
"""
_DESCRIPTION = """
ImageNet-R is a set of images labelled with ImageNet labels that were obtained
by collecting art, cartoons, deviantart, graffiti, embroidery, graphics,
origami, paintings, patterns, plastic objects, plush objects, sculptures,
sketches, tattoos, toys, and video game renditions of ImageNet classes.
ImageNet-R has renditions of 200 ImageNet classes resulting in 30,000 images.
by collecting new data and keeping only those images that ResNet-50 models fail
to correctly classify. For more details please refer to the paper.
The label space is the same as that of ImageNet2012. Each example is
represented as a dictionary with the following keys:
* 'image': The image, a (H, W, 3)-tensor.
* 'label': An integer in the range [0, 1000).
* 'file_name': A unique sting identifying the example within the dataset.
"""
_IMAGENET_LABELS_FILENAME = r'image_classification/imagenet2012_labels.txt'
_IMAGENET_R_URL = r'https://people.eecs.berkeley.edu/~hendrycks/imagenet-r.tar'
class ImagenetR(tfds.core.GeneratorBasedBuilder):
"""ImageNet object renditions with ImageNet labels."""
VERSION = tfds.core.Version('0.1.0')
def _info(self):
names_file = tfds.core.get_tfds_path(_IMAGENET_LABELS_FILENAME)
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(encoding_format='jpeg'),
'label': tfds.features.ClassLabel(names_file=names_file),
'file_name': tfds.features.Text(),
}),
# Used if as_supervised=True in # builder.as_dataset.
supervised_keys=('image', 'label'),
# Homepage of the dataset for documentation
homepage='https://github.com/hendrycks/imagenet-r',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns a SplitGenerator for the test set."""
imagenet_r_root = os.path.join(
dl_manager.download_and_extract(_IMAGENET_R_URL), 'imagenet-r')
return [
tfds.core.SplitGenerator(
# The dataset provides only a test split.
name=tfds.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={'imagenet_r_root': imagenet_r_root},
),
]
def _generate_examples(self, imagenet_r_root):
"""Yields the examples."""
# The directory structure is `imagenet-r/imagenet_synset_id/filename.jpg`.
for class_synset in tf.io.gfile.listdir(imagenet_r_root):
class_dir = os.path.join(imagenet_r_root, class_synset)
if not tf.io.gfile.isdir(class_dir):
continue
for image_filename in tf.io.gfile.listdir(class_dir):
image_path = os.path.join(class_dir, image_filename)
features = {
'image': image_path,
'label': class_synset,
'file_name': image_path,
}
yield image_path, features
|
en
| 0.795372
|
# coding=utf-8 # Copyright 2020 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. The ImageNet-R image classification dataset. @article{hendrycks2020many, title={The Many Faces of Robustness: A Critical Analysis of Out-of-Distribution Generalization}, author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>}, journal={arXiv preprint arXiv:2006.16241}, year={2020} } ImageNet-R is a set of images labelled with ImageNet labels that were obtained by collecting art, cartoons, deviantart, graffiti, embroidery, graphics, origami, paintings, patterns, plastic objects, plush objects, sculptures, sketches, tattoos, toys, and video game renditions of ImageNet classes. ImageNet-R has renditions of 200 ImageNet classes resulting in 30,000 images. by collecting new data and keeping only those images that ResNet-50 models fail to correctly classify. For more details please refer to the paper. The label space is the same as that of ImageNet2012. Each example is represented as a dictionary with the following keys: * 'image': The image, a (H, W, 3)-tensor. * 'label': An integer in the range [0, 1000). * 'file_name': A unique sting identifying the example within the dataset. ImageNet object renditions with ImageNet labels. # This is the description that will appear on the datasets page. # tfds.features.FeatureConnectors # Used if as_supervised=True in # builder.as_dataset. 
# Homepage of the dataset for documentation Returns a SplitGenerator for the test set. # The dataset provides only a test split. # These kwargs will be passed to _generate_examples Yields the examples. # The directory structure is `imagenet-r/imagenet_synset_id/filename.jpg`.
| 1.603301
| 2
|
rowboat/models/migrations/__init__.py
|
zeromomentum121/speedboat
| 16
|
6629544
|
import operator
import time
from functools import reduce
from playhouse.migrate import PostgresqlMigrator, migrate
from rowboat import ENV
from rowboat.sql import database, init_db
COLUMN_EXISTS_SQL = '''
SELECT 1
FROM information_schema.columns
WHERE table_name=%s and column_name=%s;
'''
GET_NULLABLE_SQL = '''
SELECT is_nullable
FROM information_schema.columns
WHERE table_name=%s and column_name=%s;
'''
class Migrate(object):
    """Helper around playhouse's PostgresqlMigrator.

    Collects schema actions and raw SQL, then runs them only when every
    precondition rule (see ``missing`` / ``nullable`` / ``non_nullable``)
    holds against the live schema.
    """

    def __init__(self, rules, func):
        self.rules = rules  # precondition callables: (cursor) -> bool
        self.func = func  # migration body, invoked with this instance
        self.actions = []  # queued playhouse migrate() operations
        self.raw_actions = []  # queued (sql, params) tuples
        self.m = PostgresqlMigrator(database)

    def run(self):
        """Evaluate all precondition rules; apply the migration if they pass."""
        conn = database.obj.connection()
        for rule in self.rules:
            with conn.cursor() as cur:
                if not rule(cur):
                    return
        self.func(self)
        self.apply()

    def apply(self):
        """Execute the collected schema actions, then the raw queries."""
        print('Applying {} actions'.format(len(self.actions)))
        migrate(*self.actions)
        print('Executing {} raw queries'.format(len(self.raw_actions)))
        conn = database.obj.connection()
        for query, args in self.raw_actions:
            with conn.cursor() as cur:
                cur.execute(query, args)
            conn.commit()

    def add_columns(self, table, *fields):
        """Queue ADD COLUMN for each field."""
        for field in fields:
            self.actions.append(self.m.add_column(table._meta.db_table, field.name, field))

    def rename_column(self, table, field, new_name):
        """Queue RENAME COLUMN for the given field."""
        self.actions.append(self.m.rename_column(table._meta.db_table, field.name, new_name))

    def drop_not_nulls(self, table, *fields):
        """Queue DROP NOT NULL for each field."""
        for field in fields:
            self.actions.append(self.m.drop_not_null(table._meta.db_table, field.name))

    def add_not_nulls(self, table, *fields):
        """Queue SET NOT NULL for each field."""
        for field in fields:
            self.actions.append(self.m.add_not_null(table._meta.db_table, field.name))

    def execute(self, query, params=None):
        """Queue a raw SQL statement to run during apply()."""
        self.raw_actions.append((query, params or []))

    def backfill_column(self, table, old_columns, new_columns, pkeys=None, cast_funcs=None):
        """Copy values from *old_columns* into *new_columns* row by row.

        Args:
            table: peewee model to backfill.
            old_columns: source fields, read in order.
            new_columns: destination fields, parallel to old_columns.
            pkeys: fields identifying a row (defaults to the primary key).
            cast_funcs: optional {field: callable} converting values on the fly.
        """
        total = table.select().count()
        if not pkeys:
            pkeys = [table._meta.primary_key]
        q = table.select(
            *(pkeys + old_columns)
        ).tuples()
        idx = 0
        modified = 0
        start = time.time()
        with database.transaction() as txn:
            for values in q:
                idx += 1
                if idx % 10000 == 0:
                    print('[%ss] Backfilling %s %s/%s (wrote %s)' % (time.time() - start, str(table), idx, total, modified))
                    # Commit periodically so a crash does not lose all work.
                    # (Fixed from `if modified % 1000:`, which committed on
                    # almost every progress tick instead of every 1000 writes.)
                    if modified % 1000 == 0:
                        txn.commit()
                # Old-column values start after the pkey values in the tuple.
                # (Fixed: the non-cast branch read `values[i] + len(pkeys)`,
                # adding the pkey count to the value instead of offsetting
                # the index -- the UPDATE below shows the intended indexing.)
                obj = {
                    new_column.name: cast_funcs[new_column](values[i + len(pkeys)])
                    if cast_funcs and new_column in cast_funcs else values[i + len(pkeys)]
                    for i, new_column in enumerate(new_columns)
                }
                if not any(obj.values()):
                    continue
                modified += 1
                # Write the (possibly cast) values computed above so that
                # cast_funcs take effect in the UPDATE as well, instead of
                # recomputing the raw values.
                table.update(
                    **obj
                ).where(
                    reduce(operator.and_, [(iz == values[i]) for i, iz in enumerate(pkeys)])
                ).execute()
            txn.commit()
        print('DONE, %s scanned %s written' % (idx, modified))

    @staticmethod
    def missing(table, field):
        """Rule: true when *field* does not exist on *table*."""
        def rule(cursor):
            cursor.execute(COLUMN_EXISTS_SQL, (table._meta.db_table, field))
            if len(cursor.fetchall()) == 0:
                return True
            return False
        return rule

    @staticmethod
    def nullable(table, field):
        """Rule: true when *field* is nullable."""
        def rule(cursor):
            cursor.execute(GET_NULLABLE_SQL, (table._meta.db_table, field))
            return cursor.fetchone()[0] == 'YES'
        return rule

    @staticmethod
    def non_nullable(table, field):
        """Rule: true when *field* is NOT NULL."""
        def rule(cursor):
            cursor.execute(GET_NULLABLE_SQL, (table._meta.db_table, field))
            return cursor.fetchone()[0] == 'NO'
        return rule

    @classmethod
    def only_if(cls, check, table, *fields):
        """Decorator: run the migration only if *check* passes for all fields."""
        def deco(func):
            rules = [check(table, i) for i in fields]
            cls(rules, func).run()
        return deco

    @classmethod
    def always(cls):
        """Decorator: run the migration unconditionally."""
        def deco(func):
            cls([lambda c: True], func).run()
        return deco
init_db(ENV)
|
import operator
import time
from functools import reduce
from playhouse.migrate import PostgresqlMigrator, migrate
from rowboat import ENV
from rowboat.sql import database, init_db
COLUMN_EXISTS_SQL = '''
SELECT 1
FROM information_schema.columns
WHERE table_name=%s and column_name=%s;
'''
GET_NULLABLE_SQL = '''
SELECT is_nullable
FROM information_schema.columns
WHERE table_name=%s and column_name=%s;
'''
class Migrate(object):
def __init__(self, rules, func):
self.rules = rules
self.func = func
self.actions = []
self.raw_actions = []
self.m = PostgresqlMigrator(database)
def run(self):
conn = database.obj.connection()
for rule in self.rules:
with conn.cursor() as cur:
if not rule(cur):
return
self.func(self)
self.apply()
def apply(self):
print('Applying {} actions'.format(len(self.actions)))
migrate(*self.actions)
print('Executing {} raw queries'.format(len(self.raw_actions)))
conn = database.obj.connection()
for query, args in self.raw_actions:
with conn.cursor() as cur:
cur.execute(query, args)
conn.commit()
def add_columns(self, table, *fields):
for field in fields:
self.actions.append(self.m.add_column(table._meta.db_table, field.name, field))
def rename_column(self, table, field, new_name):
self.actions.append(self.m.rename_column(table._meta.db_table, field.name, new_name))
def drop_not_nulls(self, table, *fields):
for field in fields:
self.actions.append(self.m.drop_not_null(table._meta.db_table, field.name))
def add_not_nulls(self, table, *fields):
for field in fields:
self.actions.append(self.m.add_not_null(table._meta.db_table, field.name))
def execute(self, query, params=None):
self.raw_actions.append((query, params or []))
def backfill_column(self, table, old_columns, new_columns, pkeys=None, cast_funcs=None):
total = table.select().count()
if not pkeys:
pkeys = [table._meta.primary_key]
q = table.select(
*(pkeys + old_columns)
).tuples()
idx = 0
modified = 0
start = time.time()
with database.transaction() as txn:
for values in q:
idx += 1
if idx % 10000 == 0:
print('[%ss] Backfilling %s %s/%s (wrote %s)' % (time.time() - start, str(table), idx, total, modified))
if modified % 1000:
txn.commit()
obj = {
new_column.name: cast_funcs[new_column](values[i + len(pkeys)])
if cast_funcs and new_column in cast_funcs else values[i] + len(pkeys)
for i, new_column in enumerate(new_columns)
}
if not any(obj.values()):
continue
modified += 1
table.update(
**{new_column.name: values[i + len(pkeys)] for i, new_column in enumerate(new_columns)}
).where(
reduce(operator.and_, [(iz == values[i]) for i, iz in enumerate(pkeys)])
).execute()
txn.commit()
print('DONE, %s scanned %s written' % (idx, modified))
@staticmethod
def missing(table, field):
def rule(cursor):
cursor.execute(COLUMN_EXISTS_SQL, (table._meta.db_table, field))
if len(cursor.fetchall()) == 0:
return True
return False
return rule
@staticmethod
def nullable(table, field):
def rule(cursor):
cursor.execute(GET_NULLABLE_SQL, (table._meta.db_table, field))
return cursor.fetchone()[0] == 'YES'
return rule
@staticmethod
def non_nullable(table, field):
def rule(cursor):
cursor.execute(GET_NULLABLE_SQL, (table._meta.db_table, field))
return cursor.fetchone()[0] == 'NO'
return rule
@classmethod
def only_if(cls, check, table, *fields):
def deco(func):
rules = [check(table, i) for i in fields]
cls(rules, func).run()
return deco
@classmethod
def always(cls):
def deco(func):
cls([lambda c: True], func).run()
return deco
init_db(ENV)
|
en
| 0.354408
|
SELECT 1 FROM information_schema.columns WHERE table_name=%s and column_name=%s; SELECT is_nullable FROM information_schema.columns WHERE table_name=%s and column_name=%s;
| 2.53905
| 3
|
728.py
|
RafaelHuang87/Leet-Code-Practice
| 0
|
6629545
|
"""
Solution for Leet Code 728.
"""
class Solution:
    """Solver for LeetCode 728 (self-dividing numbers)."""

    @staticmethod
    def selfDividingNumbers(left, right):
        """Return every self-dividing number in [left, right].

        A number is self-dividing when it contains no zero digit and is
        divisible by each of its digits.  Fixes the original's identity
        comparisons on literals (`is "0"`, `is not 0`), stops shadowing the
        builtin `list`, and appends only qualifying numbers instead of
        append-then-remove.
        """
        result = []
        for candidate in range(left, right + 1):
            digits = str(candidate)
            # Zero digits disqualify outright (and would divide by zero).
            if "0" in digits:
                continue
            if all(candidate % int(digit) == 0 for digit in digits):
                result.append(candidate)
        return result
print(Solution.selfDividingNumbers(1, 22))
|
"""
Solution for Leet Code 728.
"""
class Solution:
    """Solver for LeetCode 728 (self-dividing numbers)."""

    @staticmethod
    def selfDividingNumbers(left, right):
        """Return every self-dividing number in [left, right].

        A number is self-dividing when it contains no zero digit and is
        divisible by each of its digits.  Fixes the original's identity
        comparisons on literals (`is "0"`, `is not 0`), stops shadowing the
        builtin `list`, and appends only qualifying numbers instead of
        append-then-remove.
        """
        result = []
        for candidate in range(left, right + 1):
            digits = str(candidate)
            # Zero digits disqualify outright (and would divide by zero).
            if "0" in digits:
                continue
            if all(candidate % int(digit) == 0 for digit in digits):
                result.append(candidate)
        return result
print(Solution.selfDividingNumbers(1, 22))
|
en
| 0.307168
|
Solution for Leet Code 728.
| 3.479384
| 3
|
pyftdi/tests/backend/consts.py
|
marcjordan2112/pyftdi
| 345
|
6629546
|
<reponame>marcjordan2112/pyftdi
"""Constant importer from existing modules."""
# Copyright (c) 2020-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#pylint: disable-msg=missing-docstring
#pylint: disable-msg=invalid-name
#pylint: disable-msg=too-many-instance-attributes
from enum import Enum
from importlib import import_module
from sys import version_info
from pyftdi.ftdi import Ftdi
from pyftdi.misc import EasyDict
class UsbConstants:
    """Expose useful constants defined in PyUSB and allow reverse search, i.e.
    retrieve constant literals from integral values.
    """

    # (direction-bit-set, bRequest value) -> standard USB device request name.
    DEVICE_REQUESTS = {
        (True, 0x0): 'get_status',
        (False, 0x1): 'clear_feature',
        (False, 0x3): 'set_feature',
        (False, 0x5): 'set_address',
        (True, 0x6): 'get_descriptor',
        (False, 0x7): 'set_descriptor',
        (True, 0x8): 'get_configuration',
        (False, 0x9): 'set_configuration',
    }

    def __init__(self):
        # value -> name maps harvested from usb.util, plus the OR-mask of
        # each map's keys, used below to extract the relevant bit-field.
        self._desc_type = self._load_constants('desc_type')
        self._desc_type_mask = self._mask(self._desc_type)
        self._ctrl_dir = self._load_constants('ctrl')
        self._ctrl_dir_mask = self._mask(self._ctrl_dir)
        self._ctrl_type = self._load_constants('ctrl_type')
        self._ctrl_type_mask = self._mask(self._ctrl_type)
        self._ctrl_recipient = self._load_constants('ctrl_recipient')
        self._ctrl_recipient_mask = self._mask(self._ctrl_recipient)
        self._endpoint_type = self._load_constants('endpoint_type')
        self._endpoint_type_mask = self._mask(self._endpoint_type)
        # Upper-cased NAME -> descriptor type value (reverse of _desc_type).
        self._descriptors = EasyDict({v.upper(): k
                                      for k, v in self._desc_type.items()})
        self.endpoints = self._load_constants('endpoint', True)
        self.endpoint_types = self._load_constants('endpoint_type', True)
        self.speeds = self._load_constants('speed', True)

    @property
    def descriptors(self):
        return self._descriptors

    @classmethod
    def _load_constants(cls, prefix: str, reverse=False):
        """Collect usb.util constants named ``<PREFIX>_*``.

        Returns value->name by default, name->value when *reverse* is set.
        Raises ValueError when no constant matches the prefix.
        """
        prefix = prefix.upper()
        if not prefix.endswith('_'):
            prefix = f'{prefix}_'
        mod = import_module('usb.util')
        mapping = EasyDict()
        plen = len(prefix)
        for entry in dir(mod):
            if not entry.startswith(prefix):
                continue
            # Skip compound names (e.g. PREFIX_FOO_BAR): only single-word
            # suffixes belong to this constant family.
            if '_' in entry[plen:]:
                continue
            if not reverse:
                mapping[getattr(mod, entry)] = entry[plen:].lower()
            else:
                mapping[entry[plen:].lower()] = getattr(mod, entry)
        if not mapping:
            raise ValueError(f"No USB constant found for {prefix.rstrip('_')}")
        return mapping

    @classmethod
    def _mask(cls, mapping: dict) -> int:
        """OR all keys of *mapping* into a single extraction bit-mask."""
        mask = 0
        for val in mapping:
            mask |= val
        return mask

    def is_req_out(self, reqtype: int) -> str:
        """True when the direction bit of bmRequestType is clear (OUT)."""
        return not reqtype & self._ctrl_dir_mask

    def dec_req_ctrl(self, reqtype: int) -> str:
        """Decode the direction bits of bmRequestType to a literal."""
        return self._ctrl_dir[reqtype & self._ctrl_dir_mask]

    def dec_req_type(self, reqtype: int) -> str:
        """Decode the type bits of bmRequestType to a literal."""
        return self._ctrl_type[reqtype & self._ctrl_type_mask]

    def dec_req_rcpt(self, reqtype: int) -> str:
        """Decode the recipient bits of bmRequestType to a literal."""
        return self._ctrl_recipient[reqtype & self._ctrl_recipient_mask]

    def dec_req_name(self, reqtype: int, request: int) -> str:
        """Name a standard device request, or ``req xNN`` when unknown."""
        direction = bool(reqtype & self._ctrl_dir_mask)
        try:
            return self.DEVICE_REQUESTS[(direction, request)]
        except KeyError:
            return f'req x{request:02x}'

    def dec_desc_type(self, desctype: int) -> str:
        """Decode a descriptor type value to its literal name."""
        return self._desc_type[desctype & self._desc_type_mask]
class FtdiConstants:
    """Expose useful constants defined in Ftdi and allow reverse search, i.e.
    retrieve constant literals from integral values.
    """

    def __init__(self):
        # Lazily populated caches: prefix -> {value: name} (_dcache) and
        # prefix -> {name: value} (_rcache).
        self._dcache = {}
        self._rcache = {}

    @classmethod
    def _load_constants(cls, prefix: str, reverse=False):
        """Collect Ftdi constants (class attributes, or members of a nested
        Enum class when no plain attribute matches) for *prefix*.

        Returns value->name by default, name->value when *reverse* is set.
        """
        prefix = prefix.upper()
        if not prefix.endswith('_'):
            prefix = f'{prefix}_'
        mapping = EasyDict()
        plen = len(prefix)
        for name in dir(Ftdi):
            if not name.startswith(prefix):
                continue
            if not reverse:
                mapping[getattr(Ftdi, name)] = name[plen:].lower()
            else:
                mapping[name[plen:].lower()] = getattr(Ftdi, name)
        if not mapping:
            # maybe an enum
            prefix = prefix.rstrip('_').lower()
            for name in dir(Ftdi):
                if not name.lower().startswith(prefix):
                    continue
                item = getattr(Ftdi, name)
                # NOTE(review): issubclass raises TypeError if a matching
                # attribute is not a class -- confirm callers only pass
                # prefixes that resolve to Enum classes here.
                if issubclass(item, Enum):
                    if not reverse:
                        mapping = {en.value: en.name.lower() for en in item}
                    else:
                        mapping = {en.name.lower(): en.value for en in item}
        if not mapping:
            raise ValueError(f"No FTDI constant found for "
                             f"{prefix.rstrip('_')}")
        return mapping

    def get_name(self, prefix: str, value: int) -> str:
        """Resolve *value* to its literal within the *prefix* family."""
        if prefix not in self._dcache:
            self._dcache[prefix] = self._load_constants(prefix)
        try:
            return self._dcache[prefix][value]
        except KeyError:
            # Unknown value: render a placeholder rather than raising.
            return f'x?{value:04x}'

    def get_value(self, prefix: str, name: str) -> str:
        """Resolve a literal *name* to its value within the *prefix* family."""
        if prefix not in self._rcache:
            self._rcache[prefix] = self._load_constants(prefix, True)
        try:
            return self._rcache[prefix][name.lower()]
        except KeyError as exc:
            raise ValueError(f'Unknown name {prefix}.{name}') from exc

    def dec_req_name(self, request: int) -> str:
        """Name an FTDI SIO control request value."""
        return self.get_name('sio_req', request)
USBCONST = UsbConstants()
"""Unique instance of USB constant container."""
FTDICONST = FtdiConstants()
"""Unique instances of FTDI constant container."""
|
"""Constant importer from existing modules."""
# Copyright (c) 2020-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#pylint: disable-msg=missing-docstring
#pylint: disable-msg=invalid-name
#pylint: disable-msg=too-many-instance-attributes
from enum import Enum
from importlib import import_module
from sys import version_info
from pyftdi.ftdi import Ftdi
from pyftdi.misc import EasyDict
class UsbConstants:
    """Expose useful constants defined in PyUSB and allow reverse search, i.e.
    retrieve constant literals from integral values.
    """

    #: Standard USB device requests, keyed on (device_to_host, bRequest).
    DEVICE_REQUESTS = {
        (True, 0x0): 'get_status',
        (False, 0x1): 'clear_feature',
        (False, 0x3): 'set_feature',
        (False, 0x5): 'set_address',
        (True, 0x6): 'get_descriptor',
        (False, 0x7): 'set_descriptor',
        (True, 0x8): 'get_configuration',
        (False, 0x9): 'set_configuration',
    }

    def __init__(self):
        # For each constant family: a value -> name mapping plus the
        # bitmask covering every value of the family, used to extract the
        # relevant bits from a composite bmRequestType/descriptor byte.
        self._desc_type = self._load_constants('desc_type')
        self._desc_type_mask = self._mask(self._desc_type)
        self._ctrl_dir = self._load_constants('ctrl')
        self._ctrl_dir_mask = self._mask(self._ctrl_dir)
        self._ctrl_type = self._load_constants('ctrl_type')
        self._ctrl_type_mask = self._mask(self._ctrl_type)
        self._ctrl_recipient = self._load_constants('ctrl_recipient')
        self._ctrl_recipient_mask = self._mask(self._ctrl_recipient)
        self._endpoint_type = self._load_constants('endpoint_type')
        self._endpoint_type_mask = self._mask(self._endpoint_type)
        self._descriptors = EasyDict({v.upper(): k
                                      for k, v in self._desc_type.items()})
        # Public reverse (name -> value) mappings.
        self.endpoints = self._load_constants('endpoint', True)
        self.endpoint_types = self._load_constants('endpoint_type', True)
        self.speeds = self._load_constants('speed', True)

    @property
    def descriptors(self):
        """Mapping of uppercase descriptor literal names to their values."""
        return self._descriptors

    @classmethod
    def _load_constants(cls, prefix: str, reverse=False):
        """Collect usb.util constants sharing a common name prefix.

        :param prefix: constant name prefix, case-insensitive
        :param reverse: if True, map lowercase names to values; otherwise
                        map values to lowercase names
        :raise ValueError: if no constant matches the prefix
        """
        prefix = prefix.upper()
        if not prefix.endswith('_'):
            prefix = f'{prefix}_'
        mod = import_module('usb.util')
        mapping = EasyDict()
        plen = len(prefix)
        for entry in dir(mod):
            if not entry.startswith(prefix):
                continue
            if '_' in entry[plen:]:
                # Skip longer, more specific families (e.g. CTRL_TYPE_*
                # while loading CTRL_*).
                continue
            if not reverse:
                mapping[getattr(mod, entry)] = entry[plen:].lower()
            else:
                mapping[entry[plen:].lower()] = getattr(mod, entry)
        if not mapping:
            raise ValueError(f"No USB constant found for {prefix.rstrip('_')}")
        return mapping

    @classmethod
    def _mask(cls, mapping: dict) -> int:
        """Return the bitwise OR of all keys of *mapping*."""
        mask = 0
        for val in mapping:
            mask |= val
        return mask

    def is_req_out(self, reqtype: int) -> bool:
        """Tell whether a bmRequestType byte encodes a host-to-device request.

        Return annotation fixed: the method returns a bool, not a str.
        """
        return not reqtype & self._ctrl_dir_mask

    def dec_req_ctrl(self, reqtype: int) -> str:
        """Decode the direction field of a bmRequestType byte."""
        return self._ctrl_dir[reqtype & self._ctrl_dir_mask]

    def dec_req_type(self, reqtype: int) -> str:
        """Decode the type field of a bmRequestType byte."""
        return self._ctrl_type[reqtype & self._ctrl_type_mask]

    def dec_req_rcpt(self, reqtype: int) -> str:
        """Decode the recipient field of a bmRequestType byte."""
        return self._ctrl_recipient[reqtype & self._ctrl_recipient_mask]

    def dec_req_name(self, reqtype: int, request: int) -> str:
        """Decode a standard device request into its literal name.

        Falls back to a hex representation for non-standard requests.
        """
        direction = bool(reqtype & self._ctrl_dir_mask)
        try:
            return self.DEVICE_REQUESTS[(direction, request)]
        except KeyError:
            return f'req x{request:02x}'

    def dec_desc_type(self, desctype: int) -> str:
        """Decode a descriptor type byte into its literal name."""
        return self._desc_type[desctype & self._desc_type_mask]
class FtdiConstants:
    """Expose useful constants defined in Ftdi and allow reverse search, i.e.
    retrieve constant literals from integral values.
    """

    def __init__(self):
        # Per-prefix memoization caches, filled lazily on first lookup.
        self._dcache = {}  # direct cache: prefix -> {value: name}
        self._rcache = {}  # reverse cache: prefix -> {name: value}

    @classmethod
    def _load_constants(cls, prefix: str, reverse=False):
        """Build a mapping of all Ftdi constants sharing a name prefix.

        :param prefix: constant name prefix, case-insensitive
        :param reverse: if True, map lowercase names to values; otherwise
                        map values to lowercase names
        :raise ValueError: if no constant matches the prefix
        """
        prefix = prefix.upper()
        if not prefix.endswith('_'):
            prefix = f'{prefix}_'
        mapping = EasyDict()
        plen = len(prefix)
        for name in dir(Ftdi):
            if not name.startswith(prefix):
                continue
            if not reverse:
                mapping[getattr(Ftdi, name)] = name[plen:].lower()
            else:
                mapping[name[plen:].lower()] = getattr(Ftdi, name)
        if not mapping:
            # maybe an enum
            prefix = prefix.rstrip('_').lower()
            for name in dir(Ftdi):
                if not name.lower().startswith(prefix):
                    continue
                item = getattr(Ftdi, name)
                # dir() also yields plain attributes and methods;
                # issubclass() raises TypeError on a non-class argument,
                # so guard with isinstance(item, type) first.
                if isinstance(item, type) and issubclass(item, Enum):
                    if not reverse:
                        mapping = {en.value: en.name.lower() for en in item}
                    else:
                        mapping = {en.name.lower(): en.value for en in item}
        if not mapping:
            raise ValueError(f"No FTDI constant found for "
                             f"{prefix.rstrip('_')}")
        return mapping

    def get_name(self, prefix: str, value: int) -> str:
        """Return the literal name of an FTDI constant from its value.

        Unknown values fall back to a hex representation.
        """
        if prefix not in self._dcache:
            self._dcache[prefix] = self._load_constants(prefix)
        try:
            return self._dcache[prefix][value]
        except KeyError:
            return f'x?{value:04x}'

    def get_value(self, prefix: str, name: str) -> int:
        """Return the value of an FTDI constant from its literal name.

        Note: the return annotation is int (constant values), fixed from
        the previous, incorrect ``-> str``.

        :raise ValueError: if the name does not match any constant
        """
        if prefix not in self._rcache:
            self._rcache[prefix] = self._load_constants(prefix, True)
        try:
            return self._rcache[prefix][name.lower()]
        except KeyError as exc:
            raise ValueError(f'Unknown name {prefix}.{name}') from exc

    def dec_req_name(self, request: int) -> str:
        """Decode an FTDI SIO control request into its literal name."""
        return self.get_name('sio_req', request)
# Module-level singletons: loading the constant tables is relatively
# expensive, so all importers share these two instances.
USBCONST = UsbConstants()
"""Unique instance of USB constant container."""
FTDICONST = FtdiConstants()
"""Unique instances of FTDI constant container."""
|
en
| 0.609107
|
Constant importer from existing modules. # Copyright (c) 2020-2021, <NAME> <<EMAIL>> # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause #pylint: disable-msg=missing-docstring #pylint: disable-msg=invalid-name #pylint: disable-msg=too-many-instance-attributes Expose useful constants defined in PyUSB and allow reverse search, i.e. retrieve constant literals from integral values. Expose useful constants defined in Ftdi and allow reverse search, i.e. retrieve constant literals from integral values. # maybe an enum Unique instance of USB constant container. Unique instances of FTDI constant container.
| 2.148753
| 2
|
uvicore/auth/models/group.py
|
uvicore/framework
| 11
|
6629547
|
<filename>uvicore/auth/models/group.py
from __future__ import annotations
import uvicore
from uvicore.typing import Optional, List
from uvicore.auth.models.role import Role
from uvicore.support.dumper import dd, dump
from uvicore.orm import Model, ModelMetaclass, Field, BelongsToMany
from uvicore.auth.database.tables import groups as table
@uvicore.model()
class Group(Model['Group'], metaclass=ModelMetaclass):
    """Auth Group Model"""

    # Backing database table definition for this model.
    __tableclass__ = table.Groups

    # Auto-increment surrogate primary key.
    id: Optional[int] = Field('id',
        primary=True,
        description='Group ID',
    )

    # key: str = Field('key',
    #     primary=True,
    #     description='Group Primary Key',
    # )

    # Human-readable group name.
    name: str = Field('name',
        description='Group Name',
    )

    # Many-To-Many via group_roles pivot table
    roles: Optional[List[Role]] = Field(None,
        description="Group Roles",
        relation=BelongsToMany('uvicore.auth.models.role.Role', join_tablename='group_roles', left_key='group_id', right_key='role_id'),
    )
|
<filename>uvicore/auth/models/group.py
from __future__ import annotations
import uvicore
from uvicore.typing import Optional, List
from uvicore.auth.models.role import Role
from uvicore.support.dumper import dd, dump
from uvicore.orm import Model, ModelMetaclass, Field, BelongsToMany
from uvicore.auth.database.tables import groups as table
@uvicore.model()
class Group(Model['Group'], metaclass=ModelMetaclass):
    """Auth Group Model"""

    # Backing database table definition for this model.
    __tableclass__ = table.Groups

    # Auto-increment surrogate primary key.
    id: Optional[int] = Field('id',
        primary=True,
        description='Group ID',
    )

    # key: str = Field('key',
    #     primary=True,
    #     description='Group Primary Key',
    # )

    # Human-readable group name.
    name: str = Field('name',
        description='Group Name',
    )

    # Many-To-Many via group_roles pivot table
    roles: Optional[List[Role]] = Field(None,
        description="Group Roles",
        relation=BelongsToMany('uvicore.auth.models.role.Role', join_tablename='group_roles', left_key='group_id', right_key='role_id'),
    )
|
en
| 0.468856
|
Auth Group Model # key: str = Field('key', # primary=True, # description='Group Primary Key', # ) # Many-To-Many via group_roles pivot table
| 2.34738
| 2
|
idler.py
|
zignig/cqparts_bucket
| 10
|
6629548
|
<filename>idler.py
import cadquery as cq
import cqparts
from cadquery import Solid
from cqparts.params import *
from cqparts.display import render_props, display
from cqparts.constraint import Fixed, Coincident
from cqparts.constraint import Mate
from cqparts.utils.geometry import CoordSystem
from cqparts_bearings.ball import BallBearing
from cqparts_motors.shaft import Shaft
class Idler(cqparts.Assembly):
    """An idler assembly made of a single free-spinning shaft."""

    def make_components(self):
        # Single component: the idler shaft.
        return {"shaft": Shaft()}

    def make_constraints(self):
        # Pin the shaft at the assembly origin.
        shaft = self.components["shaft"]
        return [Fixed(shaft.mate_origin)]
if __name__ == "__main__":
    # Render the assembly when this file is run as a script.
    from cqparts.display import display

    B = Idler()
    display(B)
|
<filename>idler.py
import cadquery as cq
import cqparts
from cadquery import Solid
from cqparts.params import *
from cqparts.display import render_props, display
from cqparts.constraint import Fixed, Coincident
from cqparts.constraint import Mate
from cqparts.utils.geometry import CoordSystem
from cqparts_bearings.ball import BallBearing
from cqparts_motors.shaft import Shaft
class Idler(cqparts.Assembly):
    """An idler assembly made of a single free-spinning shaft."""

    def make_components(self):
        # Single component: the idler shaft.
        return {"shaft": Shaft()}

    def make_constraints(self):
        # Pin the shaft at the assembly origin.
        shaft = self.components["shaft"]
        return [Fixed(shaft.mate_origin)]
if __name__ == "__main__":
    # Render the assembly when this file is run as a script.
    from cqparts.display import display

    B = Idler()
    display(B)
|
none
| 1
| 2.42015
| 2
|
|
test/mac/gyptest-app-error.py
|
Jet-Streaming/gyp
| 0
|
6629549
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that invalid strings files cause the build to fail.
"""
import TestCmd
import TestGyp
import sys
if sys.platform == 'darwin':
    expected_error = 'Old-style plist parser: missing semicolon in dictionary'
    saw_expected_error = [False]  # mutable cell: Python 2 has no "nonlocal"

    def match(a, b):
        """Line matcher that also records whether the expected error appears."""
        if a == b:
            return True
        a_lines = a if TestCmd.is_List(a) else a.split('\n')
        b_lines = b if TestCmd.is_List(b) else b.split('\n')
        if expected_error in '\n'.join(a_lines) + '\n'.join(b_lines):
            saw_expected_error[0] = True
            return True
        return False

    test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match=match)

    test.run_gyp('test-error.gyp', chdir='app-bundle')
    test.build('test-error.gyp', test.ALL, chdir='app-bundle')

    # Ninja pipes stderr of subprocesses to stdout.
    if (test.format in ('ninja', 'xcode-ninja')
            and expected_error in test.stdout()):
        saw_expected_error[0] = True

    if saw_expected_error[0]:
        test.pass_test()
    else:
        test.fail_test()
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that invalid strings files cause the build to fail.
"""
import TestCmd
import TestGyp
import sys
if sys.platform == 'darwin':
    expected_error = 'Old-style plist parser: missing semicolon in dictionary'
    saw_expected_error = [False]  # mutable cell: Python 2 has no "nonlocal"

    def match(a, b):
        """Line matcher that also records whether the expected error appears."""
        if a == b:
            return True
        a_lines = a if TestCmd.is_List(a) else a.split('\n')
        b_lines = b if TestCmd.is_List(b) else b.split('\n')
        if expected_error in '\n'.join(a_lines) + '\n'.join(b_lines):
            saw_expected_error[0] = True
            return True
        return False

    test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match=match)

    test.run_gyp('test-error.gyp', chdir='app-bundle')
    test.build('test-error.gyp', test.ALL, chdir='app-bundle')

    # Ninja pipes stderr of subprocesses to stdout.
    if (test.format in ('ninja', 'xcode-ninja')
            and expected_error in test.stdout()):
        saw_expected_error[0] = True

    if saw_expected_error[0]:
        test.pass_test()
    else:
        test.fail_test()
|
en
| 0.831238
|
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Verifies that invalid strings files cause the build to fail. # Python2 has no "nonlocal" keyword. # Ninja pipes stderr of subprocesses to stdout.
| 2.285208
| 2
|
src/sentry/utils/distutils/commands/build_js_sdk_registry.py
|
detouched/sentry
| 4
|
6629550
|
# NOTE: This is run external to sentry as well as part of the setup
# process. Thus we do not want to import non stdlib things here.
from __future__ import absolute_import
import os
import sys
import json
from distutils import log
import sentry
# Release-registry endpoint describing the latest @sentry/browser npm release.
JS_SDK_REGISTRY_URL = 'https://release-registry.services.sentry.io/packages/npm:@sentry/browser/latest'
# Destination folder for the generated loader registry files.
LOADER_FOLDER = os.path.abspath(os.path.join(os.path.dirname(sentry.__file__), 'loader'))
# We cannot leverage six here, so we need to vendor
# bits that we need: dict iteration and URL fetching differ between
# Python 2 and Python 3.
if sys.version_info[0] == 3:
    def iteritems(d, **kw):
        # Python 3: items() already returns a lazy view.
        return iter(d.items(**kw))
    from urllib.request import urlopen
else:
    def iteritems(d, **kw):
        # Python 2: native lazy iterator over items.
        return d.iteritems(**kw)  # NOQA
    from urllib2 import urlopen
def dump_registry(path, data):
    """Serialize *data* as pretty-printed JSON under LOADER_FOLDER.

    :param path: file name relative to LOADER_FOLDER, without extension
    :param data: JSON-serializable object
    """
    fn = os.path.join(LOADER_FOLDER, path + '.json')
    directory = os.path.dirname(fn)
    try:
        os.makedirs(directory)
    except OSError:
        # Directory already exists; any other problem surfaces on open().
        pass
    # BUGFIX: the file must be opened in text mode ('w', not 'wb'): on
    # Python 3, json.dump() and the trailing write() emit str, which a
    # binary stream rejects with TypeError.
    with open(fn, 'w') as f:
        json.dump(data, f, indent=2)
        f.write('\n')
def sync_registry():
    """Fetch the latest JS SDK metadata and write it to the local registry."""
    response = urlopen(JS_SDK_REGISTRY_URL)
    payload = json.loads(response.read().decode('utf-8'))
    dump_registry('_registry', payload)
from .base import BaseBuildCommand
class BuildJsSdkRegistryCommand(BaseBuildCommand):
    """distutils command that refreshes the bundled JS SDK registry."""

    description = 'build js sdk registry'

    def run(self):
        """Download the registry; failures are logged, not fatal."""
        log.info('downloading js sdk information from the release registry')
        try:
            sync_registry()
        except Exception:
            # Narrowed from BaseException so KeyboardInterrupt/SystemExit
            # still propagate; the sync itself stays best-effort.
            # Also fixes the "ocurred" typo in the log message.
            log.error('error occurred while trying to fetch js sdk information from the registry')
|
# NOTE: This is run external to sentry as well as part of the setup
# process. Thus we do not want to import non stdlib things here.
from __future__ import absolute_import
import os
import sys
import json
from distutils import log
import sentry
# Release-registry endpoint describing the latest @sentry/browser npm release.
JS_SDK_REGISTRY_URL = 'https://release-registry.services.sentry.io/packages/npm:@sentry/browser/latest'
# Destination folder for the generated loader registry files.
LOADER_FOLDER = os.path.abspath(os.path.join(os.path.dirname(sentry.__file__), 'loader'))
# We cannot leverage six here, so we need to vendor
# bits that we need: dict iteration and URL fetching differ between
# Python 2 and Python 3.
if sys.version_info[0] == 3:
    def iteritems(d, **kw):
        # Python 3: items() already returns a lazy view.
        return iter(d.items(**kw))
    from urllib.request import urlopen
else:
    def iteritems(d, **kw):
        # Python 2: native lazy iterator over items.
        return d.iteritems(**kw)  # NOQA
    from urllib2 import urlopen
def dump_registry(path, data):
    """Serialize *data* as pretty-printed JSON under LOADER_FOLDER.

    :param path: file name relative to LOADER_FOLDER, without extension
    :param data: JSON-serializable object
    """
    fn = os.path.join(LOADER_FOLDER, path + '.json')
    directory = os.path.dirname(fn)
    try:
        os.makedirs(directory)
    except OSError:
        # Directory already exists; any other problem surfaces on open().
        pass
    # BUGFIX: the file must be opened in text mode ('w', not 'wb'): on
    # Python 3, json.dump() and the trailing write() emit str, which a
    # binary stream rejects with TypeError.
    with open(fn, 'w') as f:
        json.dump(data, f, indent=2)
        f.write('\n')
def sync_registry():
    """Fetch the latest JS SDK metadata and write it to the local registry."""
    response = urlopen(JS_SDK_REGISTRY_URL)
    payload = json.loads(response.read().decode('utf-8'))
    dump_registry('_registry', payload)
from .base import BaseBuildCommand
class BuildJsSdkRegistryCommand(BaseBuildCommand):
    """distutils command that refreshes the bundled JS SDK registry."""

    description = 'build js sdk registry'

    def run(self):
        """Download the registry; failures are logged, not fatal."""
        log.info('downloading js sdk information from the release registry')
        try:
            sync_registry()
        except Exception:
            # Narrowed from BaseException so KeyboardInterrupt/SystemExit
            # still propagate; the sync itself stays best-effort.
            # Also fixes the "ocurred" typo in the log message.
            log.error('error occurred while trying to fetch js sdk information from the registry')
|
en
| 0.967024
|
# NOTE: This is run external to sentry as well as part of the setup # process. Thus we do not want to import non stdlib things here. # We cannot leverage six here, so we need to vendor # bits that we need. # NOQA
| 1.93818
| 2
|