text stringlengths 0 1.05M | meta dict |
|---|---|
__all__ = [
"t"
]
from Optizelle.Utility import *
from Optizelle.Properties import *
from Optizelle.Enumerated import *
def allocateVectors(self,X,x):
    """Reserve storage on the state for every vector the unconstrained
    algorithm uses, each initialized from x via the vector space X."""
    for name in ("x","grad","dx","x_old","grad_old","dx_old"):
        setattr(self,name,X.init(x))
class t(object):
    """Internal state of the optimization"""

    def __init__(self,X,x):
        """Constructor.

        X -- vector space (validated by checkVectorSpace)
        x -- initial point used as the template for allocating state vectors
        """
        # Check our arguments
        checkVectorSpace("X",X)
        # Allocate memory for our vectors
        allocateVectors(self,X,x)
        # Create the state
        UnconstrainedStateCreate(self,X,x)

    # Create all of the properties
    eps_grad = createFloatProperty(
        "eps_grad",
        "Tolerance for the gradient stopping condition")
    eps_dx = createFloatProperty(
        "eps_dx",
        "Tolerance for the step length stopping criteria")
    # NOTE(review): a duplicate of the algorithm_class property used to be
    # created here under the misspelled internal name "algorihm_class"; it was
    # immediately shadowed by the correctly spelled definition below, so the
    # dead duplicate has been removed.
    stored_history = createNatProperty(
        "stored_history",
        "Number of control objects to store in a quasi-Newton method")
    iter = createNatProperty(
        "iter",
        "Current iteration")
    iter_max = createNatProperty(
        "iter_max",
        "Maximum number of optimization iterations")
    glob_iter = createNatProperty(
        "glob_iter",
        "Globalization iteration")
    glob_iter_max = createNatProperty(
        "glob_iter_max",
        "Maximum number of globalization iterations before we quit")
    glob_iter_total = createNatProperty(
        "glob_iter_total",
        "Total number of globalization iterations taken")
    opt_stop = createEnumProperty(
        "opt_stop",
        OptimizationStop,
        "Why we've stopped the optimization")
    trunc_iter = createNatProperty(
        "trunc_iter",
        "Current number of truncated-CG iterations taken")
    trunc_iter_max = createNatProperty(
        "trunc_iter_max",
        "Maximum number of iterations used by truncated CG")
    trunc_iter_total = createNatProperty(
        "trunc_iter_total",
        "Total number of truncated-CG iterations taken")
    trunc_orthog_storage_max = createNatProperty(
        "trunc_orthog_storage_max",
        "Maximum number of vectors we orthogonalize against in truncated CG")
    trunc_orthog_iter_max = createNatProperty(
        "trunc_orthog_iter_max",
        "Maximum number of orthogonalization iterations in truncated CG")
    trunc_stop = createEnumProperty(
        "trunc_stop",
        TruncatedStop,
        "Why truncated CG was last stopped")
    trunc_err = createFloatProperty(
        "trunc_err",
        "Relative error in truncated CG")
    eps_trunc = createFloatProperty(
        "eps_trunc",
        "Stopping tolerance for truncated CG")
    algorithm_class = createEnumProperty(
        "algorithm_class",
        AlgorithmClass,
        "Algorithm class")
    PH_type = createEnumProperty(
        "PH_type",
        Operators,
        "Preconditioner for the Hessian")
    H_type = createEnumProperty(
        "H_type",
        Operators,
        "Hessian approximation")
    norm_gradtyp = createFloatProperty(
        "norm_gradtyp",
        # Fixed typo: "tradient" -> "gradient"
        "Norm of a typical gradient")
    norm_dxtyp = createFloatProperty(
        "norm_dxtyp",
        "Norm of a typical trial step")
    x = createVectorProperty(
        "x",
        "Optimization variable")
    grad = createVectorProperty(
        "grad",
        ("Gradient, possibly of the objective, possibly of the Lagrangian. "
        "It depends on the context."))
    dx = createVectorProperty(
        "dx",
        "Trial step")
    x_old = createVectorProperty(
        "x_old",
        "Old optimization variable")
    grad_old = createVectorProperty(
        "grad_old",
        "Old gradient")
    dx_old = createVectorProperty(
        "dx_old",
        "Old trial step")
    oldY = createVectorListProperty(
        "oldY",
        "Difference in prior gradients")
    oldS = createVectorListProperty(
        "oldS",
        "Difference in prior steps")
    f_x = createFloatProperty(
        "f_x",
        "Current value of the objective function")
    f_xpdx = createFloatProperty(
        "f_xpdx",
        "Objective function at the trial step")
    msg_level = createNatProperty(
        "msg_level",
        "Messaging level")
    safeguard_failed_max = createNatProperty(
        "safeguard_failed_max",
        "Number of failed safe-guard steps before quitting the method")
    safeguard_failed = createNatProperty(
        "safeguard_failed",
        "Number of failed safeguard steps during the last iteration")
    safeguard_failed_total = createNatProperty(
        "safeguard_failed_total",
        "Total number of failed safeguard steps")
    alpha_x = createFloatProperty(
        "alpha_x",
        # Fixed a doubled space introduced by the original line break
        ("Amount we truncate dx in order to maintain feasibility "
        "with respect to the safeguard, which probably relates to "
        "the inequality constraint"))
    alpha_x_qn = createFloatProperty(
        "alpha_x_qn",
        # Fixed typo: "inequailty" -> "inequality"
        ("Amount we truncate dx_n in order to maintain feasibility "
        "with respect to the safeguard, which probably relates to "
        "the inequality constraint"))
    delta = createFloatProperty(
        "delta",
        "Trust region radius")
    eta1 = createFloatProperty(
        "eta1",
        "Trust-region parameter for checking whether a step has been accepted")
    eta2 = createFloatProperty(
        "eta2",
        ("Trust-region parameter for checking whether we enlarge the "
        "trust-region radius"))
    ared = createFloatProperty(
        "ared",
        "Actual reduction")
    pred = createFloatProperty(
        "pred",
        "Predicted reduction")
    alpha0 = createFloatProperty(
        "alpha0",
        "Base line-search step length")
    alpha = createFloatProperty(
        "alpha",
        "Actual line-search step length")
    c1 = createFloatProperty(
        "c1",
        "Parameter that helps govern the sufficient decrease")
    ls_iter = createNatProperty(
        "ls_iter",
        "Current number of iterations used in the line-search")
    ls_iter_max = createNatProperty(
        "ls_iter_max",
        "Maximum number of iterations used in the line-search")
    ls_iter_total = createNatProperty(
        "ls_iter_total",
        "Total number of line-search iterations computed")
    eps_ls = createFloatProperty(
        "eps_ls",
        "Stopping tolerance for the line-search")
    dir = createEnumProperty(
        "dir",
        LineSearchDirection,
        "Search direction type")
    kind = createEnumProperty(
        "kind",
        LineSearchKind,
        "Type of line-search")
    f_diag = createEnumProperty(
        "f_diag",
        FunctionDiagnostics,
        "Function diagnostics on f")
    L_diag = createEnumProperty(
        "L_diag",
        FunctionDiagnostics,
        "Function diagnostics on the Lagrangian")
    x_diag = createEnumProperty(
        "x_diag",
        VectorSpaceDiagnostics,
        "Vector space diagnostics on X")
    dscheme = createEnumProperty(
        "dscheme",
        DiagnosticScheme,
        "Diagnostic scheme")
    eps_kind = createEnumProperty(
        "eps_kind",
        ToleranceKind,
        "Kind of stopping tolerance")
def checkT(name,value):
    """Check that we have a state"""
    # Guard clause: anything that is (a subclass of) t is acceptable
    if issubclass(type(value),t):
        return
    raise TypeError(
        "The %s argument must have type Unconstrained.State.t."
        % (name))
| {
"repo_name": "OptimoJoe/Optizelle",
"path": "src/python/Optizelle/Unconstrained/State.py",
"copies": "1",
"size": "7613",
"license": "bsd-2-clause",
"hash": 5174906473665832000,
"line_mean": 31.6738197425,
"line_max": 79,
"alpha_frac": 0.6203861815,
"autogenerated": false,
"ratio": 4.084227467811159,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5204613649311158,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"t"
]
import Optizelle.Unconstrained.Functions
from Optizelle.Properties import *
from Optizelle.Functions import *
class t(Optizelle.Unconstrained.Functions.t):
    """All the functions required by an optimization algorithm"""

    def __init__(self):
        # Build the unconstrained function bundle first
        super(t,self).__init__()
        # Default containers for the equality-constrained pieces
        self._g = VectorValuedFunction()
        self._PSchur_left = Operator()
        self._PSchur_right = Operator()

    # Create all of the properties
    g = createVectorValuedFunctionProperty(
        "g",
        "Equality constraints")
    PSchur_left = createOperatorProperty(
        "PSchur_left",
        "Left preconditioner for the augmented system")
    PSchur_right = createOperatorProperty(
        "PSchur_right",
        "Right preconditioner for the augmented system")
def checkT(name,value):
    """Check that we have a bundle of functions"""
    # Guard clause: anything that is (a subclass of) t is acceptable
    if issubclass(type(value),t):
        return
    raise TypeError(
        "The %s argument must have type EqualityConstrained.Functions.t."
        % (name))
| {
"repo_name": "OptimoJoe/Optizelle",
"path": "src/python/Optizelle/EqualityConstrained/Functions.py",
"copies": "1",
"size": "1026",
"license": "bsd-2-clause",
"hash": -7977901696843889000,
"line_mean": 30.0909090909,
"line_max": 77,
"alpha_frac": 0.6530214425,
"autogenerated": false,
"ratio": 3.7859778597785976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4938999302278598,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"t"
]
import Optizelle.Unconstrained.State
from Optizelle.Utility import *
from Optizelle.Properties import *
from Optizelle.Enumerated import *
def allocateVectors(self,X,Y,x,y):
    """Reserve storage on the state for the equality-constrained vectors:
    codomain (Y) vectors are initialized from y, domain (X) vectors from x."""
    # Vectors living in the codomain Y
    for name in ("y","dy","g_x","gpxdxn_p_gx","gpxdxt"):
        setattr(self,name,Y.init(y))
    # Vectors living in the domain X
    for name in ("dx_n","dx_ncp","dx_t","dx_t_uncorrected",
                 "dx_tcp_uncorrected","H_dxn","W_gradpHdxn",
                 "H_dxtuncorrected"):
        setattr(self,name,X.init(x))
class t(Optizelle.Unconstrained.State.t):
    """Internal state of the optimization"""

    def __init__(self,X,Y,x,y):
        """Constructor.

        X, Y -- vector spaces for the primal variable and the constraints
        x, y -- initial primal point and equality multiplier used as
                templates for allocating the state vectors
        """
        # Check our arguments
        checkVectorSpace("X",X)
        checkVectorSpace("Y",Y)
        # Allocate memory for our vectors
        Optizelle.Unconstrained.State.allocateVectors(self,X,x)
        allocateVectors(self,X,Y,x,y)
        # Create the state
        EqualityConstrainedStateCreate(self,X,Y,x,y)

    # Create all of the properties
    y = createVectorProperty(
        "y",
        "Equality multiplier (dual variable or Lagrange multiplier)")
    dy = createVectorProperty(
        "dy",
        "Step in the equality multiplier")
    zeta = createFloatProperty(
        "zeta",
        "The fraction of the total trust-region used for the quasi-norm step")
    eta0 = createFloatProperty(
        "eta0",
        ("Trust-region parameter that bounds the error in the predicted "
        "reduction"))
    rho = createFloatProperty(
        "rho",
        "Penalty parameter for the augmented-Lagrangian")
    rho_old = createFloatProperty(
        "rho_old",
        "Penalty parameter from the last iteration")
    rho_bar = createFloatProperty(
        "rho_bar",
        "Fixed increase in the penalty parameter")
    eps_constr = createFloatProperty(
        "eps_constr",
        "Stopping tolerance for the norm of the constraints")
    xi_qn = createFloatProperty(
        "xi_qn",
        "Inexactness tolerance for the quasi-Newton step")
    xi_pg = createFloatProperty(
        "xi_pg",
        "Inexactness tolerance for the projection of the gradient")
    xi_proj = createFloatProperty(
        "xi_proj",
        "Inexactness tolerance for the null-space projection")
    xi_tang = createFloatProperty(
        "xi_tang",
        "Inexactness tolerance for the tangential step")
    xi_lmh = createFloatProperty(
        "xi_lmh",
        "Inexactness tolerance for the equality multiplier")

    def xi_all(self,value):
        """Sets all the inexactness tolerances: xi_qn, xi_pg, xi_proj, xi_tang, and xi_lmh"""
        self.xi_qn=value
        self.xi_pg=value
        self.xi_proj=value
        self.xi_tang=value
        self.xi_lmh=value

    xi_lmg = createFloatProperty(
        "xi_lmg",
        "Absolute tolerance on the residual of the equality multiplier solve")
    xi_4 = createFloatProperty(
        "xi_4",
        ("Tolerance for how much error is acceptable after computing the "
        "tangential step given the result from the tangential subproblem"))
    rpred = createFloatProperty(
        "rpred",
        "Residual term in the predicted reduction")
    PSchur_left_type = createEnumProperty(
        "PSchur_left_type",
        Operators,
        "Left preconditioner for the augmented system")
    PSchur_right_type = createEnumProperty(
        "PSchur_right_type",
        Operators,
        "Right preconditioner for the augmented system")
    augsys_iter_max = createNatProperty(
        "augsys_iter_max",
        "Maximum number of iterations used when solving the augmented system")
    augsys_rst_freq = createNatProperty(
        "augsys_rst_freq",
        "How often we restart the augmented system solve")
    augsys_qn_iter = createNatProperty(
        "augsys_qn_iter",
        ("Number of augmented system solve iterations used on the quasi-normal "
        "step"))
    augsys_pg_iter = createNatProperty(
        "augsys_pg_iter",
        ("Number of augmented system solve iterations used when projecting the "
        "gradient prior to the tangential subproblem"))
    augsys_proj_iter = createNatProperty(
        "augsys_proj_iter",
        ("Number of augmented system solve iterations used in the nullspace "
        "projection inside the tangential subproblem"))
    augsys_tang_iter = createNatProperty(
        "augsys_tang_iter",
        ("Number of augmented system solve iterations used in the tangential "
        "step"))
    augsys_lmh_iter = createNatProperty(
        "augsys_lmh_iter",
        ("Number of augmented system solve iterations used in the equality "
        "multiplier solve"))
    augsys_qn_iter_total = createNatProperty(
        "augsys_qn_iter_total",
        ("Total number of augmented system solve iterations used on the "
        "quasi-normal step"))
    augsys_pg_iter_total = createNatProperty(
        "augsys_pg_iter_total",
        ("Total number of augmented system solve iterations used when "
        "projecting the gradient prior to the tangential subproblem"))
    augsys_proj_iter_total = createNatProperty(
        "augsys_proj_iter_total",
        ("Total number of augmented system solve iterations used in the "
        "nullspace projection inside the tangential subproblem"))
    augsys_tang_iter_total = createNatProperty(
        "augsys_tang_iter_total",
        ("Total number of augmented system solve iterations used in the "
        "tangential step"))
    augsys_lmh_iter_total = createNatProperty(
        "augsys_lmh_iter_total",
        ("Total number of augmented system solve iterations used in the "
        "equality multiplier solve"))
    augsys_qn_err = createFloatProperty(
        "augsys_qn_err",
        ("Error in the augmented system solve used on the "
        "quasi-normal step"))
    augsys_pg_err = createFloatProperty(
        "augsys_pg_err",
        ("Error in the augmented system solve used when "
        "projecting the gradient prior to the tangential subproblem"))
    augsys_proj_err = createFloatProperty(
        "augsys_proj_err",
        ("Error in the augmented system solve used in the "
        "nullspace projection inside the tangential subproblem"))
    augsys_tang_err = createFloatProperty(
        "augsys_tang_err",
        ("Error in the augmented system solve used in the "
        "tangential step"))
    augsys_lmh_err = createFloatProperty(
        "augsys_lmh_err",
        ("Error in the augmented system solve used in the "
        "equality multiplier solve"))
    augsys_qn_err_target = createFloatProperty(
        "augsys_qn_err_target",
        ("Target error in the augmented system solve used on the "
        "quasi-normal step"))
    augsys_pg_err_target = createFloatProperty(
        "augsys_pg_err_target",
        ("Target error in the augmented system solve used when "
        "projecting the gradient prior to the tangential subproblem"))
    augsys_proj_err_target = createFloatProperty(
        "augsys_proj_err_target",
        ("Target error in the augmented system solve used in the "
        "nullspace projection inside the tangential subproblem"))
    augsys_tang_err_target = createFloatProperty(
        "augsys_tang_err_target",
        ("Target error in the augmented system solve used in the "
        "tangential step"))
    augsys_lmh_err_target = createFloatProperty(
        "augsys_lmh_err_target",
        ("Target error in the augmented system solve used in the "
        "equality multiplier solve"))
    augsys_iter_total = createNatProperty(
        "augsys_iter_total",
        ("Total number of augmented system solve iterations used in all "
        "solves"))
    augsys_qn_failed = createNatProperty(
        "augsys_qn_failed",
        "Number of failed quasinormal augmented system solves")
    augsys_pg_failed = createNatProperty(
        "augsys_pg_failed",
        "Number of failed projected gradient augmented system solves")
    augsys_proj_failed = createNatProperty(
        "augsys_proj_failed",
        "Number of failed nullspace projection augmented system solves")
    augsys_tang_failed = createNatProperty(
        "augsys_tang_failed",
        "Number of failed tangential step augmented system solves")
    augsys_lmh_failed = createNatProperty(
        "augsys_lmh_failed",
        "Number of failed equality multiplier augmented system solves")
    augsys_failed_total = createNatProperty(
        "augsys_failed_total",
        "Total number of failed augmented system solves")
    g_x = createVectorProperty(
        "g_x",
        # Fixed typo: "direciton" -> "direction"
        ("Equality constraint evaluated at x. This is used in the quasinormal "
        "step as well as in the computation of the linear Taylor series at x "
        "in the direction dx_n."))
    norm_gxtyp = createFloatProperty(
        "norm_gxtyp",
        ("A typical norm for g(x). Generally, we just take the value at "
        "the first iteration."))
    norm_gpsgxtyp = createFloatProperty(
        "norm_gpsgxtyp",
        ("A typical norm for g'(x)*g(x). Generally, we just take "
        "the value at the first iteration."))
    gpxdxn_p_gx = createVectorProperty(
        "gpxdxn_p_gx",
        ("Linear Taylor series at x in the direction dx_n. This is used both "
        "in the predicted reduction as well as the residual predicted "
        "reduction."))
    gpxdxt = createVectorProperty(
        "gpxdxt",
        ("Derivative of the constraint applied to the tangential step this is "
        "used in the residual predicted reduction."))
    # Fixed: this quantity is a norm (a scalar), so it must be a float
    # property rather than a vector property.
    norm_gpxdxnpgx = createFloatProperty(
        "norm_gpxdxnpgx",
        ("Norm of gpxdxn_p_gx. This is used in the penalty parameter "
        "computation and predicted reduction."))
    dx_n = createVectorProperty(
        "dx_n",
        "Normal step")
    dx_ncp = createVectorProperty(
        "dx_ncp",
        "Cauchy point for normal step")
    dx_t = createVectorProperty(
        "dx_t",
        "(Corrected) tangential step")
    dx_t_uncorrected = createVectorProperty(
        "dx_t_uncorrected",
        "Tangential step prior to correction")
    dx_tcp_uncorrected = createVectorProperty(
        "dx_tcp_uncorrected",
        "Cauchy point for tangential step prior to correction")
    H_dxn = createVectorProperty(
        "H_dxn",
        ("Hessian applied to the normal step. This is required by W_gradpHdxn "
        "as well as the predicted reduction."))
    W_gradpHdxn = createVectorProperty(
        "W_gradpHdxn",
        # Fixed: a stray trailing comma after the first fragment made this
        # description a tuple instead of one concatenated string.
        ("Quantity grad f(x) + g'(x)*y + H dx_n projected into the null-space "
        "of the constraints. This is required in the tangential subproblem "
        "and the predicted reduction."))
    H_dxtuncorrected = createVectorProperty(
        "H_dxtuncorrected",
        ("Hessian applied to the uncorrected tangential step. This is needed "
        "in the predicted reduction."))
    g_diag = createEnumProperty(
        "g_diag",
        FunctionDiagnostics,
        "Function diagnostics on g")
    y_diag = createEnumProperty(
        "y_diag",
        VectorSpaceDiagnostics,
        "Vector space diagnostics on Y")
    qn_stop = createEnumProperty(
        "qn_stop",
        QuasinormalStop,
        "Reason why the quasinormal problem exited")
def checkT(name,value):
    """Check that we have a state"""
    # Guard clause: anything that is (a subclass of) t is acceptable
    if issubclass(type(value),t):
        return
    raise TypeError(
        "The %s argument must have type EqualityConstrained.State.t."
        % (name))
| {
"repo_name": "OptimoJoe/Optizelle",
"path": "src/python/Optizelle/EqualityConstrained/State.py",
"copies": "1",
"size": "11573",
"license": "bsd-2-clause",
"hash": 4637180416278640000,
"line_mean": 39.044982699,
"line_max": 93,
"alpha_frac": 0.6446902273,
"autogenerated": false,
"ratio": 3.691547049441786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4836237276741786,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"t"
]
import Optizelle.Unconstrained.State
from Optizelle.Utility import *
from Optizelle.Properties import *
from Optizelle.Enumerated import *
def allocateVectors(self,X,Z,x,z):
    """Reserve storage on the state for the inequality-constrained vectors.

    Only the Z-space vectors are allocated here; X and x are accepted for
    signature consistency with the other allocateVectors helpers.
    """
    for name in ("z","dz","h_x"):
        setattr(self,name,Z.init(z))
class t(Optizelle.Unconstrained.State.t):
    """Internal state of the optimization"""

    def __init__(self,X,Z,x,z):
        """Constructor.

        X, Z -- vector spaces for the primal variable and the inequality
                constraints (Z is validated as a Euclidean space)
        x, z -- initial primal point and inequality multiplier used as
                templates for allocating the state vectors
        """
        # Check our arguments
        checkVectorSpace("X",X)
        checkEuclidean("Z",Z)
        # Allocate memory for our vectors
        Optizelle.Unconstrained.State.allocateVectors(self,X,x)
        allocateVectors(self,X,Z,x,z)
        # Create the state
        InequalityConstrainedStateCreate(self,X,Z,x,z)

    # Create all of the properties
    z = createVectorProperty(
        "z",
        "Inequality multiplier (dual variable or Lagrange multiplier)")
    dz = createVectorProperty(
        "dz",
        "Step in the inequality multiplier")
    h_x = createVectorProperty(
        "h_x",
        "The inequality constraint evaluated at x.")
    mu = createFloatProperty(
        "mu",
        "Interior point parameter")
    mu_est = createFloatProperty(
        "mu_est",
        "Current interior point estimate")
    mu_typ = createFloatProperty(
        "mu_typ",
        "Typical value for mu. Generally, the first estimated value for mu.")
    eps_mu = createFloatProperty(
        "eps_mu",
        "Relative stopping criteria for the interior point parameter")
    sigma = createFloatProperty(
        "sigma",
        # Fixed typo: "everytime" -> "every time"
        ("The amount that we reduce the interior point parameter by every "
        "time we approach the central path"))
    gamma = createFloatProperty(
        "gamma",
        "How close we move to the boundary during a single step")
    alpha_z = createFloatProperty(
        "alpha_z",
        # Fixed a doubled space introduced by the original line break
        ("Amount we truncate dx in order to maintain feasibility "
        "of the inequality multiplier"))
    h_diag = createEnumProperty(
        "h_diag",
        FunctionDiagnostics,
        "Function diagnostics on h")
    z_diag = createEnumProperty(
        "z_diag",
        VectorSpaceDiagnostics,
        "Vector space diagnostics on Z")
def checkT(name,value):
    """Check that we have a state"""
    # Guard clause: anything that is (a subclass of) t is acceptable
    if issubclass(type(value),t):
        return
    raise TypeError(
        "The %s argument must have type InequalityConstrained.State.t."
        % (name))
| {
"repo_name": "OptimoJoe/Optizelle",
"path": "src/python/Optizelle/InequalityConstrained/State.py",
"copies": "1",
"size": "2465",
"license": "bsd-2-clause",
"hash": -8112444528863901000,
"line_mean": 29.8125,
"line_max": 79,
"alpha_frac": 0.6267748479,
"autogenerated": false,
"ratio": 3.8098918083462134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49366666562462136,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'to_issue_dict'
]
def to_issue_dict(issue):
    """
    :rtype: ``dict``
    """
    fields = issue.fields
    # permalink() yields "<url> - <title>"; keep only the URL part
    url = issue.permalink().split(' - ', 1)[0]
    resolution = fields.resolution.name if fields.resolution else None
    reporter = fields.reporter.displayName if fields.reporter else None
    assignee = fields.assignee.displayName if fields.assignee else None
    return {
        'id': issue.id,
        'key': issue.key,
        'url': url,
        'summary': fields.summary,
        'description': fields.description,
        'status': fields.status.name,
        'resolution': resolution,
        'labels': fields.labels,
        'reporter': reporter,
        'assignee': assignee,
        'created_at': fields.created,
        'updated_at': fields.updated,
        'resolved_at': fields.resolutiondate
    }
| {
"repo_name": "pinterb/st2contrib",
"path": "packs/jira/actions/lib/formatters.py",
"copies": "5",
"size": "1035",
"license": "apache-2.0",
"hash": 240800907307637470,
"line_mean": 23.0697674419,
"line_max": 52,
"alpha_frac": 0.5748792271,
"autogenerated": false,
"ratio": 3.791208791208791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6866088018308791,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'to_issue_dict',
'to_comment_dict'
]
def to_issue_dict(issue):
    """
    :rtype: ``dict``
    """
    fields = issue.fields
    # permalink() yields "<url> - <title>"; keep only the URL part
    url = issue.permalink().split(' - ', 1)[0]
    resolution = fields.resolution.name if fields.resolution else None
    reporter = fields.reporter.displayName if fields.reporter else None
    assignee = fields.assignee.displayName if fields.assignee else None
    return {
        'id': issue.id,
        'key': issue.key,
        'url': url,
        'summary': fields.summary,
        'description': fields.description,
        'status': fields.status.name,
        'resolution': resolution,
        'labels': fields.labels,
        'reporter': reporter,
        'assignee': assignee,
        'created_at': fields.created,
        'updated_at': fields.updated,
        'resolved_at': fields.resolutiondate
    }
def to_comment_dict(comment):
    """
    :rtype: ``dict``
    """
    return {'id': comment.id, 'body': comment.body}
| {
"repo_name": "psychopenguin/st2contrib",
"path": "packs/jira/actions/lib/formatters.py",
"copies": "7",
"size": "1221",
"license": "apache-2.0",
"hash": 4566777338414358000,
"line_mean": 21.2,
"line_max": 52,
"alpha_frac": 0.5601965602,
"autogenerated": false,
"ratio": 3.7339449541284404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.779414151432844,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'TokenList',
)
import collections
import collections.abc

from .errors import TokenTypeError
# Fixed: the ABC aliases collections.Sized/Iterable/Container were removed in
# Python 3.10; the canonical home is collections.abc.
class TokenList(collections.abc.Sized, collections.abc.Iterable, collections.abc.Container):
    """A multiset of tokens, optionally restricted to a single type.

    Tokens are stored in a deque: ``add`` appends on the right and ``pop``
    removes from the left, so tokens come back out in insertion order.
    """

    def __init__(self, init=None, *, token_type=None):
        """Create a list, optionally filled from *init* and typed by *token_type*.

        A token_type of None means "accept anything".
        """
        if token_type is None:
            token_type = object
        self._token_type = token_type
        self._tokens = collections.deque()
        if init:
            if not hasattr(init, '__iter__'):
                raise TypeError("invalid value {!r}: not an iterable".format(init))
            for token in init:
                self.add(token)

    @property
    def token_type(self):
        """The type every token must be an instance of."""
        return self._token_type

    def add(self, token, *, count=1):
        """Add *token* to the list *count* times.

        Raises TokenTypeError if *token* is not an instance of token_type.
        """
        if not isinstance(token, self._token_type):
            raise TokenTypeError("invalid token {!r}: type is not {}".format(token, self._token_type.__name__))
        for _ in range(count):
            self._tokens.append(token)

    def pop(self):
        """Remove and return the oldest token (FIFO order)."""
        return self._tokens.popleft()

    def remove(self, token):
        """Remove the first occurrence of *token*, compared by identity.

        Silently does nothing when *token* is not present.  Identity (``is``)
        rather than equality is used deliberately, so equal-but-distinct
        tokens are left alone.
        """
        for c, t in enumerate(self._tokens):
            if t is token:
                break
        else:
            return
        del self._tokens[c]

    def copy(self):
        """Return a shallow copy with the same token type."""
        return self.__class__(init=self, token_type=self.token_type)

    def __iter__(self):
        yield from self._tokens

    def __len__(self):
        return len(self._tokens)

    def clear(self):
        """Remove every token."""
        self._tokens.clear()

    def extend(self, values):
        """Add every token in *values*, type-checking unless untyped."""
        if self._token_type is object:
            # Untyped list: no per-token check needed, extend in bulk
            self._tokens.extend(values)
        else:
            for value in values:
                self.add(value)

    def __contains__(self, value):
        return value in self._tokens

    def __repr__(self):
        args = []
        if self:
            args.append(repr(list(self._tokens)))
        if self._token_type is not object:
            args.append("token_type={}".format(self._token_type.__name__))
        return "{}({})".format(self.__class__.__name__, ', '.join(args))

    def __eq__(self, other):
        # Two TokenLists compare by token type and contents; any other sized
        # iterable compares elementwise.
        if isinstance(other, TokenList):
            if self._token_type != other.token_type:
                return False
            return self._tokens == other._tokens
        else:
            if len(self._tokens) != len(other):
                return False
            for a, b in zip(self._tokens, other):
                if a != b:
                    return False
            return True
| {
"repo_name": "simone-campagna/petra",
"path": "petra/token_list.py",
"copies": "1",
"size": "2481",
"license": "apache-2.0",
"hash": 223850138077393380,
"line_mean": 28.1882352941,
"line_max": 111,
"alpha_frac": 0.5272067715,
"autogenerated": false,
"ratio": 4.248287671232877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00609114015976761,
"num_lines": 85
} |
__all__ = [
'TopoReader',
'GravObsReader',
'GravGradReader',
'MagObsReader',
'GeologyMapper',
]
__displayname__ = 'General Tools'
import numpy as np
import pandas as pd
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from .. import _helpers, interface
from ..base import FilterPreserveTypeBase
from ..readers import DelimitedPointsReaderBase
###############################################################################
class TopoReader(DelimitedPointsReaderBase):
    """A reader to handle .topo files in UBC format to create a topography
    surface.
    """
    __displayname__ = 'GIF Topo Reader'
    __category__ = 'reader'
    extensions = 'topo txt dat'
    description = 'PVGeo: UBC 3D Topo Files'

    def __init__(self, copy_z=True, **kwargs):
        DelimitedPointsReaderBase.__init__(self, copy_z=copy_z, **kwargs)
        # Topo files carry no column titles and are whitespace delimited
        self.set_has_titles(False)
        self.set_split_on_white_space(True)
        self.__3d = True  # TODO: handle 2D topo files as well
        self.__npts = None

    def _extract_header(self, content):
        """Internal helper to parse header details for UBC Topo files: the
        first line declares the point count; data rows hold exactly X, Y, Z."""
        self.__npts = int(content[0].strip())
        ncols = len(content[1].split(self._get_delimiter()))
        if ncols != 3:
            raise _helpers.PVGeoError('Data improperly formatted')
        return ['X', 'Y', 'Z'], content[1::]
###############################################################################
class GravObsReader(DelimitedPointsReaderBase):
    """Read `GIF Gravity Observations`_ file.

    .. _GIF Gravity Observations: https://giftoolscookbook.readthedocs.io/en/latest/content/fileFormats/gravfile.html
    """
    __displayname__ = 'UBC Gravity Observations'
    __category__ = 'reader'
    extensions = 'grv txt dat'
    description = 'PVGeo: GIF Gravity Observations'

    def __init__(self, **kwargs):
        DelimitedPointsReaderBase.__init__(self, **kwargs)
        # These files carry no column titles and are whitespace delimited
        self.set_has_titles(False)
        self.set_split_on_white_space(True)
        self.__npts = None

    def _extract_header(self, content):
        """Internal helper to parse header details for UBC Gravity
        Observation files: line one is the point count; each data row holds
        X, Y, Z, Grav, Err."""
        self.__npts = int(content[0].strip())
        ncols = len(content[1].split(self._get_delimiter()))
        if ncols != 5:
            raise _helpers.PVGeoError('Data improperly formatted')
        return ['X', 'Y', 'Z', 'Grav', 'Err'], content[1::]
###############################################################################
class GravGradReader(DelimitedPointsReaderBase):
    """Read `GIF Gravity Gradiometry Observations`_ file.

    .. _GIF Gravity Gradiometry Observations: https://giftoolscookbook.readthedocs.io/en/latest/content/fileFormats/ggfile.html
    """
    __displayname__ = 'GIF Gravity Gradiometry Observations'
    __category__ = 'reader'
    extensions = 'grv gg txt dat'
    description = 'PVGeo: GIF Gravity Gradiometry Observations'

    def __init__(self, **kwargs):
        DelimitedPointsReaderBase.__init__(self, **kwargs)
        # These files carry no column titles and are whitespace delimited
        self.set_has_titles(False)
        self.set_split_on_white_space(True)
        self.__npts = None

    def _extract_header(self, content):
        """Internal helper to parse header details for UBC Gravity
        Gradiometry files."""
        # Line one names the measured components ("datacomp=c1,c2,...")
        comps = content[0].split('=')[1].split(',')
        # Line two declares the point count
        self.__npts = int(content[1].strip())
        titles = ['X', 'Y', 'Z'] + comps
        # Data rows either match the titles exactly or additionally carry one
        # standard-deviation column per component
        num = len(content[2].split(self._get_delimiter()))
        if num != len(titles):
            if num != (len(titles) + len(comps)):
                raise _helpers.PVGeoError('Data improperly formatted')
            titles.extend('Stn_%s' % c for c in comps)
        return titles, content[2::]
###############################################################################
class MagObsReader(DelimitedPointsReaderBase):
    """Read `GIF Magnetic Observations`_ file.

    .. _GIF Magnetic Observations: https://giftoolscookbook.readthedocs.io/en/latest/content/fileFormats/magfile.html
    """
    __displayname__ = 'UBC Magnetic Observations'
    __category__ = 'reader'
    extensions = 'mag loc txt dat pre'
    description = 'PVGeo: GIF Magnetic Observations'

    def __init__(self, **kwargs):
        DelimitedPointsReaderBase.__init__(self, **kwargs)
        # These files carry no column titles and are whitespace delimited
        self.set_has_titles(False)
        self.set_split_on_white_space(True)
        self.__npts = None
        # Inducing field parameters (header line one)
        self.__incl = None
        self.__decl = None
        self.__geomag = None
        # Anomaly projection parameters (header line two)
        self.__ainc = None
        self.__adec = None
        self.__dir = None

    def _extract_header(self, content):
        """Internal helper to parse header details for UBC Magnetic
        Observations files."""
        delim = self._get_delimiter()
        # Header line one: inclination, declination, geomagnetic intensity
        self.__incl, self.__decl, self.__geomag = (
            float(val) for val in content[0].split(delim)
        )
        # Header line two: anomaly inclination, declination, direction
        self.__ainc, self.__adec, self.__dir = (
            float(val) for val in content[1].split(delim)
        )
        # Header line three: the declared point count
        self.__npts = int(content[2].strip())
        # The width of the first data row decides the column layout
        ncols = len(content[3].split(delim))
        if ncols == 3:  # just locations
            self.set_copy_z(True)
            titles = ['X', 'Y', 'Z']
        elif ncols == 4:  # single component
            titles = ['X', 'Y', 'Z', 'Mag']
        elif ncols == 5:  # single component with error
            titles = ['X', 'Y', 'Z', 'Mag', 'Err']
        elif ncols == 7:  # multi component
            titles = ['X', 'Y', 'Z', 'ainc_1', 'ainc_2', 'Mag', 'Err']
        else:
            raise _helpers.PVGeoError('Data improperly formatted.')
        return titles, content[3::]

    @staticmethod
    def convert_vector(incl, decl, mag=1):
        """Converts inclination, declination, and magnitude to an XYZ vector"""
        i = np.deg2rad(incl)
        d = np.deg2rad(decl)
        horiz = mag * np.cos(i)
        return (horiz * np.cos(d), horiz * np.sin(d), mag * np.sin(i))

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to get data for current timestep and populate the
        output data object.
        """
        # The parent reads the point data itself
        DelimitedPointsReaderBase.RequestData(self, request, inInfo, outInfo)
        output = self.GetOutputData(outInfo, 0)

        def attach_field_vector(name, incl, decl, mag=1):
            # Store one 3-component tuple as named field data on the output
            arr = vtk.vtkDoubleArray()
            arr.SetName(name)
            arr.SetNumberOfComponents(3)
            arr.InsertNextTuple3(*self.convert_vector(incl, decl, mag=mag))
            output.GetFieldData().AddArray(arr)

        # Inducing magnetic field from the first header line
        attach_field_vector('Inducing Magnetic Field',
                            self.__incl, self.__decl, mag=self.__geomag)
        # Inclination/declination of the anomaly projection (second line)
        attach_field_vector('Anomaly Projection', self.__ainc, self.__adec)
        return 1
###############################################################################
class GeologyMapper(FilterPreserveTypeBase):
    """A filter to load a GIF geology definition file and map its values to a
    given data array in an input data object.
    """
    __displayname__ = 'UBC Geology Mapper'
    __category__ = 'filter'
    description = 'PVGeo: UBC Geology Mapper'

    def __init__(self, filename=None, delimiter=',', **kwargs):
        FilterPreserveTypeBase.__init__(self, **kwargs)
        self.__filename = filename  # path to the geology definition file
        self.__deli = delimiter  # column delimiter of that file
        self.__input_array = [None, None]  # [field association, array name]

    @staticmethod
    def _read_definitions(filename, delimiter):
        """Read the geology definition file as a pandas DataFrame."""
        return pd.read_csv(filename, sep=delimiter)

    @staticmethod
    def _map_values(geol, arr):
        """Map the values defined by the ``geol`` dataframe to the values in
        ``arr``. The first column (name should be ``Index``) is used as the
        mapping key.
        """
        # BUG FIX: ``DataFrame.set_index`` returns a *new* frame (it is not
        # in-place); the original discarded the result and then indexed
        # positionally, silently ignoring the ``Index`` column. Set the
        # index and look rows up by label, as the docstring promises.
        geol = geol.set_index(geol.keys()[0])
        # TODO: check that geol table contains all indices found in arr
        return geol.loc[arr]

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        pdo = self.GetOutputData(outInfo, 0)
        # Get input array selected by SetInputArrayToProcess
        field, name = self.__input_array[0], self.__input_array[1]
        wpdi = dsa.WrapDataObject(pdi)
        arr = _helpers.get_numpy_array(wpdi, field, name)
        # Map each value through the geology definition table
        geol = self._read_definitions(self.__filename, self.__deli)
        data = self._map_values(geol, arr)
        # Preserve the input dataset and append the mapped columns
        pdo.DeepCopy(pdi)
        interface.add_arrays_from_data_frame(pdo, field, data)
        return 1

    #### Setters and Getters ####

    def SetInputArrayToProcess(self, idx, port, connection, field, name):
        """Used to set the input array(s)

        Args:
            idx (int): the index of the array to process
            port (int): input port (use 0 if unsure)
            connection (int): the connection on the port (use 0 if unsure)
            field (int): the array field (0 for points, 1 for cells, 2 for
                field, and 6 for row)
            name (str): the name of the array
        """
        if self.__input_array[0] != field:
            self.__input_array[0] = field
            self.Modified()
        if self.__input_array[1] != name:
            self.__input_array[1] = name
            self.Modified()
        return 1

    def SetFileName(self, filename):
        """Set the file name to read"""
        if self.__filename != filename:
            self.__filename = filename
            self.Modified()

    def set_file_name(self, filename):
        """Set the file name to read (snake_case alias of ``SetFileName``)"""
        return self.SetFileName(filename)

    def set_delimiter(self, deli):
        """Set the delimiter of the ASCII file"""
        if self.__deli != deli:
            self.__deli = deli
            self.Modified()
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/ubc/general.py",
"copies": "1",
"size": "10897",
"license": "bsd-3-clause",
"hash": 6850066694521328000,
"line_mean": 34.8453947368,
"line_max": 127,
"alpha_frac": 0.5795172983,
"autogenerated": false,
"ratio": 3.875177809388336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9951839140147563,
"avg_score": 0.0005711935081546725,
"num_lines": 304
} |
__all__ = (
'translate_to_snakes',
)
from petra.annotation import Vars, InputFlush, Test, Inhibitor, OutputFlush, Expr
from petra.errors import TranslationError
import snakes.nets as snakes_module
def _input_annotation_to_snakes(snakes, annotation):
    """Translate a petra input-arc annotation into its snakes equivalent."""
    if isinstance(annotation, Inhibitor):
        return snakes.Inhibitor()
    if isinstance(annotation, InputFlush):
        inner = _input_annotation_to_snakes(snakes, annotation.wrapped_annotation)
        return snakes.Flush(inner)
    if isinstance(annotation, Test):
        inner = _input_annotation_to_snakes(snakes, annotation.wrapped_annotation)
        return snakes.Test(inner)
    if isinstance(annotation, Vars):
        names = annotation.variables()
        if len(names) > 1:
            return snakes.MultiArc([snakes.Variable(name) for name in names])
        return snakes.Variable(names[0])
    raise TranslationError("cannot translate {!r} to snakes".format(annotation))
def _output_annotation_to_snakes(snakes, annotation):
    """Translate a petra output-arc annotation into its snakes equivalent."""
    if isinstance(annotation, Expr):
        return snakes.Expression(str(annotation.expression))
    if isinstance(annotation, OutputFlush):
        inner = _output_annotation_to_snakes(snakes, annotation.wrapped_annotation)
        return snakes.Flush(inner)
    raise TranslationError("cannot translate {!r} to snakes".format(annotation))
def translate_to_snakes(net, snakes=None):
    """Build a ``snakes.PetriNet`` equivalent of the petra net *net*.

    *snakes* may supply an alternative snakes-compatible module (defaults
    to ``snakes.nets``).
    """
    if snakes is None:
        snakes = snakes_module
    s_net = snakes.PetriNet(net.name)
    ### add places:
    for place in net.places():
        s_net.add_place(snakes.Place(place.name, tuple(place.tokens)))
    ### add transitions and arcs:
    for transition in net.transitions():
        # Collect guards from input-arc annotations, then the transition's own.
        guards = []
        for input_arc in transition.input_arcs():
            arc_guard = getattr(input_arc.annotation, 'guard', None)
            if arc_guard is not None:
                guards.append(arc_guard)
        own_guard = getattr(transition, 'guard', None)
        if own_guard is not None:
            guards.append(own_guard)
        if guards:
            combined = snakes.Expression(str(guards[0]))
            for extra in guards[1:]:
                combined = combined & snakes.Expression(str(extra))
        else:
            combined = None
        # add transition:
        s_net.add_transition(snakes.Transition(transition.name, combined))
        # add input arcs (place -> transition):
        for arc in transition.input_arcs():
            s_net.add_input(arc.source.name, arc.target.name,
                            _input_annotation_to_snakes(snakes, arc.annotation))
        # add output arcs (snakes wants the place name first):
        for arc in transition.output_arcs():
            s_net.add_output(arc.target.name, arc.source.name,
                             _output_annotation_to_snakes(snakes, arc.annotation))
    return s_net
| {
"repo_name": "simone-campagna/petra",
"path": "petra/utils/translate.py",
"copies": "1",
"size": "2917",
"license": "apache-2.0",
"hash": 2157462448963947500,
"line_mean": 37.3815789474,
"line_max": 116,
"alpha_frac": 0.6465546795,
"autogenerated": false,
"ratio": 3.920698924731183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004951327673780312,
"num_lines": 76
} |
__all__ = ['ntriples_parser', 'nquads_parser', 'turtle_parser']
from collections import defaultdict, namedtuple, OrderedDict
from lepl import *
from lxml import etree
import re
from threading import local
from urlparse import urljoin
from pymantic.util import normalize_iri
import pymantic.primitives
def discrete_pairs(iterable):
    """s -> (s0,s1), (s2,s3), (s4, s5), ...

    Yields consecutive non-overlapping pairs; a trailing odd element is
    dropped.
    """
    # BUG FIX: the original used None as a "no previous element" sentinel,
    # so sequences *containing* None were silently mis-paired. Pulling two
    # items at a time from a single shared iterator has no such blind spot.
    it = iter(iterable)
    for pair in zip(it, it):
        yield pair
unicode_re = re.compile(r'\\u([0-9A-Za-z]{4})|\\U([0-9A-Za-z]{8})')
def nt_unescape(nt_string):
    """Un-do nt escaping style.

    Decodes a UTF-8 byte string and expands N-Triples backslash escapes
    (\\t, \\n, \\r, \\", \\\\, \\uXXXX, \\UXXXXXXXX) into characters.
    NOTE: Python 2 only -- relies on ``str.decode`` and ``unichr``.
    """
    output_string = u''  # NOTE(review): unused local, kept for compatibility
    nt_string = nt_string.decode('utf-8')
    # Fixed single-character escapes first:
    nt_string = nt_string.replace('\\t', u'\u0009')
    nt_string = nt_string.replace('\\n', u'\u000A')
    nt_string = nt_string.replace('\\r', u'\u000D')
    nt_string = nt_string.replace('\\"', u'\u0022')
    nt_string = nt_string.replace('\\\\', u'\u005C')
    def chr_match(matchobj):
        # One of the two groups matched, depending on \u vs \U form.
        ordinal = matchobj.group(1) or matchobj.group(2)
        return unichr(int(ordinal, 16))
    # Then numeric \uXXXX / \UXXXXXXXX escapes:
    nt_string = unicode_re.sub(chr_match, nt_string)
    return nt_string
class BaseLeplParser(object):
    """Shared infrastructure for the lepl-based RDF parsers in this module."""

    def __init__(self, environment=None):
        self.env = environment or pymantic.primitives.RDFEnvironment()
        self.profile = self.env.createProfile()
        # Thread-local parse state (blank-node map, target graph).
        self._call_state = local()

    def make_datatype_literal(self, values):
        """Build a typed literal from (lexical value, datatype) tokens."""
        return self.env.createLiteral(value = values[0], datatype = values[1])

    def make_language_literal(self, values):
        """Build a plain literal, language-tagged when a tag was parsed."""
        if len(values) == 2:
            return self.env.createLiteral(value = values[0],
                                          language = values[1])
        else:
            return self.env.createLiteral(value = values[0])

    def make_named_node(self, values):
        """Build a NamedNode from a parsed IRI token."""
        return self.env.createNamedNode(normalize_iri(values[0]))

    def make_blank_node(self, values):
        """Return the blank node for a label, creating it on first use."""
        if values[0] not in self._call_state.bnodes:
            self._call_state.bnodes[values[0]] = self.env.createBlankNode()
        return self._call_state.bnodes[values[0]]

    def _prepare_parse(self, graph):
        """Reset per-parse state before a run."""
        self._call_state.bnodes = defaultdict(self.env.createBlankNode)
        self._call_state.graph = graph

    def _cleanup_parse(self):
        """Drop per-parse state after a run."""
        del self._call_state.bnodes
        del self._call_state.graph

    def _make_graph(self):
        """Create the default sink; subclasses may override (e.g. Dataset)."""
        return self.env.createGraph()

    def parse(self, f, sink = None):
        """Parse the file-like object *f* and return the populated sink."""
        if sink is None:
            sink = self._make_graph()
        self._prepare_parse(sink)
        self.document.parse_file(f)
        self._cleanup_parse()
        return sink

    def parse_string(self, string, sink = None):
        """Parse *string* and return the populated sink."""
        if sink is None:
            sink = self._make_graph()
        self._prepare_parse(sink)
        self.document.parse(string)
        self._cleanup_parse()
        return sink
class BaseNParser(BaseLeplParser):
    """Base parser that establishes common grammar rules and interfaces used for
    parsing both n-triples and n-quads."""
    def __init__(self, environment=None):
        super(BaseNParser, self).__init__(environment)
        # Terminals; the character classes mirror N-Triples escaping rules.
        self.string = Regexp(r'(?:[ -!#-[\]-~]|\\[trn"\\]|\\u[0-9A-Fa-f]{4}|\\U[0-9A-Fa-f]{8})*')
        self.name = Regexp(r'[A-Za-z][A-Za-z0-9]*')
        self.absoluteURI = Regexp(r'(?:[ -=?-[\]-~]|\\[trn"\\]|\\u[0-9A-Fa-f]{4}|\\U[0-9A-Fa-f]{8})+')
        self.language = Regexp(r'[a-z]+(?:-[a-zA-Z0-9]+)*')
        # Productions built from the terminals:
        self.uriref = ~Literal('<') & self.absoluteURI & ~Literal('>') \
            > self.make_named_node
        self.datatypeString = ~Literal('"') & self.string & ~Literal('"') \
            & ~Literal('^^') & self.uriref > self.make_datatype_literal
        self.langString = ~Literal('"') & self.string & ~Literal('"') \
            & Optional(~Literal('@') & self.language) > self.make_language_literal
        self.literal = self.datatypeString | self.langString
        self.nodeID = ~Literal('_:') & self.name > self.make_blank_node
        self.object_ = self.uriref | self.nodeID | self.literal
        self.predicate = self.uriref
        self.subject = self.uriref | self.nodeID
        self.comment = Literal('#') & Regexp(r'[ -~]*')
    def make_named_node(self, values):
        """As the base class, but un-escapes N-Triples escapes first."""
        return self.env.createNamedNode(normalize_iri(nt_unescape(values[0])))
    def make_language_literal(self, values):
        """As the base class, but un-escapes N-Triples escapes first."""
        if len(values) == 2:
            return self.env.createLiteral(value = nt_unescape(values[0]),
                                          language = values[1])
        else:
            return self.env.createLiteral(value = nt_unescape(values[0]))
class NTriplesParser(BaseNParser):
    """Parser for the line-oriented N-Triples format."""
    def make_triple(self, values):
        """Create a triple, add it to the current graph, and return it."""
        triple = self.env.createTriple(*values)
        self._call_state.graph.add(triple)
        return triple
    def __init__(self, environment=None):
        super(NTriplesParser, self).__init__(environment)
        # One triple per line: subject predicate object '.'
        self.triple = self.subject & ~Plus(Space()) & self.predicate & \
            ~Plus(Space()) & self.object_ & ~Star(Space()) & ~Literal('.') \
            & ~Star(Space()) >= self.make_triple
        # A line may be blank, a comment, or a triple.
        self.line = Star(Space()) & Optional(~self.triple | ~self.comment) & \
            ~Literal('\n')
        self.document = Star(self.line)
    def _make_graph(self):
        return self.env.createGraph()
    def parse(self, f, graph=None):
        """Parse N-Triples from *f* into *graph* (created when None)."""
        return super(NTriplesParser, self).parse(f, graph)
ntriples_parser = NTriplesParser()
class NQuadsParser(BaseNParser):
    """Parser for the N-Quads format (N-Triples plus a graph name)."""
    def make_quad(self, values):
        """Create a quad, add it to the current dataset, and return it."""
        quad = self.env.createQuad(*values)
        self._call_state.graph.add(quad)
        return quad
    def __init__(self, environment=None):
        super(NQuadsParser, self).__init__(environment)
        self.graph_name = self.uriref
        # One quad per line: subject predicate object graph '.'
        self.quad = self.subject & ~Plus(Space()) & self.predicate \
            & ~Plus(Space()) & self.object_ & ~Plus(Space()) & self.graph_name \
            & ~Star(Space()) & ~Literal('.') & ~Star(Space()) >= self.make_quad
        self.line = Star(Space()) & Optional(~self.quad | ~self.comment) \
            & ~Literal('\n')
        self.document = Star(self.line)
    def _make_graph(self):
        """N-Quads targets a Dataset rather than a single Graph."""
        return self.env.createDataset()
    def parse(self, f, dataset=None):
        """Parse N-Quads from *f* into *dataset* (created when None)."""
        return super(NQuadsParser, self).parse(f, dataset)
nquads_parser = NQuadsParser()
class ClassicTurtleParser(BaseLeplParser):
def __init__(self, environment=None):
super(ClassicTurtleParser, self).__init__(environment)
self.absolute_uri_re = re.compile('^[^/]+:')
# White space is significant in the following rules.
hex_ = Any('0123456789ABCDEF')
character_escape = Or(And(Literal('r\u'), hex_[4]),
And(Literal(r'\U'), hex_[8]),
Literal(r'\\'))
character = Or(character_escape,
Regexp(ur'[\u0020-\u005B\]-\U0010FFFF]'))
echaracter = character | Any('\t\n\r')
ucharacter = Or(character_escape,
Regexp(ur'[\u0020-\u003D\u003F-\u005B\]-\U0010FFFF]')) | r'\>'
scharacter = Or(character_escape,
Regexp(ur'[\u0020-\u0021\u0023-\u005B\]-\U0010FFFF]')) | r'\"'
lcharacter = echaracter | '\"' | '\u009' | '\u000A' | '\u000D'
longString = ~Literal('"""') & lcharacter[:,...] & ~Literal('"""')
string = ~Literal('"') & scharacter[:,...] & ~Literal('"')
quotedString = longString | string > 'quotedString'
relativeURI = ucharacter[...]
prefixStartChar = Regexp(ur'[A-Z]') | Regexp(ur'[a-z]') | Regexp(ur'[\u00C0-\u00D6]') | Regexp(ur'[\u00D8-\u00F6]') | Regexp(ur'[\u00F8-\u02FF]') | Regexp(ur'[\u0370-\u037D]') | Regexp(ur'[\u037F-\u1FFF]') | Regexp(ur'[\u200C-\u200D]') | Regexp(ur'[\u2070-\u218F]') | Regexp(ur'[\u2C00-\u2FEF]') | Regexp(ur'[\u3001-\uD7FF]') | Regexp(ur'[\uF900-\uFDCF]') | Regexp(ur'[\uFDF0-\uFFFD]') | Regexp(ur'[\U00010000-\U000EFFFF]')
nameStartChar = prefixStartChar | "_"
nameChar = nameStartChar | '-' | Regexp(ur'[0-9]') | '\u00B7' | Regexp(ur'[\u0300-\u036F]') | Regexp(ur'[\u203F-\u2040]')
name = (nameStartChar & nameChar[:])[...] > 'name'
prefixName = (prefixStartChar & nameChar[:])[...] > 'prefixName'
language = Regexp(r'[a-z]+ (?:-[a-z0-9]+)*') > 'language'
qname = ((Optional(prefixName) & ~Literal(':') & Optional(name) > dict) > self.resolve_prefix) > 'qname'
nodeID = ~Literal('_:') & name > self.make_blank_node
self.comment = '#' & Regexp(r'[^\n\r]*') & Newline()
# Whie space is not significant in the following rules.
with Separator(~Star(Any(' \t\n\r'))):
uriref = (And(~Literal('<'), relativeURI, ~Literal('>')) > self.resolve_relative_uri) > 'uriref'
resource = (uriref | qname > dict) > self.make_named_node
self.ws = ws = ~Whitespace() | ~self.comment
blank = Delayed()
integer = Regexp(r'(?:-|\+)?[0-9]+') > self.make_integer_literal
decimal = Regexp(r'(?:-|\+)?(?:[0-9]+\.[0-9]*|\.(?:[0-9])+)') > self.make_decimal_literal
exponent = r'[eE](?:-|\+)?[0-9]+'
double = Regexp(r'(?:-|\+)?(?:[0-9]+\.[0-9]*' + exponent + r'|\.[0-9]+' + exponent + r'|[0-9]+' + exponent + ')') > self.make_double_literal
boolean = Literal('true') | Literal('false') > self.make_boolean_literal
datatypeString = quotedString & "^^" & (resource > 'dataType') > self.make_datatype_literal
literal = Or ( datatypeString,
quotedString & Optional(~Literal('@') & language) > self.make_language_literal,
double,
decimal,
integer,
boolean )
object_ = resource | blank | literal
predicate = resource
subject = resource | blank > 'subject'
verb = predicate | Literal('a') > 'predicate'
collection = ~Literal('(') & object_[:] & ~Literal(')') > self.make_collection
objectList = (object_ & (~Literal(',') & object_)[:] > List) > 'objectList'
predicateObjectList = ((verb & objectList > Node) & (~Literal(';') & (verb & objectList > Node))[:] & ~Optional(';') > List) > 'predicateObjectList'
blank += Or (nodeID, collection,
(~Literal('[') & ~Literal(']') > self.make_triples),
(~Literal('[') & predicateObjectList & ~Literal(']') > self.make_triples)
)
triples = subject & predicateObjectList > self.make_triples
base = (~Literal('@base') & Plus(ws) & uriref > dict) > self.record_base
prefixId = (~Literal('@prefix') & Plus(ws) & Optional(prefixName) & ~Literal(':') & uriref > dict) > self.record_prefix
directive = prefixId | base
statement = Or (directive & '.', triples & '.', Plus(ws))
self.document = Star(statement)
def _prepare_parse(self, graph):
super(TurtleParser, self)._prepare_parse(graph)
self._call_state.prefixes = self.env.createPrefixMap(empty=True)
self._call_state.base_uri = None
def record_base(self, values):
self._call_state.base_uri = values[0]['uriref']
return ''
def record_prefix(self, values):
prefix = values[0]
self._call_state.prefixes[prefix.get('prefixName', '')] = prefix['uriref']
return ''
def resolve_relative_uri(self, values):
relative_uri = values[0]
if self.absolute_uri_re.match(relative_uri):
return relative_uri
else:
return self._call_state.base_uri + relative_uri
def resolve_prefix(self, values):
qname = values[0]
return self._call_state.prefixes[qname.get('prefixName', '')] + qname.get('name', '')
def make_named_node(self, values):
resource = values[0]
return super(TurtleParser, self).make_named_node(
(resource.get('uriref') or resource.get('qname'),))
def make_triples(self, values):
triples = dict(values)
subject = triples.get('subject')
if not subject:
subject = self.env.createBlankNode()
for predicate_object_node in triples.get('predicateObjectList', ()):
predicate = predicate_object_node.predicate[0]
for object_ in predicate_object_node.objectList[0]:
self._call_state.graph.add(self.env.createTriple(subject, predicate, object_))
return subject
def make_collection(self, values):
prior = self.env.resolve('rdf:nil')
for element in reversed(values):
this = self.env.createBlankNode()
self._call_state.graph.add(self.env.createTriple(
subject=this, predicate=self.env.resolve('rdf:first'),
object=element))
self._call_state.graph.add(self.env.createTriple(
subject=this, predicate=self.env.resolve('rdf:rest'), object=prior))
prior = this
return prior
def _make_graph(self):
return self.env.createGraph()
def make_datatype_literal(self, values):
datatyped = dict(values)
return self.env.createLiteral(datatyped['quotedString'],
datatype = datatyped['dataType'])
def make_integer_literal(self, values):
return self.env.createLiteral(values[0],
datatype = self.env.resolve('xsd:integer'))
def make_decimal_literal(self, values):
return self.env.createLiteral(values[0],
datatype = self.env.resolve('xsd:decimal'))
def make_double_literal(self, values):
return self.env.createLiteral(values[0],
datatype = self.env.resolve('xsd:double'))
def make_boolean_literal(self, values):
return self.env.createLiteral(values[0],
datatype = self.env.resolve('xsd:boolean'))
def make_language_literal(self, values):
languageable = dict(values)
return self.env.createLiteral(languageable['quotedString'],
language = languageable.get('language'))
classic_turtle_parser = ClassicTurtleParser()
TriplesClause = namedtuple('TriplesClause', ['subject', 'predicate_objects'])
PredicateObject = namedtuple('PredicateObject', ['predicate', 'object'])
BindPrefix = namedtuple('BindPrefix', ['prefix', 'iri'])
SetBase = namedtuple('SetBase', ['iri'])
NamedNodeToBe = namedtuple('NamedNodeToBe', ['iri'])
LiteralToBe = namedtuple('LiteralToBe', ['value', 'datatype', 'language'])
PrefixReference = namedtuple('PrefixReference', ['prefix', 'local'])
class TurtleParser(BaseLeplParser):
    """Parser for turtle as described at:
    http://dvcs.w3.org/hg/rdf/raw-file/e8b1d7283925/rdf-turtle/index.html

    Two-phase design: the lepl grammar yields *deferred* descriptions
    (NamedNodeToBe, LiteralToBe, TriplesClause, BindPrefix, SetBase) and
    ``_interpret_parse`` then resolves prefixes/base IRIs and emits triples.
    """
    RDF_TYPE = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'
    # Backslash-escape letter -> character it denotes.
    echar_map = OrderedDict((
        ('\\', '\\'),
        ('t', '\t'),
        ('b', '\b'),
        ('n', '\n'),
        ('r', '\r'),
        ('f', '\f'),
        ('"', '"'),
        ("'", "'"),
    ))
    def __init__(self, environment=None):
        super(TurtleParser, self).__init__(environment)
        # --- Terminals (whitespace significant) ---
        UCHAR = (Regexp(ur'\\u([0-9a-fA-F]{4})') |\
                 Regexp(ur'\\U([0-9a-fA-F]{8})')) >> self.decode_uchar
        ECHAR = Regexp(ur'\\([tbnrf\\"\'])') >> self.decode_echar
        PN_CHARS_BASE = Regexp(ur'[A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF'
                               ur'\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F'
                               ur'\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD'
                               ur'\U00010000-\U000EFFFF]') | UCHAR
        PN_CHARS_U = PN_CHARS_BASE | Literal('_')
        PN_CHARS = PN_CHARS_U | Regexp(ur'[\-0-9\u00B7\u0300-\u036F\u203F-\u2040]')
        PN_PREFIX = PN_CHARS_BASE & Optional(Star(PN_CHARS | Literal(".")) & PN_CHARS ) > ''.join
        PN_LOCAL = (PN_CHARS_U | Regexp('[0-9]')) & Optional(Star(PN_CHARS | Literal(".")) & PN_CHARS) > ''.join
        WS = Regexp(ur'[\t\n\r ]')
        ANON = ~(Literal('[') & Star(WS) & Literal(']'))
        NIL = Literal('(') & Star(WS) & Literal(')')
        # The four Turtle string forms (single/double quote, short/long):
        STRING_LITERAL1 = (Literal("'") &\
                           Star(Regexp(ur"[^'\\\n\r]") | ECHAR | UCHAR ) &\
                           Literal("'")) > self.string_contents
        STRING_LITERAL2 = (Literal('"') &\
                           Star(Regexp(ur'[^"\\\n\r]') | ECHAR | UCHAR ) &\
                           Literal('"')) > self.string_contents
        STRING_LITERAL_LONG1 = (Literal("'''") &\
                                Star(Optional( Regexp("'|''")) &\
                                     ( Regexp(ur"[^'\\]") | ECHAR | UCHAR ) ) &\
                                Literal("'''")) > self.string_contents
        STRING_LITERAL_LONG2 = (Literal('"""') &\
                                Star(Optional( Regexp(ur'"|""') ) &\
                                     ( Regexp(ur'[^\"\\]') | ECHAR | UCHAR ) ) &\
                                Literal('"""')) > self.string_contents
        INTEGER = Regexp(ur'[+-]?[0-9]+')
        DECIMAL = Regexp(ur'[+-]?(?:[0-9]+\.[0-9]+|\.[0-9]+)')
        DOUBLE = Regexp(ur'[+-]?(?:[0-9]+\.[0-9]+|\.[0-9]+|[0-9]+)[eE][+-]?[0-9]+')
        IRI_REF = (~Literal('<') & (Star(Regexp(ur'[^<>"{}|^`\\\u0000-\u0020]') | UCHAR | ECHAR) > ''.join) & ~Literal('>'))
        PNAME_NS = Optional(PN_PREFIX) & Literal(":")
        PNAME_LN = PNAME_NS & PN_LOCAL
        BLANK_NODE_LABEL = ~Literal("_:") & PN_LOCAL
        LANGTAG = ~Literal("@") & (Literal('base') | Literal('prefix') |\
                                   Regexp(ur'[a-zA-Z]+(?:-[a-zA-Z0-9]+)*'))
        # Whitespace and comments allowed between tokens:
        intertoken = ~Regexp(ur'[ \t\r\n]+|#[^\r\n]+')[:]
        # --- Productions (whitespace handled by the separator) ---
        with Separator(intertoken):
            BlankNode = (BLANK_NODE_LABEL >> self.create_blank_node) |\
                        (ANON > self.create_anon_node)
            prefixID = (~Literal('@prefix') & PNAME_NS & IRI_REF) > self.bind_prefixed_name
            base = (~Literal('@base') & IRI_REF) >> self.set_base
            PrefixedName = (PNAME_LN | PNAME_NS) > self.resolve_prefixed_name
            IRIref = PrefixedName | (IRI_REF >> self.create_named_node)
            BooleanLiteral = (Literal('true') | Literal('false')) >> self.boolean_value
            String = STRING_LITERAL1 | STRING_LITERAL2 | STRING_LITERAL_LONG1 | STRING_LITERAL_LONG2
            RDFLiteral = ((String & LANGTAG) > self.langtag_string) |\
                         ((String & ~Literal('^^') & IRIref) > self.typed_string) |\
                         (String > self.bare_string)
            literal = RDFLiteral | (INTEGER >> self.int_value) |\
                      (DECIMAL >> self.decimal_value) |\
                      (DOUBLE >> self.double_value) | BooleanLiteral
            # NOTE: 'object' shadows the builtin; kept for grammar fidelity.
            object = Delayed()
            predicateObjectList = Delayed()
            blankNodePropertyList = ~Literal('[') & predicateObjectList & ~Literal(']') > self.make_blank_node_property_list
            collection = (~Literal('(') & object[:] & ~Literal(')')) > self.make_collection
            blank = BlankNode | blankNodePropertyList | collection
            subject = IRIref | blank
            predicate = IRIref
            object += IRIref | blank | literal
            verb = predicate | (~Literal('a') > self.create_rdf_type)
            objectList = ((object & (~Literal(',') & object)[:]) | object) > self.make_object
            predicateObjectList += ((verb & objectList &\
                                     (~Literal(';') & Optional(verb & objectList))[:]) |\
                                    (verb & objectList)) > self.make_object_list
            triples = (subject & predicateObjectList) > self.make_triples
            directive = prefixID | base
            statement = (directive | triples) & ~Literal('.')
            self.turtle_doc = intertoken & statement[:] & intertoken & Eos()
            self.turtle_doc.config.clear()
    def _prepare_parse(self, graph):
        """Reset prefix/base/subject state; base comes from parse(base=...)."""
        super(TurtleParser, self)._prepare_parse(graph)
        self._call_state.base_iri = self._base
        self._call_state.prefixes = {}
        self._call_state.current_subject = None
        self._call_state.current_predicate = None
    def decode_uchar(self, uchar_string):
        """Turn a \\uXXXX / \\UXXXXXXXX hex payload into its character."""
        return unichr(int(uchar_string, 16))
    def decode_echar(self, echar_string):
        """Turn an escape letter (t, n, r, ...) into its character."""
        return self.echar_map[echar_string]
    def string_contents(self, string_chars):
        """Join the character tokens, dropping the delimiter tokens."""
        return ''.join(string_chars[1:-1])
    def int_value(self, value):
        """Wrap a bare integer token as an xsd:integer literal-to-be."""
        return LiteralToBe(value, language=None,
                           datatype=NamedNodeToBe(self.profile.resolve('xsd:integer')))
    def decimal_value(self, value):
        """Wrap a bare decimal token as an xsd:decimal literal-to-be."""
        return LiteralToBe(value, language=None,
                           datatype=NamedNodeToBe(self.profile.resolve('xsd:decimal')))
    def double_value(self, value):
        """Wrap a bare double token as an xsd:double literal-to-be."""
        return LiteralToBe(value, language=None,
                           datatype=NamedNodeToBe(self.profile.resolve('xsd:double')))
    def boolean_value(self, value):
        """Wrap true/false as an xsd:boolean literal-to-be."""
        return LiteralToBe(value, language=None,
                           datatype=NamedNodeToBe(self.profile.resolve('xsd:boolean')))
    def langtag_string(self, values):
        """Build a language-tagged literal-to-be."""
        return LiteralToBe(values[0], language=values[1], datatype=None)
    def typed_string(self, values):
        """Build a datatyped literal-to-be."""
        return LiteralToBe(values[0], language=None, datatype=values[1])
    def bare_string(self, values):
        """Build an untagged literal-to-be (defaults to xsd:string)."""
        return LiteralToBe(values[0], language=None,
                           datatype=NamedNodeToBe(self.profile.resolve('xsd:string')))
    def create_named_node(self, iri):
        """Defer IRI resolution until interpretation time."""
        return NamedNodeToBe(iri)
    def create_blank_node(self, name=None):
        """Return the blank node for a _:label (shared per parse)."""
        if name is None:
            return self.env.createBlankNode()
        return self._call_state.bnodes[name]
    def create_anon_node(self, anon_tokens):
        """Create a fresh blank node for an [] anonymous node."""
        return self.env.createBlankNode()
    def create_rdf_type(self, values):
        """The 'a' keyword means rdf:type."""
        return self.profile.resolve('rdf:type')
    def resolve_prefixed_name(self, values):
        """Split pname tokens into (prefix, local) for later expansion."""
        if values[0] == ':':
            pname = ''
            local = values[1] if len(values) == 2 else ''
        elif values[-1] == ':':
            pname = values[0]
            local = ''
        else:
            pname = values[0]
            local = values[2]
        return NamedNodeToBe(PrefixReference(pname, local))
    def bind_prefixed_name(self, values):
        """Record an @prefix directive for the interpretation phase."""
        iri = values.pop()
        assert values.pop() == ':'
        pname = values.pop() if values else ''
        return BindPrefix(pname, iri)
    def set_base(self, base_iri):
        """Record an @base directive for the interpretation phase."""
        return SetBase(base_iri)
    def make_object(self, values):
        return values
    def make_object_list(self, values):
        """Group the flat verb/object token stream into (verb, objects) pairs."""
        return list(discrete_pairs(values))
    def make_blank_node_property_list(self, values):
        """Turn a [ p o ; ... ] into a TriplesClause with a fresh subject."""
        subject = self.env.createBlankNode()
        predicate_objects = []
        for predicate, objects in values[0]:
            for obj in objects:
                predicate_objects.append(PredicateObject(predicate, obj))
        return TriplesClause(subject, predicate_objects)
    def make_triples(self, values):
        """Pair a subject with its flattened predicate/object list."""
        subject = values[0]
        predicate_objects = [PredicateObject(predicate, obj) for
                             predicate, objects in values[1] for obj in objects]
        return TriplesClause(subject, predicate_objects)
    def make_collection(self, values):
        """Build an rdf:List of TriplesClauses; returns the head clause."""
        prev_node = TriplesClause(self.profile.resolve('rdf:nil'), [])
        for value in reversed(values):
            prev_node = TriplesClause(
                self.env.createBlankNode(),
                [PredicateObject(self.profile.resolve('rdf:first'), value),
                 PredicateObject(self.profile.resolve('rdf:rest'), prev_node)])
        return prev_node
    def _interpret_parse(self, data, sink):
        """Second phase: apply directives and emit triples into the graph."""
        for line in data:
            if isinstance(line, BindPrefix):
                self._call_state.prefixes[line.prefix] = urljoin(
                    self._call_state.base_iri, line.iri, allow_fragments=False)
            elif isinstance(line, SetBase):
                self._call_state.base_iri = urljoin(
                    self._call_state.base_iri, line.iri, allow_fragments=False)
            else:
                self._interpret_triples_clause(line)
    def _interpret_triples_clause(self, triples_clause):
        """Emit a TriplesClause's triples; returns its resolved subject."""
        assert isinstance(triples_clause, TriplesClause)
        subject = self._resolve_node(triples_clause.subject)
        for predicate_object in triples_clause.predicate_objects:
            self._call_state.graph.add(self.env.createTriple(
                subject, self._resolve_node(predicate_object.predicate),
                self._resolve_node(predicate_object.object)))
        return subject
    def _resolve_node(self, node):
        """Resolve a deferred node (prefix/base/literal) to a concrete term."""
        if isinstance(node, NamedNodeToBe):
            if isinstance(node.iri, PrefixReference):
                return self.env.createNamedNode(
                    self._call_state.prefixes[node.iri.prefix] + node.iri.local)
            else:
                return self.env.createNamedNode(
                    urljoin(self._call_state.base_iri, node.iri,
                            allow_fragments=False))
        elif isinstance(node, TriplesClause):
            # Nested [ ... ] / ( ... ): emit its triples, use its subject.
            return self._interpret_triples_clause(node)
        elif isinstance(node, LiteralToBe):
            if node.datatype:
                return self.env.createLiteral(
                    node.value, datatype=self._resolve_node(node.datatype))
            else:
                return self.env.createLiteral(node.value, language=node.language)
        else:
            return node
    def parse(self, data, sink = None, base = ''):
        """Parse Turtle text *data* into *sink*, resolving against *base*."""
        if sink is None:
            sink = self._make_graph()
        self._base = base
        self._prepare_parse(sink)
        self._interpret_parse(self.turtle_doc.parse(data), sink)
        self._cleanup_parse()
        return sink
    def parse_string(self, string, sink = None):
        """Alias of parse() for string input."""
        return self.parse(string, sink)
turtle_parser = TurtleParser()
scheme_re = re.compile(r'[a-zA-Z](?:[a-zA-Z0-9]|\+|-|\.)*')
class RDFXMLParser(object):
    """Minimal RDF/XML parser covering only the basic Description/resource
    subset (no rdf:li containers, no literal property elements).
    """
    RDF_TYPE = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'

    def __init__(self):
        self.namespaces = {'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',}
        self._call_state = local()

    def clark(self, prefix, tag):
        """Return the Clark-notation name '{namespace-uri}tag'."""
        return '{%s}%s' % (self.namespaces[prefix], tag)

    def parse(self, f, sink = None):
        """Parse RDF/XML from file-like *f*, adding triples to *sink*."""
        self._call_state.bnodes = {}
        tree = etree.parse(f)
        # BUG FIX: compare the root element's *tag*; comparing the Element
        # object itself to a string is never equal.
        if tree.getroot().tag != self.clark('rdf', 'RDF'):
            raise ValueError('Invalid XML document.')
        for element in tree.getroot():
            self._handle_resource(element, sink)
        return sink

    def _handle_resource(self, element, sink):
        from pymantic.primitives import BlankNode, NamedNode, Triple
        subject = self._determine_subject(element)
        if element.tag != self.clark('rdf', 'Description'):
            # A typed node element implies an rdf:type triple.
            resource_class = self._resolve_tag(element)
            sink.add(Triple(subject, NamedNode(self.RDF_TYPE), resource_class))
        for property_element in element:
            if property_element.tag == self.clark('rdf', 'li'):
                pass  # rdf:li container items are not supported yet
            else:
                predicate = self._resolve_tag(property_element)
                if self.clark('rdf', 'resource') in property_element.attrib:
                    object_ = self._resolve_uri(
                        property_element, property_element.attrib[self.clark(
                            'rdf', 'resource')])
                    sink.add(Triple(subject, NamedNode(predicate), NamedNode(object_)))
        return subject

    def _resolve_tag(self, element):
        """Turn an element tag into a NamedNode (expanding Clark notation)."""
        # BUG FIX: NamedNode was a NameError here (it was only imported
        # inside _handle_resource).
        from pymantic.primitives import NamedNode
        if element.tag[0] == '{':
            # BUG FIX: slice the *tag string*; ``element[1:]`` sliced the
            # element's children instead.
            tag_bits = element.tag[1:].partition('}')
            return NamedNode(tag_bits[0] + tag_bits[2])
        else:
            return NamedNode(urljoin(element.base, element.tag))

    def _determine_subject(self, element):
        """Work out the subject node from rdf:about / rdf:nodeID / rdf:ID."""
        from pymantic.primitives import BlankNode, NamedNode
        if self.clark('rdf', 'about') not in element.attrib and\
           self.clark('rdf', 'nodeID') not in element.attrib and\
           self.clark('rdf', 'ID') not in element.attrib:
            return BlankNode()
        elif self.clark('rdf', 'nodeID') in element.attrib:
            node_id = element.attrib[self.clark('rdf', 'nodeID')]
            if node_id not in self._call_state.bnodes:
                self._call_state.bnodes[node_id] = BlankNode()
            return self._call_state.bnodes[node_id]
        elif self.clark('rdf', 'ID') in element.attrib:
            if not element.base:
                raise ValueError('No XML base for %r', element)
            return NamedNode(element.base + '#' +\
                             element.attrib[self.clark('rdf', 'ID')])
        elif self.clark('rdf', 'about') in element.attrib:
            # BUG FIX: read the rdf:about attribute that was just tested;
            # the original fetched rdf:resource and raised KeyError.
            return self._resolve_uri(element, element.attrib[
                self.clark('rdf', 'about')])

    def _resolve_uri(self, element, uri):
        """Resolve *uri* against the element's xml:base unless absolute."""
        from pymantic.primitives import NamedNode
        if not scheme_re.match(uri):
            return NamedNode(urljoin(element.base, uri))
        else:
            return NamedNode(uri)
| {
"repo_name": "SYSTAP/blazegraph-python",
"path": "pymantic/parsers.py",
"copies": "4",
"size": "29945",
"license": "bsd-3-clause",
"hash": -5999987292491661000,
"line_mean": 41.3550212164,
"line_max": 431,
"alpha_frac": 0.5500417432,
"autogenerated": false,
"ratio": 3.7525062656641603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.021410086282355204,
"num_lines": 707
} |
__all__ = [
"xlfunc",
"xlret",
"xlarg",
"xlsub"
]
def xlfunc(f = None, **kwargs):
    """Decorator marking *f* as an Excel UDF.

    Attaches an ``__xlfunc__`` metadata dict (name, argument specs, return
    spec) that the add-in uses to register the function. Usable bare
    (``@xlfunc``) or with options (``@xlfunc(xlwings=...)``). Re-decorating
    an already-marked function is a no-op, so ``xlret``/``xlarg`` can call
    it safely to fetch the metadata.
    """
    def inner(f):
        if not hasattr(f, "__xlfunc__"):
            # BUG FIX: this assignment was duplicated; one copy removed.
            xlf = f.__xlfunc__ = {}
            xlf["name"] = f.__name__
            xlf["sub"] = False
            xlf["xlwings"] = kwargs.get("xlwings", None)
            xlargs = xlf["args"] = []
            xlargmap = xlf["argmap"] = {}
            nArgs = f.__code__.co_argcount
            if f.__code__.co_flags & 4:  # function has an '*args' argument
                nArgs += 1
            for vpos, vname in enumerate(f.__code__.co_varnames[:nArgs]):
                xlargs.append({
                    "name": vname,
                    "pos": vpos,
                    "marshal": "var",
                    "vba": None,
                    "range": False,
                    "dtype": None,
                    "dims": -1,
                    "doc": "Positional argument " + str(vpos+1),
                    # The slot after the regular args (if any) is *args.
                    "vararg": True if vpos == f.__code__.co_argcount else False
                })
                xlargmap[vname] = xlargs[-1]
            xlf["ret"] = {
                "marshal": "var",
                "lax": True,
                "doc": f.__doc__ if f.__doc__ is not None else "Python function '" + f.__name__ + "' defined in '" + str(f.__code__.co_filename) + "'."
            }
        return f
    if f is None:
        return inner
    else:
        return inner(f)
def xlsub(f = None, **kwargs):
    """Decorator marking *f* as an Excel macro (Sub) rather than a function."""
    def inner(func):
        decorated = xlfunc(**kwargs)(func)
        decorated.__xlfunc__["sub"] = True
        return decorated
    return inner if f is None else inner(f)
# Keys accepted by @xlret.
xlretparams = {"marshal", "lax", "doc"}

def xlret(marshal=None, **kwargs):
    """Decorator configuring how a UDF's return value is marshalled."""
    if marshal is not None:
        kwargs["marshal"] = marshal
    def inner(f):
        ret_spec = xlfunc(f).__xlfunc__["ret"]
        for key, value in kwargs.items():
            if key not in xlretparams:
                raise Exception("Invalid parameter '" + key + "'.")
            ret_spec[key] = value
        return f
    return inner
# Keys accepted by @xlarg.
xlargparams = {"marshal", "dims", "dtype", "range", "doc", "vba"}

def xlarg(arg, marshal=None, dims=None, **kwargs):
    """Decorator configuring how UDF argument *arg* is marshalled."""
    if marshal is not None:
        kwargs["marshal"] = marshal
    if dims is not None:
        kwargs["dims"] = dims
    def inner(f):
        xlf = xlfunc(f).__xlfunc__
        if arg not in xlf["argmap"]:
            raise Exception("Invalid argument name '" + arg + "'.")
        arg_spec = xlf["argmap"][arg]
        for key, value in kwargs.items():
            if key not in xlargparams:
                raise Exception("Invalid parameter '" + key + "'.")
            arg_spec[key] = value
        return f
    return inner
udf_scripts = {}  # filename -> (mtime, exec'd globals dict) cache

def udf_script(filename):
    """Execute *filename* and return its global namespace, cached by mtime.

    NOTE: executes arbitrary code from *filename*; callers must trust the
    path.
    """
    import os.path
    filename = filename.lower()
    mtime = os.path.getmtime(filename)
    if filename in udf_scripts:
        mtime2, vars = udf_scripts[filename]
        if mtime == mtime2:
            # Unchanged on disk -- reuse the cached namespace.
            return vars
    vars = {}
    with open(filename, "r") as f:
        exec(compile(f.read(), filename, "exec"), vars)
    udf_scripts[filename] = (mtime, vars)
return vars | {
"repo_name": "Tusky/excelpython",
"path": "addin/xlpython/__init__.py",
"copies": "3",
"size": "2555",
"license": "bsd-2-clause",
"hash": 174530615854132060,
"line_mean": 24.0588235294,
"line_max": 139,
"alpha_frac": 0.5800391389,
"autogenerated": false,
"ratio": 2.642192347466391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47222314863663917,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.
__all__ = [
    'ubcMeshReaderBase',
    'ModelAppenderBase',
]
# Display name for this module in the PVGeo docs/UI.
__displayname__ = 'Base Classes'
# Outside Imports:
import os
import numpy as np
import vtk
from .. import _helpers, base
###############################################################################
# UBC Mesh Reader Base
class ubcMeshReaderBase(base.TwoFileReaderBase):
    """A base class for the UBC mesh readers.

    Handles discovery of mesh dimensionality (UBC 2D vs 3D/OcTree) and common
    model-file IO; concrete readers build the actual VTK data objects.
    """
    __displayname__ = 'UBC Mesh Reader Base'
    __category__ = 'base'
    extensions = 'mesh msh dat txt text'

    def __init__(self, nOutputPorts=1, outputType='vtkUnstructuredGrid', **kwargs):
        base.TwoFileReaderBase.__init__(
            self, nOutputPorts=nOutputPorts, outputType=outputType, **kwargs
        )
        self.__data_name = 'Data'
        # Flag on whether or not to use the model file name as the data name.
        self.__use_filename = True
        # First line of the mesh file; its length distinguishes 2D (1 entry)
        # from 3D/OcTree (>= 3 entries).  Set by _read_extent().
        self.__sizeM = None

    def is_3d(self):
        """Return True if the mesh is spatially referenced in three dimensions."""
        return self.__sizeM.shape[0] >= 3

    def is_2d(self):
        """Return True if the mesh is spatially referenced in only two dimensions."""
        return self.__sizeM.shape[0] == 1

    @staticmethod
    def _ubc_mesh_2d_part(FileName):
        """Internal helper to read a UBC 2D mesh file.

        Returns ``(xpts, xdisc, zpts, zdisc)``: node locations and cell
        discretization counts (as strings) for the X and Z dimensions.
        """
        try:
            fileLines = np.genfromtxt(FileName, dtype=str, delimiter='\n', comments='!')
        except (IOError, OSError) as fe:
            raise _helpers.PVGeoError(str(fe))

        def _genTup(sft, n):
            # Reads the data for one dimension: n lines starting at offset sft.
            pts = []
            disc = []
            for i in range(n):
                ln = fileLines[i + sft].split('!')[0].split()
                if i == 0:
                    # First line carries the origin as an extra leading token.
                    o = ln[0]
                    pts.append(o)
                    ln = [ln[1], ln[2]]
                pts.append(ln[0])
                disc.append(ln[1])
            return pts, disc

        # Get the number of lines for each dimension
        nx = int(fileLines[0].split('!')[0])
        nz = int(fileLines[nx + 1].split('!')[0])
        # Get the origins and tups for both dimensions
        xpts, xdisc = _genTup(1, nx)
        zpts, zdisc = _genTup(2 + nx, nz)
        return xpts, xdisc, zpts, zdisc

    def _read_extent(self):
        """Reads the mesh file for the UBC 2D/3D Mesh or OcTree format to get
        output extents. Computationally inexpensive method to discover whole
        output extent.

        Return:
            tuple(int) :
                This returns a tuple of the whole extent for the grid to be
                made of the input mesh file (0,n1-1, 0,n2-1, 0,n3-1). This
                output should be directly passed to set the whole output extent.
        """
        # Read the mesh file as line strings, remove lines with comment = !
        # BUGFIX: compare the version as a tuple -- the old element-wise check
        # (v[0] >= 1 and v[1] >= 10) mis-handled NumPy 2.x.
        v = tuple(int(p) for p in np.__version__.split('.')[0:2])
        FileName = self.get_mesh_filename()
        try:
            if v >= (1, 10):
                # max_rows in numpy versions >= 1.10
                # BUGFIX: np.str was removed from NumPy; plain str is equivalent.
                msh = np.genfromtxt(
                    FileName, delimiter='\n', dtype=str, comments='!', max_rows=1
                )
            else:
                # This reads whole file :(
                msh = np.genfromtxt(
                    FileName, delimiter='\n', dtype=str, comments='!'
                )[0]
        except (IOError, OSError) as fe:
            raise _helpers.PVGeoError(str(fe))
        # First line is the size of the model
        self.__sizeM = np.array(msh.ravel()[0].split(), dtype=int)
        # Check if the mesh is a UBC 2D mesh
        if self.__sizeM.shape[0] == 1:
            # Read in data from file
            xpts, xdisc, zpts, zdisc = ubcMeshReaderBase._ubc_mesh_2d_part(FileName)
            nx = np.sum(np.array(xdisc, dtype=int)) + 1
            nz = np.sum(np.array(zdisc, dtype=int)) + 1
            return (0, nx, 0, 1, 0, nz)
        # Check if the mesh is a UBC 3D mesh or OcTree
        elif self.__sizeM.shape[0] >= 3:
            # Get mesh dimensions
            dim = self.__sizeM[0:3]
            ne, nn, nz = dim[0], dim[1], dim[2]
            return (0, ne, 0, nn, 0, nz)
        else:
            raise _helpers.PVGeoError('File format not recognized')

    @staticmethod
    def ubc_model_3d(FileName):
        """Reads the 3D model file and returns a 1D NumPy float array. Use the
        place_model_on_mesh() method to associate with a grid.

        Args:
            FileName (str) : The model file name(s) as an absolute path for the
                input model file in UBC 3D Model Model Format. Also accepts a
                `list` of string file names.

        Return:
            np.array :
                Returns a NumPy float array that holds the model data
                read from the file. Use the ``place_model_on_mesh()`` method to
                associate with a grid. If a list of file names is given then it
                will return a dictionary of NumPy float array with keys as the
                basenames of the files.
        """
        # Check if recursion needed
        if isinstance(FileName, (list, tuple)):
            out = {}
            for f in FileName:
                out[os.path.basename(f)] = ubcMeshReaderBase.ubc_model_3d(f)
            return out
        # Perform IO
        try:
            # BUGFIX: np.float was removed from NumPy; builtin float is equivalent.
            data = np.genfromtxt(FileName, dtype=float, comments='!')
        except (IOError, OSError) as fe:
            raise _helpers.PVGeoError(str(fe))
        return data

    def set_use_filename(self, flag):
        """Set a flag on whether or not to use the filename as the data array name"""
        if self.__use_filename != flag:
            self.__use_filename = flag
            self.Modified(read_again_mesh=False, read_again_models=False)

    def set_data_name(self, name):
        """Set the data array name"""
        if name == '':
            # An empty name means fall back to the model file's basename.
            self.__use_filename = True
            self.Modified(read_again_mesh=False, read_again_models=False)
        elif self.__data_name != name:
            self.__data_name = name
            self.__use_filename = False
            self.Modified(read_again_mesh=False, read_again_models=False)

    def get_data_name(self):
        """Get the data array name"""
        if self.__use_filename:
            mname = self.get_model_filenames(idx=0)
            return os.path.basename(mname)
        return self.__data_name
###############################################################################
# UBC Model Appender Base
class ModelAppenderBase(base.AlgorithmBase):
    """A base class for creating mesh-model appenders on the UBC Mesh formats."""
    __displayname__ = 'Model Appender Base'
    __category__ = 'base'

    def __init__(
        self, inputType='vtkRectilinearGrid', outputType='vtkRectilinearGrid', **kwargs
    ):
        base.AlgorithmBase.__init__(
            self,
            nInputPorts=1,
            inputType=inputType,
            nOutputPorts=1,
            outputType=outputType,
        )
        self._model_filenames = kwargs.get('model_files', [])
        self.__data_name = kwargs.get('dataname', 'Appended Data')
        self.__use_filename = True
        self._models = []
        self.__need_to_read = True
        self._is_3D = None
        # For the VTK/ParaView pipeline
        self.__dt = kwargs.get('dt', 1.0)
        self.__timesteps = None
        # Index to fall back on if the requested timestep is unavailable.
        self.__last_successful_index = 0

    def need_to_read(self, flag=None):
        """Ask self if the reader needs to read the files again.

        Args:
            flag (bool): if the flag is set then this method will set the read
                status

        Return:
            bool:
                The status of the reader aspect of the filter.
        """
        if flag is not None and isinstance(flag, (bool, int)):
            self.__need_to_read = flag
            self._update_time_steps()
        return self.__need_to_read

    def Modified(self, read_again=True):
        """Call modified if the files need to be read again."""
        if read_again:
            self.__need_to_read = read_again
        base.AlgorithmBase.Modified(self)

    def modified(self, read_again=True):
        """Snake-case alias of :meth:`Modified`."""
        return self.Modified(read_again=read_again)

    def _update_time_steps(self):
        """For internal use only: appropriately sets the timesteps."""
        # Use the inputs' timesteps: this merges the timesteps values
        ts0 = _helpers.get_input_time_steps(self, port=0)
        if ts0 is None:
            ts0 = np.array([])
        ts1 = _helpers._calculate_time_range(len(self._model_filenames), self.__dt)
        tsAll = np.unique(np.concatenate((ts0, ts1), 0))
        # Use both inputs' time steps
        self.__timesteps = _helpers.update_time_steps(self, tsAll, explicit=True)
        return 1

    def _read_up_front(self):
        """Read all model files up front; must be implemented by subclasses."""
        raise NotImplementedError()

    def _place_on_mesh(self, output, idx=0):
        """Place the model at index ``idx`` onto the mesh; implemented by subclasses."""
        raise NotImplementedError()

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        output = self.GetOutputData(outInfo, 0)
        output.DeepCopy(pdi)  # ShallowCopy if you want changes to propagate upstream
        # Get requested time index
        i = _helpers.get_requested_time(self, outInfo)
        # Perform task:
        if self.__need_to_read:
            self._read_up_front()
        # Place the model data for given timestep onto the mesh
        if len(self._models) > i:
            self._place_on_mesh(output, idx=i)
            self.__last_successful_index = i
        else:
            # put the last successfully used array as a placeholder
            self._place_on_mesh(output, idx=self.__last_successful_index)
        return 1

    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to handle time variance and update output extents"""
        self._update_time_steps()
        pdi = self.GetInputData(inInfo, 0, 0)
        # Determine if 2D or 3D and read
        if isinstance(pdi, vtk.vtkRectilinearGrid) and pdi.GetExtent()[3] == 1:
            self._is_3D = False
        else:
            self._is_3D = True
        return 1

    #### Setters and Getters ####

    def has_models(self):
        """Return True if models are associated with this mesh"""
        return len(self._model_filenames) > 0

    def get_time_step_values(self):
        """Use this in ParaView decorator to register timesteps."""
        # if unset, force at least one attempt to set the timesteps
        if self.__timesteps is None:
            self._update_time_steps()
        return self.__timesteps

    def clear_models(self):
        """Use to clear data file names."""
        self._model_filenames = []
        self._models = []
        self.Modified(read_again=True)

    def add_model_file_name(self, filename):
        """Use to set the file names for the reader. Handles single string or
        list of strings."""
        if filename is None:
            return  # do nothing if None is passed by a constructor on accident
        elif isinstance(filename, (list, tuple)):
            for f in filename:
                self.add_model_file_name(f)
            self.Modified()
        elif filename not in self._model_filenames:
            self._model_filenames.append(filename)
            self.Modified()
        return 1

    def get_model_filenames(self, idx=None):
        """Returns the list of file names, or, given an index, a specified
        timestep's filename."""
        if idx is None or not self.has_models():
            return self._model_filenames
        return self._model_filenames[idx]

    def set_use_filename(self, flag):
        """Set a flag on whether or not to use the filename as the data array name"""
        if self.__use_filename != flag:
            self.__use_filename = flag
            self.Modified(read_again=False)

    def set_data_name(self, name):
        """Set the data array name"""
        if name == '':
            # An empty name means fall back to the model file's basename.
            self.__use_filename = True
            self.Modified(read_again=False)
        elif self.__data_name != name:
            self.__data_name = name
            self.__use_filename = False
            self.Modified(read_again=False)

    def get_data_name(self):
        """Get the data array name"""
        if self.__use_filename:
            mname = self.get_model_filenames(idx=0)
            return os.path.basename(mname)
        return self.__data_name
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/ubc/two_file_base.py",
"copies": "1",
"size": "12901",
"license": "bsd-3-clause",
"hash": 2306600373077320700,
"line_mean": 35.86,
"line_max": 88,
"alpha_frac": 0.5609642663,
"autogenerated": false,
"ratio": 3.9817901234567903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5042754389756791,
"avg_score": null,
"num_lines": null
} |
# Test doubles exported by this module.
__all__ = ["NullCrawler", "Random3Crawler"]
from typing import Optional
from nichtparasoup.core import Crawler, _CrawlerWeight, _IsImageAddable, _OnImageAdded
from nichtparasoup.core.image import Image
from nichtparasoup.core.imagecrawler import BaseImageCrawler
class _LoggingCrawler(Crawler):
    """Crawler that records how often ``crawl`` and ``reset`` are invoked."""

    def __init__(self,
                 imagecrawler: BaseImageCrawler, weight: _CrawlerWeight,
                 is_image_addable: Optional[_IsImageAddable] = None,
                 on_image_added: Optional[_OnImageAdded] = None) -> None:
        super().__init__(imagecrawler, weight, is_image_addable, on_image_added)
        # Call counters inspected by the test suite.
        self.crawl_call_count = 0
        self.reset_call_count = 0

    def crawl(self) -> int:
        """Record the call; crawl nothing."""
        self.crawl_call_count = self.crawl_call_count + 1
        return 0

    def reset(self) -> None:
        """Record the call; reset nothing."""
        self.reset_call_count = self.reset_call_count + 1
class NullCrawler(_LoggingCrawler):
    """Crawler that does nothing beyond counting calls."""
    pass
class Random3Crawler(_LoggingCrawler):
    """Crawler that adds three generic test images on every crawl."""

    def crawl(self) -> int:
        super().crawl()  # keep the call counter accurate
        added = 0
        while added < 3:
            self.images.add(Image(uri='test', source='test', is_generic=True))
            added += 1
        return 3
| {
"repo_name": "k4cg/nichtparasoup",
"path": "tests/test_10_nichtparasoup/test_core/mockable_crawler.py",
"copies": "1",
"size": "1097",
"license": "mit",
"hash": 1188192445941171200,
"line_mean": 28.6486486486,
"line_max": 86,
"alpha_frac": 0.643573382,
"autogenerated": false,
"ratio": 3.428125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9568476769582985,
"avg_score": 0.0006443224834029431,
"num_lines": 37
} |
# Only the singleton instance is public.
__all__ = ['Null']
class NullType(object):
    '''An SQL-like Null object: allows most (if not all) operations on it to succeed.
    >>> repr(Null)
    '(Null)'
    >>> Null(3,x=3)
    (Null)
    >>> [Null==Null, Null!=None, Null<Null, Null>=3]
    [(Null), (Null), (Null), (Null)]
    >>> [Null[3], Null.foo]
    [(Null), (Null)]
    >>> Null[4] = 3
    >>> del Null['bar']
    >>> 2.5*Null + 3
    (Null)
    >>> [4 not in Null, 'all' in Null]
    [True, False]
    >>> list(Null)
    []
    >>> bool(Null)
    False
    >>> len(Null)
    0
    >>> [int(Null), long(Null), float(Null), complex(Null)]
    [0, 0, 0.0, 0j]
    >>> [oct(Null), hex(Null)]
    ['(Null)', '(Null)']
    >>> type(Null)() is Null
    True
    >>> from pickle import dumps, loads
    >>> loads(dumps(Null)) is Null
    True
    '''
    # The one shared instance; __new__ always returns it.
    __singleton = None

    def __new__(cls, *args, **kwds):
        # BUGFIX: removed leftover `assert __name__ == '__main__'` debug check
        # that made the class unusable when imported from another module.
        if cls.__singleton is None:
            cls.__singleton = super(NullType, cls).__new__(cls)
        return cls.__singleton

    def __len__(self): return 0
    def __iter__(self): return; yield  # empty generator
    def __nonzero__ (self): return False
    def __contains__(self, item): return False
    def __repr__(self): return '(Null)'
    def __reduce__(self): return (type(self), ())  # pickle back to the singleton
    __oct__ = __hex__ = __repr__
    __int__ = __long__ = __len__
    def __float__(self): return 0.0
    def __call__(self, *args, **kwds): return self
    # Every remaining operation simply yields the Null singleton itself.
    __getitem__ = __getattr__ = __setitem__ = __setattr__ = __delitem__ = \
    __delattr__ = __eq__ = __ne__ = __gt__ = __ge__ = __lt__ = __le__ = \
    __neg__ = __pos__ = __abs__ = __invert__ = __add__ = __sub__ = \
    __mul__ = __div__ = __truediv__ = __floordiv__ = __mod__ = \
    __divmod__ = __pow__ = __lshift__ = __rshift__ = __and__ = __or__ = \
    __xor__ = __radd__ = __rsub__ = __rmul__ = __rdiv__ = __rtruediv__ = \
    __rfloordiv__ = __rmod__ = __rdivmod__ = __rpow__ = __rlshift__ = \
    __rrshift__ = __rand__ = __ror__ = __rxor__ = __call__

Null = NullType()
if __name__ == '__main__':
    # Run the doctests embedded in NullType's docstring.
    from doctest import testmod
    testmod()
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/576562_Yet_another_Null_object/recipe-576562.py",
"copies": "1",
"size": "2137",
"license": "mit",
"hash": -5297756056126307000,
"line_mean": 31.3787878788,
"line_max": 85,
"alpha_frac": 0.4576509125,
"autogenerated": false,
"ratio": 3.1519174041297937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41095683166297936,
"avg_score": null,
"num_lines": null
} |
# Public API: the Unavailable error plus the default policy implementations.
__all__ = [
    'Unavailable',
    # Circuit breaker.
    'TristateBreakers',
    'NO_BREAK',
    # Rate limit.
    'unlimited',
    'TokenBucket',
    # Retry.
    'no_retry',
    'ExponentialBackoff',
]
import collections
import enum
import logging
import time
from g1.asyncs.bases import timers
from g1.bases import collections as g1_collections
from g1.bases.assertions import ASSERT
# Module-level logger; used for breaker state-change reporting.
LOG = logging.getLogger(__name__)
class Unavailable(Exception):
    """When rate limit is exceeded or circuit breaker disconnects.

    Raised by ``TokenBucket`` (when ``raise_when_empty`` is set) and by
    ``TristateBreaker`` on entry while disconnected.
    """
class CircuitBreaker:
    """Interface: an async context manager guarding a single request.

    Subclasses implement the entry/exit hooks plus the success/failure
    notifications that drive the breaker's state.
    """

    async def __aenter__(self):
        raise NotImplementedError

    async def __aexit__(self, exc_type, exc, traceback):
        raise NotImplementedError

    def notify_success(self):
        raise NotImplementedError

    def notify_failure(self):
        raise NotImplementedError
class CircuitBreakers:
    """Interface: a collection of circuit breakers looked up by key."""

    def get(self, key: str) -> CircuitBreaker:
        raise NotImplementedError
@enum.unique
class _States(enum.Enum):
    # Tristate-breaker states; see TristateBreaker's docstring for semantics.
    GREEN = enum.auto()
    YELLOW = enum.auto()
    RED = enum.auto()
class _EventLog:
"""Record when events happened."""
def __init__(self, capacity):
self._log = collections.deque(maxlen=capacity)
def add(self, t):
if self._log:
ASSERT.greater(t, self._log[-1])
self._log.append(t)
def count(self, t0=None):
"""Return number of events after ``t0``."""
if t0 is None:
return len(self._log)
for i, t in enumerate(self._log):
if t >= t0:
return len(self._log) - i
return 0
def clear(self):
self._log.clear()
class TristateBreaker(CircuitBreaker):
    """Tristate circuit breaker.
    It operates in three states:
    * GREEN: This is the initial state.  When in this state, it lets all
      requests pass through.  When there are ``failure_threshold``
      failures consecutively in the last ``failure_period`` seconds, it
      changes the state to RED.
    * YELLOW: When in this state, it only lets one concurrent request
      pass through, and errs out on all others.  When there are
      ``success_threshold`` successes consecutively, it changes the
      state to GREEN.  When there is a failure, it changes the state to
      RED.
    * RED: When in this state, it lets no requests pass through.  After
      ``failure_timeout`` seconds, it changes the state to YELLOW.
    """

    def __init__(
        self,
        *,
        key,
        failure_threshold,
        failure_period,
        failure_timeout,
        success_threshold,
    ):
        # ``key`` identifies this breaker in log messages and errors.
        self._key = key
        self._failure_threshold = ASSERT.greater(failure_threshold, 0)
        self._failure_period = ASSERT.greater(failure_period, 0)
        self._failure_timeout = ASSERT.greater(failure_timeout, 0)
        self._success_threshold = ASSERT.greater(success_threshold, 0)
        self._state = _States.GREEN
        # When state is GREEN, _event_log records failures; when state
        # is YELLOW, it records successes; when state is RED, it records
        # when the state was changed to RED.
        self._event_log = _EventLog(max(failure_threshold, success_threshold))
        self._num_concurrent_requests = 0

    async def __aenter__(self):
        """Admit or reject one request according to the current state."""
        # GREEN: admit everyone.
        if self._state is _States.GREEN:
            self._num_concurrent_requests += 1
            return self
        # RED: reject while the failure timeout has not elapsed; otherwise
        # start probing by switching to YELLOW.
        if self._state is _States.RED:
            if (
                self._event_log.
                count(time.monotonic() - self._failure_timeout) > 0
            ):
                raise Unavailable(
                    'circuit breaker disconnected: %s' % self._key
                )
            self._change_state_yellow()
        ASSERT.is_(self._state, _States.YELLOW)
        # YELLOW: admit only one request at a time.
        if self._num_concurrent_requests > 0:
            raise Unavailable(
                'circuit breaker has not re-connected yet: %s' % self._key
            )
        self._num_concurrent_requests += 1
        return self

    async def __aexit__(self, exc_type, exc, traceback):
        self._num_concurrent_requests -= 1

    def notify_success(self):
        """Record a successful request; may re-connect a YELLOW breaker."""
        if self._state is _States.GREEN:
            # A success breaks any run of consecutive failures.
            self._event_log.clear()
        elif self._state is _States.YELLOW:
            self._event_log.add(time.monotonic())
            if self._event_log.count() >= self._success_threshold:
                self._change_state_green()
        else:
            ASSERT.is_(self._state, _States.RED)
            # Nothing to do here.

    def notify_failure(self):
        """Record a failed request; may disconnect the breaker."""
        if self._state is _States.GREEN:
            now = time.monotonic()
            self._event_log.add(now)
            if (
                self._event_log.count(now - self._failure_period) >=
                self._failure_threshold
            ):
                self._change_state_red(now)
        elif self._state is _States.YELLOW:
            # Any failure while probing disconnects again immediately.
            self._change_state_red(time.monotonic())
        else:
            ASSERT.is_(self._state, _States.RED)
            # Nothing to do here.

    def _change_state_green(self):
        LOG.info('TristateBreaker: change state to GREEN: %s', self._key)
        self._state = _States.GREEN
        self._event_log.clear()

    def _change_state_yellow(self):
        LOG.info('TristateBreaker: change state to YELLOW: %s', self._key)
        self._state = _States.YELLOW
        self._event_log.clear()

    def _change_state_red(self, now):
        LOG.info('TristateBreaker: change state to RED: %s', self._key)
        self._state = _States.RED
        self._event_log.clear()
        # Record when we went RED so __aenter__ can check the timeout.
        self._event_log.add(now)
class TristateBreakers(CircuitBreakers):
    """LRU-bounded collection of TristateBreaker instances, created on demand."""

    def __init__(self, **breaker_kwargs):
        # These kwargs are forwarded to every breaker this collection creates.
        self._breaker_kwargs = breaker_kwargs
        self._breakers = g1_collections.LruCache(128)

    def get(self, key):
        """Return the breaker for ``key``, creating it on first use."""
        existing = self._breakers.get(key)
        if existing is not None:
            return existing
        # pylint: disable=missing-kwoa
        created = self._breakers[key] = TristateBreaker(
            key=key,
            **self._breaker_kwargs,
        )
        return created
class NeverBreaker(CircuitBreaker):
    """Circuit breaker that never disconnects: all hooks are no-ops."""

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, traceback):
        pass

    def notify_success(self):
        pass

    def notify_failure(self):
        pass
class NeverBreakers(CircuitBreakers):
    """Collection that hands out one shared no-op breaker for every key."""

    def __init__(self):
        self._no_break = NeverBreaker()

    def get(self, key):
        del key  # every key maps to the same no-op breaker
        return self._no_break


# Shared default instance: disables circuit breaking entirely.
NO_BREAK = NeverBreakers()
async def unlimited():
    """No-op rate limiter: never waits, never raises."""
    pass
class TokenBucket:
    """Token-bucket rate limiter usable as an async callable."""

    def __init__(self, token_rate, bucket_size, raise_when_empty):
        # When true, an empty bucket raises Unavailable instead of sleeping.
        self._raise_when_empty = raise_when_empty
        self._token_rate = ASSERT.greater(token_rate, 0)
        self._token_period = 1 / self._token_rate
        self._bucket_size = ASSERT.greater(bucket_size, 0)
        # The bucket starts empty and refills over time.
        self._num_tokens = 0
        self._last_added = time.monotonic()

    async def __call__(self):
        """Take one token, sleeping (or raising) until one is available."""
        self._add_tokens()
        if self._raise_when_empty and self._num_tokens < 1:
            raise Unavailable('rate limit exceeded')
        while self._num_tokens < 1:
            await timers.sleep(self._token_period)
            self._add_tokens()
        self._num_tokens -= 1

    def _add_tokens(self):
        """Accrue tokens for the time elapsed since the last refill."""
        now = time.monotonic()
        elapsed = now - self._last_added
        refilled = self._num_tokens + elapsed * self._token_rate
        self._num_tokens = min(refilled, self._bucket_size)
        self._last_added = now
def no_retry(retry_count):  # pylint: disable=useless-return
    """Retry policy that never retries: always returns None."""
    _ = retry_count  # explicitly unused
    return None
class ExponentialBackoff:
    """Retry ``max_retries`` times with exponentially growing delays.

    NOTE: This retry policy does not implement jitter of delays; if you
    are using the ``Session`` object to write to a shared resource, you
    could suffer from write conflicts.  In that case, you should use a
    retry policy with jitter.
    """

    def __init__(self, max_retries, backoff_base):
        self._max_retries = ASSERT.greater(max_retries, 0)
        self._backoff_base = ASSERT.greater(backoff_base, 0)

    def __call__(self, retry_count):
        """Return the next delay in seconds, or None once retries are exhausted."""
        if retry_count < self._max_retries:
            return self._backoff_base * (2 ** retry_count)
        return None
| {
"repo_name": "clchiou/garage",
"path": "py/g1/http/clients/g1/http/clients/policies.py",
"copies": "1",
"size": "8292",
"license": "mit",
"hash": -2229832402979022300,
"line_mean": 27.4948453608,
"line_max": 78,
"alpha_frac": 0.5933429812,
"autogenerated": false,
"ratio": 3.8765778401122017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4969920821312202,
"avg_score": null,
"num_lines": null
} |
# Public API of the wb_api package.
__all__ = [
    "WBAPIUnauthorizedError",
    "WBAPI"
]
import requests
from requests.exceptions import HTTPError as WBAPIHTTPError
from requests.packages.urllib3.util import Retry
from requests.adapters import HTTPAdapter
from urlparse import urljoin
from functools import wraps
class WBAPIUnauthorizedError(WBAPIHTTPError):
    """HTTP 401/403 from the API: credentials missing, invalid, or expired."""
    pass
def catch_api_http_exception(f):
    """Decorator translating 401/403 HTTP errors into WBAPIUnauthorizedError."""
    @wraps(f)
    def wrapper(*args, **kwds):
        try:
            result = f(*args, **kwds)
        except WBAPIHTTPError as e:
            if e.response.status_code not in (401, 403):
                raise
            raise WBAPIUnauthorizedError(response=e.response)
        return result
    return wrapper
class FakeResponse(object):
    """Minimal stand-in for a requests Response, mimicking ``.json()``."""
    # Populated by callers before use.
    content = None
    status_code = None

    def json(self):
        """Return the preloaded content, mirroring Response.json()."""
        return self.content
class WBAPI(object):
    """Thin client for the Wristband HTTP API.

    Holds a requests Session with retry-on-500/502 mounted for both schemes,
    and a (connect, read) timeout applied to every call.
    """

    def __init__(self, base_uri, connect_timeout=5, read_timeout=30):
        self.__base_uri = base_uri
        self.__api_uri = urljoin(base_uri, "api/")
        self.__session = requests.Session()
        # Retry transient upstream failures (500/502) with exponential backoff.
        requests_http_adapter = HTTPAdapter(
            Retry(total=10, status_forcelist=[502, 500], backoff_factor=0.5))
        self.__session.mount('https://', requests_http_adapter)
        self.__session.mount('http://', requests_http_adapter)
        self.__timeout = (connect_timeout, read_timeout)

    def get_token(self):
        """Return the current auth token, or None if not logged in."""
        try:
            return self.__session.headers['Authorization'].split()[1]
        except KeyError:
            return None

    def set_token(self, token):
        """Attach ``token`` to every subsequent request."""
        self.__session.headers['Authorization'] = "Token {}".format(token)

    @catch_api_http_exception
    def login(self, username, password):
        """Exchange credentials for a token and store it on the session.

        Raises WBAPIUnauthorizedError when the server rejects the credentials.
        """
        session = self.__session
        r = session.post(
            urljoin(self.__base_uri, "token/"),
            {"username": username, "password": password}, timeout=self.__timeout)
        try:
            r.raise_for_status()
        except WBAPIHTTPError as e:
            # A failure with this exact message means bad credentials; surface
            # it as an unauthorized error.  Anything else is re-raised as-is.
            # BUGFIX: replaced a bare `except:` and an `assert` used for
            # control flow (asserts are stripped under -O) with explicit checks.
            try:
                detail = e.response.json()["non_field_errors"][0]
            except Exception:
                raise e
            if detail != "Unable to log in with provided credentials.":
                raise e
            fake_response = FakeResponse()
            fake_response.content = {"details": detail}
            fake_response.status_code = 401
            raise WBAPIUnauthorizedError(response=fake_response)
        token = r.json()['token']
        self.set_token(token)

    @catch_api_http_exception
    def get_apps(self):
        """Return the list of apps, each augmented with a name -> stage dict."""
        r = self.__session.get(urljoin(self.__api_uri, "apps/"), timeout=self.__timeout)
        r.raise_for_status()
        apps = r.json()
        return [
            dict(a.items() + {"stages": {s["name"]: s for s in a["stages"]}}.items())
            for a in apps
        ]

    @catch_api_http_exception
    def get_stages(self):
        """Return the list of stages."""
        r = self.__session.get(urljoin(self.__api_uri, "stages/"), timeout=self.__timeout)
        r.raise_for_status()
        return r.json()

    @catch_api_http_exception
    def deploy_app(self, app, stage, version):
        """Deploy ``version`` of ``app`` to ``stage``."""
        r = self.__session.put(
            urljoin(self.__api_uri, "apps/{}/stages/{}/version/{}/".format(app, stage, version)),
            timeout=self.__timeout)
        r.raise_for_status()
| {
"repo_name": "hmrc/wristband-frontend",
"path": "wb_api/__init__.py",
"copies": "1",
"size": "3302",
"license": "apache-2.0",
"hash": -2709421653575760000,
"line_mean": 30.1509433962,
"line_max": 112,
"alpha_frac": 0.5899454876,
"autogenerated": false,
"ratio": 3.92627824019025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.501622372779025,
"avg_score": null,
"num_lines": null
} |
# Public API: the warning class plus the exception hierarchy.
__all__ = [
    'WePayWarning', 'WePayError', 'WePayHTTPError', 'WePayClientError',
    'WePayServerError', 'WePayConnectionError'
]
class WePayWarning(UserWarning):
    """Warning category used by the wepay package."""
    pass
class WePayError(Exception):
    """Raised whenever a WePay API call was not successful. `WePay API Errors
    Documentation <https://www.wepay.com/developer/reference/errors>`_
    """

    def __init__(self, error, error_code, error_description):
        self._error = error
        self._error_code = error_code
        self._error_description = error_description
        super(WePayError, self).__init__()

    def __str__(self):
        return "{0} ({1}): {2}".format(
            self.error, self.error_code, self.error_description)

    def __repr__(self):
        return "<{0}> {1}".format(self.__class__.__name__, self)

    @property
    def error(self):
        """Generic ``error`` category. Returned by WePay."""
        return self._error

    @property
    def error_code(self):
        """A specific "error_code" that you can use to program responses to
        errors. Returned by WePay.
        """
        return self._error_code

    @property
    def error_description(self):
        """A human readable ``error_description`` that explains why the error
        happened. Returned by WePay.
        """
        return self._error_description
class WePayHTTPError(WePayError):
    """Base class for WePay errors that carry an HTTP status code."""

    def __init__(self, http_error, status_code,
                 error=None, error_code=None, error_description=None):
        self._http_error = http_error
        self._status_code = status_code
        # WePay may omit error details on unexpected failures; substitute
        # placeholders so the base-class fields are always populated.
        super(WePayHTTPError, self).__init__(
            error if error else 'unknown',
            error_code if error_code else -1,
            error_description if error_description else "Unknown")

    def __str__(self):
        base_message = super(WePayHTTPError, self).__str__()
        return "HTTP %s - %s" % (self.status_code, base_message)

    @property
    def status_code(self):
        """Error Code as specified by RFC 2616."""
        return self._status_code

    @property
    def http_error(self):
        """Instance of :exc:`urllib.error.HTTPError` or
        :exc:`requests.exceptions.HTTPError`, depending on the library you chose.
        """
        return self._http_error
# 4xx: the request itself was at fault.
class WePayClientError(WePayHTTPError):
    """This is a 4xx type error, which, most of the time, carries important
    error information about the object of interest.
    """
# 5xx: the server side failed.
class WePayServerError(WePayHTTPError):
    """This is a 5xx type error, which, most of the time, means there is an
    error in implementation or some unknown WePay Error; in the latter case
    there is a chance there will be no `error`, `error_code` or
    `error_description` from WePay. It is recommended this exception is to be
    ignored or handled separately in production.
    """
class WePayConnectionError(Exception):
    """Raised when there is a problem connecting to WePay servers, for
    instance when a request times out.
    """

    def __init__(self, error):
        self._error = error
        super(WePayConnectionError, self).__init__()

    def __str__(self):
        return "{0} - {1}".format(type(self.error).__name__, self.error)

    def __repr__(self):
        return "<{0}> {1}".format(type(self).__name__, self)

    @property
    def error(self):
        """Original exception raised by `urllib` or `requests` library. It will
        be either :exc:`urllib.error.URLError` or
        a subclass of :exc:`requests.exceptions.RequestExeption`. See their
        corresponding documentation if necessary.
        """
        return self._error
| {
"repo_name": "lehins/python-wepay",
"path": "wepay/exceptions.py",
"copies": "1",
"size": "3650",
"license": "mit",
"hash": -4276097600300843000,
"line_mean": 28.9180327869,
"line_max": 89,
"alpha_frac": 0.6216438356,
"autogenerated": false,
"ratio": 4.078212290502793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5199856126102793,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.
__all__ = (
    'Worker',
)
import sys
import time
import logging
import threading
from Queue import Queue, Empty
from pyramid.request import Request
import model as M
from .task import Task, Function
# General module logger plus a dedicated logger for unexpected exceptions.
log = logging.getLogger(__name__)
exc_log = logging.getLogger('exc_logger')
class Worker(object):
def __init__(
self, app, name, qnames,
chapman_path, registry,
num_threads=1, sleep=0.2, raise_errors=False):
self._app = app
self._name = name
self._qnames = qnames
self._chapman_path = chapman_path
self._registry = registry
self._num_threads = num_threads
self._sleep = sleep
Function.raise_errors = raise_errors
self._handler_threads = []
self._num_active_messages = 0
self._send_event = threading.Event()
self._shutdown = False # flag to indicate worker is shutting down
def start(self):
M.doc_session.db.collection_names() # force connection & auth
sem = threading.Semaphore(self._num_threads)
q = Queue()
self._handler_threads = [
threading.Thread(
name='dispatch',
target=self.dispatcher,
args=(sem, q))]
self._handler_threads += [
threading.Thread(
name='worker-%d' % x,
target=self.worker,
args=(sem, q))
for x in range(self._num_threads)]
for t in self._handler_threads:
t.setDaemon(True)
t.start()
def run(self):
log.info('Entering event thread')
conn = M.doc_session.bind.bind.conn
conn.start_request()
chan = M.Message.channel.new_channel()
chan.pub('start', self._name)
@chan.sub('ping')
def handle_ping(chan, msg):
data = msg['data']
if data['worker'] in (self._name, '*'):
data['worker'] = self._name
chan.pub('pong', data)
@chan.sub('kill')
def handle_kill(chan, msg):
if msg['data'] in (self._name, '*'):
log.error('Received %r, exiting', msg)
sys.exit(0)
@chan.sub('shutdown')
def handle_shutdown(chan, msg):
if msg['data'] in (self._name, '*'):
log.error('Received %r, shutting down gracefully', msg)
self._shutdown = True
raise StopIteration()
@chan.sub('send')
def handle_send(chan, msg):
self._send_event.set()
while True:
try:
chan.handle_ready(await=True, raise_errors=True)
except StopIteration:
break
time.sleep(self._sleep)
for t in self._handler_threads:
t.join()
def _waitfunc(self):
if self._shutdown:
raise StopIteration()
self._send_event.clear()
self._send_event.wait(self._sleep)
def dispatcher(self, sem, q):
log.info('Entering dispatcher thread')
while not self._shutdown:
sem.acquire()
try:
msg, state = _reserve_msg(
self._name, self._qnames, self._waitfunc)
except StopIteration:
break
except Exception as err:
exc_log.exception(
'Error reserving message: %r, waiting 5s and continuing',
err)
sem.release()
time.sleep(5)
continue
self._num_active_messages += 1
q.put((msg, state))
log.info('Exiting dispatcher thread')
def worker(self, sem, q):
log.info('Entering chapmand worker thread')
while not self._shutdown:
try:
msg, state = q.get(timeout=0.25)
except Empty:
continue
try:
log.info('Received %r', msg)
task = Task.from_state(state)
# task.handle(msg, 25)
if task.path:
req = Request.blank(task.path, method='CHAPMAN')
else:
req = Request.blank(self._chapman_path, method='CHAPMAN')
req.registry = self._registry
req.environ['chapmand.task'] = task
req.environ['chapmand.message'] = msg
for x in self._app(req.environ, lambda *a,**kw:None):
pass
except Exception as err:
exc_log.exception('Unexpected error in worker thread: %r', err)
time.sleep(self._sleep)
finally:
self._num_active_messages -= 1
sem.release()
log.info('Exiting chapmand worker thread')
def handle_messages(self):
    """Drain the queues synchronously; return once no message remains."""
    while True:
        msg, state = M.Message.reserve(self._name, self._qnames)
        if msg is None:
            break
        Task.from_state(state).handle(msg)
def _reserve_msg(name, qnames, waitfunc):
    """Reserve the next fully-formed message for worker `name`.

    Loops until a reservation yields both a message and its state;
    calls `waitfunc` between attempts when the queues are empty (it may
    raise StopIteration to abort).
    """
    while True:
        msg, state = M.Message.reserve(name, qnames)
        if msg is not None and state is not None:
            return msg, state
        if msg is None:
            # Nothing queued: sleep/wait before retrying.
            waitfunc()
        # msg without state: retry immediately.
| {
"repo_name": "synappio/chapman",
"path": "chapman/worker.py",
"copies": "1",
"size": "5376",
"license": "mit",
"hash": 4127523853321743400,
"line_mean": 30.6235294118,
"line_max": 79,
"alpha_frac": 0.51171875,
"autogenerated": false,
"ratio": 4.280254777070064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5291973527070064,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'WriteRectilinearGridToUBC',
'WriteImageDataToUBC',
]
__displayname__ = 'Writers'
import os
import numpy as np
from .. import interface
from ..base import WriterBase
class ubcTensorMeshWriterBase(WriterBase):
    """A base class to assist in writing data objects to the UBC Tensor Mesh
    format.
    """

    __displayname__ = 'UBC Format Writer Base'
    __category__ = 'base'

    def __init__(self, inputType='vtkRectilinearGrid'):
        WriterBase.__init__(self, inputType=inputType, ext='msh')
        # These MUST be set by children before write_mesh_3d/write_models
        self.xcells = None
        self.ycells = None
        self.zcells = None
        self.origin = None

    def write_mesh_3d(self, nx, ny, nz, filename):
        """Write a 3D Tensor Mesh (cell counts, origin, cell widths) to
        the UBC format at `filename`."""
        def arr2str(arr):
            # Space-separated cell widths on a single line.
            return ' '.join(map(str, arr))

        ox, oy, oz = self.origin

        # Write out grid / mesh
        with open(filename, 'w') as f:
            f.write('%d %d %d\n' % (nx, ny, nz))
            # NOTE(review): '%d' truncates non-integer origins — confirm
            # callers guarantee integral origin coordinates.
            f.write('%d %d %d\n' % (ox, oy, oz))
            f.write('%s\n' % arr2str(self.xcells))
            f.write('%s\n' % arr2str(self.ycells))
            f.write('%s\n' % arr2str(self.zcells))
        return

    def write_models(self, grd, filename):
        """Write every cell-data array of `grd` as a UBC model (.mod)
        file in the same directory as the mesh file `filename`."""
        nx, ny, nz = grd.GetDimensions()
        nx -= 1
        ny -= 1
        nz -= 1

        def reshape_model(model):
            # Swap axes because VTK structures the coordinates a bit differently
            # - This is absolutely crucial!
            # - Do not play with unless you know what you are doing!
            model = np.reshape(model, (nz, ny, nx))
            model = np.swapaxes(model, 0, 2)
            model = np.swapaxes(model, 0, 1)
            # Now reverse Z axis
            model = model[:, :, ::-1]
            return model.flatten()

        # Fix: compute the output directory once from the mesh filename.
        # Previously the `filename` argument was clobbered inside the loop,
        # so later iterations derived the path from a model file instead.
        path = os.path.dirname(filename)
        for i in range(grd.GetCellData().GetNumberOfArrays()):
            vtkarr = grd.GetCellData().GetArray(i)
            arr = interface.convert_array(vtkarr)
            arr = reshape_model(arr)
            model_fname = '%s/%s.mod' % (path, vtkarr.GetName().replace(' ', '_'))
            np.savetxt(
                model_fname,
                arr,
                comments='! ',
                header='Mesh File: %s' % os.path.basename(model_fname),
                fmt=self.get_format(),
            )
        return
class WriteRectilinearGridToUBC(ubcTensorMeshWriterBase):
    """Writes a ``vtkRectilinearGrid`` data object to the UBC Tensor Mesh format.
    This file reader currently only handles 3D data.
    """

    __displayname__ = 'Write ``vtkRectilinearGrid`` to UBC Tensor Mesh'
    __category__ = 'writer'

    def __init__(self):
        ubcTensorMeshWriterBase.__init__(self, inputType='vtkRectilinearGrid')

    def perform_write_out(self, input_data_object, filename, object_name):
        """Write out a ``vtkRectilinearGrid`` to the UBC file format"""
        grd = input_data_object
        nx, ny, nz = grd.GetDimensions()

        # Convert point coordinates to cell spacings.
        xcoords = interface.convert_array(grd.GetXCoordinates())
        ycoords = interface.convert_array(grd.GetYCoordinates())
        zcoords = interface.convert_array(grd.GetZCoordinates())
        self.xcells = np.diff(xcoords)
        self.ycells = np.diff(ycoords)
        self.zcells = np.diff(zcoords)

        # Origin is the top-southwest corner (min X, min Y, max Z); this
        # relies on the sorted coordinates of the input type. UBC counts
        # Z downward, so the Z cell widths are reversed.
        self.origin = (np.min(xcoords), np.min(ycoords), np.max(zcoords))
        self.zcells = self.zcells[::-1]

        # Mesh first, then the per-array model files.
        self.write_mesh_3d(nx - 1, ny - 1, nz - 1, filename)
        self.write_models(grd, filename)
        # Always return 1 from pipeline methods or seg-faults will occur
        return 1
class WriteImageDataToUBC(ubcTensorMeshWriterBase):
    """Writes a ``vtkImageData`` (uniform grid) data object to the UBC Tensor
    Mesh format. This file reader currently only handles 3D data.
    """

    __displayname__ = 'Write ``vtkImageData`` to UBC Tensor Mesh'
    __category__ = 'writer'

    def __init__(self):
        ubcTensorMeshWriterBase.__init__(self, inputType='vtkImageData')

    def perform_write_out(self, input_data_object, filename, object_name):
        """Write out a ``vtkImageData`` to the UBC file format"""
        grd = input_data_object

        # Cell counts are one less than the point dimensions.
        nx, ny, nz = (d - 1 for d in grd.GetDimensions())

        # Uniform grid: every cell along an axis has the same width.
        dx, dy, dz = grd.GetSpacing()
        self.xcells = np.full(nx, dx)
        self.ycells = np.full(ny, dy)
        self.zcells = np.full(nz, dz)

        # Origin: shift Z to the top of the grid (top-southwest corner).
        ox, oy, oz = grd.GetOrigin()
        self.origin = (ox, oy, oz + nz * dz)

        # Mesh first, then the per-array model files.
        self.write_mesh_3d(nx, ny, nz, filename)
        self.write_models(grd, filename)
        # Always return 1 from pipeline methods or seg-faults will occur
        return 1
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/ubc/write.py",
"copies": "1",
"size": "5502",
"license": "bsd-3-clause",
"hash": 3906998049047916500,
"line_mean": 30.2613636364,
"line_max": 81,
"alpha_frac": 0.5756088695,
"autogenerated": false,
"ratio": 3.577373211963589,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4652982081463589,
"avg_score": null,
"num_lines": null
} |
# Fixed: the public-API list was misspelled `__all___` (three trailing
# underscores), so `from ... import *` silently ignored it.
__all__ = [
    'YubicoError',
    'StatusCodeError',
    'InvalidClientIdError',
    'InvalidValidationResponse',
    'SignatureVerificationError'
]
class YubicoError(Exception):
    """Root of the exception hierarchy for Yubico-related errors."""
class StatusCodeError(YubicoError):
    """Raised when the Yubico server replies with an error status code."""

    def __init__(self, status_code):
        self.status_code = status_code

    def __str__(self):
        template = 'Yubico server returned the following status code: %s'
        return template % (self.status_code)
class InvalidClientIdError(YubicoError):
    """Raised when the supplied API client ID is unknown to the server."""

    def __init__(self, client_id):
        self.client_id = client_id

    def __str__(self):
        template = 'The client with ID %s does not exist'
        return template % (self.client_id)
class InvalidValidationResponse(YubicoError):
    """Raised when the validation server response fails a sanity check."""

    def __init__(self, reason, response, parameters=None):
        self.reason = reason
        self.response = response
        self.parameters = parameters
        # Mirror the reason under .message for backward compatibility.
        self.message = self.reason

    def __str__(self):
        return self.reason
class SignatureVerificationError(YubicoError):
    """Raised when the response HMAC signature does not match the one we
    computed."""

    def __init__(self, generated_signature, response_signature):
        self.generated_signature = generated_signature
        self.response_signature = response_signature

    def __str__(self):
        # Fixed: the message was wrapped in repr() (producing a quoted
        # string) and lacked a space before "(expected".
        return ('Server response message signature verification failed '
                '(expected %s, got %s)' % (self.generated_signature,
                                           self.response_signature))
| {
"repo_name": "Yubico/python-yubico-client-dpkg",
"path": "yubico_client/yubico_exceptions.py",
"copies": "1",
"size": "1465",
"license": "bsd-3-clause",
"hash": 6885111900151873000,
"line_mean": 27.7254901961,
"line_max": 77,
"alpha_frac": 0.628668942,
"autogenerated": false,
"ratio": 4.234104046242774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5362772988242774,
"avg_score": null,
"num_lines": null
} |
# Fixed: the public-API list was misspelled `__all___` (three trailing
# underscores), so `from ... import *` silently ignored it.
__all__ = [
    'YubicoError',
    'StatusCodeError',
    'InvalidClientIdError',
    'SignatureVerificationError'
]
class YubicoError(Exception):
    """Base exception type for all Yubico client errors."""
class StatusCodeError(YubicoError):
    """Raised for an error status code in the server reply."""

    def __init__(self, status_code):
        self.status_code = status_code

    def __str__(self):
        message = 'Yubico server returned the following status code: %s'
        return message % (self.status_code)
class InvalidClientIdError(YubicoError):
    """Raised when the given client ID does not exist on the server."""

    def __init__(self, client_id):
        self.client_id = client_id

    def __str__(self):
        message = 'The client with ID %s does not exist'
        return message % (self.client_id)
class SignatureVerificationError(YubicoError):
    """Raised when the response signature does not match the expected
    HMAC signature."""

    def __init__(self, generated_signature, response_signature):
        self.generated_signature = generated_signature
        self.response_signature = response_signature

    def __str__(self):
        # Fixed: the message was wrapped in repr() (producing a quoted
        # string) and lacked a space before "(expected".
        return ('Server response message signature verification failed '
                '(expected %s, got %s)' % (self.generated_signature,
                                           self.response_signature))
| {
"repo_name": "meddius/yubisaslauthd",
"path": "yubico/yubico_exceptions.py",
"copies": "1",
"size": "1140",
"license": "bsd-3-clause",
"hash": -1026294211435872100,
"line_mean": 28.2307692308,
"line_max": 77,
"alpha_frac": 0.6219298246,
"autogenerated": false,
"ratio": 4.1454545454545455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5267384370054545,
"avg_score": null,
"num_lines": null
} |
__all__ = ['oagprop', 'staticproperty']
class oagprop(object):
    """Responsible for maintaining _oagcache on decorated properties.

    A property-like descriptor: reads first consult ``obj.cache``; on a
    miss the value is computed via ``fget`` and (optionally) cached.
    """

    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        self.fget = fget
        self.fset = fset
        self.fdel = fdel
        if doc is None and fget is not None:
            doc = fget.__doc__
        self.__doc__ = doc

    def __get__(self, obj, searchwin=None, searchoffset=None,
                searchdesc=False, cache=True):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        # Fixed: the cache lookup previously used a bare `except:` plus a
        # deliberately raised Exception as control flow, which also
        # swallowed KeyboardInterrupt/SystemExit. A cache miss (any
        # Exception from cache.match) now simply falls through.
        if cache:
            try:
                return obj.cache.match(self.fget.__name__)
            except Exception:
                pass
        subnode = self.fget(obj, searchwin=searchwin,
                            searchoffset=searchoffset, searchdesc=searchdesc)
        if subnode is not None:
            from ._graph import OAG_RootNode
            if isinstance(subnode, OAG_RootNode):
                from ._rpc import reqcls
                reqcls(obj).register(subnode.rpc.url, self.fget.__name__)
            if cache:
                obj.cache.put(self.fget.__name__, subnode)
        return subnode

    def __set__(self, obj, value):
        # Intentionally a no-op: assignments to the property are ignored.
        pass
class staticproperty(property):
    """Property readable on the class itself (no instance required); the
    wrapped function receives the owner class as its argument."""

    def __get__(self, cls, owner):
        bound = classmethod(self.fget).__get__(None, owner)
        return bound()
"repo_name": "kchoudhu/openarc",
"path": "openarc/_util.py",
"copies": "1",
"size": "1433",
"license": "bsd-3-clause",
"hash": 9122870412233465000,
"line_mean": 36.7368421053,
"line_max": 107,
"alpha_frac": 0.5624563852,
"autogenerated": false,
"ratio": 3.9916434540389973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5054099839238997,
"avg_score": null,
"num_lines": null
} |
__all__ = ['ObjectFeatureUnion', 'ListConcatTransformer']
from collections import OrderedDict
import numpy as np
from . import PureTransformer
class ObjectFeatureUnion(PureTransformer):
    """Apply several named transformer steps to the same input and combine
    the per-instance outputs — as a dict keyed by step name when
    ``as_dict=True``, otherwise as a list of per-step values.
    """

    def __init__(self, *args, as_dict=True, **kwargs):
        kwargs.setdefault('nparray', True)
        # Fixed: `np.object` was a deprecated alias of the builtin `object`
        # and was removed in NumPy 1.24; use the builtin directly (same
        # dtype).
        kwargs.setdefault('nparray_dtype', object)
        super(ObjectFeatureUnion, self).__init__(**kwargs)

        self.as_dict = as_dict
        self.steps = OrderedDict()
        for (k, step) in args:
            self.steps[k] = step
    #end def

    def fit(self, *args, **fit_params):
        # Fit every step on the same data.
        for k, step in self.steps.items():
            step.fit(*args, **fit_params)

        return self
    #end def

    def _transform(self, X, y=None, **kwargs):
        transformed = []
        first = True
        if self.as_dict:
            # NOTE(review): this branch does not forward `y` to the steps
            # while the list branch does — confirm the asymmetry is
            # intended.
            for k, step in self.steps.items():
                step_transformed = step.transform(X, **kwargs)
                for i, t in enumerate(step_transformed):
                    if first:
                        transformed.append({k: t})
                    else:
                        transformed[i][k] = t
                #end for
                first = False
            #end for
        else:
            for k, step in self.steps.items():
                step_transformed = step.transform(X, y=y, **kwargs)
                for i, t in enumerate(step_transformed):
                    if first:
                        transformed.append([t])
                    else:
                        transformed[i].append(t)
                #end for
                # Fixed: `first` was never reset in this branch, so every
                # step after the first appended new singleton rows instead
                # of extending the existing per-instance lists.
                first = False
            #end for
        #end if

        return transformed
    #end def
#end class
class ListConcatTransformer(PureTransformer):
    """Concatenate the per-instance list outputs of several transformer
    steps into one list per instance."""

    def __init__(self, steps=None, **kwargs):
        kwargs.setdefault('nparray', False)
        super(ListConcatTransformer, self).__init__(**kwargs)
        # Fixed: `steps=[]` was a mutable default argument; default to
        # None and materialize a fresh list per instance.
        self.steps = [] if steps is None else steps
    #end def

    def fit(self, *args, **kwargs):
        # Fit every step on the same data.
        for step in self.steps:
            step.fit(*args, **kwargs)

        return self
    #end def

    def _transform(self, X, *args, **kwargs):
        N = len(X)
        transformed = [[] for i in range(N)]
        for step in self.steps:
            transformed_step = step.transform(X, *args, **kwargs)
            # Each step must produce exactly one output per instance.
            assert len(transformed_step) == N
            for j in range(N):
                transformed[j] += transformed_step[j]
        #end for

        return transformed
    #end def
#end class
| {
"repo_name": "skylander86/ycml",
"path": "ycml/transformers/union.py",
"copies": "1",
"size": "2719",
"license": "apache-2.0",
"hash": 6174945584202147000,
"line_mean": 27.0309278351,
"line_max": 83,
"alpha_frac": 0.5211474807,
"autogenerated": false,
"ratio": 4.281889763779527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5303037244479527,
"avg_score": null,
"num_lines": null
} |
__all__ = ('object_property', 'bool_property',
'array_property', 'set_property', 'dict_property')
from objc import ivar, selector, _C_ID, _C_NSBOOL, _C_BOOL, NULL, _C_NSUInteger
from objc import lookUpClass
import collections
from copy import copy as copy_func
import sys
NSSet = lookUpClass('NSSet')
NSObject = lookUpClass('NSObject')
if sys.version_info[0] == 2:
    # Python 2: use the lazy xrange as range; selector names are already
    # native str, so _str is the identity.
    range = xrange

    def _str(value):
        return value

else: # pragma: no cover (py3k)
    # Python 3: int subsumes long; selector names arrive as bytes and
    # must be decoded to str.
    long = int

    def _str(value):
        return value.decode('ascii')
def attrsetter(prop, name, copy):
    """Build a setter that stores its value into attribute `name`.

    With copy=True the stored value is copied first (``-copy`` for
    NSObject instances, ``copy.copy`` otherwise); with copy=False the
    value is stored as-is. `prop` is the property name (unused here,
    kept for the call sites).
    """
    if not copy:
        def func(self, value):
            setattr(self, name, value)
        return func

    def func(self, value):
        if isinstance(value, NSObject):
            setattr(self, name, value.copy())
        else:
            setattr(self, name, copy_func(value))
    return func
def attrgetter(name):
    """Build a getter returning attribute `name` of its argument."""
    def func(self):
        value = getattr(self, name)
        return value
    return func
def _return_value(value):
def func(self):
return value
return func
def _dynamic_getter(name):
def getter(object):
m = getattr(object.pyobjc_instanceMethods, name)
return m()
getter.__name__ = name
return getter
def _dynamic_setter(name):
def setter(object, value):
m = getattr(object.pyobjc_instanceMethods, name)
return m(value)
setter.__name__ = name
return setter
class object_property (object):
    # KVO/KVC-aware property for Cocoa subclasses. When the owning class
    # is built by PyObjC, __pyobjc_class_setup__ registers the backing
    # ivar, the Objective-C getter/setter selectors, an optional
    # validate<Key>:error: method and the keyPathsForValuesAffecting<Key>
    # class method.
    def __init__(self, name=None,
            read_only=False, copy=False, dynamic=False,
            ivar=None, typestr=_C_ID, depends_on=None):
        self.__created = False   # set once class setup has run
        self.__inherit = False   # set on clones made for subclasses
        self._name = name
        self._typestr = typestr
        self._ro = read_only
        self._copy = copy
        self._dynamic = dynamic
        self._ivar = ivar
        self._getter = None
        self._setter = None
        self._validate = None
        if depends_on is None:
            self._depends_on = None
        else:
            self._depends_on = set(depends_on)

        self.__getprop = None
        self.__setprop = None
        self.__parent = None

    def _clone(self):
        # Copy used when a subclass overrides getter/setter/validate on an
        # already-created property; the clone inherits the accessors and
        # remembers its parent.
        if self._depends_on is None:
            depends = None
        else:
            depends = self._depends_on.copy()

        v = type(self)(name=self._name,
                read_only=self._ro, copy=self._copy, dynamic=self._dynamic,
                ivar=self._ivar, typestr=self._typestr, depends_on=depends)
        v.__inherit = True
        v.__getprop = self.__getprop
        v.__setprop = self.__setprop
        v.__parent = self
        return v

    def __pyobjc_class_setup__(self, name, class_dict, instance_methods, class_methods):
        # Called by PyObjC's class builder with the attribute name, the
        # class dict under construction and the method registries.
        self.__created = True
        if self._name is None:
            self._name = name

        if self._ivar is not NULL:
            if self._ivar is None:
                # Default ivar name: underscore-prefixed property name.
                ivname = '_' + self._name
            else:
                ivname = self._ivar

            if self.__parent is None:
                ivar_ref = ivar(name=ivname, type=self._typestr)
                class_dict[ivname] = ivar_ref

        if self._ro:
            self._setter = None
        else:
            # Objective-C setter selector: set<Name>:
            setterName = b'set' + name[0].upper().encode('latin1') + name[1:].encode('latin1') + b':'
            signature = b'v@:' + self._typestr
            if self._setter is None:
                if self.__inherit:
                    # Clone: accessors were inherited from the parent.
                    pass
                elif self._dynamic:
                    dynSetterName = 'set' + name[0].upper() + name[1:] + '_'
                    self.__setprop = _dynamic_setter(dynSetterName)
                    instance_methods.add(setterName)

                else:
                    if self._ivar is NULL:
                        raise ValueError(
                            "Cannot create default setter for property "
                            "without ivar")

                    setprop = selector(
                        attrsetter(self._name, ivname, self._copy),
                        selector=setterName,
                        signature=signature
                    )
                    setprop.isHidden = True
                    instance_methods.add(setprop)

                    # Use dynamic setter to avoid problems when subclassing
                    self.__setprop = _dynamic_setter(_str(setterName))
            else:
                setprop = selector(
                    self._setter,
                    selector=setterName,
                    signature=signature
                )
                setprop.isHidden = True
                instance_methods.add(setprop)

                # Use dynamic setter to avoid problems when subclassing
                self.__setprop = _dynamic_setter(_str(setterName))

        if self._typestr in (_C_NSBOOL, _C_BOOL):
            # Boolean getters follow the Cocoa is<Name> convention.
            getterName = b'is' + name[0].upper().encode('latin1') + name[1:].encode('latin1')
        else:
            getterName = self._name.encode('latin1')

        if self._getter is None:
            if self.__inherit:
                pass
            elif self._dynamic:
                if self._typestr in (_C_NSBOOL, _C_BOOL):
                    dynGetterName = 'is' + name[0].upper() + name[1:]
                else:
                    dynGetterName = self._name
                self.__getprop = _dynamic_getter(dynGetterName)
                instance_methods.add(getterName)

            else:
                if self._ivar is NULL:
                    raise ValueError(
                        "Cannot create default getter for property without ivar")

                self.__getprop = selector(
                    attrgetter(ivname),
                    selector=getterName,
                    signature=self._typestr + b'@:')
                self.__getprop.isHidden = True
                instance_methods.add(self.__getprop)

        else:
            self.__getprop = getprop = selector(
                self._getter,
                selector=getterName,
                signature=self._typestr + b'@:')
            getprop.isHidden = True
            instance_methods.add(getprop)

        if self._validate is not None:
            # Key-value validation: validate<Key>:error:
            selName = b'validate' + self._name[0].upper().encode('latin') + self._name[1:].encode('latin') + b':error:'
            signature = _C_NSBOOL + b'@:N^@o^@'

            validate = selector(
                self._validate,
                selector=selName,
                signature=signature)
            class_dict[validate.selector] = validate
            instance_methods.add(validate)

        if self._depends_on:
            if self.__parent is not None:
                if self.__parent._depends_on:
                    # Merge inherited dependent key paths into the clone.
                    self._depends_on.update(self.__parent._depends_on.copy())

                # NOTE(review): self-assignment below is a no-op.
                self._depends_on = self._depends_on

            affecting = selector(
                _return_value(NSSet.setWithArray_(list(self._depends_on))),
                selector = b'keyPathsForValuesAffecting' + self._name[0].upper().encode('latin1') + self._name[1:].encode('latin1'),
                signature = b'@@:',
                isClassMethod=True)
            class_dict[affecting.selector] = affecting
            class_methods.add(affecting)

    def __get__(self, object, owner):
        # Class-level access returns the descriptor itself.
        if object is None:
            return self

        return self.__getprop(object)

    def __set__(self, object, value):
        if self.__setprop is None:
            raise ValueError("setting read-only property " + self._name)

        return self.__setprop(object, value)

    def __delete__(self, object):
        raise TypeError("cannot delete property " + self._name)

    def depends_on(self, keypath):
        # Register a key path whose changes affect this property's value.
        if self._depends_on is None:
            self._depends_on = set()
        self._depends_on.add(keypath)

    def getter(self, function):
        # Decorator installing a custom getter; clones after class setup.
        if self.__created:
            v = self._clone()
            v._getter = function
            return v

        self._getter = function
        return self

    def setter(self, function):
        # Decorator installing a custom setter; clones after class setup
        # (and clears the read-only flag on the clone).
        if self.__created:
            v = self._clone()
            v._ro = False
            v._setter = function
            return v

        if self._ro:
            # (sic: "settter" typo preserved in the runtime message)
            raise ValueError("Defining settter for read-only property")

        self._setter = function
        return self

    def validate(self, function):
        # Decorator installing a KVC validation method.
        if self._ro:
            raise ValueError("Defining validator for read-only property")

        if self.__created:
            v = self._clone()
            v._validate = function
            return v

        self._validate = function
        return self
class bool_property (object_property):
    """object_property preconfigured with a boolean type encoding, so the
    generated Objective-C getter is named ``is<Name>``."""

    def __init__(self, name=None, read_only=False, copy=False,
                 dynamic=False, ivar=None, typestr=_C_NSBOOL):
        super(bool_property, self).__init__(name, read_only, copy,
                                            dynamic, ivar, typestr)
NSIndexSet = lookUpClass('NSIndexSet')
NSMutableIndexSet = lookUpClass('NSMutableIndexSet')
NSKeyValueChangeSetting = 1
NSKeyValueChangeInsertion = 2
NSKeyValueChangeRemoval = 3
NSKeyValueChangeReplacement = 4
# Helper function for (not) pickling array_proxy instances
# NOTE: Don't remove this function, it can be referenced from
# pickle files.
def _id(value):
return value
# FIXME: split into two: array_proxy and mutable_array_proxy
class array_proxy (collections.MutableSequence):
    # Mutable-sequence proxy over a property's underlying list that emits
    # the Cocoa willChange/didChange KVO notifications around every
    # mutation.
    # NOTE(review): collections.MutableSequence moved to collections.abc
    # and the old alias was removed in Python 3.10 — confirm the supported
    # Python versions for this module.
    # XXX: The implemenation should be complete, but is currently not
    # tested.
    __slots__ = ('_name', '_parent', '__wrapped', '_ro')

    def __init__(self, name, parent, wrapped, read_only):
        # name: the property (KVO key) name
        # parent: the Cocoa object owning the property
        # wrapped: the property descriptor; the real list is fetched via
        #   its __getvalue__ on every access
        # read_only: when true, every mutating method raises ValueError
        self._name = name
        self._parent = parent
        self._ro = read_only
        self.__wrapped = wrapped

    @property
    def _wrapped(self):
        # Always fetch the live underlying list from the descriptor.
        return self.__wrapped.__getvalue__(self._parent)

    def __indexSetForIndex(self, index):
        # Translate a Python int or slice into an NSIndexSet for KVO.
        if isinstance(index, slice):
            result = NSMutableIndexSet.alloc().init()
            start, stop, step = index.indices(len(self._wrapped))
            for i in range(start, stop, step):
                result.addIndex_(i)

            return result

        elif isinstance(index, (int, long)):
            if index < 0:
                # Normalize negative indices before building the set.
                v = len(self) + index
                if v < 0:
                    raise IndexError(index)
                return NSIndexSet.alloc().initWithIndex_(v)
            else:
                return NSIndexSet.alloc().initWithIndex_(index)

        else:
            raise TypeError(index)

    def __repr__(self):
        return '<array proxy for property ' + self._name + ' ' + repr(self._wrapped) + '>'

    def __reduce__(self):
        # Ensure that the proxy itself doesn't get stored
        # in pickles.
        return _id, (self._wrapped,)

    def __getattr__(self, name):
        # Default: just defer to wrapped list
        return getattr(self._wrapped, name)

    def __len__(self):
        return self._wrapped.__len__()

    def __getitem__(self, index):
        return self._wrapped[index]

    def __setitem__(self, index, value):
        if self._ro:
            raise ValueError("Property '%s' is read-only"%(self._name,))

        indexes = self.__indexSetForIndex(index)
        # NOTE(review): willChange uses NSKeyValueChangeSetting while
        # didChange uses NSKeyValueChangeReplacement; KVO expects a
        # matched pair — confirm this asymmetry is intentional.
        self._parent.willChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeSetting,
                indexes, self._name)
        try:
            self._wrapped[index] = value
        finally:
            self._parent.didChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeReplacement,
                indexes, self._name)

    def __delitem__(self, index):
        if self._ro:
            raise ValueError("Property '%s' is read-only"%(self._name,))

        indexes = self.__indexSetForIndex(index)
        self._parent.willChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeRemoval,
                indexes, self._name)
        try:
            del self._wrapped[index]
        finally:
            self._parent.didChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeRemoval,
                indexes, self._name)

    def append(self, value):
        if self._ro:
            raise ValueError("Property '%s' is read-only"%(self._name,))

        index = len(self)
        indexes = NSIndexSet.alloc().initWithIndex_(index)
        self._parent.willChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeInsertion,
                indexes, self._name)
        try:
            self._wrapped.append(value)
        finally:
            self._parent.didChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeInsertion,
                indexes, self._name)

    def insert(self, index, value):
        if self._ro:
            raise ValueError("Property '%s' is read-only"%(self._name,))

        if isinstance(index, slice):
            raise TypeError("insert argument 1 is a slice")

        indexes = self.__indexSetForIndex(index)
        self._parent.willChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeInsertion,
                indexes, self._name)
        try:
            self._wrapped.insert(index, value)
        finally:
            self._parent.didChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeInsertion,
                indexes, self._name)

    def pop(self, index=-1):
        if self._ro:
            raise ValueError("Property '%s' is read-only"%(self._name,))

        if isinstance(index, slice):
            # (sic: error message copied from insert)
            raise TypeError("insert argument 1 is a slice")

        indexes = self.__indexSetForIndex(index)
        self._parent.willChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeRemoval,
                indexes, self._name)
        try:
            return self._wrapped.pop(index)
        finally:
            self._parent.didChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeRemoval,
                indexes, self._name)

    def extend(self, values):
        # XXX: This is suboptimal but correct
        if self._ro:
            raise ValueError("Property '%s' is read-only"%(self._name,))

        # Materialize once: `values` may be a one-shot iterator.
        values = list(values)
        indexes = NSIndexSet.alloc().initWithIndexesInRange_((len(self), len(values)))
        self._parent.willChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeInsertion,
                indexes, self._name)
        try:
            self._wrapped.extend(values)
        finally:
            self._parent.didChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeInsertion,
                indexes, self._name)

    def __iadd__(self, values):
        self.extend(values)
        return self

    def __add__(self, values):
        # Non-mutating: returns a plain list, not a proxy.
        return self._wrapped + values

    def __mul__(self, count):
        return self._wrapped * count

    def __imul__(self, count):
        if self._ro:
            raise ValueError("Property '%s' is read-only"%(self._name,))

        if not isinstance(count, (int, long)):
            raise TypeError(count)

        indexes = NSIndexSet.alloc().initWithIndexesInRange_((len(self), len(self)*(count-1)))
        self._parent.willChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeInsertion,
                indexes, self._name)
        try:
            self._wrapped.__imul__(count)
        finally:
            self._parent.didChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeInsertion,
                indexes, self._name)

        return self

    def __eq__(self, other):
        # Comparisons unwrap proxies and defer to the underlying lists.
        if isinstance(other, array_proxy):
            return self._wrapped == other._wrapped
        else:
            return self._wrapped == other

    def __ne__(self, other):
        if isinstance(other, array_proxy):
            return self._wrapped != other._wrapped
        else:
            return self._wrapped != other

    def __lt__(self, other):
        if isinstance(other, array_proxy):
            return self._wrapped < other._wrapped
        else:
            return self._wrapped < other

    def __le__(self, other):
        if isinstance(other, array_proxy):
            return self._wrapped <= other._wrapped
        else:
            return self._wrapped <= other

    def __gt__(self, other):
        if isinstance(other, array_proxy):
            return self._wrapped > other._wrapped
        else:
            return self._wrapped > other

    def __ge__(self, other):
        if isinstance(other, array_proxy):
            return self._wrapped >= other._wrapped
        else:
            return self._wrapped >= other

    if sys.version_info[0] == 2:
        # Python 2 three-way comparison support.
        def __cmp__(self, other):
            if isinstance(other, array_proxy):
                return cmp(self._wrapped, other._wrapped)

            else:
                return cmp(self._wrapped, other)

    if sys.version_info[0] == 2:
        # Python 2 sort still accepts a cmp function.
        def sort(self, cmp=None, key=None, reverse=False):
            if self._ro:
                raise ValueError("Property '%s' is read-only"%(self._name,))

            # Sorting replaces the entire range for KVO purposes.
            indexes = NSIndexSet.alloc().initWithIndexesInRange_(
                    (0, len(self._wrapped)))
            self._parent.willChange_valuesAtIndexes_forKey_(
                    NSKeyValueChangeReplacement,
                    indexes, self._name)
            try:
                self._wrapped.sort(cmp=cmp, key=key, reverse=reverse)
            finally:
                self._parent.didChange_valuesAtIndexes_forKey_(
                    NSKeyValueChangeReplacement,
                    indexes, self._name)

    else: # pragma: no cover (py3k)
        def sort(self, key=None, reverse=False):
            if self._ro:
                raise ValueError("Property '%s' is read-only"%(self._name,))

            # Sorting replaces the entire range for KVO purposes.
            indexes = NSIndexSet.alloc().initWithIndexesInRange_(
                    (0, len(self._wrapped)))
            self._parent.willChange_valuesAtIndexes_forKey_(
                    NSKeyValueChangeReplacement,
                    indexes, self._name)
            try:
                self._wrapped.sort(key=key, reverse=reverse)
            finally:
                self._parent.didChange_valuesAtIndexes_forKey_(
                    NSKeyValueChangeReplacement,
                    indexes, self._name)

    def reverse(self):
        if self._ro:
            raise ValueError("Property '%s' is read-only"%(self._name,))

        # Reversal replaces the entire range for KVO purposes.
        indexes = NSIndexSet.alloc().initWithIndexesInRange_(
                (0, len(self._wrapped)))
        self._parent.willChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeReplacement,
                indexes, self._name)
        try:
            self._wrapped.reverse()
        finally:
            self._parent.didChange_valuesAtIndexes_forKey_(
                NSKeyValueChangeReplacement,
                indexes, self._name)
def makeArrayAccessors(name):
    """Create the KVC indexed-accessor implementations for the list
    property `name`; returns (countOf, objectIn, insert, remove, replace)."""
    def _seq(self):
        # Fetch the live list backing the property.
        return getattr(self, name)

    def countOf(self):
        return len(_seq(self))

    def objectIn(self, idx):
        return _seq(self)[idx]

    def insert(self, value, idx):
        _seq(self).insert(idx, value)

    def replace(self, idx, value):
        _seq(self)[idx] = value

    def remove(self, idx):
        del _seq(self)[idx]

    return countOf, objectIn, insert, remove, replace
class array_property (object_property):
    # object_property whose value is a list, exposed to Cocoa via KVC
    # indexed accessors and wrapped in array_proxy so mutations emit KVO
    # notifications.
    def __init__(self, name=None,
            read_only=False, copy=True, dynamic=False,
            ivar=None, depends_on=None):
        super(array_property, self).__init__(name,
                read_only=read_only,
                copy=copy, dynamic=dynamic,
                ivar=ivar, depends_on=depends_on)

    def __pyobjc_class_setup__(self, name, class_dict, instance_methods, class_methods):
        super(array_property, self).__pyobjc_class_setup__(name, class_dict, instance_methods, class_methods)

        # Insert (Mutable) Indexed Accessors
        # FIXME: should only do the mutable bits when we're actually a mutable property
        name = self._name
        Name = name[0].upper() + name[1:]

        countOf, objectIn, insert, remove, replace = makeArrayAccessors(self._name)

        # Register each accessor under its Cocoa indexed-accessor selector.
        countOf = selector(countOf,
                selector = ('countOf%s'%(Name,)).encode('latin1'),
                signature = _C_NSUInteger + b'@:',
            )
        countOf.isHidden = True
        instance_methods.add(countOf)

        objectIn = selector(objectIn,
                selector = ('objectIn%sAtIndex:'%(Name,)).encode('latin1'),
                signature = b'@@:' + _C_NSUInteger,
            )
        objectIn.isHidden = True
        instance_methods.add(objectIn)

        insert = selector(insert,
                selector = ('insertObject:in%sAtIndex:'%(Name,)).encode('latin1'),
                signature = b'v@:@' + _C_NSUInteger,
            )
        insert.isHidden = True
        instance_methods.add(insert)

        remove = selector(remove,
                selector = ('removeObjectFrom%sAtIndex:'%(Name,)).encode('latin1'),
                signature = b'v@:' + _C_NSUInteger,
            )
        remove.isHidden = True
        instance_methods.add(remove)

        replace = selector(replace,
                selector = ('replaceObjectIn%sAtIndex:withObject:'%(Name,)).encode('latin1'),
                signature = b'v@:' + _C_NSUInteger + b'@',
            )
        replace.isHidden = True
        instance_methods.add(replace)

    def __set__(self, object, value):
        if isinstance(value, array_proxy):
            if value._name == self._name and value._parent is object:
                # attr.prop = attr.prop
                return

        if isinstance(value, array_proxy):
            # Unwrap a foreign proxy into a plain list before storing.
            value = list(value)

        super(array_property, self).__set__(object, value)

    def __get__(self, object, owner):
        # Always hand out a KVO-aware proxy; lazily materialize an empty
        # list on first access.
        v = object_property.__get__(self, object, owner)
        if v is None:
            v = list()
            object_property.__set__(self, object, v)
        return array_proxy(self._name, object, self, self._ro)

    def __getvalue__(self, object):
        # Used by array_proxy to fetch the raw underlying list (lazily
        # created, same as __get__ but without wrapping).
        v = object_property.__get__(self, object, None)
        if v is None:
            v = list()
            object_property.__set__(self, object, v)
        return v
NSKeyValueUnionSetMutation = 1
NSKeyValueMinusSetMutation = 2
NSKeyValueIntersectSetMutation = 3
NSKeyValueSetSetMutation = 4
class set_proxy (collections.MutableSet):
__slots__ = ('_name', '__wrapped', '_parent', '_ro')
def __init__(self, name, parent, wrapped, read_only):
self._name = name
self._parent = parent
self._ro = read_only
self.__wrapped = wrapped
def __repr__(self):
return '<set proxy for property ' + self._name + ' ' + repr(self._wrapped) + '>'
@property
def _wrapped(self):
return self.__wrapped.__getvalue__(self._parent)
def __reduce__(self):
# Ensure that the proxy itself doesn't get stored
# in pickles.
return _id, (self._wrapped,)
def __getattr__(self, attr):
return getattr(self._wrapped, attr)
def __contains__(self, value):
return self._wrapped.__contains__(value)
def __iter__(self):
return self._wrapped.__iter__()
def __len__(self):
return self._wrapped.__len__()
def __eq__(self, other):
if isinstance(other, set_proxy):
return self._wrapped == other._wrapped
else:
return self._wrapped == other
def __ne__(self, other):
if isinstance(other, set_proxy):
return self._wrapped != other._wrapped
else:
return self._wrapped != other
def __lt__(self, other):
if isinstance(other, set_proxy):
return self._wrapped < other._wrapped
else:
return self._wrapped < other
def __le__(self, other):
if isinstance(other, set_proxy):
return self._wrapped <= other._wrapped
else:
return self._wrapped <= other
def __gt__(self, other):
if isinstance(other, set_proxy):
return self._wrapped > other._wrapped
else:
return self._wrapped > other
def __ge__(self, other):
if isinstance(other, set_proxy):
return self._wrapped >= other._wrapped
else:
return self._wrapped >= other
if sys.version_info[0] == 2:
def __cmp__(self, other):
raise TypeError('cannot compare sets using cmp()')
def add(self, item):
if self._ro:
raise ValueError("Property '%s' is read-only"%(self._name,))
self._parent.willChangeValueForKey_withSetMutation_usingObjects_(
self._name,
NSKeyValueUnionSetMutation,
set([item]),
)
try:
self._wrapped.add(item)
finally:
self._parent.didChangeValueForKey_withSetMutation_usingObjects_(
self._name,
NSKeyValueUnionSetMutation,
set([item]),
)
def clear(self):
if self._ro:
raise ValueError("Property '%s' is read-only"%(self._name,))
object = set(self._wrapped)
self._parent.willChangeValueForKey_withSetMutation_usingObjects_(
self._name,
NSKeyValueMinusSetMutation,
object
)
try:
self._wrapped.clear()
finally:
self._parent.didChangeValueForKey_withSetMutation_usingObjects_(
self._name,
NSKeyValueMinusSetMutation,
object
)
def difference_update(self, *others):
if self._ro:
raise ValueError("Property '%s' is read-only"%(self._name,))
s = set()
s.update(*others)
self._parent.willChangeValueForKey_withSetMutation_usingObjects_(
self._name,
NSKeyValueMinusSetMutation,
s
)
try:
self._wrapped.difference_update(s)
finally:
self._parent.didChangeValueForKey_withSetMutation_usingObjects_(
self._name,
NSKeyValueMinusSetMutation,
s
)
def discard(self, item):
    """Remove *item* if present; no error when absent (KVO-notified).

    Raises ValueError when the property is read-only.
    """
    if self._ro:
        raise ValueError("Property '%s' is read-only"%(self._name,))
    removal = {item}
    parent = self._parent
    parent.willChangeValueForKey_withSetMutation_usingObjects_(
        self._name, NSKeyValueMinusSetMutation, removal)
    try:
        self._wrapped.discard(item)
    finally:
        parent.didChangeValueForKey_withSetMutation_usingObjects_(
            self._name, NSKeyValueMinusSetMutation, removal)
def intersection_update(self, other):
    """Keep only elements also present in *other* (KVO-notified).

    Raises ValueError when the property is read-only.
    """
    if self._ro:
        raise ValueError("Property '%s' is read-only"%(self._name,))
    keep = set(other)
    parent = self._parent
    parent.willChangeValueForKey_withSetMutation_usingObjects_(
        self._name, NSKeyValueIntersectSetMutation, keep)
    try:
        self._wrapped.intersection_update(keep)
    finally:
        parent.didChangeValueForKey_withSetMutation_usingObjects_(
            self._name, NSKeyValueIntersectSetMutation, keep)
def pop(self):
    """Remove and return an arbitrary element; KeyError when empty.

    Delegates to remove() so the usual KVO notifications fire.
    Raises ValueError when the property is read-only.
    """
    if self._ro:
        raise ValueError("Property '%s' is read-only"%(self._name,))
    for value in self:
        self.remove(value)
        return value
    raise KeyError("Empty set")
def remove(self, item):
    """Remove *item*; KeyError if absent (raised inside the KVO pair).

    Raises ValueError when the property is read-only.
    """
    if self._ro:
        raise ValueError("Property '%s' is read-only"%(self._name,))
    removal = set([item])
    parent = self._parent
    parent.willChangeValueForKey_withSetMutation_usingObjects_(
        self._name, NSKeyValueMinusSetMutation, removal)
    try:
        self._wrapped.remove(item)
    finally:
        # did-change fires even when remove() raised KeyError.
        parent.didChangeValueForKey_withSetMutation_usingObjects_(
            self._name, NSKeyValueMinusSetMutation, removal)
def symmetric_difference_update(self, other):
    """In-place symmetric difference, emitted to KVO observers as a
    minus-set mutation followed by a union mutation."""
    # NOTE: This method does not call the corresponding method
    # of the wrapped set to ensure that we generate the right
    # notifications.
    if self._ro:
        raise ValueError("Property '%s' is read-only"%(self._name,))
    other = set(other)
    to_add = set()
    to_remove = set()
    # Elements common to both sides are removed; the rest are added.
    for o in other:
        if o in self:
            to_remove.add(o)
        else:
            to_add.add(o)
    self._parent.willChangeValueForKey_withSetMutation_usingObjects_(
        self._name,
        NSKeyValueMinusSetMutation,
        to_remove
    )
    try:
        self._wrapped.difference_update(to_remove)
    finally:
        self._parent.didChangeValueForKey_withSetMutation_usingObjects_(
            self._name,
            NSKeyValueMinusSetMutation,
            to_remove
        )
    self._parent.willChangeValueForKey_withSetMutation_usingObjects_(
        self._name,
        NSKeyValueUnionSetMutation,
        to_add
    )
    try:
        self._wrapped.update(to_add)
    finally:
        self._parent.didChangeValueForKey_withSetMutation_usingObjects_(
            self._name,
            NSKeyValueUnionSetMutation,
            to_add
        )
def update(self, *others):
    """Add all elements from *others* with a KVO union notification.

    Raises ValueError when the property is read-only.
    """
    if self._ro:
        raise ValueError("Property '%s' is read-only"%(self._name,))
    additions = set()
    additions.update(*others)
    parent = self._parent
    parent.willChangeValueForKey_withSetMutation_usingObjects_(
        self._name, NSKeyValueUnionSetMutation, additions)
    try:
        self._wrapped.update(additions)
    finally:
        parent.didChangeValueForKey_withSetMutation_usingObjects_(
            self._name, NSKeyValueUnionSetMutation, additions)
def __or__(self, other):
    """Return ``wrapped | other`` for the underlying set."""
    lhs = self._wrapped
    return lhs | other
def __and__(self, other):
    """Return ``wrapped & other`` for the underlying set."""
    lhs = self._wrapped
    return lhs & other
def __xor__(self, other):
    """Return ``wrapped ^ other`` for the underlying set."""
    lhs = self._wrapped
    return lhs ^ other
def __sub__(self, other):
    """Return ``wrapped - other`` for the underlying set."""
    lhs = self._wrapped
    return lhs - other
def __ior__(self, other):
    """In-place union (``|=``); delegates to update() so KVO fires."""
    if not self._ro:
        self.update(other)
        return self
    raise ValueError("Property '%s' is read-only"%(self._name,))
def __isub__(self, other):
    """In-place difference (``-=``); delegates to difference_update()."""
    if not self._ro:
        self.difference_update(other)
        return self
    raise ValueError("Property '%s' is read-only"%(self._name,))
def __ixor__(self, other):
    """In-place symmetric difference (``^=``); KVO via the helper."""
    if not self._ro:
        self.symmetric_difference_update(other)
        return self
    raise ValueError("Property '%s' is read-only"%(self._name,))
def __iand__(self, other):
    """In-place intersection (``&=``); delegates to intersection_update()."""
    if not self._ro:
        self.intersection_update(other)
        return self
    raise ValueError("Property '%s' is read-only"%(self._name,))
def makeSetAccessors(name):
    """Build the five KVO unordered-collection accessors for *name*.

    Returns a tuple ``(countOf, enumeratorOf, memberOf, add, remove)``
    of closures, each operating on the set stored at attribute *name*
    of the object passed as ``self``.
    """
    def countOf(self):
        return len(getattr(self, name))

    def enumeratorOf(self):
        return iter(getattr(self, name))

    def memberOf(self, value):
        collection = getattr(self, name)
        if value in collection:
            # Return the element actually stored, which may merely be
            # equal to (not identical with) *value*.
            for item in collection:
                if item == value:
                    return item
        return None

    def add(self, value):
        getattr(self, name).add(value)

    def remove(self, value):
        # discard(): removing an absent member is not an error.
        getattr(self, name).discard(value)

    return countOf, enumeratorOf, memberOf, add, remove
class set_property (object_property):
    """Key-value-coding property whose value is a Python set, exposed to
    callers through a KVO-notifying set_proxy wrapper."""

    def __init__(self, name=None,
            read_only=False, copy=True, dynamic=False,
            ivar=None, depends_on=None):
        super(set_property, self).__init__(name,
            read_only=read_only,
            copy=copy, dynamic=dynamic,
            ivar=ivar, depends_on=depends_on)

    def __get__(self, object, owner):
        """Return a set_proxy over the stored value, lazily creating an
        empty set the first time."""
        v = object_property.__get__(self, object, owner)
        if v is None:
            v = set()
            object_property.__set__(self, object, v)
        return set_proxy(self._name, object, self, self._ro)

    def __set__(self, object, value):
        """Store *value*; self-assignment through the proxy is a no-op."""
        if isinstance(value, set_proxy):
            if value._name == self._name and value._parent is object:
                # attr.prop = attr.prop
                return
        # A foreign proxy is snapshotted so we don't alias its storage.
        if isinstance(value, set_proxy):
            value = list(value)
        super(set_property, self).__set__(object, value)

    def __getvalue__(self, object):
        """Return the raw stored set (no proxy), creating it if needed."""
        v = object_property.__get__(self, object, None)
        if v is None:
            v = set()
            object_property.__set__(self, object, v)
        return v

    def __pyobjc_class_setup__(self, name, class_dict, instance_methods, class_methods):
        """Register the KVC unordered-collection accessor selectors
        (countOf<Key>, enumeratorOf<Key>, memberOf<Key>:, add/remove
        variants) on the Objective-C class being built."""
        super(set_property, self).__pyobjc_class_setup__(name, class_dict, instance_methods, class_methods)
        # (Mutable) Unordered Accessors
        # FIXME: should only do the mutable bits when we're actually a mutable property
        name = self._name
        Name = name[0].upper() + name[1:]
        countOf, enumeratorOf, memberOf, add, remove = makeSetAccessors(self._name)
        # NOTE(review): some selectors below encode with 'latin' and
        # others with 'latin1'; both are aliases of the same codec, so
        # the bytes are identical -- kept as-is.
        countOf = selector(countOf,
            selector = ('countOf%s'%(Name,)).encode('latin1'),
            signature = _C_NSUInteger + b'@:',
        )
        countOf.isHidden = True
        instance_methods.add(countOf)
        enumeratorOf = selector(enumeratorOf,
            selector = ('enumeratorOf%s'%(Name,)).encode('latin1'),
            signature = b'@@:',
        )
        enumeratorOf.isHidden = True
        instance_methods.add(enumeratorOf)
        memberOf = selector(memberOf,
            selector = ('memberOf%s:'%(Name,)).encode('latin'),
            signature = b'@@:@',
        )
        memberOf.isHidden = True
        instance_methods.add(memberOf)
        # Both the add<Key>: and add<Key>Object: spellings map to the
        # same Python implementation.
        add1 = selector(add,
            selector = ('add%s:'%(Name,)).encode('latin'),
            signature = b'v@:@',
        )
        add1.isHidden = True
        instance_methods.add(add1)
        add2 = selector(add,
            selector = ('add%sObject:'%(Name,)).encode('latin1'),
            signature = b'v@:@',
        )
        add2.isHidden = True
        instance_methods.add(add2)
        remove1 = selector(remove,
            selector = ('remove%s:'%(Name,)).encode('latin1'),
            signature = b'v@:@',
        )
        remove1.isHidden = True
        instance_methods.add(remove1)
        remove2 = selector(remove,
            selector = ('remove%sObject:'%(Name,)).encode('latin'),
            signature = b'v@:@',
        )
        remove2.isHidden = True
        instance_methods.add(remove2)
# Resolve the Objective-C dictionary class once at module load.
NSMutableDictionary = lookUpClass('NSMutableDictionary')

class dict_property (object_property):
    def __get__(self, object, owner):
        """Fetch the value, lazily installing an empty NSMutableDictionary
        the first time the property is read."""
        v = object_property.__get__(self, object, owner)
        if v is None:
            v = NSMutableDictionary.alloc().init()
            object_property.__set__(self, object, v)
        # NOTE(review): returns via a second __get__ call instead of the
        # local ``v`` -- presumably equivalent; confirm the descriptor
        # has no per-read side effects before simplifying.
        return object_property.__get__(self, object, owner)
| {
"repo_name": "ariabuckles/pyobjc-core",
"path": "Lib/objc/_properties.py",
"copies": "3",
"size": "35435",
"license": "mit",
"hash": -5151348366853060000,
"line_mean": 29.7062391681,
"line_max": 136,
"alpha_frac": 0.5395512911,
"autogenerated": false,
"ratio": 4.276490465846005,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0038890857272857442,
"num_lines": 1154
} |
"""Allocators determine which flights should be bundled in a formation."""
from lib.intervals import Interval, group
from lib import sim, debug
from lib.debug import print_line as p
from models import Formation
import config
from lib.geo.segment import Segment
class FormationAllocator(object):
    """Abstract Allocator. Creates one giant formation."""

    def __init__(self):
        self.aircraft_queue = []
        self.formations = []

    def allocate(self, aircraft):
        """Place every queued aircraft into a single formation."""
        # No filtering, put all aircraft in one big formation.
        self.formations = [self.aircraft_queue]

    def find_formation(self, aircraft):
        """Finds the formation having the aircraft requested"""
        # Bug fix: this docstring used to sit *after* the allocate()
        # call, where it was a no-op string statement rather than
        # documentation.
        self.allocate(aircraft)
        for formation in self.formations:
            if aircraft in formation:
                return formation
        raise Exception("No formation having %s found" % aircraft)

    def add_aircraft(self, aircraft):
        """Append *aircraft* to the allocation queue."""
        self.aircraft_queue.append(aircraft)

    def remove_aircraft(self, aircraft):
        """Remove *aircraft* from the queue; log when it was absent."""
        try:
            self.aircraft_queue.remove(aircraft)
        except ValueError:
            p('Could not remove %s from queue because not present' % aircraft)
class FormationAllocatorEtah(FormationAllocator):
    """Uses interval overlapping to group aircraft into formations"""

    def allocate(self, aircraft):
        """Group queued aircraft into formations by overlapping hub-ETA
        intervals, after filtering candidates by hub, heading and any
        configured airline/type restrictions."""
        p('debug', 'Starting formation allocation for %s' % aircraft)
        # Do not perform allocation if no hub exists in the flight route.
        if len(aircraft.route.segments) == 0:
            return
        self.formations = []
        intervals = []
        candidates = self.aircraft_queue
        hub = aircraft.route.waypoints[0]
        # This is bad. We don't want to filter anything.
        # @todo: pre-process at a higher level.
        # Only consider other aircraft flying to the same hub
        # NOTE(review): chained filter() calls assume Python-2 list
        # semantics; under Python 3 these are one-shot iterators and the
        # debug '%s' below would print the iterator -- confirm the
        # interpreter version this project targets.
        candidates = filter(lambda a: a.route.waypoints[0] is hub,
                            candidates)
        p('debug', 'Full candidate set: %s' % candidates)
        # Only consider aircraft having a maximum heading difference between
        # the hub and their destination
        segment = Segment(aircraft.hub, aircraft.destination)
        leader_heading = segment.get_initial_bearing()

        def heading_filter(buddy):
            # Keep buddies whose hub->destination bearing deviates from
            # the leader's by at most half the configured maximum angle.
            segment = Segment(buddy.hub, buddy.destination)
            buddy_heading = segment.get_initial_bearing()
            phi_obs = abs(leader_heading - buddy_heading)
            p(
                'debug',
                'delta phi observed for %s (phi: %.2f) against %s (phi: %.2f)'
                ': %.2f degrees' % (
                    aircraft, leader_heading, buddy, buddy_heading, phi_obs
                )
            )
            return phi_obs <= (config.phi_max/2)
        candidates = filter(heading_filter, candidates)
        # Other interesting filters
        if 'same-airline' in config.restrictions:
            airline = aircraft.label[0:2]
            candidates = filter(lambda a: a.label[0:2] == airline,
                                candidates)
        if 'same-aircraft-type' in config.restrictions:
            aircraft_type = aircraft.aircraft_type
            candidates = filter(lambda a: a.aircraft_type == aircraft_type,
                                candidates)
        p('debug', 'Reduced candidate set: %s' % candidates)
        for candidate in candidates:
            # Quick and dirty: recalc position. Instead, pull eta from var.
            candidate.controller.update_position()
            tth = candidate.time_to_waypoint() # time to hub
            hub_eta = sim.time + tth
            # From the moment the aircraft enters the lock area, the slack
            # decreases linearly to zero upon hub arrival.
            if tth < config.lock_time:
                slack = tth * config.etah_slack / config.lock_time
            else:
                slack = config.etah_slack
            p('Time = %s, Hub (= %s) eta %s for candidate %s' %\
                (sim.time, hub, hub_eta, candidate))
            intervals.append(Interval(
                candidate,
                int(hub_eta) - slack,
                int(hub_eta) + slack
            ))
        # Candidates whose ETA windows overlap are grouped together.
        for interval_group in group(intervals):
            formation = Formation()
            for interval in interval_group:
                formation.append(interval.obj)
            self.formations.append(formation)
| {
"repo_name": "mauzeh/formation-flight",
"path": "formation_flight/formation/allocators.py",
"copies": "1",
"size": "4534",
"license": "mit",
"hash": -7763255979906869000,
"line_mean": 36.1639344262,
"line_max": 78,
"alpha_frac": 0.5805028672,
"autogenerated": false,
"ratio": 4.1749539594843466,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5255456826684346,
"avg_score": null,
"num_lines": null
} |
''' AllocModel.py
'''
from __future__ import division
class AllocModel(object):
    """Abstract interface for bnpy allocation models.

    Subclasses implement the local/global update rules for a given
    inference type ('EM', 'VB', 'moVB' or 'soVB').
    """

    # ------------------------------------------------------ Constructors
    def __init__(self, inferType):
        self.inferType = inferType

    def set_prior(self, **kwargs):
        """Hook for subclasses to install prior hyperparameters."""
        pass

    # ------------------------------------------------------ Accessors
    def get_keys_for_memoized_local_params(self):
        """Names of the LP fields memoized across visits to a batch."""
        return []

    def requireMergeTerms(self):
        """Whether this model requires precomputed merge terms."""
        return True

    # ------------------------------------------------------ Local params
    def calc_local_params(self, Data, LP):
        """Compute per-item (local) parameters; subclass hook."""
        pass

    # ------------------------------------------------------ Suff stats
    def get_global_suff_stats(self, Data, SS, LP):
        """Accumulate global sufficient statistics; subclass hook."""
        pass

    # ------------------------------------------------------ Global params
    def update_global_params(self, SS, rho=None, **kwargs):
        """Update (in-place) global parameters from suff stats SS.

        This is the M-step of the EM/VB algorithm; dispatches to the
        subclass update matching the configured inference type.
        """
        self.K = SS.K
        kind = self.inferType
        if kind == 'EM':
            self.update_global_params_EM(SS)
        elif kind in ('VB', 'moVB'):
            self.update_global_params_VB(SS, **kwargs)
        elif kind == 'soVB':
            # A full-size step (rho missing or 1) is a plain VB update.
            if rho is None or rho == 1:
                self.update_global_params_VB(SS, **kwargs)
            else:
                self.update_global_params_soVB(SS, rho, **kwargs)
        else:
            raise ValueError('Unrecognized Inference Type! %s' % (self.inferType))

    # ------------------------------------------------------ Evidence
    def calc_evidence(self):
        """Compute the evidence lower bound; subclass hook."""
        pass

    # ------------------------------------------------------ IO (humans)
    def get_info_string(self):
        """One-line human-readable description; subclass hook."""
        pass

    # ------------------------------------------------------ IO (machines)
    def to_dict_essential(self):
        """Minimal dict describing this model (class name, inferType, K)."""
        essentials = dict(name=self.__class__.__name__,
                          inferType=self.inferType)
        if hasattr(self, 'K'):
            essentials['K'] = self.K
        return essentials

    def to_dict(self):
        pass

    def from_dict(self):
        pass

    def get_prior_dict(self):
        pass
| {
"repo_name": "daeilkim/refinery",
"path": "refinery/bnpy/bnpy-dev/bnpy/allocmodel/AllocModel.py",
"copies": "1",
"size": "2941",
"license": "mit",
"hash": -5028250390491278000,
"line_mean": 30.9673913043,
"line_max": 78,
"alpha_frac": 0.4243454607,
"autogenerated": false,
"ratio": 4.959527824620573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5883873285320573,
"avg_score": null,
"num_lines": null
} |
# All of Django's wonderful imports
from django.shortcuts import render_to_response, redirect, HttpResponse
from django.template import RequestContext
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.template.defaultfilters import slugify
from django.template.loader import render_to_string
from django import template
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.utils.hashcompat import sha_constructor
from django.core.files.uploadedfile import UploadedFile
from django.utils.encoding import force_unicode, smart_str
# Imports from this app and other SPC apps
from scipy_central.person.views import create_new_account_internal
from scipy_central.filestorage.models import FileSet
from scipy_central.tagging.views import get_and_create_tags
from scipy_central.utils import (send_email, paginated_queryset,
highlight_code, ensuredir)
from scipy_central.rest_comments.views import compile_rest_to_html
from scipy_central.pages.views import page_404_error
from scipy_central.pagehit.views import create_hit, get_pagehits
from scipy_central.submission.templatetags.core_tags import top_authors
import models
import forms
# Python imports
from hashlib import md5
from collections import namedtuple
import random
import logging
import os
import re
import datetime
import shutil
import zipfile
import tempfile
import mimetypes; mimetypes.init()
from pygments.lexers import guess_lexer_for_filename
from pygments.util import ClassNotFound
logger = logging.getLogger('scipycentral')
logger.debug('Initializing submission::views.py')
def get_items_or_404(view_function):
    """
    Decorator for views that ensures the revision and submission requested
    actually exist. If not, throws a 404, else, it calls the view function
    with the required inputs.
    """
    def decorator(request, item_id, rev_id=None, slug=None, filename=None):
        """Retrieves the ``Submission`` and ``Revision`` objects when given,
        at a minimum the submission's primary key (``item_id``). Since the
        submission can have more than 1 revision, we can get a specific
        revision, ``rev_id``, otherwise we will always get the latest
        revision. ``slug`` is ignored for now - just used to create good SEO
        URLs.
        """
        try:
            # Use the Submissions manager's ``all()`` function
            # NOTE(review): QuerySet.filter() does not raise
            # ObjectDoesNotExist; the empty-result case is handled by the
            # len() check below, so this except is likely dead code.
            the_submission = models.Submission.objects.all().filter(id=item_id)
        except ObjectDoesNotExist:
            return page_404_error(request, 'You request a non-existant item')
        if len(the_submission) == 0:
            return page_404_error(request, 'This item does not exist yet')
        the_submission = the_submission[0]
        the_revision = the_submission.last_revision
        if rev_id: # can be None or '':
            all_revisions = the_submission.revisions.all()
            rev_index = int(rev_id)-1
            if rev_index < 0:
                # A non-positive revision number means "the latest".
                rev_index = len(all_revisions)-1
            try:
                the_revision = all_revisions[rev_index]
            except (ValueError, IndexError):
                return page_404_error(request, ('The requested revision is '
                                                'non-existant.'))
        # Don't return revisions that are not approved for display yet
        if not isinstance(the_revision, models.Revision) or\
                not(the_revision.is_displayed):
            return page_404_error(request, "That revision isn't available yet.")
        # Is the URL of the form: "..../NN/MM/edit"; if so, then edit the item
        path_split = request.path.split('/')
        if len(path_split)>4 and path_split[4] in ['edit', 'download', 'show']:
            if path_split[4] == 'show' and len(path_split)>=6:
                # "show" may carry a trailing file path to display.
                return view_function(request, the_submission, the_revision,
                                     filename=path_split[5:])
            else:
                return view_function(request, the_submission, the_revision)
        # Is the URL not the canonical URL for the item? .... redirect the user
        else:
            if rev_id is None:
                rev_id_str = '0'
                do_redirect = True
            else:
                rev_id_str = str(the_revision.rev_id+1)
                do_redirect = False
            if slug is None or the_revision.slug != slug or do_redirect:
                # Permanent redirect to the canonical SEO URL.
                return redirect('/'.join(['/item',
                                          item_id,
                                          rev_id_str,
                                          the_revision.slug]),
                                permanent=True)
            return view_function(request, the_submission, the_revision)
    return decorator
def get_form(request, form_class, field_order, bound=False):
    """
    Generic function. Used for all submission types. Specify the ``form_class``
    that's given in ``forms.py``. The ``field_order`` is a list of strings that
    indicates the linear order of the fields in the form. A ``bound`` form
    is a function of the object assigned to ``bound`` (see below). An unbound
    form is simply an empty form.
    """
    if bound:
        if isinstance(bound, models.Revision):
            # Re-populate the form from an existing Revision instance.
            tags = ','.join([str(tag) for tag in bound.tags.all()])
            fields = {'item_url': bound.item_url,
                      'title': bound.title,
                      'description': bound.description,
                      'sub_tags': tags,
                      'snippet_code': bound.item_code,
                      'sub_type': 'snippet',
                      'sub_license': bound.sub_license_id,
                      }
            if bound.entry.sub_type == 'link':
                fields['sub_type'] = 'link'
            elif bound.entry.sub_type == 'package':
                fields['sub_type'] = 'package'
            elif bound.entry.sub_type == 'code':
                fields['sub_type'] = 'snippet'
            form_output = form_class(fields)
        else:
            if request.POST['sub_type'] == 'package':
                # Create a fake "UploadedFile" object, so the user can resume
                # editing or finish their submission, without being told
                # they have to reenter this field.
                zip_hash = request.POST.get('package_hash', '')
                zip_file = models.ZipFile.objects.filter(zip_hash=zip_hash)
                if zip_file:
                    zip_name = zip_file[0].raw_zip_file.name
                    uploaded_file = UploadedFile(zip_name, name=zip_name,
                                                 content_type='application/zip',
                                                 size=zip_file[0].raw_zip_file.size)
                    uploaded_file.skip_validation = True # see ``forms.py``
                    request.FILES['package_file'] = uploaded_file
            form_output = form_class(request.POST, request.FILES)
            # ``zip_file``/``uploaded_file`` only exist when the package
            # branch above ran; the short-circuit ``and`` keeps this safe.
            if request.POST['sub_type'] == 'package' and zip_file:
                form_output.fields['package_file'].initial = uploaded_file
    else:
        form_output = form_class()
    # Rearrange the form order
    form_output.fields.keyOrder = field_order
    # Assign sequential tab indices in display order.
    index = 1
    for field_name, field in form_output.fields.iteritems():
        field.widget.attrs['tabindex'] = str(index)
        index += 1
    if request.user.is_authenticated():
        # Email field not required for signed-in users
        form_output.fields.pop('email')
    return form_output
def create_or_edit_submission_revision(request, item, is_displayed,
                                       user, submission=None, commit=False):
    """
    Creates a new ``Submission`` (only if not given) and ``Revision``
    instances. Returns these in a tuple.
    """
    # NOTE: the ``user`` will always be a valid entry in our database. Code
    # posted by users that have not yet validated themselves is not displayed
    # until they do so.
    new_submission = False
    if submission is None:
        # A new submission
        new_submission = True
        submission = models.Submission.objects.create_without_commit(
            created_by=user, sub_type=item.cleaned_data['sub_type'])
    sub = submission
    # Process any tags
    tag_list = get_and_create_tags(item.cleaned_data['sub_tags'])
    # Create a ``Revision`` instance. Must always have a ``title``,
    # ``created_by``, and ``description`` fields; the rest are set according
    # to the submission type, ``sub.sub_type``
    hash_id = ''
    if sub.sub_type == 'link':
        sub_license = None
        item_url = item.cleaned_data['item_url']
        item_code = None
    elif sub.sub_type == 'snippet':
        sub_license = item.cleaned_data['sub_license']
        item_url = None
        item_code = item.cleaned_data['snippet_code']
    elif sub.sub_type == 'package':
        sub_license = item.cleaned_data['sub_license']
        item_url = None
        item_code = None
        # Handle the ZIP file more completely only when the user commits.
        # ZIP file has been validated: OK to save it to the server
        # However, this might be the second time around, so skip saving it
        # (happens after preview, or if user resumes editing submission)
        if not hasattr(request.FILES['package_file'], 'skip_validation'):
            zip_f = models.ZipFile(raw_zip_file=request.FILES['package_file'],
                                   zip_hash=request.POST.get('package_hash', ''))
            zip_f.save()
    # Convert the raw ReST description to HTML using Sphinx: could include
    # math, paragraphs, <tt>, bold, italics, bullets, hyperlinks, etc.
    description_html = compile_rest_to_html(item.cleaned_data['description'])
    item_highlighted_code = highlight_code(item.cleaned_data.get(\
        'snippet_code', None))
    rev = models.Revision.objects.create_without_commit(
        entry=sub,
        title=item.cleaned_data['title'],
        created_by=user,
        sub_license=sub_license,
        description=item.cleaned_data['description'],
        description_html=description_html,
        hash_id=hash_id,
        item_url=item_url,
        item_code=item_code,
        item_highlighted_code=item_highlighted_code,
        is_displayed=is_displayed,
    )
    user_url = settings.SPC['short_URL_root'] + 'user/' + str(user.id)
    if commit:
        # Save the submission, then the revision. If we are editing a
        # previous submission, then do not save the submission
        # (because the Submission object has fields that will never
        # change once it has been first created).
        if new_submission:
            # Sets the primary key
            sub.save()
        rev.entry_id = sub.id
        if not is_displayed:
            # Unvalidated author: gate display behind an emailed code.
            rev.validation_hash = create_validation_code(rev)
        rev.save()
        # Storage location: if we do save files it will be here
        datenow = datetime.datetime.now()
        year, month = datenow.strftime('%Y'), datenow.strftime('%m')
        repo_path = os.path.join(year, month, '%06d'% sub.id)
        full_repo_path = os.path.join(settings.SPC['storage_dir'], repo_path)
        if sub.sub_type == 'package':
            # Save the uploaded file to the server. At this point we are sure
            # it's a valid ZIP file, has no malicious filenames, and can be
            # unpacked to the hard drive. See validation in ``forms.py``.
            if os.path.exists(full_repo_path) and sub.fileset.repo_path == \
                    repo_path:
                # Make a temporary directory and copy the existing package
                # repository to that location
                temp_dir = tempfile.mkdtemp(prefix='tmp_spc_')
                src = os.path.join(full_repo_path, '.' + \
                    settings.SPC['revisioning_backend'])
                shutil.move(src, temp_dir)
                shutil.rmtree(full_repo_path, ignore_errors=True)
            else:
                temp_dir = None
            # Create/ensure destination directory exists
            ensuredir(full_repo_path)
            # Copy ZIP file
            zip_file = request.FILES['package_file']
            dst = os.path.join(full_repo_path, zip_file.name)
            src = os.path.join(settings.MEDIA_ROOT, settings.SPC['ZIP_staging'],
                               zip_file.name)
            shutil.copyfile(src, dst)
            # os.remove(src) Keep the original ZIP file, for now
            # Remove the entry from the database
            zip_hash = request.POST.get('package_hash', '')
            zip_objs = models.ZipFile.objects.filter(zip_hash=zip_hash)
            if zip_objs:
                zip_objs[0].delete()
            # Unzip file and commit contents to the repo
            zip_f = zipfile.ZipFile(dst, 'r')
            zip_f.extractall(full_repo_path)
            zip_f.close()
            os.remove(dst) # but delete the copy
            # Delete common RCS directories that might have been in the ZIP
            for path, dirs, files in os.walk(full_repo_path):
                if os.path.split(path)[1] in settings.SPC['common_rcs_dirs']:
                    shutil.rmtree(path, ignore_errors=True)
            if temp_dir:
                # Restore the previously saved revisioning metadata.
                src = os.path.join(temp_dir, '.' + \
                    settings.SPC['revisioning_backend'])
                dst = os.path.join(full_repo_path , '.' + \
                    settings.SPC['revisioning_backend'])
                try:
                    os.rename(src, dst)
                except os.error, e:
                    # For cases when /tmp is on a different filesystem
                    # (usually production servers)
                    import errno
                    if e.errno == errno.EXDEV:
                        shutil.copytree(src, dst, symlinks=True)
                        shutil.rmtree(src)
                    else:
                        raise
                shutil.rmtree(temp_dir, ignore_errors=True)
                repo = sub.fileset.get_repo()
            else:
                # Create the repo
                sub.fileset = FileSet.objects.create(repo_path=repo_path)
                repo = sub.fileset.create_empty()
                sub.save()
            # Then add all files from the ZIP file to the repo. Add directories
            # at a time rather than file-by-file.
            for path, dirs, files in os.walk(full_repo_path):
                if os.path.split(path)[1] == '.' + \
                        settings.SPC['revisioning_backend']:
                    # Don't commit the backend's own metadata directory.
                    for entry in dirs[:]:
                        dirs.remove(entry)
                    continue
                all_files = []
                for name in files:
                    all_files.append(os.path.join(path, name))
                if all_files:
                    repo.add(patterns=all_files, ignore_errors=True)
            # Add "DESCRIPTION.txt"
            descrip_name = os.path.join(full_repo_path, 'DESCRIPTION.txt')
            descrip_file = file(descrip_name, 'w')
            descrip_file.write(rev.description)
            descrip_file.close()
            sub.fileset.add_file(descrip_name, user=user_url,
                commit_msg=('Added/updated files from web-uploaded '
                            'ZIP file. Added DESCRIPTION.txt also.'))
        if sub.sub_type == 'snippet':
            fname = rev.slug.replace('-', '_') + '.py'
            if new_submission:
                # Create a new repository for the files
                sub.fileset = FileSet.objects.create(repo_path=repo_path)
                sub.save()
                commit_msg = ('Add "%s" to the repo '
                              'based on the web submission by user "%s"') %\
                    (fname, user_url)
            else:
                commit_msg = ('Update of file(s) in the repo '
                              'based on the web submission by user "%s"') %\
                    (user_url)
            sub.fileset.add_file_from_string(fname,
                                             request.POST['snippet_code'],
                                             user=user_url,
                                             commit_msg=commit_msg)
        if sub.sub_type in ['snippet', 'package']:
            # Every code-bearing submission gets a generated license file.
            license_file = settings.SPC['license_filename']
            license_text = get_license_text(rev)
            sub.fileset.add_file_from_string(license_file, license_text,
                user="SciPy Central",
                commit_msg="SPC: added/updated license file" )
            rev.hash_id = sub.fileset.get_hash()
        # Once you have the revision you can add tags through the intermediate
        # model instance (which tracks the user that added the tag and when).
        for tag in tag_list:
            tag_intermediate = models.TagCreation(created_by=user,
                                                  revision=rev,
                                                  tag=tag)
            tag_intermediate.save()
            logger.debug('User "%s" added tag "%s" to rev.id=%d' % (
                user.profile.slug,
                str(tag), rev.id))
        # Update the search index so that the tags are included in the search
        rev.save()
        # log the new submission and revision
        logger.info('New %s: %s [id=%d] and revision id=%d' % (
            sub.sub_type,
            item.cleaned_data['title'],
            sub.id,
            rev.id))
    return sub, rev, tag_list
#------------------------------------------------------------------------------
# Licensing
def get_license_text(rev):
    """
    Generates and returns the license text for the given revision. Uses these
    revision and authorship information from previous revisions, if necessary,
    to create the license.
    """
    # See http://wiki.creativecommons.org/CC0_FAQ for all the details
    if rev.entry.num_revisions > 1:
        update_list = ['', 'Subsequent updates by:']
    else:
        update_list = []
    for idx, item in enumerate(rev.entry.revisions.all()):
        if idx > 0:
            # The first revision is the original author; list the rest.
            url = settings.SPC['short_URL_root'] + 'user/'
            url += str(item.created_by.id) + '/'
            date = datetime.datetime.strftime(item.date_created, '%d %B %Y')
            update_list.append('%s on %s' % (url, date))
    update_string = '\n'.join(update_list)
    if rev.sub_license.slug == 'cc0':
        # NOTE(review): this branch builds a 'users/<slug>' URL while the
        # update list above uses 'user/<id>' -- confirm which form is
        # canonical before changing either.
        return """%s
-----
Originally written on %s by %s
%s
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication along with
this software (see below).
Also see http://creativecommons.org/publicdomain/zero/1.0/
-----
%s
""" % \
            (rev.title, datetime.datetime.strftime(rev.entry.date_created,
                                                   '%d %B %Y'),
             settings.SPC['short_URL_root'] + 'users/' +\
             rev.entry.created_by.profile.slug,
             update_string, rev.sub_license.text_template)
    if rev.sub_license.slug == 'bsd':
        creator_url = settings.SPC['short_URL_root'] + 'user/' + \
            str(rev.created_by.id) + '/'
        # Render the BSD template with title/holder/year via the Django
        # template engine (the license text uses {{...}} placeholders).
        text = ('{{title}}\n'
                'Copyright holder: {{copyright_holder}} (full details at this page)\n'
                '-----\n') + rev.sub_license.text_template
        context = {}
        context['title'] = rev.title
        context['copyright_holder'] = creator_url
        context['year'] = datetime.datetime.now().year
        resp = template.Template(text)
        return resp.render(template.Context(context))
#------------------------------------------------------------------------------
# All submissions: have a form associated with them, as well as a number of
# fields that must appear in a certain order
# ``Item`` pairs a form class (from ``forms.py``) with the linear order in
# which its fields are rendered; ``SUBS`` maps each submission type to one.
Item = namedtuple('Item', 'form field_order')
SUBS = {'snippet': Item(forms.SnippetForm, field_order=['title',
                        'snippet_code', 'description', 'sub_tags',
                        'sub_license', 'email', 'sub_type']),
        'package': Item(forms.PackageForm, field_order=['title',
                        'description', 'sub_license',
                        'package_file', 'package_hash',
                        'sub_tags', 'email', 'sub_type']),
        'link': Item(forms.LinkForm, field_order=['title', 'description',
                     'item_url', 'sub_tags', 'email', 'sub_type']),
        }
def new_or_edit_submission(request, item_type, bound_form=False, submission=None):
    """
    Users wants to submit a new link item, or continue editing a submission.
    There are multiple possible paths through the logic here. Careful about
    making changes.

    ``bound_form`` is effectively tri-state: ``False`` (fresh unbound form),
    ``True`` (bind the form to request.POST), or a ``models.Revision``
    instance (pre-fill the form to edit an existing revision).
    ``submission`` is the existing Submission when editing, else None.
    """
    # User is going to edit their submission
    sub_type = None
    if isinstance(bound_form, models.Revision):
        new_item_or_edit = True
        sub_type = bound_form.entry.sub_type  # for later on...
    # Cancel button, or a GET request
    elif request.POST.has_key('spc-cancel'):
        return redirect('spc-main-page')
    else:
        new_item_or_edit = False
        commit = False
        # Which of the Preview/Edit/Submit buttons was pressed decides
        # whether we bind the form and whether we commit to the database.
        if request.POST.has_key('spc-edit'):
            new_item_or_edit = True
            bound_form = True
        if request.POST.has_key('spc-submit'):
            bound_form = True
            commit = True
        if request.POST.has_key('spc-preview'):
            bound_form = True
    # NOTE(review): ``commit`` is only assigned in the branch above; the
    # other branches set new_item_or_edit=True, which returns early before
    # ``commit`` is read below -- confirm this invariant if refactoring.
    buttontext_extra = ''
    if item_type == 'snippet' and request.method == 'GET':
        itemtype = 'snippet'
        new_item_or_edit = True
    elif item_type == 'package' and request.method == 'GET':
        itemtype = 'package'
        buttontext_extra = '(Upload ZIP file on next page)'
        new_item_or_edit = True
        #return not_implemented_yet(request, 48)
    elif item_type == 'link' and request.method == 'GET':
        itemtype = 'link'
        new_item_or_edit = True
    else:
        # POST: recover the submission type from the posted form data
        itemtype = request.POST.get('sub_type', sub_type)
    # Important: make a copy of ``field_order``, since it may be altered
    field_order = SUBS[itemtype].field_order[:]
    theform = get_form(request, form_class=SUBS[itemtype].form,
                       field_order=field_order, bound=bound_form)
    # OK, having all that out of the way, lets process the user's submission
    # 0. Use the built-in forms checking to validate the fields.
    if new_item_or_edit or not(theform.is_valid()):
        return render_to_response('submission/new-item.html', {},
                context_instance=RequestContext(request,
                    {'item': theform,
                     'buttontext': 'Preview your submission',
                     'buttontext_extra': buttontext_extra,
                     'autocomplete_field': 'id_sub_tags',
                     'autocomplete_url': r'"spc-tagging-ajax"',
                     'pagetitle': 'Create a new submission'}))
    # 1. Create user account, if required
    if request.user.is_authenticated():
        user = request.user
        authenticated = True
    else:
        # Anonymous submitter: make (or look up) an account from the email
        user = create_new_account_internal(theform.cleaned_data['email'])
        authenticated = False
    # 2. Create the submission and revision or update an existing submission
    #    with a new revision
    _, rev, tag_list = create_or_edit_submission_revision(request,
                                                  item=theform,
                                                  is_displayed=authenticated,
                                                  user=user,
                                                  submission=submission,
                                                  commit=commit)
    # i.e. just previewing ...
    if not(commit):
        # 3. Create a Cancel/Edit/Submit form via a template to account for
        #    hyperlinks and CSRF
        context = RequestContext(request)
        context['item'] = theform
        context['finish_button_text'] = 'Finish submission'
        # %% is required in below string to correctly format
        html = ("""<div id="spc-preview-edit-submit" class="spc-form">
<form action="{%% url spc-new-submission item_type='%s' %%}"
method="POST" enctype="multipart/form-data">\n
{%% csrf_token %%}\n
{{item.as_hidden}}
<div id="spc-preview-edit-submit-button-group">
<input class="btn btn-primary" type="submit" name="spc-cancel" value="Cancel"
id="spc-item-cancel" />\n
<input class="btn btn-primary" type="submit" name="spc-edit" value="Resume editing"
id="spc-item-edit" />\n
<input class="btn btn-success" type="submit" name="spc-submit"
value="{{ finish_button_text }}"
id="spc-item-submit"/>\n
</div></form></div>""" % itemtype)
        resp = template.Template(html)
        extra_html = resp.render(template.Context(context))
        return render_to_response('submission/item.html', {},
                context_instance=RequestContext(request,
                    {'item': rev,
                     'tag_list': tag_list,
                     'extra_html': extra_html,
                     'preview': True,
                     }))
    else:
        # 4. Thank user and return with any extra messages, and send an email
        ctx_dict = {'user': user,
                    'item': rev,
                    'site': Site.objects.get_current()
                    }
        # User is signed in
        if authenticated:
            show_url = True
            extra_messages = 'A confirmation email has been sent to you.'
            message = render_to_string('submission/email_user_thanks.txt',
                                       ctx_dict)
        else:
            show_url = False
            extra_messages = ('You have been sent an email to '
                              '<i>confirm your submission</i> and to create '
                              'an account (if you do not have one '
                              'already). <p>Unconfirmed submissions '
                              'cannot be accepted, and <b>will be '
                              'deleted</b> after %d days. Please sign in '
                              'to avoid having to confirm your '
                              'valuable submissions in the future.') % \
                            settings.SPC['unvalidated_subs_deleted_after']
            # User is not signed in, but they have validated their email address
            if user.profile.is_validated:
                message = render_to_string(
                    'submission/email_validated_user_unvalidated_submission.txt',
                    ctx_dict)
            else:
                # User is told they first need to create account before their
                # submission shows in the website
                message = render_to_string(
                    'submission/email_unvalidated_user_unvalidated_submission.txt',
                    ctx_dict)
        send_email((user.email,), ("Thank you for your contribution "
                                   "to SciPy Central"), message=message)
        # Also notify the site admin about the new/edited submission
        message = render_to_string('submission/email_website_admin.txt',
                                   ctx_dict)
        send_email((settings.SERVER_EMAIL,), ('A new/edited submission was '
                                              'made on SciPy Central'),
                   message=message)
        return render_to_response('submission/thank-user.html', ctx_dict,
                context_instance=RequestContext(request,
                    {'extra_message': extra_messages,
                     'show_url': show_url}))
def create_validation_code(revision):
    """
    Build a SHA1 validation code for ``revision``, salted with randomness.

    From BSD licensed code, James Bennett
    https://bitbucket.org/ubernostrum/django-registration/src/58eef8330b0f/
    registration/models.py
    """
    # Five hex characters of randomness salt the digest
    random_salt = sha_constructor(str(random.random())).hexdigest()[:5]
    raw_slug = revision.slug
    # Hashing needs a byte string under Python 2
    if isinstance(raw_slug, unicode):
        raw_slug = raw_slug.encode('utf-8')
    return sha_constructor(random_salt + raw_slug).hexdigest()
@login_required
def validate_submission(request, code):
    """
    Validate a submission (via an emailed link to the user).
    From BSD licensed code, James Bennett
    https://bitbucket.org/ubernostrum/django-registration/src/58eef8330b0f/registration/models.py
    """
    # Valid codes are exactly 40 lowercase SHA1 hex characters
    sha1_pattern = re.compile('^[a-f0-9]{40}$')
    if not sha1_pattern.search(code):
        return page_404_error(request, ('That validation code is invalid.'))
    try:
        rev = models.Revision.objects.get(validation_hash=code)
    except ObjectDoesNotExist:
        return page_404_error(request, ('That validation code was invalid'
                                        ' or used already.'))
    # Make the revision publicly visible and consume the one-time code
    rev.is_displayed = True
    rev.validation_hash = None
    rev.save()
    return redirect(rev.get_absolute_url())
#------------------------------------------------------------------------------
# View and download existing submissions:
@get_items_or_404
def view_item(request, submission, revision):
    """
    Shows a submitted item to web users. The ``slug`` is always ignored, but
    appears in the URLs for the sake of search engines. The revision, if
    specified >= 0 will show the particular revision of the item, rather than
    the latest revision (default).
    """
    create_hit(request, submission)
    # Short URLs: ".../<id>/" is always the latest; ".../<id>/<rev>/" pins a
    # specific (1-based) revision
    latest_link = settings.SPC['short_URL_root'] + str(submission.id) + '/'
    permalink = latest_link + str(revision.rev_id+1) + '/'
    horizon = datetime.timedelta(days=settings.SPC['hit_horizon'])
    pageviews = get_pagehits('submission',
                             start_date=datetime.datetime.now() - horizon,
                             item_pk=submission.id)
    package_files = []
    if submission.sub_type == 'package':
        # Update the repo to the version required so the listing matches
        submission.fileset.checkout_revision(revision.hash_id)
        package_files = list(submission.fileset.list_iterator())
    return render_to_response('submission/item.html', {},
              context_instance=RequestContext(request,
                    {'item': revision,
                     'tag_list': revision.tags.all(),
                     'permalink': permalink,
                     'latest_link': latest_link,
                     'pageviews': pageviews,
                     'pageview_days': settings.SPC['hit_horizon'],
                     'package_files': package_files,
                     }))
def get_display(submission, revision, filename):
    """
    Determines how to display a filetype, given its name.

    Returns a tuple ``(disp_type, disp_obj)``:

    * ('image', <relative path>)  -- supported image types (a copy is made)
    * ('html', <highlighted code>) -- recognized text files
    * ('binary', None)             -- everything else
    * ('none', None)               -- missing file or failed repo checkout
    """
    fname = filename[-1]
    # mimetypes.guess_type() returns (None, None) for unknown extensions;
    # treat those as binary instead of crashing on None.split('/')
    mime_guess = mimetypes.guess_type(fname)[0]
    if mime_guess is None:
        mime_type, mime_file = '', ''
    else:
        mime_type, mime_file = mime_guess.split('/')
    # Set the repo to the correct revision.  Verify the checkout succeeded
    # *before* dereferencing ``repo.local_dir`` (previously this check only
    # happened after the image branch, too late to prevent a crash).
    repo = submission.fileset.checkout_revision(revision.hash_id)
    if not repo:
        # Something went wrong when checking out the repo
        logger.error('Could not check out revision "%s" for '
                     'rev.id=%d' % (revision.hash_id, revision.id))
        return 'none', None
    src = os.path.join(repo.local_dir, *filename)
    if str(mime_type).startswith('image'):
        # We only display certain image types
        VALID_IMAGE_TYPES = ['gif', 'jpeg', 'png', 'bmp']
        if mime_file in VALID_IMAGE_TYPES:
            disp_type = 'image'
            # Copy image over to media location; we must make a copy, in case
            # a later user views a different revision of the document
            dirname = force_unicode(datetime.datetime.now().strftime(
                                smart_str(settings.SPC['resized_image_dir'])))
            disp_obj = os.path.normpath(os.path.join(dirname, fname))
            dst = os.path.join(settings.SPC['storage_dir'], disp_obj)
            idx = 1
            # Never overwrite an existing copy: append a counter suffix
            while os.path.exists(dst):
                disp_obj = disp_obj.split(fname)[0] + '%s_%d.%s' % \
                                    (fname.lower().split('.'+mime_file)[0],
                                     idx, mime_file)
                dst = os.path.join(settings.SPC['storage_dir'], disp_obj)
                idx += 1
            # Finally, copy the file across to the web storage area
            shutil.copy2(src, dst)
            return disp_type, disp_obj
    if str(mime_type).startswith('text'):
        # Read the first 10kb to send to the lexer guessing mechanism
        if os.path.exists(src):
            fh = open(src, 'rb')
            file_content = fh.readlines(10*1024)
            amount_read = fh.tell()
            fh.close()
        else:
            return 'none', None
        try:
            lexer = guess_lexer_for_filename(fname.lower(),
                                             ''.join(file_content))
        except ClassNotFound:
            # No lexer found: fall through and treat the file as binary
            pass
        else:
            disp_type = 'html'
            # Only re-read the file if we didn't read it all the first time
            if os.path.getsize(src) == amount_read:
                file_content = ''.join(file_content)
            else:
                fh = open(src, 'rb')
                file_content = fh.read()
                fh.close()
            # TODO(KGD): consider wrapping long text lines for text files
            # Return the highlighted code, if we know the lexer
            return disp_type, highlight_code(file_content,
                                             lexer=lexer.mimetypes[0])
    # All other file types are assumed to be binary
    disp_type = 'binary'
    # TODO: make disp_obj a link to the file (add this capability to
    # ``FileSet``); until then return None explicitly -- previously this
    # returned an unassigned variable, raising NameError
    disp_obj = None
    return disp_type, disp_obj
@get_items_or_404
def show_file(request, submission, revision, filename):
    """
    Display a ``filename`` from a given ``submission`` and ``revision``
    """
    # Cache key: md5 over the submission id, revision id and path parts
    key_source = [str(submission.id), str(revision.id)] + list(filename)
    key = md5('-'.join(key_source)).hexdigest()
    cached = models.DisplayFile.objects.filter(fhash=key)
    if cached:
        # Get the displayed item from the database rather than checking out
        # the repository, determining the file type and HTML to display
        obj = cached[0]
    else:
        # Create the displayed item and store it in the database
        disp_type, disp_obj = get_display(submission, revision, filename)
        obj = models.DisplayFile.objects.create(fhash=key,
                                                display_type=disp_type,
                                                display_obj=disp_obj)
    # Now return ``obj``, dispatching on how it should be rendered
    kind = obj.display_type
    if kind == 'image':
        return HttpResponse(u'<img = ...')
    elif kind == 'html':
        return HttpResponse(obj.display_obj)
    elif kind == 'binary':
        return HttpResponse(u'<a href="%s">%s</a>' % (obj.display_obj,
                                                      filename[-1]))
    elif kind == 'none':
        return HttpResponse(filename[-1])
@get_items_or_404
def download_submission(request, submission, revision):
    """
    Send the user the content of ``revision``: the raw code for snippets,
    or a ZIP archive of the repository for packages.  ZIP files are cached
    in a staging directory and rebuilt only when missing.
    """
    create_hit(request, submission, extra_info="download")
    if submission.sub_type == 'snippet':
        response = HttpResponse(mimetype="application/x-python")
        fname = submission.slug.replace('-', '_') + '.py'
        response["Content-Disposition"] = "attachment; filename=%s" % fname
        source = Site.objects.get_current().domain + \
                                                submission.get_absolute_url()
        response.write('# Source: ' + source + '\n\n' + revision.item_code)
        return response
    if submission.sub_type == 'package':
        zip_dir = os.path.join(settings.MEDIA_ROOT,
                               settings.SPC['ZIP_staging'],
                               'download')
        ensuredir(zip_dir)
        # The MIME type must not contain "attachment;"; that token belongs
        # in the Content-Disposition header below
        response = HttpResponse(mimetype="application/zip")
        zip_name = '%s-%d-%d.zip' % (submission.slug, submission.id,
                                     revision.rev_id_human)
        # "attachment;" added for consistency with the snippet branch above
        response['Content-Disposition'] = 'attachment; filename=%s' % zip_name
        full_zip_file = os.path.join(zip_dir, zip_name)
        if not os.path.exists(full_zip_file):
            # Set the repo's state to the state when that particular revision
            # existed
            out = submission.fileset.checkout_revision(revision.hash_id)
            if out:
                logger.info('Checked out revision "%s" for rev.id=%d' % \
                            (revision.hash_id, revision.id))
            else:
                logger.error('Could not check out revision "%s" for '
                             'rev.id=%d' % (revision.hash_id, revision.id))
                return page_404_error(request, ('Could not create the ZIP '
                                                'file. This error has been '
                                                'reported.'))
            zip_f = zipfile.ZipFile(full_zip_file, "w", zipfile.ZIP_DEFLATED)
            src_dir = os.path.join(settings.SPC['storage_dir'],
                                   submission.fileset.repo_path)
            for path, dirs, files in os.walk(src_dir):
                for name in files:
                    file_name = os.path.join(path, name)
                    # ZipFile.write() takes the path directly; the previous
                    # open()/close() of each file here was dead code
                    zip_f.write(file_name, file_name.partition(src_dir)[2])
            for file_h in zip_f.filelist:
                # Normalize the "created by" system field of each entry
                # (0 = MS-DOS/FAT compatible per the zipfile docs)
                file_h.create_system = 0
            zip_f.close()
            # Return the repo checkout back to the most recent revision
            out = submission.fileset.checkout_revision(submission.\
                                                       last_revision.hash_id)
        # Return the ZIP file
        zip_data = open(full_zip_file, "rb")
        response.write(zip_data.read())
        zip_data.close()
        return response
#------------------------------------------------------------------------------
# Editing submissions: decorator order is important!
@login_required
@get_items_or_404
def edit_submission(request, submission, revision):
    """
    Entry point for editing an existing submission.  Only the original
    author may edit links and packages; snippets are open to everyone.
    """
    is_author = request.user == submission.created_by
    if submission.sub_type in ['link', 'package'] and not is_author:
        return page_404_error(request, ('You are not authorized to edit that '
                                        'submission. Only the original author '
                                        'may edit it.'))
    # A POST means we are mid-edit, so hand over an unbound form; otherwise
    # prime the form from the existing revision
    bound = False if request.POST else revision
    return new_or_edit_submission(request, submission.sub_type,
                                  bound_form=bound,
                                  submission=submission)
#------------------------------------------------------------------------------
def sort_items_by_page_views(all_items, item_module_name):
    """
    Order ``all_items`` by page-hit counts over the configured horizon.

    Returns ``(entry_order, count_list)``: the items most-viewed first,
    with never-viewed items appended at the end (count 0), and the matching
    view counts.
    """
    # TODO(KGD): Cache this reordering of ``items`` for a period of time
    today = datetime.datetime.now()
    horizon = datetime.timedelta(days=settings.SPC['hit_horizon'])
    page_order = get_pagehits(item_module_name,
                              start_date=today - horizon, end_date=today)
    # ``page_order`` holds (count, pk) tuples; reverse sort puts the
    # most-viewed primary keys first
    page_order.sort(reverse=True)
    all_items = list(all_items)
    items_pk = [item.pk for item in all_items]
    entry_order = []
    count_list = []
    for count, pk in page_order:
        try:
            idx = items_pk.index(pk)
        except ValueError:
            # A viewed pk that is not among ``all_items``: ignore it
            continue
        # Blank the slot so leftover (never viewed) items can be found below
        items_pk[idx] = None
        entry_order.append(all_items[idx])
        count_list.append(count)
    # Items that have never been viewed get added to the bottom:
    for idx, pk in enumerate(items_pk):
        if pk is not None:
            entry_order.append(all_items[idx])
            count_list.append(0)
    return entry_order, count_list
def show_items(request, what_view='', extra_info=''):
    """ Show different views onto all **revision** items (not submissions)

    ``what_view``/``extra_info`` select the listing:
        'tag'/<tag>                   -- entries carrying the given tag
        'show'/'all-tags'             -- the tag cloud page
        'show'/'all-revisions'        -- every revision, newest first
        'show'/'all-unique-revisions' -- latest displayed revision per entry
        'sort'/'most-viewed'          -- submissions ordered by page views
        'show'/'top-contributors'     -- author leaderboard
        'validate'/<code>             -- delegates to validate_submission()
    """
    what_view = what_view.lower()
    extra_info = extra_info.lower()
    entry_order = []
    page_title = ''
    template_name = 'submission/show-entries.html'
    if what_view == 'tag':
        all_revs = models.Revision.objects.most_recent().\
                                    filter(tags__slug=slugify(extra_info))
        page_title = 'All entries tagged'
        entry_order = list(all_revs)
    elif what_view == 'show' and extra_info == 'all-tags':
        page_title = 'All tags'
        template_name = 'submission/show-tag-cloud.html'
    elif what_view == 'show' and extra_info == 'all-revisions':
        # Show all submissions in reverse time order
        all_revs = models.Revision.objects.all().order_by('-date_created')
        page_title = 'All revisions'
        extra_info = ''
        entry_order = list(all_revs)
    elif what_view == 'show' and extra_info == 'all-unique-revisions':
        all_subs = models.Submission.objects.all().order_by('-date_created')
        page_title = 'All submissions'
        extra_info = ' (only showing the latest revision)'
        entry_order = [sub.last_revision for sub in all_subs if sub.last_revision.is_displayed]
    elif what_view == 'sort' and extra_info == 'most-viewed':
        page_title = 'All submissions in order of most views'
        extra_info = ''
        # Collect the distinct parent submissions of every revision
        all_subs = set()
        for rev in models.Revision.objects.all():
            all_subs.add(rev.entry)
        entry_order, _ = sort_items_by_page_views(all_subs, 'submission')
        entry_order = [entry.last_revision for entry in entry_order]
    elif what_view == 'show' and extra_info == 'top-contributors':
        page_title = 'Top contributors'
        extra_info = ''
        entry_order = top_authors('', 0)
    elif what_view == 'validate':
        return validate_submission(request, code=extra_info)
    entries = paginated_queryset(request, entry_order)
    return render_to_response(template_name, {},
              context_instance=RequestContext(request,
                    {'entries': entries,
                     'page_title': page_title,
                     'extra_info': extra_info,
                     'what_view' : what_view,}))
| {
"repo_name": "oswalpalash/OctaveCodeShare",
"path": "scipy_central/submission/views.py",
"copies": "1",
"size": "44356",
"license": "bsd-3-clause",
"hash": -5858101192499315000,
"line_mean": 42.4011741683,
"line_max": 101,
"alpha_frac": 0.5369510326,
"autogenerated": false,
"ratio": 4.40695479384004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.544390582644004,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Offset']
import os
import sys
import mmap
import struct
class Offset(object):
    '''
    Maps a row index to the byte position of that row inside an sstable.

    Positions are stored as consecutive 8-byte big-endian unsigned
    integers, so the position of row ``i`` lives at byte offset ``i * 8``.
    '''
    def __init__(self, sstable, t, path=None):
        self.sstable = sstable
        self.t = t
        self.mm = None
        self.f = None

    def __getitem__(self, i):
        '''Return the sstable position of row ``i``.'''
        return self._read_sstable_pos(i)

    def get_path(self):
        '''Full path of the offset data file for this table.'''
        return os.path.join(self.sstable.table.get_path(),
                            'offset-%s.data' % self.t)

    def open(self):
        '''
        Open file for reading.
        '''
        self.f = open(self.get_path(), 'r+b')
        self.mm = mmap.mmap(self.f.fileno(), 0)

    def close(self):
        '''
        Close the memory map and the underlying read handle.
        '''
        self.mm.close()
        self.f.close()

    def w_open(self):
        '''
        Open file for writing.
        '''
        self.f = open(self.get_path(), 'wb')

    def w_close(self):
        '''
        Close file for writing.
        '''
        self.f.close()

    def _read_sstable_pos(self, i):
        # Each record is 8 bytes ('!Q'): unpack straight from the mmap
        sstable_pos, = struct.unpack_from('!Q', self.mm, i * 8)
        return sstable_pos

    def _write_sstable_pos(self, sstable_pos):
        # Append one position as an 8-byte big-endian unsigned integer
        self.f.write(struct.pack('!Q', sstable_pos))
"repo_name": "yadb/yadb",
"path": "backup/store/offset.py",
"copies": "1",
"size": "1356",
"license": "mit",
"hash": 753632426339009200,
"line_mean": 21.6166666667,
"line_max": 68,
"alpha_frac": 0.5221238938,
"autogenerated": false,
"ratio": 3.476923076923077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.949248131415742,
"avg_score": 0.0013131313131313133,
"num_lines": 60
} |
"""All of our database stuff.
"""
import sqlite3
import os
# Makes/opens database relative to this file.
db_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "wrestlers.db3")
conn = sqlite3.connect(db_path)
cursor = conn.cursor()
# When we first init this module, create what we need.
cursor.execute("""CREATE TABLE IF NOT EXISTS wrestlers (
name text NOT NULL UNIQUE,
brawn integer,
finesse integer,
wins integer,
losses integer
);""")
conn.commit()
def cleanup():
    """Close the shared cursor and connection; call once at shutdown."""
    cursor.close()
    conn.close()
def create_wrestler(**stats):
    """Create a new wrestler in the database.

    Expects ``name``, ``brawn``, ``finesse``, ``wins`` and ``losses`` in
    ``stats``.  Raises ValueError when a wrestler with that name exists.
    """
    try:
        # Parameterized query: the previous string formatting was open to
        # SQL injection through any of the values (notably ``name``).
        cursor.execute(
            """INSERT INTO wrestlers VALUES (?, ?, ?, ?, ?)""",
            (stats['name'], stats['brawn'], stats['finesse'],
             stats['wins'], stats['losses']))
        conn.commit()
    except sqlite3.IntegrityError as err:
        # reraise error so that external things don't need to know about
        # the database error types.
        raise ValueError("Wrestler already exists: %s" % err)
def update_wrestler(**stats):
    """Update an existing wrestler's stats, keyed by ``name``.

    (Docstring previously said "Create a new wrestler" -- copy/paste slip.)
    """
    try:
        # Parameterized query: the previous string formatting was open to
        # SQL injection through any of the stat values.
        cursor.execute(
            """UPDATE wrestlers SET
               brawn=?, finesse=?, wins=?, losses=?
               WHERE name=?""",
            (stats['brawn'], stats['finesse'], stats['wins'],
             stats['losses'], stats['name']))
        conn.commit()
    except sqlite3.Error as err:
        # reraise error so that external things don't need to know about
        # the database error types.
        raise ValueError("Something Bad Happened in update_wrestler: %s" % err)
def get_wrestlers():
"""Returns a the raw data of wrestlers as a dictionary.
"""
results = []
try:
cursor.execute("""SELECT * FROM wrestlers""")
fieldnames = map(lambda x: x[0], cursor.description)
for wrestler in cursor:
# Compress with field names and turn into a dictionary
wrestler = dict(zip(fieldnames, wrestler))
results.append(wrestler)
except sqlite3.Error as err:
print "ERROR in get_wrestlers: %s" % err
# Make sure we always return a list.
return results
| {
"repo_name": "jeremyosborne/python",
"path": "general/wrestling/db.py",
"copies": "1",
"size": "2283",
"license": "mit",
"hash": -7282393078876713000,
"line_mean": 25.2413793103,
"line_max": 84,
"alpha_frac": 0.6009636443,
"autogenerated": false,
"ratio": 3.7860696517412937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.980317949116266,
"avg_score": 0.016770760975726683,
"num_lines": 87
} |
import os, time, re
import sublime
import sublime_plugin
import glob
import os
import shutil
import pprint
from xml.etree import ElementTree
current_path = None
class CreateJrubyfxProjectCommand(sublime_plugin.WindowCommand):
    """Window command that lets the user browse directories and scaffold a
    new JRubyFX project (src/dist/package/build dirs plus a generated
    .sublime-project file, opened in a new Sublime instance)."""

    def run(self):
        self.doCommand()

    def doCommand(self):
        # Build the directory/file lists; when a project root exists, float
        # the active file's directory near the top of the chooser.
        if not self.find_root():
            self.construct_excluded_pattern()
            self.build_dir_paths()
            self.build_file_paths()
            self.select_dir()
        else:
            self.construct_excluded_pattern()
            self.build_dir_paths()
            self.build_file_paths()
            self.move_current_directory_to_top()
            self.select_dir()

    def find_root(self):
        # True when the window has at least one folder open; otherwise fall
        # back to the user's home directory and report False.
        folders = self.window.folders()
        if len(folders) == 0:
            from os.path import expanduser
            self.root = unicode(expanduser("~"))
            # Offset used to strip "<root>/" from absolute paths
            self.rel_path_start = len(self.root) + 1
            return False
        self.root = folders[0]
        self.rel_path_start = len(self.root) + 1
        return True

    def construct_excluded_pattern(self):
        # The settings store '|' as a placeholder for backslash escapes
        patterns = [pat.replace('|', '\\') for pat in self.get_setting('excluded_dir_patterns')]
        self.excluded = re.compile('^(?:' + '|'.join(patterns) + ')$')

    def get_setting(self, key):
        # Project-specific settings (under the 'JRubyFXML' key) win over the
        # user/default JRubyFXML.sublime-settings file.
        settings = None
        view = self.window.active_view()
        if view:
            settings = self.window.active_view().settings()
        if settings and settings.has('JRubyFXML') and key in settings.get('JRubyFXML'):
            # Get project-specific setting
            results = settings.get('JRubyFXML')[key]
        else:
            # Get user-specific or default setting
            settings = sublime.load_settings('JRubyFXML.sublime-settings')
            results = settings.get(key)
        return results

    def build_dir_paths(self):
        # Collect every non-excluded directory under self.root (relative to
        # it); the first entry lets the user navigate up one level.
        self.dir_paths = []
        self.dir_paths = [["../", "Go up one level in the directory structure"]]
        self.selected_dir = ""
        for base, dirs, files in os.walk(self.root):
            dirs_copy = dirs[:]
            # Prune excluded dirs in place so os.walk won't descend into them
            [dirs.remove(dir) for dir in dirs_copy if self.excluded.search(dir)]
            for dir in dirs:
                dir_path = os.path.join(base, dir)[self.rel_path_start:]
                self.dir_paths.append(dir_path)

    def build_file_paths(self):
        # NOTE(review): the loop that filled self.directory_files is
        # commented out, so this effectively only resets the list --
        # confirm that is intentional.
        self.directory_files = []
        directory = self.root + "/" + self.selected_dir
        for base, dirs, files in os.walk(directory):
            files_copy = files[:]
            # for file in files:
            #     self.directory_files.append(file)

    def move_current_directory_to_top(self):
        # Insert the active file's directory right below the "../" entry
        view = self.window.active_view()
        if view:
            cur_dir = os.path.dirname(view.file_name())[self.rel_path_start:]
            for path in self.dir_paths:
                if path == cur_dir:
                    i = self.dir_paths.index(path)
                    self.dir_paths.insert(1, self.dir_paths.pop(i))
                    break

    def select_dir(self):
        self.window.show_quick_panel(self.dir_paths, self.dir_selected, sublime.MONOSPACE_FONT)

    def dir_selected(self, selected_index):
        # Quick-panel callback; -1 means the user dismissed the panel
        if selected_index != -1:
            if selected_index == 0:
                self.up_one_level()
            else:
                self.selected_dir = self.dir_paths[selected_index]
                self.build_file_paths()
                # Add aditional menu options
                self.directory_files.insert(0, ["Browse Directories", "go back to browsing directories"])
                self.directory_files.insert(1, ["New JRubyFX Project", "new project in the current directory"])
                # Open window to choose desired action
                self.window.show_quick_panel(self.directory_files, self.file_selected, sublime.MONOSPACE_FONT)

    def file_selected(self, selected_index):
        if selected_index != -1:
            if selected_index == 0:
                self.select_dir()
            elif selected_index == 1:
                self.new_dir()

    def up_one_level(self):
        # Re-root one directory higher and rebuild all the panel data
        self.root = os.path.abspath(os.path.join(self.root, os.path.pardir))
        self.rel_path_start = len(self.root) + 1
        self.build_dir_paths()
        self.build_file_paths()
        self.move_current_directory_to_top()
        self.select_dir()

    def new_dir(self):
        self.window.show_input_panel("New project name:", '', self.new_dir_action, None, None)

    def new_dir_action(self, dir_name):
        # Create the JRubyFX project skeleton and open the generated
        # .sublime-project file in a new Sublime instance.
        full_path = os.path.join(self.root, self.selected_dir, dir_name)
        if os.path.lexists(full_path):
            sublime.error_message('Directory already exists:\n%s' % full_path)
            return
        else:
            os.mkdir(full_path)
            os.mkdir(full_path + os.sep + "src")
            os.mkdir(full_path + os.sep + "dist")
            os.mkdir(full_path + os.sep + "package")
            os.mkdir(full_path + os.sep + "package" + os.sep + "linux")
            os.mkdir(full_path + os.sep + "package" + os.sep + "macosx")
            os.mkdir(full_path + os.sep + "package" + os.sep + "windows")
            os.mkdir(full_path + os.sep + "build")
            file_name = dir_name + ".sublime-project"
            self.create_project_file(file_name, full_path)
            # self.remove_window_folders()
            project_file_loc = os.path.join(full_path, file_name)
            sublime_command_line(['-a', project_file_loc])

    def create_project_file(self, file_name, full_path):
        # Escape backslashes so Windows paths survive the JSON encoding
        string_full_path = str(full_path).replace("\\","\\\\")
        prj_file_contents = ("{\n"
                             " \"folders\":\n"
                             " [\n"
                             " {\n"
                             " \"path\": \"%s\"\n"
                             " }\n"
                             " ]\n"
                             "}\n" % string_full_path)
        self.project_file = os.path.join(full_path, file_name)
        file_ref = open(self.project_file, "w")
        file_ref.write((prj_file_contents));
        file_ref.close()

    def remove_window_folders(self):
        folders_to_remove = self.window.folders()
        self.window.run_command('remove_folder', {"dirs":folders_to_remove})
# hack to add folders to sidebar (stolen from wbond)
def get_sublime_path():
    """Return the path of the Sublime Text executable for this platform."""
    if sublime.platform() == 'osx':
        return '/Applications/Sublime Text 2.app/Contents/SharedSupport/bin/subl'
    elif sublime.platform() == 'linux':
        # argv[0] of the current process is the running editor binary
        return open('/proc/self/cmdline').read().split(chr(0))[0]
    elif sublime.platform() == "windows":
        return 'sublime_text.exe'
    else:
        # ``sys`` is not imported at module level in this file; import it
        # locally so this fallback branch cannot raise NameError
        import sys
        return sys.executable
def sublime_command_line(args):
    """Launch Sublime Text with ``args``.

    Note: prepends the executable path to ``args`` *in place*, so the
    caller's list is mutated.  Returns the subprocess.Popen handle.
    """
    import subprocess
    args.insert(0, get_sublime_path())
    return subprocess.Popen(args)
class CreateFxmlTemplateCommand(sublime_plugin.WindowCommand):
    """Window command that creates a new file from an ``.fxml-template``
    found under the Sublime packages directory: pick a template, pick a
    target directory, answer the template's argument prompts, and the file
    is created and filled in once its view loads."""

    ROOT_DIR_PREFIX = '[root: '
    ROOT_DIR_SUFFIX = ']'
    INPUT_PANEL_CAPTION = 'File name:'

    def run(self):
        if not self.find_root():
            return
        self.find_templates()
        self.window.show_quick_panel(self.templates, self.template_selected)

    def refresh_folders(self):
        # The sidebar refresh right after file creation is unreliable, so
        # fire it several times with increasing delays.
        try:
            sublime.set_timeout(lambda:sublime.active_window().run_command('refresh_folder_list'), 200);
            sublime.set_timeout(lambda:sublime.active_window().run_command('refresh_folder_list'), 600);
            sublime.set_timeout(lambda:sublime.active_window().run_command('refresh_folder_list'), 1300);
            sublime.set_timeout(lambda:sublime.active_window().run_command('refresh_folder_list'), 2300);
        except:
            pass

    def create_and_open_file(self, path):
        # Create missing parent directories and an empty file; the content
        # is inserted by populate_file() once the view finishes loading.
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        open(path, 'w')
        global template
        template = {
            'content': self.replace_variables(self.get_content(path)),
            'filename': os.path.basename(path),
            'path': os.path.dirname(path)
        }
        global current_path
        view = self.window.open_file(path)
        current_path = view.file_name()
        if not view.is_loading():
            populate_file(view)
        self.refresh_folders()

    def get_content(self, path):
        # Template content comes from an inline <content> element, or from
        # an external file referenced by a <file> element (which wins).
        content = ''
        try:
            content = self.template.find("content").text
        except:
            pass
        try:
            path = os.path.abspath(os.path.join(os.path.dirname(self.template_path), self.template.find("file").text))
            content = open(path).read()
            print content
        except:
            pass
        return content

    def find_root(self):
        # The first open folder of the window is treated as the project root
        folders = self.window.folders()
        if len(folders) == 0:
            sublime.error_message('Could not find project root')
            return False
        self.root = folders[0]
        self.rel_path_start = len(self.root) + 1
        return True

    def construct_excluded_pattern(self):
        # The settings store '|' as a placeholder for backslash escapes
        patterns = [pat.replace('|', '\\') for pat in self.get_setting('excluded_dir_patterns')]
        self.excluded = re.compile('^(?:' + '|'.join(patterns) + ')$')

    def get_setting(self, key):
        # Project-specific settings (under 'FileTemplates') win over the
        # user/default JRubyFXML.sublime-settings file.
        settings = None
        view = self.window.active_view()
        if view:
            settings = self.window.active_view().settings()
        if settings and settings.has('FileTemplates') and key in settings.get('FileTemplates'):
            # Get project-specific setting
            results = settings.get('FileTemplates')[key]
        else:
            # Get user-specific or default setting
            settings = sublime.load_settings('JRubyFXML.sublime-settings')
            results = settings.get(key)
        return results

    def find_templates(self):
        # Every *.fxml-template under the packages dir becomes a menu entry
        self.templates = []
        self.template_paths = []
        for root, dirnames, filenames in os.walk(sublime.packages_path()):
            for filename in filenames:
                if filename.endswith(".fxml-template"):
                    self.template_paths.append(os.path.join(root, filename))
                    self.templates.append(os.path.basename(root) + ": " + os.path.splitext(filename)[0])

    def template_selected(self, selected_index):
        if selected_index != -1:
            self.template_path = self.template_paths[selected_index]
            #print self.template_path
            from elementtree import SimpleXMLTreeBuilder
            ElementTree.XMLTreeBuilder = SimpleXMLTreeBuilder.TreeBuilder
            tree = ElementTree.parse(open(self.template_path))
            self.template = tree
            self.construct_excluded_pattern()
            self.build_relative_paths()
            #self.move_current_directory_to_top()
            self.window.show_quick_panel(self.relative_paths, self.dir_selected)

    def build_relative_paths(self):
        # First entries: the template's default <path> (if any) and the
        # project root; then every non-excluded directory under the root.
        self.relative_paths = []
        try:
            path = self.template.find("path").text
        except:
            path = ""
        if len(path) > 0:
            self.relative_paths = [ "Default: " + self.template.find("path").text ]
        self.relative_paths.append( self.ROOT_DIR_PREFIX + os.path.split(self.root)[-1] + self.ROOT_DIR_SUFFIX )
        for base, dirs, files in os.walk(self.root):
            dirs_copy = dirs[:]
            # Prune excluded dirs in place so os.walk won't descend into them
            [dirs.remove(dir) for dir in dirs_copy if self.excluded.search(dir)]
            for dir in dirs:
                relative_path = os.path.join(base, dir)[self.rel_path_start:]
                self.relative_paths.append(relative_path)

    def move_current_directory_to_top(self):
        view = self.window.active_view()
        if view:
            cur_dir = os.path.dirname(view.file_name())[self.rel_path_start:]
            for path in self.relative_paths:
                if path == cur_dir:
                    i = self.relative_paths.index(path)
                    self.relative_paths.insert(0, self.relative_paths.pop(i))
                    break

    def dir_selected(self, selected_index):
        if selected_index != -1:
            self.selected_dir = self.relative_paths[selected_index]
            filename = ''
            if len(self.template.find("filename").text) > 0:
                filename = self.template.find("filename").text
            try:
                self.arguments = list(self.template.find("arguments"))
            except:
                self.arguments = []
            self.variables = {}
            self.next_argument()

    def next_argument(self):
        # Prompt for each <arguments> child in turn; once exhausted, move on
        # to asking for the file name.
        if len(self.arguments) > 0 :
            self.argument = self.arguments.pop(0)
            caption = self.argument.text
            self.window.show_input_panel(caption, '', self.process_argument, None, None)
        else:
            self.file_name_input()

    def process_argument(self, value):
        self.variables[self.argument.tag] = value
        self.next_argument()

    def replace_variables(self, text):
        # Substitute "$tag" placeholders with the collected argument values
        for variable in self.variables.keys():
            text = text.replace( "$" + variable, self.variables[variable] )
        return text

    def file_name_input(self):
        file_name = self.template.find("filename").text
        file_name = self.replace_variables(file_name)
        dir = self.selected_dir
        # "[root: ...]" means create directly in the project root;
        # "Default: ..." means use the template's own <path>
        if self.selected_dir.startswith(self.ROOT_DIR_PREFIX):
            dir = ''
        if self.selected_dir.startswith("Default: "):
            dir = self.template.find("path").text
            dir = self.replace_variables(dir)
        full_path = os.path.join(self.root, dir, file_name)
        if os.path.lexists(full_path):
            sublime.error_message('File already exists:\n%s' % full_path)
            return
        else:
            self.create_and_open_file(full_path)
class FileTemplatesListener(sublime_plugin.EventListener):
    """Fills in a freshly created template file once Sublime finishes
    loading its view."""

    def on_load(self, view):
        global current_path
        # Only react to the one file whose creation is pending
        if view.file_name() != current_path:
            return
        populate_file(view)
        current_path = None
def populate_file(view):
    """Insert the pending template content into ``view`` as a snippet and
    refresh the sidebar."""
    global template
    view.run_command("insert_snippet", {'contents': template["content"]})
    view.window().run_command("refresh_folder_list")
class BuildAndDeployCommand(sublime_plugin.WindowCommand):
    """Build-system entry point that delegates to Sublime's built-in
    ``exec`` command, forwarding only ``cmd`` and ``working_dir``."""

    def run(self, cmd = None, file_regex = "", line_regex = "", working_dir = "",
            encoding = "utf-8", env = None, quiet = False, kill = False,
            # Catches "path" and "shell"
            **kwargs):
        # Mutable default arguments ([], {}) are shared between calls;
        # normalize None back to the previous defaults instead.
        cmd = [] if cmd is None else cmd
        env = {} if env is None else env
        self.window.run_command("exec", {"cmd": cmd, "working_dir": working_dir})
class WindowShowOverlayCommand(sublime_plugin.WindowCommand):
    """Expose ``show_overlay`` as a window command, since build systems
    cannot invoke it directly."""

    def run(self, *args, **kwargs):
        overlay_args = kwargs
        self.window.run_command('show_overlay', overlay_args)
"repo_name": "edubkendo/SublimeJRubyFXML",
"path": "JRubyFXML.py",
"copies": "1",
"size": "15922",
"license": "mit",
"hash": 1589108028923368000,
"line_mean": 35.6045977011,
"line_max": 118,
"alpha_frac": 0.5884939078,
"autogenerated": false,
"ratio": 3.9101178781925343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49986117859925344,
"avg_score": null,
"num_lines": null
} |
# Copyright (c) 2012 Nick Lloyd
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from zope.interface import implements
from sub_collab.peer import base
from twisted.internet import reactor, protocol, error, interfaces
from twisted.protocols import basic
from sub_collab import registry, status_bar
from sub_collab import event as collab_event
import sublime
import logging, threading, sys, socket, struct, os, re, time, functools
# in bytes
MAX_CHUNK_SIZE = 1024
REGION_PATTERN = re.compile('(\d+), (\d+)')
class ViewMonitorThread(threading.Thread):
    """Background thread run by the hosting peer while a view is shared.

    Every half second (while the peer is HOST_ROLE and STATE_CONNECTED and
    not shut down) it schedules grabAndSendViewPosition() on the sublime
    main thread; every 10th tick (~5s) it also schedules sendViewSize() so
    the partner can detect view drift.
    """
    logger = logging.getLogger('SubliminalCollaborator.ViewMonitor')
    def __init__(self, peer):
        threading.Thread.__init__(self)
        # peer being monitored; must expose view, role, state,
        # sendViewPositionUpdate() and sendMessage()
        self.peer = peer
        # last center-line region sent, used to suppress duplicate updates
        self.lastViewCenterLine = None
        # set True by destroy() to make run() exit its loop
        self.shutdown = False
    def grabAndSendViewPosition(self):
        """
        Separate function to be called from the sublime main thread...
        because the view.visible_region() function demands that.
        """
        # calculate the center-most line in the view
        # this will match most closely with the true center of the view
        viewRegionLines = self.peer.view.split_by_newlines(self.peer.view.visible_region())
        lineIdx = len(viewRegionLines) / 2 - 1
        if lineIdx < 0:
            lineIdx = 0
        viewCenterRegion = viewRegionLines[lineIdx]
        # only send when the center line actually changed since last tick
        if not viewCenterRegion == self.lastViewCenterLine:
            self.lastViewCenterLine = viewCenterRegion
            self.peer.sendViewPositionUpdate(viewCenterRegion)
    def sendViewSize(self):
        # payload is the total character count of the shared view
        self.peer.sendMessage(base.VIEW_SYNC, payload=str(self.peer.view.size()))
    def run(self):
        self.logger.info('Monitoring view')
        count = 0
        # we must be the host and connected
        while (self.peer.role == base.HOST_ROLE) \
                and (self.peer.state == base.STATE_CONNECTED) \
                and (not self.shutdown):
            # view APIs must run on the sublime main thread, hence set_timeout
            if not self.peer.view == None:
                sublime.set_timeout(self.grabAndSendViewPosition, 0)
            # every 10 half-second ticks also publish the view size
            if count == 10:
                count = 0
                sublime.set_timeout(self.sendViewSize, 0)
            time.sleep(0.5)
            count += 1
        self.logger.info('Stopped monitoring view')
    def destroy(self):
        """Signal the monitor loop to stop; callers may then join()."""
        self.shutdown = True
# build off of the Int32StringReceiver to leverage its unprocessed buffer handling
class BasicPeer(base.BasePeer, basic.Int32StringReceiver, protocol.ClientFactory, protocol.ServerFactory):
"""
One side of a peer-to-peer collaboration connection.
This is a direct connection with another peer endpoint for sending
view data and events.
"""
logger = logging.getLogger('SubliminalCollaborator.BasicPeer')
# Message header structure in struct format:
# '!HBB'
# elements:
# - magicNumber: 9, http://futurama.wikia.com/wiki/Number_9_man
# - messageType: see constants below
# - messageSubType: see constants below, 0 in all but edit-messages
messageHeaderFmt = '!HBB'
messageHeaderSize = struct.calcsize(messageHeaderFmt)
    def __init__(self, username, parentNegotiator):
        """
        Set up connection and session bookkeeping for one peer endpoint.

        @param username: our username, passed through to base.BasePeer
        @param parentNegotiator: negotiator that brokered this session
        """
        base.BasePeer.__init__(self, username, parentNegotiator)
        # connection can be an IListeningPort or IConnector
        self.connection = None
        self.host = None
        self.port = None
        # CLIENT or SERVER
        self.peerType = None
        # HOST_ROLE or PARTNER_ROLE
        self.role = None
        # STATE_CONNECTING, STATE_CONNECTED, STATE_DISCONNECTED
        self.state = None
        # the shared sublime view; None until a collab session starts
        self.view = None
        # queue of 2 or 3 part tuples
        self.toDoToViewQueue = []
        self.toDoToViewQueueLock = threading.Lock()
        # thread for polling host-side view and periodically checking view sync state
        self.viewMonitorThread = ViewMonitorThread(self)
        # last collected command tuple (str, dict, int)
        self.lastViewCommand = ('', {}, 0)
        # flag to inform EventListener if Proxy plugin is sending events
        # relates to a selection update issue around the cut command
        self.isProxyEventPublishing = False
    def hostConnect(self, port = 0, ipaddress=''):
        """
        Initiate a peer-to-peer session as the host by listening on the
        given port for a connection.
        @param port: C{int} port number to listen on, defaults to 0 (system will pick one)
        @param ipaddress: interface to bind to, defaults to '' (all interfaces)
        @return: the connected port number
        """
        self.peerType = base.SERVER
        self.role = base.HOST_ROLE
        self.state = base.STATE_CONNECTING
        # backlog=1: only a single partner may connect to this session
        self.connection = reactor.listenTCP(port, self, backlog=1, interface=ipaddress)
        # ask the listener which port was actually bound (matters when port=0)
        self.port = self.connection.getHost().port
        self.logger.info('Listening for peers at %s:%d' % (ipaddress, self.port))
        return self.port
def clientConnect(self, host, port):
"""
Initiate a peer-to-peer session as the partner by connecting to the
host peer with the given host and port.
@param host: ip address of the host Peer
@param port: C{int} port number of the host Peer
"""
self.logger.info('Connecting to peer at %s:%d' % (host, port))
self.host = host
self.port = port
self.peerType = base.CLIENT
self.role = base.PARTNER_ROLE
self.state = base.STATE_CONNECTING
self.connection = reactor.connectTCP(self.host, self.port, self, timeout=5)
    def disconnect(self):
        """
        Disconnect from the peer-to-peer session.

        Safe to call repeatedly: a no-op when already disconnected.  Sends a
        DISCONNECT message (when a transport exists), then tears down the
        listening port or client connector on the reactor thread.
        """
        self.stopCollab()
        if self.state == base.STATE_DISCONNECTED:
            # already disconnected!
            return
        # NOTE(review): earlierState is captured but never used below
        earlierState = self.state
        self.state = base.STATE_DISCONNECTING
        if self.transport != None:
            self.sendMessage(base.DISCONNECT)
        if self.peerType == base.SERVER:
            self.logger.debug('Closing server-side connection')
            # self.connection.stopListening()
            # twisted connection APIs must be driven from the reactor thread
            reactor.callFromThread(self.connection.stopListening)
        elif self.peerType == base.CLIENT:
            self.logger.debug('Closing client-side connection')
            # self.connection.disconnect()
            reactor.callFromThread(self.connection.disconnect)
def onDisconnect(self):
"""
Callback method if we are disconnected.
"""
if self.peerType == base.CLIENT:
self.logger.debug('Disconnecting from peer at %s:%d' % (self.host, self.port))
else:
self.logger.debug('Disconnecting from peer at %d' % self.port)
self.disconnect()
self.state = base.STATE_DISCONNECTED
self.logger.info('Disconnected from peer %s' % self.sharingWithUser)
status_bar.status_message('Stopped sharing with %s' % self.sharingWithUser)
def startCollab(self, view):
"""
Send the provided C{sublime.View} contents to the connected peer.
"""
self.view = view
self.view.set_read_only(True)
viewName = self.view.file_name()
if not viewName == None:
viewName = os.path.basename(viewName)
else:
viewName = 'NONAME'
totalToSend = self.view.size()
begin = 0
end = MAX_CHUNK_SIZE
# now we make sure we are connected... better way to do this?
while not self.state == base.STATE_CONNECTED:
time.sleep(1.0)
if (self.state == base.STATE_DISCONNECTING) or (self.state == base.STATE_DISCONNECTED):
self.logger.error('While waiting to share view over a connection the peer was disconnected!')
self.disconnect()
return
self.logger.info('Sharing view %s with %s' % (self.view.file_name(), self.sharingWithUser))
self.toAck = []
self.sendMessage(base.SHARE_VIEW, payload=('%s|%s' % (viewName, totalToSend)))
while begin < totalToSend:
chunkToSend = self.view.substr(sublime.Region(begin, end))
self.toAck.append(len(chunkToSend))
self.sendMessage(base.VIEW_CHUNK, payload=chunkToSend)
begin = begin + MAX_CHUNK_SIZE
end = end + MAX_CHUNK_SIZE
status_bar.progress_message("sending view to %s" % self.sharingWithUser, begin, totalToSend)
self.sendMessage(base.END_OF_VIEW, payload=view.settings().get('syntax'))
self.view.set_read_only(False)
# start the view monitoring thread
self.viewMonitorThread.start()
def resyncCollab(self):
"""
Resync the shared editor contents between the host and the partner.
"""
status_bar.status_message('RESYNCING VIEW CONTENT WITH PEER')
self.view.set_read_only(True)
totalToSend = self.view.size()
begin = 0
end = MAX_CHUNK_SIZE
# now we make sure we are connected... better way to do this?
while not self.state == base.STATE_CONNECTED:
time.sleep(1.0)
if (self.state == base.STATE_DISCONNECTING) or (self.state == base.STATE_DISCONNECTED):
self.logger.error('While waiting to resync view over a connection the peer was disconnected!')
self.disconnect()
return
view_name = self.view.file_name()
if not view_name:
view_name = self.view.name()
self.logger.info('Resyncing view %s with %s' % (view_name, self.sharingWithUser))
self.toAck = []
self.sendMessage(base.RESHARE_VIEW, payload=str(totalToSend))
while begin < totalToSend:
chunkToSend = self.view.substr(sublime.Region(begin, end))
self.toAck.append(len(chunkToSend))
self.sendMessage(base.VIEW_CHUNK, payload=chunkToSend)
begin = begin + MAX_CHUNK_SIZE
end = end + MAX_CHUNK_SIZE
status_bar.progress_message("sending view to %s" % self.sharingWithUser, begin, totalToSend)
self.sendMessage(base.END_OF_VIEW, payload=self.view.settings().get('syntax'))
self.view.set_read_only(False)
# send view position as it stands now so the partner view is positioned appropriately post-resync
viewRegionLines = self.view.split_by_newlines(self.view.visible_region())
lineIdx = len(viewRegionLines) / 2 - 1
if lineIdx < 0:
lineIdx = 0
viewCenterRegion = viewRegionLines[lineIdx]
self.sendViewPositionUpdate(viewCenterRegion)
# start the view monitoring thread if not already running
if not self.viewMonitorThread.is_alive():
self.viewMonitorThread.start()
def onStartCollab(self):
"""
Callback method informing the peer that we have received the view.
"""
self.logger.debug('collaboration session with view started!')
registry.registerSessionByView(self.view, self)
# self.notify(collab_event.RECVD_VIEW, self)
def stopCollab(self):
"""
Notify the connected peer that we are terminating the collaborating session.
"""
if (self.peerType == base.CLIENT) and (self.view != None):
self.view.set_read_only(False)
self.view = None
status_bar.status_message('stopped sharing with %s' % self.str())
def onStopCollab(self):
"""
Callback method informing the peer that we are terminating a collaborating session.
"""
self.stopCollab()
def swapRole(self):
"""
Request a role swap with the connected peer.
"""
if self.view is None:
self.logger.warn('Request to swap role when no view is being shared!')
return
if self.role == base.HOST_ROLE:
self.logger.debug('Stopping ViewMonitorThread until role swap is decided')
self.viewMonitorThread.destroy()
self.viewMonitorThread.join()
self.sendMessage(base.SWAP_ROLE)
def onSwapRole(self):
"""
Callback method to respond to role swap requests from the connected peer.
"""
if self.view is None:
self.logger.warn('Request from %s to swap role when no view is being shared!' % self.str())
return
if self.role == base.HOST_ROLE:
self.logger.debug('Stopping ViewMonitorThread until role swap is decided')
self.viewMonitorThread.destroy()
self.viewMonitorThread.join()
message = None
view_name = self.view.file_name()
if not view_name or (len(view_name) == 0):
view_name = self.view.name()
if not view_name or (len(view_name) == 0):
view_name = 'untitled'
if self.role == base.HOST_ROLE:
message = '%s sharing %s with you wants to host...' % (self.str(), view_name)
else:
message = '%s sharing %s with you wants you to host...' % (self.str(), view_name)
swapping_roles = sublime.ok_cancel_dialog(message)
if swapping_roles:
if self.role == base.HOST_ROLE:
self.role = base.PARTNER_ROLE
self.view.set_read_only(True)
else:
self.role = base.HOST_ROLE
self.view.set_read_only(False)
self.viewMonitorThread = ViewMonitorThread(self)
self.viewMonitorThread.start()
self.sendMessage(base.SWAP_ROLE_ACK)
else:
self.sendMessage(base.SWAP_ROLE_NACK)
self.logger.info('session %s with %s role now changed to %s' % (view_name, self.str(), self.role))
# wait for the swap to complete on the client side... 0.5 second because the message is tiny
time.sleep(0.5)
def onSwapRoleAck(self):
"""
Callback method to respond to accepted role swap response from the connected peer.
The caller of swapRole() waits for this method before actually swapping roles on its side.
"""
if self.role == base.HOST_ROLE:
self.role = base.PARTNER_ROLE
self.view.set_read_only(True)
else:
self.role = base.HOST_ROLE
self.view.set_read_only(False)
self.viewMonitorThread = ViewMonitorThread(self)
self.viewMonitorThread.start()
view_name = self.view.file_name()
if not view_name or (len(view_name) == 0):
view_name = self.view.name()
if not view_name or (len(view_name) == 0):
view_name = 'untitled'
self.logger.info('session %s with %s role now changed to %s' % (view_name, self.str(), self.role))
def onSwapRoleNAck(self):
"""
Callback method to respond to rejected role swap response from the connected peer.
The caller of swapRole() may have this called if the connected peer rejects a swap role request.
"""
if self.role == base.HOST_ROLE:
self.viewMonitorThread = ViewMonitorThread()
self.viewMonitorThread.start()
sublime.message_dialog('%s sharing %s did not want to swap roles' % (self.str(), self.view.file_name()))
def sendViewPositionUpdate(self, centerOnRegion):
"""
Send a window view position update to the peer so they know what
we are looking at.
@param centerOnRegion: C{sublime.Region} of the central-most line of the current visible portion of the view to send to the peer.
"""
status_bar.heartbeat_message('sharing with %s' % self.str())
self.sendMessage(base.POSITION, payload=str(centerOnRegion))
def recvViewPositionUpdate(self, centerOnRegion):
"""
Callback method for handling view position updates from the peer.
@param centerOnRegion: C{sublime.Region} to set as the current center of the view.
"""
self.view.show_at_center(centerOnRegion.begin())
def sendSelectionUpdate(self, selectedRegions):
"""
Send currently selected regions to the peer.
@param selectedRegions: C{sublime.RegionSet} of all selected regions in the current view.
"""
status_bar.heartbeat_message('sharing with %s' % self.str())
self.sendMessage(base.SELECTION, payload=str(selectedRegions))
def recvSelectionUpdate(self, selectedRegions):
"""
Callback method for handling selected regions updates from the peer.
@param selectedRegions: C{sublime.RegionSet} of all selected regions to be set.
"""
self.view.add_regions(self.sharingWithUser, selectedRegions, 'comment', sublime.DRAW_OUTLINED)
def sendEdit(self, editType, content=None):
"""
Send an edit event to the peer.
@param editType: C{str} edit type (see above)
@param content: C{Array} contents of the edit (None-able)
"""
status_bar.heartbeat_message('sharing with %s' % self.str())
self.logger.debug('sending edit: %s %s' %(base.numeric_to_symbolic[editType], content))
if (editType == base.EDIT_TYPE_INSERT) \
or (editType == base.EDIT_TYPE_INSERT_SNIPPET) \
or (editType == base.EDIT_TYPE_PASTE):
self.sendMessage(base.EDIT, editType, payload=content)
else:
self.sendMessage(base.EDIT, editType)
    def recvEdit(self, editType, content):
        """
        Callback method for handling edit events from the peer.
        Temporarily lifts read-only on the shared view, applies the edit,
        then restores read-only.
        @param editType: C{str} edit type (see above)
        @param content: C{Array} contents of the edit (None if delete editType)
        """
        self.view.set_read_only(False)
        if editType == base.EDIT_TYPE_INSERT:
            self.view.run_command('insert', { 'characters': content })
        elif editType == base.EDIT_TYPE_INSERT_SNIPPET:
            self.view.run_command('insert_snippet', { 'contents': content })
        elif editType == base.EDIT_TYPE_LEFT_DELETE:
            self.view.run_command('left_delete')
        elif editType == base.EDIT_TYPE_RIGHT_DELETE:
            self.view.run_command('right_delete')
        elif editType == base.EDIT_TYPE_CUT:
            # faux cut since we are receiving the commands instead of invoking them directly
            self.view.run_command('left_delete')
        elif editType == base.EDIT_TYPE_COPY:
            # we dont actually want to do anything here
            pass
        elif editType == base.EDIT_TYPE_PASTE:
            # faux paste since we are receiving the commands instead of invoking them directly
            # we actually have to handle this as a direct view.replace() call to avoid
            # autoindent which occurs if we use the view.run_command('insert', ...) call
            paste_edit = self.view.begin_edit()
            for region in self.view.sel():
                self.view.replace(paste_edit, region, content)
            self.view.end_edit(paste_edit)
        elif editType == base.EDIT_TYPE_UNDO:
            self.view.run_command('undo')
        elif editType == base.EDIT_TYPE_REDO:
            self.view.run_command('redo')
        elif editType == base.EDIT_TYPE_REDO_OR_REPEAT:
            self.view.run_command('redo_or_repeat')
        elif editType == base.EDIT_TYPE_SOFT_UNDO:
            self.view.run_command('soft_undo')
        elif editType == base.EDIT_TYPE_SOFT_REDO:
            self.view.run_command('soft_redo')
        self.view.set_read_only(True)
def handleViewChanges(self):
"""
Runs on the main UI event loop.
Goes through the list of events queued up to modify the shared view
and applies them to the associated view.
"""
self.toDoToViewQueueLock.acquire()
while len(self.toDoToViewQueue) > 0:
toDo = self.toDoToViewQueue.pop(0)
if len(toDo) == 2:
self.logger.debug('Handling view change %s with size %d payload' % (base.numeric_to_symbolic[toDo[0]], len(toDo[1])))
if (toDo[0] == base.SHARE_VIEW) or (toDo[0] == base.RESHARE_VIEW):
self.totalNewViewSize = 0
if toDo[0] == base.SHARE_VIEW:
self.view = sublime.active_window().new_file()
payloadBits = toDo[1].split('|')
if payloadBits[0] == 'NONAME':
self.view.set_name('SHARING-WITH-%s' % self.sharingWithUser)
else:
self.view.set_name(payloadBits[0])
self.totalNewViewSize = int(payloadBits[1])
else:
# resync event, purge the old view in preparation for the fresh content
self.logger.debug('resyncing view')
self.lastResyncdPosition = 0
self.totalNewViewSize = int(toDo[1])
self.view.set_read_only(True)
self.view.set_scratch(True)
status_bar.progress_message("receiving view from %s" % self.sharingWithUser, self.view.size(), self.totalNewViewSize)
elif toDo[0] == base.VIEW_CHUNK:
self.view.set_read_only(False)
self.viewPopulateEdit = self.view.begin_edit()
# if we are a resync chunk...
if hasattr(self, 'lastResyncdPosition'):
self.view.replace(self.viewPopulateEdit, \
sublime.Region(self.lastResyncdPosition, self.lastResyncdPosition + len(toDo[1])), \
toDo[1])
self.lastResyncdPosition += len(toDo[1])
else:
self.view.insert(self.viewPopulateEdit, self.view.size(), toDo[1])
self.view.end_edit(self.viewPopulateEdit)
self.viewPopulateEdit = None
self.view.set_read_only(True)
status_bar.progress_message("receiving view from %s" % self.sharingWithUser, self.view.size(), self.totalNewViewSize)
elif toDo[0] == base.END_OF_VIEW:
self.view.set_syntax_file(toDo[1])
if hasattr(self, 'lastResyncdPosition'):
del self.lastResyncdPosition
status_bar.progress_message("receiving view from %s" % self.sharingWithUser, self.view.size(), self.totalNewViewSize)
# view is populated and configured, lets share!
self.onStartCollab()
elif toDo[0] == base.SELECTION:
status_bar.heartbeat_message('sharing with %s' % self.str())
regions = []
for regionMatch in REGION_PATTERN.finditer(toDo[1]):
regions.append(sublime.Region(int(regionMatch.group(1)), int(regionMatch.group(2))))
self.recvSelectionUpdate(regions)
elif toDo[0] == base.POSITION:
status_bar.heartbeat_message('sharing with %s' % self.str())
regionMatch = REGION_PATTERN.search(toDo[1])
if regionMatch:
self.recvViewPositionUpdate(sublime.Region(int(regionMatch.group(1)), int(regionMatch.group(2))))
elif len(toDo) == 3:
status_bar.heartbeat_message('sharing with %s' % self.str())
# edit event
assert toDo[0] == base.EDIT
# make the shared selection the ACTUAL selection
self.view.sel().clear()
for region in self.view.get_regions(self.sharingWithUser):
self.view.sel().add(region)
self.view.erase_regions(self.sharingWithUser)
self.recvEdit(toDo[1], toDo[2])
self.toDoToViewQueueLock.release()
def checkViewSyncState(self, peerViewSize):
"""
Compares a received view size with this sides' view size.... if they don't match a resync event is
triggered.
"""
if self.view.size() != peerViewSize:
self.logger.info('view out of sync!')
self.sendMessage(base.VIEW_RESYNC)
def recvd_CONNECTED(self, messageSubType, payload):
"""
Callback method for the connection confirmation handshake between
client and server.
"""
if self.peerType == base.CLIENT:
if self.state == base.STATE_CONNECTING:
self.state = base.STATE_CONNECTED
self.logger.info('Connected to peer: %s' % self.sharingWithUser)
else:
self.logger.error('Received CONNECTED message from server-peer when in state %s' % self.state)
else:
## server/initiator side of the wire...
# client is connected, send ACK and set our state to be connected
self.sendMessage(base.CONNECTED)
self.state = base.STATE_CONNECTED
self.logger.info('Connected to peer: %s' % self.sharingWithUser)
self.notify(collab_event.ESTABLISHED_SESSION, self)
def recvd_DISCONNECT(self, messageSubType=None, payload=''):
self.onDisconnect()
def recvd_SHARE_VIEW(self, messageSubType, payload):
self.toDoToViewQueueLock.acquire()
self.toDoToViewQueue.append((base.SHARE_VIEW, payload))
self.toDoToViewQueueLock.release()
self.sendMessage(base.SHARE_VIEW_ACK)
self.handleViewChanges()
def recvd_RESHARE_VIEW(self, messageSubType, payload):
self.toDoToViewQueueLock.acquire()
self.toDoToViewQueue.append((base.RESHARE_VIEW, payload))
self.toDoToViewQueueLock.release()
self.sendMessage(base.SHARE_VIEW_ACK)
self.handleViewChanges()
def recvd_SHARE_VIEW_ACK(self, messageSubType, payload):
self.ackdChunks = []
def recvd_VIEW_CHUNK(self, messageSubType, payload):
self.toDoToViewQueueLock.acquire()
self.toDoToViewQueue.append((base.VIEW_CHUNK, payload))
self.toDoToViewQueueLock.release()
self.sendMessage(base.VIEW_CHUNK_ACK, payload=str(len(payload)))
self.handleViewChanges()
def recvd_VIEW_CHUNK_ACK(self, messageSubType, payload):
ackdChunkSize = int(payload)
self.ackdChunks.append(ackdChunkSize)
def recvd_END_OF_VIEW(self, messageSubType, payload):
self.toDoToViewQueueLock.acquire()
self.toDoToViewQueue.append((base.END_OF_VIEW, payload))
self.toDoToViewQueueLock.release()
self.sendMessage(base.END_OF_VIEW_ACK)
self.handleViewChanges()
def recvd_END_OF_VIEW_ACK(self, messageSubType, payload):
if self.toAck == self.ackdChunks:
self.toAck = None
self.ackdChunks = None
else:
self.logger.error('Sent %s chunks of data to peer but peer received %s chunks of data' % (self.toAck, self.ackdChunks))
self.toAck = None
self.ackdChunks = None
self.sendMessage(base.BAD_VIEW_SEND)
self.disconnect()
def recvd_SELECTION(self, messageSubType, payload):
# self.logger.debug('selection change: %s' % payload)
self.toDoToViewQueueLock.acquire()
self.toDoToViewQueue.append((base.SELECTION, payload))
self.toDoToViewQueueLock.release()
self.handleViewChanges()
def recvd_POSITION(self, messageSubType, payload):
self.toDoToViewQueueLock.acquire()
self.toDoToViewQueue.append((base.POSITION, payload))
self.toDoToViewQueueLock.release()
self.handleViewChanges()
def recvd_EDIT(self, messageSubType, payload):
self.toDoToViewQueueLock.acquire()
self.toDoToViewQueue.append((base.EDIT, messageSubType, payload))
self.toDoToViewQueueLock.release()
self.handleViewChanges()
def recvd_SWAP_ROLE(self, messageSubType, payload):
self.onSwapRole()
def recvd_SWAP_ROLE_ACK(self, messageSubType, payload):
self.onSwapRoleAck()
def recvd_SWAP_ROLE_NACK(self, messageSubType, payload):
self.onSwapRoleNAck()
def recvd_VIEW_SYNC(self, messageSubType, payload):
self.toDoToViewQueueLock.acquire()
# no pending edits... safe to check
if len(self.toDoToViewQueue) == 0:
self.checkViewSyncState(int(payload))
self.toDoToViewQueueLock.release()
def recvd_VIEW_RESYNC(self, messageSubType, payload):
self.resyncCollab()
def recvdUnknown(self, messageType, messageSubType, payload):
self.logger.warn('Received unknown message: %s, %s, %s' % (messageType, messageSubType, payload))
    def stringReceived(self, data):
        """
        Int32StringReceiver callback: decode and dispatch one framed message.

        The first messageHeaderSize bytes hold '!HBB' (magic number, message
        type, message sub-type); the rest of the frame is the payload.
        Dispatches to self.recvd_<SYMBOLIC_TYPE>(subTypeNum, payload) when
        such a handler exists, otherwise to recvdUnknown().
        """
        magicNumber, msgTypeNum, msgSubTypeNum = struct.unpack(self.messageHeaderFmt, data[:self.messageHeaderSize])
        assert magicNumber == base.MAGIC_NUMBER
        msgType = base.numeric_to_symbolic[msgTypeNum]
        msgSubType = base.numeric_to_symbolic[msgSubTypeNum]
        payload = data[self.messageHeaderSize:]
        self.logger.debug('RECVD: %s-%s[%s]' % (msgType, msgSubType, payload))
        # handler methods follow the recvd_<TYPE> naming convention
        method = getattr(self, "recvd_%s" % msgType, None)
        if method is not None:
            method(msgSubTypeNum, payload)
        else:
            self.recvdUnknown(msgType, msgSubType, payload)
def connectionLost(self, reason):
registry.removeSession(self)
if self.peerType == base.CLIENT:
# ignore this, clientConnectionLost() below will also be called
return
self.state = base.STATE_DISCONNECTED
if error.ConnectionDone == reason.type:
self.disconnect()
else:
status_bar.heartbeat_message('lost share session with %s' % self.str())
# may want to reconnect, but for now lets print why
self.logger.error('Connection lost: %s - %s' % (reason.type, reason.value))
#*** internet.base.BaseProtocol (via basic.Int32StringReceiver) method implementations ***#
def connectionMade(self):
if self.peerType == base.CLIENT:
pass
else:
pass
#*** protocol.Factory method implementations ***#
def buildProtocol(self, addr):
self.logger.debug('building protocol for %s' % self.peerType)
if self.peerType == base.CLIENT:
self.logger.debug('Connected to peer at %s:%d' % (self.host, self.port))
self.sendMessage(base.CONNECTED)
return self
#*** protocol.ClientFactory method implementations ***#
def clientConnectionLost(self, connector, reason):
registry.removeSession(self)
self.state = base.STATE_DISCONNECTED
if error.ConnectionDone == reason.type:
self.disconnect()
else:
status_bar.status_message('lost share session with %s' % self.str())
# may want to reconnect, but for now lets print why
self.logger.error('Connection lost: %s - %s' % (reason.type, reason.value))
def clientConnectionFailed(self, connector, reason):
self.logger.error('Connection failed: %s - %s' % (reason.type, reason.value))
registry.removeSession(self)
self.state = base.STATE_DISCONNECTED
if (error.ConnectionRefusedError == reason.type) or (error.TCPTimedOutError == reason.type) or (error.TimeoutError == reason.type):
if self.peerType == base.CLIENT:
self.notify(collab_event.FAILED_SESSION, self.sharingWithUser)
self.disconnect()
#*** helper functions ***#
    def sendMessage(self, messageType, messageSubType=base.EDIT_TYPE_NA, payload=''):
        """
        Pack a '!HBB' header (magic number, type, sub-type) in front of the
        payload and send it as one length-prefixed string.  The actual send
        is scheduled onto the reactor thread, so this is callable from any
        thread.
        @param messageType: numeric message type constant from base
        @param messageSubType: numeric edit sub-type, EDIT_TYPE_NA by default
        @param payload: C{str} message body, may be empty
        """
        self.logger.debug('SEND: %s-%s[bytes: %d]' % (base.numeric_to_symbolic[messageType], base.numeric_to_symbolic[messageSubType], len(payload)))
        reactor.callFromThread(self.sendString, struct.pack(self.messageHeaderFmt, base.MAGIC_NUMBER, messageType, messageSubType) + payload.encode())
| {
"repo_name": "nlloyd/SubliminalCollaborator",
"path": "libs/sub_collab/peer/basic.py",
"copies": "1",
"size": "33047",
"license": "apache-2.0",
"hash": -8426019828940414000,
"line_mean": 41.1517857143,
"line_max": 150,
"alpha_frac": 0.6201168033,
"autogenerated": false,
"ratio": 3.992147861802368,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5112264665102367,
"avg_score": null,
"num_lines": null
} |
# All of the functions allow formats to be a dtype
__all__ = ['record', 'recarray', 'format_parser']
import numeric as sb
from defchararray import chararray
import numerictypes as nt
import types
import os
import sys
ndarray = sb.ndarray
_byteorderconv = {'b':'>',
'l':'<',
'n':'=',
'B':'>',
'L':'<',
'N':'=',
'S':'s',
's':'s',
'>':'>',
'<':'<',
'=':'=',
'|':'|',
'I':'|',
'i':'|'}
# formats regular expression
# allows multidimension spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
numfmt = nt.typeDict
_typestr = nt._typestr
def find_duplicate(list):
    """Find duplication in a list, return a list of duplicated elements

    Duplicated elements appear in the result in order of their first
    occurrence, each listed once.
    """
    dup = []
    for idx, item in enumerate(list):
        # scan ahead: item is a duplicate iff it reappears later
        if item in list[idx + 1:] and item not in dup:
            dup.append(item)
    return dup
class format_parser:
    """Parse a field-format specification into a struct dtype.

    The resulting dtype is stored in self._descr for use by recarray
    construction.  Parameters mirror the recarray constructor: formats
    (comma-separated string or list of format strings), names, titles,
    an aligned flag and an optional byteorder character understood by
    _byteorderconv.
    """
    def __init__(self, formats, names, titles, aligned=False, byteorder=None):
        self._parseFormats(formats, aligned)
        self._setfieldnames(names, titles)
        self._createdescr(byteorder)
    def _parseFormats(self, formats, aligned=0):
        """ Parse the field formats """
        if formats is None:
            raise ValueError, "Need formats argument"
        if isinstance(formats, list):
            if len(formats) < 2:
                # append an empty format so the joined string parses as a
                # comma-separated (struct) spec even for one field
                formats.append('')
            formats = ','.join(formats)
        dtype = sb.dtype(formats, aligned)
        fields = dtype.fields
        if fields is None:
            # a single non-struct format: wrap it as a one-field struct
            dtype = sb.dtype([('f1', dtype)], aligned)
            fields = dtype.fields
        keys = dtype.names
        # per-field dtypes and byte offsets, in field order
        self._f_formats = [fields[key][0] for key in keys]
        self._offsets = [fields[key][1] for key in keys]
        self._nfields = len(keys)
    def _setfieldnames(self, names, titles):
        """convert input field names into a list and assign to the _names
        attribute """
        if (names):
            if (type(names) in [types.ListType, types.TupleType]):
                pass
            elif (type(names) == types.StringType):
                names = names.split(',')
            else:
                raise NameError, "illegal input names %s" % `names`
            # excess names beyond the number of fields are silently dropped
            self._names = [n.strip() for n in names[:self._nfields]]
        else:
            self._names = []
        # if the names are not specified, they will be assigned as
        # "f0, f1, f2,..."
        # if not enough names are specified, they will be assigned as "f[n],
        # f[n+1],..." etc. where n is the number of specified names..."
        self._names += ['f%d' % i for i in range(len(self._names),
                                                 self._nfields)]
        # check for redundant names
        _dup = find_duplicate(self._names)
        if _dup:
            raise ValueError, "Duplicate field names: %s" % _dup
        if (titles):
            self._titles = [n.strip() for n in titles[:self._nfields]]
        else:
            self._titles = []
            titles = []
        # pad with None so every field has a title slot
        if (self._nfields > len(titles)):
            self._titles += [None]*(self._nfields-len(titles))
    def _createdescr(self, byteorder):
        # assemble the final struct dtype from the parsed pieces
        descr = sb.dtype({'names':self._names,
                          'formats':self._f_formats,
                          'offsets':self._offsets,
                          'titles':self._titles})
        if (byteorder is not None):
            # normalize the byteorder character and apply it to all fields
            byteorder = _byteorderconv[byteorder[0]]
            descr = descr.newbyteorder(byteorder)
        self._descr = descr
class record(nt.void):
    """Scalar type of recarray elements: a void scalar whose fields can
    also be read and written as attributes."""
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return str(self.item())
    def __getattribute__(self, attr):
        # these must always resolve to the real void machinery, never to
        # same-named fields
        if attr in ['setfield', 'getfield', 'dtype']:
            return nt.void.__getattribute__(self, attr)
        # normal attribute lookup first ...
        try:
            return nt.void.__getattribute__(self, attr)
        except AttributeError:
            pass
        # ... then fall back to interpreting attr as a field name
        fielddict = nt.void.__getattribute__(self, 'dtype').fields
        res = fielddict.get(attr, None)
        if res:
            obj = self.getfield(*res[:2])
            # if it has fields return a recarray,
            # if it's a string ('SU') return a chararray
            # otherwise return the object
            try:
                dt = obj.dtype
            except AttributeError:
                # plain scalar without a dtype: return as-is
                return obj
            if dt.fields:
                return obj.view(obj.__class__)
            if dt.char in 'SU':
                return obj.view(chararray)
            return obj
        else:
            raise AttributeError, "'record' object has no "\
                    "attribute '%s'" % attr
    def __setattr__(self, attr, val):
        # the field-access plumbing itself is read-only
        if attr in ['setfield', 'getfield', 'dtype']:
            raise AttributeError, "Cannot set '%s' attribute" % attr
        fielddict = nt.void.__getattribute__(self, 'dtype').fields
        res = fielddict.get(attr, None)
        if res:
            # attr names a field: write through setfield
            return self.setfield(val, *res[:2])
        else:
            if getattr(self,attr,None):
                # existing non-field attribute: regular assignment
                return nt.void.__setattr__(self, attr, val)
            else:
                raise AttributeError, "'record' object has no "\
                        "attribute '%s'" % attr
    def pprint(self):
        # pretty-print all fields
        names = self.dtype.names
        maxlen = max([len(name) for name in names])
        rows = []
        # right-align each field name to the longest name
        fmt = '%% %ds: %%s' %maxlen
        for name in names:
            rows.append(fmt%(name, getattr(self, name)))
        return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
# named fields already) The biggest difference is that it can use
# attribute-lookup to find the fields and it is constructed using
# a record.
# If byteorder is given it forces a particular byteorder on all
# the fields (and any subfields)
class recarray(ndarray):
    """ndarray subclass whose fields can be accessed as attributes and
    whose scalar elements are record objects.

    Construction mirrors ndarray.__new__ but also accepts a
    formats/names/titles/aligned/byteorder specification (handled by
    format_parser) in place of an explicit dtype.
    """
    def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
                formats=None, names=None, titles=None,
                byteorder=None, aligned=False):
        # either an explicit dtype or a formats-based specification
        if dtype is not None:
            descr = sb.dtype(dtype)
        else:
            descr = format_parser(formats, names, titles, aligned, byteorder)._descr

        if buf is None:
            self = ndarray.__new__(subtype, shape, (record, descr))
        else:
            # wrap caller-supplied memory rather than allocating
            self = ndarray.__new__(subtype, shape, (record, descr),
                                      buffer=buf, offset=offset,
                                      strides=strides)
        return self
    def __getattribute__(self, attr):
        # regular attribute lookup first; fall through only on failure
        try:
            return object.__getattribute__(self, attr)
        except AttributeError: # attr must be a fieldname
            pass
        fielddict = ndarray.__getattribute__(self,'dtype').fields
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError, "record array has no attribute %s" % attr
        obj = self.getfield(*res)
        # if it has fields return a recarray, otherwise return
        # normal array
        if obj.dtype.fields:
            return obj
        if obj.dtype.char in 'SU':
            return obj.view(chararray)
        return obj.view(ndarray)

    # Save the dictionary
    # If the attr is a field name and not in the saved dictionary
    # Undo any "setting" of the attribute and do a setfield
    # Thus, you can't create attributes on-the-fly that are field names.
    def __setattr__(self, attr, val):
        newattr = attr not in self.__dict__
        try:
            ret = object.__setattr__(self, attr, val)
        except:
            fielddict = ndarray.__getattribute__(self,'dtype').fields or {}
            if attr not in fielddict:
                # not a field: re-raise the original assignment error
                exctype, value = sys.exc_info()[:2]
                raise exctype, value
        else:
            fielddict = ndarray.__getattribute__(self,'dtype').fields or {}
            if attr not in fielddict:
                return ret
            if newattr: # We just added this one
                try: # or this setattr worked on an internal
                     # attribute.
                    object.__delattr__(self, attr)
                except:
                    return ret
        try:
            res = fielddict[attr][:2]
        except (TypeError,KeyError):
            raise AttributeError, "record array has no attribute %s" % attr
        return self.setfield(val, *res)
    def __getitem__(self, indx):
        obj = ndarray.__getitem__(self, indx)
        # sub-arrays of builtin dtype drop the recarray wrapper
        if (isinstance(obj, ndarray) and obj.dtype.isbuiltin):
            return obj.view(ndarray)
        return obj
    def field(self, attr, val=None):
        """Get (when val is None) or set a field, by name or by index."""
        if isinstance(attr, int):
            # translate a positional index into the field name
            names = ndarray.__getattribute__(self,'dtype').names
            attr = names[attr]
        fielddict = ndarray.__getattribute__(self,'dtype').fields
        res = fielddict[attr][:2]
        if val is None:
            obj = self.getfield(*res)
            if obj.dtype.fields:
                return obj
            if obj.dtype.char in 'SU':
                return obj.view(chararray)
            return obj.view(ndarray)
        else:
            return self.setfield(val, *res)
    def view(self, obj):
        """View the array as another ndarray subclass or another dtype."""
        try:
            if issubclass(obj, ndarray):
                return ndarray.view(self, obj)
        except TypeError:
            # obj was not a class: treat it as a dtype specifier below
            pass
        dtype = sb.dtype(obj)
        if dtype.fields is None:
            # plain (non-struct) dtype: drop the recarray wrapper as well
            return self.__array__().view(dtype)
        return ndarray.view(self, obj)
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
               names=None, titles=None, aligned=False, byteorder=None):
    """ create a record array from a (flat) list of arrays

    >>> x1=N.array([1,2,3,4])
    >>> x2=N.array(['a','dd','xyz','12'])
    >>> x3=N.array([1.1,2,3,4])
    >>> r = fromarrays([x1,x2,x3],names='a,b,c')
    >>> print r[1]
    (2, 'dd', 2.0)
    >>> x1[1]=34
    >>> r.a
    array([1, 2, 3, 4])
    """
    arrayList = [sb.asarray(x) for x in arrayList]
    # Default the record-array shape to that of the first column array.
    if shape is None or shape == 0:
        shape = arrayList[0].shape
    if isinstance(shape, int):
        shape = (shape,)
    if formats is None and dtype is None:
        # go through each object in the list to see if it is an ndarray
        # and determine the formats.
        formats = ''
        for obj in arrayList:
            if not isinstance(obj, ndarray):
                raise ValueError, "item in the array list must be an ndarray."
            formats += _typestr[obj.dtype.type]
            if issubclass(obj.dtype.type, nt.flexible):
                # Flexible types (strings) carry their itemsize in the format.
                formats += `obj.itemsize`
            formats += ','
        formats = formats[:-1]
    if dtype is not None:
        descr = sb.dtype(dtype)
        _names = descr.names
    else:
        parsed = format_parser(formats, names, titles, aligned, byteorder)
        _names = parsed._names
        descr = parsed._descr
    # Determine shape from data-type.
    if len(descr) != len(arrayList):
        raise ValueError, "mismatch between the number of fields "\
              "and the number of arrays"
    d0 = descr[0].shape
    nn = len(d0)
    if nn > 0:
        # Trailing dimensions belong to the field's sub-array, not to the
        # record array itself.
        shape = shape[:-nn]
    for k, obj in enumerate(arrayList):
        # Each input array must match the record shape once its own field
        # sub-array dimensions are stripped off.
        nn = len(descr[k].shape)
        testshape = obj.shape[:len(obj.shape)-nn]
        if testshape != shape:
            raise ValueError, "array-shape mismatch in array %d" % k
    _array = recarray(shape, descr)
    # populate the record array (makes a copy)
    for i in range(len(arrayList)):
        _array[_names[i]] = arrayList[i]
    return _array
# shape must be 1-d if you use list of lists...
def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
                titles=None, aligned=False, byteorder=None):
    """ create a recarray from a list of records in text form

    The data in the same field can be heterogeneous, they will be promoted
    to the highest data type. This method is intended for creating
    smaller record arrays. If used to create large array without formats
    defined

    r=fromrecords([(2,3.,'abc')]*100000)

    it can be slow.

    If formats is None, then this will auto-detect formats. Use list of
    tuples rather than list of lists for faster processing.

    >>> r=fromrecords([(456,'dbe',1.2),(2,'de',1.3)],names='col1,col2,col3')
    >>> print r[0]
    (456, 'dbe', 1.2)
    >>> r.col1
    array([456,   2])
    >>> r.col2
    chararray(['dbe', 'de'],
          dtype='|S3')
    >>> import cPickle
    >>> print cPickle.loads(cPickle.dumps(r))
    [(456, 'dbe', 1.2) (2, 'de', 1.3)]
    """
    nfields = len(recList[0])
    if formats is None and dtype is None:  # slower
        # Auto-detect: transpose through an object array and let fromarrays
        # work out each column's format.
        obj = sb.array(recList, dtype=object)
        arrlist = [sb.array(obj[...,i].tolist()) for i in xrange(nfields)]
        return fromarrays(arrlist, formats=formats, shape=shape, names=names,
                          titles=titles, aligned=aligned, byteorder=byteorder)
    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr
    try:
        retval = sb.array(recList, dtype = descr)
    except TypeError:  # list of lists instead of list of tuples
        if (shape is None or shape == 0):
            shape = len(recList)
        if isinstance(shape, (int, long)):
            shape = (shape,)
        if len(shape) > 1:
            raise ValueError, "Can only deal with 1-d array."
        _array = recarray(shape, descr)
        # Assign record by record; each inner list must be coerced to a tuple.
        for k in xrange(_array.size):
            _array[k] = tuple(recList[k])
        return _array
    else:
        if shape is not None and retval.shape != shape:
            retval.shape = shape
    res = retval.view(recarray)
    # Wrap the structured dtype in the `record` scalar type.
    res.dtype = sb.dtype((record, res.dtype))
    return res
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
               names=None, titles=None, aligned=False, byteorder=None):
    """ create a (read-only) record array from binary data contained in
    a string"""
    # A format description is mandatory: the raw bytes carry no type info.
    if dtype is None and formats is None:
        raise ValueError, "Must have dtype= or formats="
    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr
    itemsize = descr.itemsize
    # Infer the number of complete records remaining after `offset`.
    if (shape is None or shape == 0 or shape == -1):
        shape = (len(datastring)-offset) / itemsize
    # The string is used directly as the buffer, so the result shares its
    # (read-only) memory.
    _array = recarray(shape, descr, buf=datastring, offset=offset)
    return _array
def get_remaining_size(fd):
    """Return the number of bytes between *fd*'s current position and its end.

    Works on real files via ``os.fstat``; file-like objects without a
    ``fileno`` fall back to ``os.path.getsize`` on their ``name`` attribute.
    """
    try:
        fileno = fd.fileno()
    except AttributeError:
        # Not a real OS-level file; use the path-based size instead.
        return os.path.getsize(fd.name) - fd.tell()
    return os.fstat(fileno).st_size - fd.tell()
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
             names=None, titles=None, aligned=False, byteorder=None):
    """Create an array from binary file data

    If file is a string then that file is opened, else it is assumed
    to be a file object.

    >>> from tempfile import TemporaryFile
    >>> a = N.empty(10,dtype='f8,i4,a5')
    >>> a[5] = (0.5,10,'abcde')
    >>>
    >>> fd=TemporaryFile()
    >>> a = a.newbyteorder('<')
    >>> a.tofile(fd)
    >>>
    >>> fd.seek(0)
    >>> r=fromfile(fd, formats='f8,i4,a5', shape=10, byteorder='<')
    >>> print r[5]
    (0.5, 10, 'abcde')
    >>> r.shape
    (10,)
    """
    # -1 in the shape means "infer this dimension from the file size".
    if (shape is None or shape == 0):
        shape = (-1,)
    elif isinstance(shape, (int, long)):
        shape = (shape,)
    # `name` records whether we opened the file ourselves and must close it.
    name = 0
    if isinstance(fd, str):
        name = 1
        fd = open(fd, 'rb')
    if (offset > 0):
        # Seek relative to the current position.
        fd.seek(offset, 1)
    size = get_remaining_size(fd)
    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr
    itemsize = descr.itemsize
    shapeprod = sb.array(shape).prod()
    shapesize = shapeprod*itemsize
    if shapesize < 0:
        # Negative product implies exactly one -1 entry: replace it with
        # the count that fits in the remaining bytes.
        shape = list(shape)
        shape[ shape.index(-1) ] = size / -shapesize
        shape = tuple(shape)
        shapeprod = sb.array(shape).prod()
    nbytes = shapeprod*itemsize
    if nbytes > size:
        raise ValueError(
                "Not enough bytes left in file for specified shape and type")
    # create the array
    _array = recarray(shape, descr)
    nbytesread = fd.readinto(_array.data)
    if nbytesread != nbytes:
        raise IOError("Didn't read as many bytes as expected")
    if name:
        fd.close()
    return _array
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
          names=None, titles=None, aligned=False, byteorder=None, copy=True):
    """Construct a record array from a wide-variety of objects.
    """
    # A dtype (or formats) is mandatory when there is no data to infer from.
    if isinstance(obj, (type(None), str, file)) and (formats is None) \
           and (dtype is None):
        raise ValueError("Must define formats (or dtype) if object is "\
                         "None, string, or an open file")
    kwds = {}
    if dtype is not None:
        dtype = sb.dtype(dtype)
    elif formats is not None:
        dtype = format_parser(formats, names, titles,
                              aligned, byteorder)._descr
    else:
        # Neither dtype nor formats given: forward the raw format
        # description to the helper constructors instead.
        kwds = {'formats': formats,
                'names' : names,
                'titles' : titles,
                'aligned' : aligned,
                'byteorder' : byteorder
                }
    # Dispatch on the kind of input object.
    if obj is None:
        if shape is None:
            raise ValueError("Must define a shape if obj is None")
        return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
    elif isinstance(obj, str):
        return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
    elif isinstance(obj, (list, tuple)):
        # A list of lists/tuples is a list of records; otherwise treat it
        # as a list of per-field column arrays.
        if isinstance(obj[0], (tuple, list)):
            return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
        else:
            return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
    elif isinstance(obj, recarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        return new
    elif isinstance(obj, file):
        return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
    elif isinstance(obj, ndarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        res = new.view(recarray)
        # Only void (structured) dtypes are wrapped in the record scalar.
        if issubclass(res.dtype.type, nt.void):
            res.dtype = sb.dtype((record, res.dtype))
        return res
    else:
        # Last resort: anything exposing the array interface protocol.
        interface = getattr(obj, "__array_interface__", None)
        if interface is None or not isinstance(interface, dict):
            raise ValueError("Unknown input type")
        obj = sb.array(obj)
        if dtype is not None and (obj.dtype != dtype):
            obj = obj.view(dtype)
        res = obj.view(recarray)
        if issubclass(res.dtype.type, nt.void):
            res.dtype = sb.dtype((record, res.dtype))
        return res
| {
"repo_name": "santisiri/popego",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/core/records.py",
"copies": "1",
"size": "19390",
"license": "bsd-3-clause",
"hash": 1869564355852155400,
"line_mean": 31.9761904762,
"line_max": 84,
"alpha_frac": 0.5484270242,
"autogenerated": false,
"ratio": 4.0095119933829615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5057939017582962,
"avg_score": null,
"num_lines": null
} |
# All of the graph-related functionality for MapGeist.
# Based on NetworkX, the Python library for working with
# graphs.
from networkx import (Graph, minimum_spanning_tree,)
def generate_complete_graph(nodes, distance_matrix):
    """
    Build and return a complete NetworkX ``Graph`` over *nodes*.

    Edge weights are taken from *distance_matrix*, a dict of dicts mapping
    node -> node -> distance. The result contains every pair of distinct
    nodes exactly once and has no self loops.
    """
    graph = Graph()
    # One weighted edge per unordered pair (i < j), so no self loops.
    weighted_edges = [
        (nodes[i], nodes[j], distance_matrix[nodes[i]][nodes[j]])
        for i in range(len(nodes) - 1)
        for j in range(i + 1, len(nodes))
    ]
    graph.add_weighted_edges_from(weighted_edges)
    return graph
def generate_mst(graph):
    """
    Thin wrapper around NetworkX's minimum spanning tree computation.
    """
    mst = minimum_spanning_tree(graph)
    return mst
| {
"repo_name": "sachinrjoglekar/MapGeist",
"path": "mapgeist/graphs/graph_functions.py",
"copies": "1",
"size": "1124",
"license": "mit",
"hash": -8380887761824499000,
"line_mean": 27.1,
"line_max": 64,
"alpha_frac": 0.6663701068,
"autogenerated": false,
"ratio": 4.014285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5180655821085713,
"avg_score": null,
"num_lines": null
} |
"""All of the kinds of traces in UConnRCMPy"""
# System imports
# Third-party imports
import numpy as np
import cantera as ct
from scipy import signal as sig
from scipy.interpolate import UnivariateSpline
from scipy.stats import linregress
# Local imports
from .constants import (one_atm_in_bar,
one_atm_in_torr,
one_bar_in_pa,
)
class VoltageTrace(object):
    """Voltage signal from a single experiment.

    Parameters
    ----------
    file_path : `pathlib.Path`
        `~pathlib.Path` object associated with the particular experiment

    Attributes
    ----------
    signal : `numpy.ndarray`
        2-D array containing the raw signal from the experimental
        text file. First column is the time, second column is the
        voltage.
    time : `numpy.ndarray`
        The time loaded from the signal trace
    frequency : `int`
        The sampling frequency of the pressure trace
    filtered_voltage : `numpy.ndarray`
        The voltage trace after filtering

    Note
    ----
    The first sample of the voltage is set equal to the
    mean of the first 200 points to eliminate DAQ startup
    effects seen in some data.
    """

    def __init__(self, file_path):
        self.file_path = file_path
        self.signal = np.genfromtxt(str(self.file_path))
        self.time = self.signal[:, 0]
        # Sampling frequency from the first time step. NOTE(review):
        # np.rint returns a float even though the docstring advertises int.
        self.frequency = np.rint(1/self.time[1])
        # Assigning None triggers the property setter below, which estimates
        # an optimal cutoff from self.signal -- so the signal must already
        # be loaded at this point.
        self.filter_frequency = None
        # Replace the first sample with the mean of the first 200 points
        # to remove DAQ startup transients (see class Note).
        self.signal[0, 1] = np.mean(self.signal[:200, 1])
        self.filtered_voltage = self.filtering(self.signal[:, 1])

    def __repr__(self):
        return 'VoltageTrace(file_path={self.file_path!r})'.format(self=self)

    @property
    def filter_frequency(self):
        """The cutoff frequency for the low-pass filter

        When setting the frequency, if the ``value`` is `None`,
        determines the optimal cutoff frequency for a first-order
        Butterworth low-pass filter by analyzing the root-mean-squared
        residuals for a sequence of cutoff frequencies. The residuals
        plotted as a function of the cutoff frequency tend to have a
        linear portion for a range of cutoff frequencies. Analysis of
        typical data files from our RCM has shown this range to start
        near ``nyquist_freq*0.05``. The end point is determined by
        looping through values from ``nyquist_freq*0.5`` to
        ``nyquist_freq*0.1`` and finding the location where the
        coefficient of determination of a linear fit is maximized.
        A line is fit to this portion of the residuals curve and
        the intersection point of a horizontal line through the
        y-intercept of the fit and the residuals curve is used to
        determine the optimal cutoff frequency (see Figure 2 in Yu
        et al. [1]_). The methodology is described by Yu et al. [1]_,
        and the code is modifed from Duarte [2]_.

        References
        ----------
        .. [1] B. Yu, D. Gabriel, L. Noble, and K.N. An, "Estimate of
            the Optimum Cutoff Frequency for the Butterworth Low-Pass
            Digital Filter", Journal of Applied Biomechanics, Vol. 15,
            pp. 318-329, 1999.
            DOI: `10.1123/jab.15.3.318 <http://dx.doi.org/10.1123/jab.15.3.318>`_
        .. [2] M. Duarte, "Residual Analysis", v.3 2014/06/13,
            http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/ResidualAnalysis.ipynb
        """
        return self._filter_frequency

    @filter_frequency.setter
    def filter_frequency(self, value):
        if value is None:
            nyquist_freq = self.frequency/2.0
            n_freqs = 101
            freqs = np.linspace(nyquist_freq/n_freqs, nyquist_freq, n_freqs)
            resid = np.zeros(n_freqs)
            # RMS residual of the filtered signal for each candidate cutoff.
            for i, fc in enumerate(freqs):
                b, a = sig.butter(1, fc/nyquist_freq)
                yf = sig.filtfilt(b, a, self.signal[:, 1])
                resid[i] = np.sqrt(np.mean((yf - self.signal[:, 1])**2))
            # Search for the end point of the linear portion that maximizes
            # the coefficient of determination of a straight-line fit.
            end_points = np.linspace(0.5, 0.1, 9)
            r_sq = np.zeros(len(end_points))
            intercepts = np.zeros(len(end_points))
            for i, end_point in enumerate(end_points):
                # The indices of the frequencies used for fitting the straight line
                fit_freqs = np.arange(np.nonzero(freqs >= nyquist_freq*0.05)[0][0],
                                      np.nonzero(freqs >= nyquist_freq*end_point)[0][0] + 1)
                _, intercepts[i], r, _, _ = linregress(freqs[fit_freqs], resid[fit_freqs])
                r_sq[i] = r**2
            intercept = intercepts[np.argmax(r_sq)]
            # The UnivariateSpline with s=0 forces the spline fit through every
            # data point in the array. The residuals are shifted down by the
            # intercept so that the root of the spline is the optimum cutoff
            # frequency
            try:
                self._filter_frequency = UnivariateSpline(freqs, resid - intercept, s=0).roots()[0]
            except IndexError:
                # No root found: fall back to asking the user interactively.
                self._filter_frequency = float(input(
                    'Automatic setting of the filter frequency failed. Please input a frequency; '
                    'typical values are between 1000-5000 Hz: '))
        else:
            self._filter_frequency = value

    def change_filter_freq(self, value):
        """Change the filter frequency

        This method is intended to be used after the `VoltageTrace` has
        already been created. It sets the `~VoltageTrace.filter_frequency`
        attribute and then runs the filtering.

        Parameters
        ----------
        value : `float` or `None`
            Value for the filter frequency. If `None`, an optimal frequency
            will be estimated by the procedure detailed in the documentation
            for the `~VoltageTrace.filter_frequency` attribute.
        """
        self.filter_frequency = value
        self.filtered_voltage = self.filtering(self.signal[:, 1])

    def savetxt(self, filename, **kwargs):
        """Save a text file output of the voltage trace.

        Save a text file with the time in the first column and the filtered
        voltage in the second column. The keyword arguments are the same as
        `numpy.savetxt`.

        Parameters
        ----------
        filename : `str`
            Filename of the output file
        """
        # Bug fix: np.vstack takes a single sequence of arrays. The original
        # passed the two arrays as separate positional arguments, which
        # raises a TypeError at runtime.
        np.savetxt(fname=filename, X=np.vstack((self.time, self.filtered_voltage)).T, **kwargs)

    def filtering(self, data):
        """Filter the input using a low-pass filter.

        The filter is a first-order Butterworth filter, with the
        cutoff frequency taken from the instance attribute
        `~VoltageTrace.filter_frequency`.

        Parameters
        ----------
        data : `numpy.ndarray`
            The data that should be filtered

        Returns
        -------
        `numpy.ndarray`
            1-D array of the same length as the input data
        """
        nyquist_freq = self.frequency/2.0
        b, a = sig.butter(1, (self.filter_frequency)/nyquist_freq)
        # Zero-phase filtering with odd-extension padding at the edges.
        return sig.filtfilt(b, a, data, padtype='odd', padlen=101, method='pad')
class ExperimentalPressureTrace(object):
    """Pressure trace from a single experiment.

    Parameters
    ----------
    voltage_trace : `VoltageTrace`
        Instance of class containing the voltage trace of the
        experiment.
    initial_pressure_in_torr : `float`
        The initial pressure of the experiment, in units of Torr
    factor : `float`
        The factor set on the charge amplifier

    Attributes
    ----------
    pressure : `numpy.ndarray`
        The pressure trace computed from the filtered voltage trace
    time : `numpy.ndarray`
        A 1-D array containting the time. Copied from
        `VoltageTrace.time`
    frequency : `int`
        Integer sampling frequency of the experiment. Copied from
        `VoltageTrace.frequency`
    p_EOC : `float`
        Pressure at the end of compression
    EOC_idx : `int`
        Integer index in the `pressure` and `time` arrays
        of the end of compression.
    is_reactive : `bool`
        Boolean if the pressure trace represents a reactive or
        or non-reactive experiment
    derivative : `numpy.ndarray`
        1-D array containing the raw derivative computed from the
        `pressure` trace.
    zeroed_time : `numpy.ndarray`
        1-D array containing the time, with the zero point set at
        the end of compression.
    """

    def __init__(self, voltage_trace, initial_pressure_in_torr, factor):
        initial_pressure_in_bar = initial_pressure_in_torr*one_atm_in_bar/one_atm_in_torr
        # Pressure = (voltage relative to its first sample) * amplifier
        # factor + initial absolute pressure.
        self.pressure = (voltage_trace.filtered_voltage - voltage_trace.filtered_voltage[0])
        self.pressure *= factor
        self.pressure += initial_pressure_in_bar
        # Same conversion applied to the unfiltered signal.
        self.raw_pressure = (voltage_trace.signal[:, 1] - voltage_trace.signal[0, 1])
        self.raw_pressure *= factor
        self.raw_pressure += initial_pressure_in_bar
        self.time = voltage_trace.time
        self.frequency = voltage_trace.frequency
        self.p_EOC, self.EOC_idx, self.is_reactive = self.find_EOC()
        self.derivative = self.calculate_derivative(self.pressure, self.time)
        # Smooth the derivative with a moving average 151 points wide
        self.derivative = sig.fftconvolve(self.derivative, np.ones(151)/151, mode='same')
        self.zeroed_time = self.time - self.time[self.EOC_idx]

    def __repr__(self):
        return ('ExperimentalPressureTrace(p_EOC={self.p_EOC!r}, '
                'is_reactive={self.is_reactive!r})').format(self=self)

    def savetxt(self, filename, **kwargs):
        """Save a text file output of the pressure trace.

        Save a text file with the time in the first column and the filtered
        pressure in the second column. The keyword arguments are the same as
        `numpy.savetxt`.

        Parameters
        ----------
        filename : `str`
            Filename of the output file
        """
        # Bug fix: np.vstack takes a single sequence of arrays. The original
        # passed the two arrays as separate positional arguments, which
        # raises a TypeError at runtime.
        np.savetxt(fname=filename, X=np.vstack((self.time, self.pressure)).T, **kwargs)

    def pressure_fit(self, comptime=0.08):
        """Fit a line to the pressure trace before compression starts.

        Parameters
        ----------
        comptime : `float`, optional
            Desired compression time, computed from the EOC, to when
            the pressure fit should start

        Returns
        -------
        `numpy.polyfit`
            Numpy object containing the parameters of the fit
        """
        beg_compress = int(np.floor(self.EOC_idx - comptime*self.frequency))
        time = np.linspace(0, (beg_compress - 1)/self.frequency, beg_compress)
        # Bug fix: copy before patching. The original sliced a view, so the
        # startup-transient patch below silently mutated self.pressure.
        fit_pres = self.pressure[:beg_compress].copy()
        # Flatten DAQ startup transients so they do not skew the fit.
        fit_pres[0:9] = fit_pres[10]
        linear_fit = np.polyfit(time, fit_pres, 1)
        return linear_fit

    def change_EOC_time(self, time, is_reactive=True):
        """Change the EOC time for an experiment

        Parameters
        ----------
        time : `float`
            The new value of the EOC time, in milliseconds relative to
            the current EOC
        is_reactive : `boolean`
            The experiment is reactive or not
        """
        # Convert the millisecond offset to a number of samples.
        offset = int(round(time/1000*self.frequency, 0))
        self.EOC_idx += offset
        if self.EOC_idx <= 0 or self.EOC_idx >= len(self.pressure):
            raise ValueError('EOC index out of range, please check the EOC time on the plot')
        else:
            self.p_EOC = self.pressure[self.EOC_idx]
        self.is_reactive = is_reactive
        self.zeroed_time = self.time - self.time[self.EOC_idx]

    def find_EOC(self):
        """Find the index and pressure at the end of compression.

        Returns
        -------
        `tuple`
            Returns a tuple with types (`float`, `int`,
            `bool`) representing the pressure at the end of
            compression, the index of the end of compression relative
            to the start of the pressure trace, and a boolean that is
            True if the case is reactive and False otherwise,
            respectively

        Notes
        -----
        The EOC is found by moving backwards from the maximum pressure
        point and testing the values of the pressure. When the test
        value becomes greater than the previous pressure, we have reached
        the minimum pressure before ignition, in the case of a reactive
        experiment. Then, the EOC is the maximum of the pressure before
        this minimum point. If the pressure at the minimum is close to
        the initial pressure, assume the case is non-reactive and set
        the EOC pressure and the index to the max pressure point.
        """
        is_reactive = True
        max_p = np.amax(self.pressure)
        max_p_idx = np.argmax(self.pressure)
        # Walk backwards from just before the peak until the pressure stops
        # decreasing (comparing against a point 50 samples earlier).
        min_p_idx = max_p_idx - 100
        while self.pressure[min_p_idx] >= self.pressure[min_p_idx - 50]:
            min_p_idx -= 1
        p_EOC = np.amax(self.pressure[0:min_p_idx])
        p_EOC_idx = np.argmax(self.pressure[0:min_p_idx])
        # If the candidate EOC pressure is within 5 bar of the initial
        # pressure, treat the case as non-reactive.
        diff = abs(self.pressure[p_EOC_idx] - self.pressure[15])
        if diff < 5.0:
            p_EOC, p_EOC_idx = max_p, max_p_idx
            is_reactive = False
        return p_EOC, p_EOC_idx, is_reactive

    def calculate_derivative(self, dep_var, indep_var):
        """Calculate the derivative.

        Parameters
        ----------
        dep_var : `numpy.ndarray`
            Dependent variable (e.g., the pressure)
        indep_var : `numpy.ndarray`
            Independent variable (e.g., the time)

        Returns
        -------
        `numpy.ndarray`
            1-D array containing the derivative

        Notes
        -----
        The derivative is calculated by a second-order forward method
        and any places where the derivative is infinite are set to
        zero. The last two points are left at zero because the forward
        stencil needs two points ahead.
        """
        m = len(dep_var)
        ddt = np.zeros(m)
        ddt[:m-2] = (-3*dep_var[:m-2] + 4*(dep_var[1:m-1]) - dep_var[2:m])/(2*np.diff(indep_var[:m-1]))  # NOQA
        ddt[np.isinf(ddt)] = 0
        return ddt
class AltExperimentalPressureTrace(ExperimentalPressureTrace):
    """Process an alternate experimental pressure trace.

    These pressure traces do not have an associated voltage trace,
    but the machinery in the VoltageTrace class is useful for
    filtering.
    """

    def __init__(self, file_path, initial_pressure_in_torr):
        # This is not a real voltage trace
        # The file already contains pressure data; VoltageTrace is reused
        # only for its loading and filtering machinery.
        pressure_trace = VoltageTrace(file_path)
        self.time = pressure_trace.time
        self.frequency = pressure_trace.frequency
        self.filter_frequency = pressure_trace.filter_frequency
        self.pressure = pressure_trace.filtered_voltage
        # Re-zero against the pre-compression mean (samples 20:500), then
        # offset by the initial pressure converted from Torr to bar.
        pressure_start = np.mean(self.pressure[20:500])
        self.pressure -= pressure_start
        self.pressure += initial_pressure_in_torr*one_atm_in_bar/one_atm_in_torr
        self.raw_pressure = pressure_trace.signal[:, 1]
        raw_pressure_start = np.mean(self.raw_pressure[20:500])
        self.raw_pressure -= raw_pressure_start
        self.raw_pressure += initial_pressure_in_torr*one_atm_in_bar/one_atm_in_torr
        self.p_EOC, self.EOC_idx, self.is_reactive = self.find_EOC()
        # NOTE(review): unlike the parent class, the derivative here is not
        # smoothed with a moving average -- confirm this is intentional.
        self.derivative = self.calculate_derivative(self.pressure, self.time)
        self.zeroed_time = self.time - self.time[self.EOC_idx]

    def __repr__(self):
        return ('AltExperimentalPressureTrace(p_EOC={self.p_EOC!r}, '
                'is_reactive={self.is_reactive!r})').format(self=self)
class PressureFromVolume(object):
    """Create a pressure trace given a volume trace.

    Using Cantera to evaluate the thermodynamic properties, compute a
    pressure trace from a volume trace.

    Parameters
    ----------
    volume : `numpy.ndarray`
        1-D array containing the reactor volume
    p_initial : `float`
        Initial pressure of the experiment, in bar
    T_initial : `float`, optional
        Initial temperature of the experiment, in Kelvin.
        Optional for Cantera versions greater than 2.2.0.
    chem_file : `str`, optional
        Filename of the chemistry file to be used

    Attributes
    ----------
    pressure : `numpy.ndarray`
        The pressure trace

    Notes
    -----
    The pressure is computed in a `cantera.Solution` object by
    setting the volume and the entropy according to an isentropic
    process using the given volume trace.
    """

    def __init__(self, volume, p_initial, T_initial, chem_file='species.cti', cti_source=None):
        if cti_source is None:
            gas = ct.Solution(chem_file)
        else:
            gas = ct.Solution(source=cti_source)
        gas.TP = T_initial, p_initial
        # Reference state for the isentropic process.
        v_0 = gas.volume_mass
        s_0 = gas.entropy_mass
        pressures = np.zeros(len(volume))
        for idx, vol in enumerate(volume):
            # Hold entropy constant and set the scaled specific volume.
            gas.SV = s_0, vol*v_0
            pressures[idx] = gas.P/one_bar_in_pa
        self.pressure = pressures

    def __repr__(self):
        return 'PressureFromVolume(pressure={self.pressure!r})'.format(self=self)
class VolumeFromPressure(object):
    r"""Create a volume trace given a pressure trace.

    Using Cantera to evaluate the thermodynamic properties, compute a
    volume trace from a pressure trace.

    Parameters
    ----------
    pressure : `numpy.ndarray`
        1-D array containing the reactor pressure
    v_initial : `float`
        Initial volume of the experiment, in m**3
    T_initial : `float`, optional
        Initial temperature of the experiment, in Kelvin. Optional for
        Cantera versions greater than 2.2.0.
    chem_file : `str`, optional
        Filename of the chemistry file to be used

    Attributes
    ----------
    volume : `numpy.ndarray`
        The volume trace

    Notes
    -----
    The volume at each point is

    .. math:: v_i = v_{initial}*\rho_{initial}/\rho_i

    where the state at point :math:`i` is found by setting the pressure
    from the input array while holding the entropy constant.
    """

    def __init__(self, pressure, v_initial, T_initial, chem_file='species.cti', cti_source=None):
        if cti_source is None:
            gas = ct.Solution(chem_file)
        else:
            gas = ct.Solution(source=cti_source)
        gas.TP = T_initial, pressure[0]*one_bar_in_pa
        # Reference entropy and density for the isentropic relationship.
        s_0 = gas.entropy_mass
        rho_0 = gas.density
        volumes = np.zeros(len(pressure))
        for idx, pres in enumerate(pressure):
            gas.SP = s_0, pres*one_bar_in_pa
            volumes[idx] = v_initial*rho_0/gas.density
        self.volume = volumes

    def __repr__(self):
        return 'VolumeFromPressure(volume={self.volume!r})'.format(self=self)
class TemperatureFromPressure(object):
    """Create a temperature trace given a pressure trace.

    Using Cantera to evaluate the thermodynamic properties, compute a
    temperature trace from a pressure trace.

    Parameters
    ----------
    pressure : `numpy.ndarray`
        1-D array containing the pressure
    T_initial : `float`
        Initial temperature of the experiment, in Kelvin.
        Optional for Cantera versions greater than 2.2.0.
    chem_file : `str`, optional
        Filename of the chemistry file to be used

    Attributes
    ----------
    temperature : `numpy.ndarray`
        The temperature trace

    Notes
    -----
    The temperature is computed in a `cantera.Solution` object by
    setting the pressure and the entropy according to an isentropic
    process using the given pressure trace.
    """

    def __init__(self, pressure, T_initial, chem_file='species.cti', cti_source=None):
        if cti_source is None:
            gas = ct.Solution(chem_file)
        else:
            gas = ct.Solution(source=cti_source)
        gas.TP = T_initial, pressure[0]*one_bar_in_pa
        # Entropy of the initial state, held constant along the trace.
        s_0 = gas.entropy_mass
        temperatures = np.zeros(len(pressure))
        for idx, pres in enumerate(pressure):
            gas.SP = s_0, pres*one_bar_in_pa
            temperatures[idx] = gas.T
        self.temperature = temperatures

    def __repr__(self):
        return 'TemperatureFromPressure(temperature={self.temperature!r})'.format(self=self)
| {
"repo_name": "bryanwweber/UConnRCMPy",
"path": "uconnrcmpy/traces.py",
"copies": "1",
"size": "20363",
"license": "bsd-3-clause",
"hash": 1302995466280997000,
"line_mean": 36.6395563771,
"line_max": 111,
"alpha_frac": 0.6166085547,
"autogenerated": false,
"ratio": 3.953212968355659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004555325956553884,
"num_lines": 541
} |
# All of the non-trivial parts of this program (the frequency extraction) are code from Justin Peel
# at http://stackoverflow.com/questions/2648151/python-frequency-detection -- thanks a bunch!
import pyaudio
import wave
import numpy as np
from notes import note_map
import logging
import time
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger()
# Samples per analysis window (also the FFT size).
chunk = 2048
file_name = 'hum.wav'
wf = wave.open(file_name, 'rb')
# Bytes per sample, e.g. 2 for 16-bit audio.
swidth = wf.getsampwidth()
RATE = wf.getframerate()
# use a Blackman window
window = np.blackman(chunk)
def extract_tone_from_chunk(data):
    """Return the dominant frequency (in Hz) of one chunk of raw sample bytes.

    Algorithm by Justin Peel, obtained from
    http://stackoverflow.com/questions/2648151/python-frequency-detection
    """
    # unpack the data and times by the hamming window
    indata = np.array(wave.struct.unpack("%dh"%(len(data)/swidth),\
                                         data))*window
    # Take the fft and square each value
    fftData=abs(np.fft.rfft(indata))**2
    # find the maximum (skipping the DC component at index 0)
    which = fftData[1:].argmax() + 1
    # use quadratic interpolation around the max
    if which != len(fftData)-1:
        y0,y1,y2 = np.log(fftData[which-1:which+2:])
        x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
        # find the frequency and output it
        thefreq = (which+x1)*RATE/chunk
        LOG.info("Chunk freq is %f Hz." % (thefreq))
        return thefreq
    else:
        # Peak at the last bin: no room to interpolate.
        thefreq = which*RATE/chunk
        # Bug fix: log before returning. The original returned first,
        # leaving its LOG.info statement unreachable.
        LOG.info("Chunk freq is %f Hz." % (thefreq))
        return thefreq
def auto_tune(raw_freq):
    """Snap *raw_freq* to the closest frequency in the known note table.

    Returns the frequency of the nearest note from ``note_map``.
    """
    # min() returns the first entry with the smallest distance, matching
    # the first-strictly-smaller semantics of a manual scan.
    note, freq = min(note_map.items(),
                     key=lambda item: abs(item[1] - raw_freq))
    LOG.info("auto tuning %s to %s: %s " % (raw_freq, freq, note))
    return freq
def simplify(milli_per_note, frequencies):
    """Run-length encode a per-chunk frequency list.

    Collapses consecutive equal frequencies into a single entry, returning
    parallel lists of (frequency, duration-in-milliseconds), where each
    chunk contributes ``milli_per_note`` milliseconds.

    Bug fixes versus the original: the final run is now flushed after the
    loop (it used to be dropped), the first run no longer double-counts its
    first sample, and an empty input no longer raises IndexError.
    """
    s_frequencies = []
    s_millis = []
    cur_val = None
    milli_count = 0
    for f in frequencies:
        if f != cur_val:
            # Close out the previous run, if any, and start a new one.
            if cur_val is not None:
                s_frequencies.append(cur_val)
                s_millis.append(milli_count)
            cur_val = f
            milli_count = milli_per_note
        else:
            milli_count += milli_per_note
    # Flush the final run.
    if cur_val is not None:
        s_frequencies.append(cur_val)
        s_millis.append(milli_count)
    return s_frequencies, s_millis
def format_arduino_array(array_name, vals):
    """Render *vals* as an Arduino PROGMEM short-array declaration.

    Each value is rounded to the nearest integer.
    """
    body = ", ".join(str(int(round(v))) for v in vals)
    return "PROGMEM short %s[] = {%s};" % (array_name, body)
def main():
    """Read hum.wav chunk by chunk, pitch-detect and auto-tune each chunk,
    then print the result as Arduino PROGMEM arrays."""
    # open stream
    p = pyaudio.PyAudio()
    frequencies = []
    # read some data
    data = wf.readframes(chunk)
    # Process only full chunks; a trailing partial chunk is discarded.
    while len(data) == chunk * swidth:
        frequencies.append(auto_tune(extract_tone_from_chunk(data)))
        data = wf.readframes(chunk)
    wf.close()
    p.terminate()
    # Duration of one chunk in milliseconds.
    milli_per_sample = int(round((chunk/float(RATE)) * 1000))
    LOG.info("%s milliseconds per sample " % milli_per_sample)
    s_frequencies, milli_per_note = simplify(milli_per_sample, frequencies)
    LOG.info("%s simplified notes and %s millisecond per note entries (These better match!)" %
             (len(s_frequencies), len(milli_per_note)))
    # Brief pause so the log output lands before the printed arrays.
    time.sleep(1)
    print format_arduino_array("maria_freqs", s_frequencies)
    print format_arduino_array("maria_millis", milli_per_note)
if __name__ == '__main__':
    main()
"repo_name": "parkercoleman/home_tweet_home",
"path": "audio_parser.py",
"copies": "1",
"size": "3377",
"license": "mit",
"hash": 6064977294938241000,
"line_mean": 28.1206896552,
"line_max": 97,
"alpha_frac": 0.6106011253,
"autogenerated": false,
"ratio": 3.3535253227408144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44641264480408144,
"avg_score": null,
"num_lines": null
} |
# All of the other examples directly embed the Javascript and CSS code for
# Bokeh's client-side runtime into the HTML. This leads to the HTML files
# being rather large. An alternative is to ask Bokeh to produce HTML that
# has a relative link to the Bokeh Javascript and CSS. This is easy to
# do; you just pass in a few extra arguments to the output_file() command.
import numpy as np
from bokeh.plotting import figure, show, output_file
# Plot one sine trace; the interesting part is the output_file() call below,
# which links to BokehJS by relative path instead of embedding it.
num_points = 100
xs = np.linspace(0, 4 * np.pi, num_points)
ys = np.sin(xs)

output_file("relative_paths.html", title="Relative path example", mode="relative")

fig = figure(tools="pan,wheel_zoom,box_zoom,reset,save")
fig.circle(xs, ys, alpha=0.5, color="tomato", radius=0.1)
show(fig)
# By default, the URLs for the Javascript and CSS will be relative to
# the current directory, i.e. the directory in which the HTML file is
# generated. You can provide a different "root" directory from which
# the relative paths will be computed:
#
# output_file("relative_paths.html", title="Relative path example",
# resources="relative", rootdir="some/other/path")
| {
"repo_name": "matbra/bokeh",
"path": "examples/plotting/file/relative_paths.py",
"copies": "45",
"size": "1085",
"license": "bsd-3-clause",
"hash": 7441038244425854000,
"line_mean": 37.75,
"line_max": 82,
"alpha_frac": 0.730875576,
"autogenerated": false,
"ratio": 3.4227129337539433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011041470464066509,
"num_lines": 28
} |
"""All of the various asynchronous message producers."""
import json
from messaging import config
from messaging.message_producer import MessageProducer
from messaging.singleton_mixin import SingletonMixin
class CommandProducer(SingletonMixin):
    """Publishes control commands on the command exchange."""
    def __init__(self):
        super(CommandProducer, self).__init__()
        self._producer = MessageProducer(config.COMMAND_EXCHANGE)

    def start(self):
        """Publish the 'start' command."""
        self._producer.publish('start')

    def stop(self):
        """Publish the 'stop' command."""
        self._producer.publish('stop')

    def reset(self):
        """Publish the 'reset' command."""
        self._producer.publish('reset')

    def calibrate_compass(self):
        """Publish the compass-calibration command."""
        self._producer.publish('calibrate-compass')

    def set_max_throttle(self, throttle):
        """Publish the command that caps the throttle at `throttle`."""
        message = 'set-max-throttle={}'.format(throttle)
        self._producer.publish(message)
class TelemetryProducer(SingletonMixin):
    """Publishes JSON-encoded sensor readings on the telemetry exchange."""
    def __init__(self):
        super(TelemetryProducer, self).__init__()
        self._producer = MessageProducer(config.TELEMETRY_EXCHANGE)

    def gps_reading(
        self,
        latitude_d,
        longitude_d,
        accuracy_m,
        heading_d,
        speed_m_s,
        timestamp_s,
        device_id
    ):
        """Publish one GPS fix as a JSON message."""
        reading = {
            'latitude_d': latitude_d,
            'longitude_d': longitude_d,
            'accuracy_m': accuracy_m,
            'heading_d': heading_d,
            'speed_m_s': speed_m_s,
            'timestamp_s': timestamp_s,
            'device_id': device_id,
        }
        self._producer.publish(json.dumps(reading))

    def compass_reading(self, compass_d, confidence, device_id):
        """Publish one compass reading as a JSON message."""
        reading = {
            'compass_d': compass_d,
            'confidence': confidence,
            'device_id': device_id,
        }
        self._producer.publish(json.dumps(reading))

    def accelerometer_reading(
        self,
        acceleration_g_x,
        acceleration_g_y,
        acceleration_g_z,
        device_id
    ):
        """Publish one accelerometer reading as a JSON message."""
        reading = {
            'acceleration_g_x': acceleration_g_x,
            'acceleration_g_y': acceleration_g_y,
            'acceleration_g_z': acceleration_g_z,
            'device_id': device_id,
        }
        self._producer.publish(json.dumps(reading))
class CommandForwardProducer(SingletonMixin):
    """Re-publishes command messages onto the forwarded-command exchange."""
    # This is a complete hack. I couldn't figure out how to do multiple
    # consumers, but I only need it for one producer (command) and I only have
    # two consumers, so I'll just manually forward them. I know this is fragile
    # and tightly coupled, because handlers shouldn't need to or know about
    # forwarding messages.
    # TODO(skari): Implement multi consumer
    def __init__(self):
        super(CommandForwardProducer, self).__init__()
        self._producer = MessageProducer(config.COMMAND_FORWARDED_EXCHANGE)

    def forward(self, message):
        """Re-publish `message` unchanged on the forwarded exchange."""
        self._producer.publish(message)
class WaypointProducer(SingletonMixin):
    """Publishes waypoint-management commands on the waypoint exchange."""
    def __init__(self):
        super(WaypointProducer, self).__init__()
        self._producer = MessageProducer(config.WAYPOINT_EXCHANGE)

    def load_kml_file(self, kml_file_name):
        """Ask the waypoint consumer to load waypoints from a KML file."""
        command = {
            'command': 'load',
            'file': kml_file_name
        }
        self._producer.publish(json.dumps(command))
| {
"repo_name": "bskari/sparkfun-avc",
"path": "messaging/async_producers.py",
"copies": "1",
"size": "3765",
"license": "mit",
"hash": -4989430901875622000,
"line_mean": 30.375,
"line_max": 79,
"alpha_frac": 0.592563081,
"autogenerated": false,
"ratio": 4.174057649667406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5266620730667405,
"avg_score": null,
"num_lines": null
} |
#All of this can be performed with inbuilt functions
#Basically this is an useless program, written only for
#learning purposes
# Ask how many numbers will be entered (Python 2: raw_input returns a str).
count = raw_input("Enter the number of values to be computed on:")
count = int(count)
#Here are the function declarations
def maximumNo(uList):
    """Return the largest item in uList, or None for an empty list.

    Deliberately a manual re-implementation of the builtin max() —
    this file exists for learning purposes.
    """
    largestNo = None
    for item in uList:
        # `is None` (identity) is the correct idiom, not `== None`.
        if largestNo is None or largestNo < item:
            largestNo = item
    return largestNo
def minimumNo(uList):
    """Return the smallest item in uList, or None for an empty list.

    Deliberately a manual re-implementation of the builtin min() —
    this file exists for learning purposes.
    """
    smallestNo = None
    for item in uList:
        # `is None` (identity) is the correct idiom, not `== None`.
        if smallestNo is None or smallestNo > item:
            smallestNo = item
    return smallestNo
def sumOfList(uList):
    """Return the sum of the items in uList (manual version of sum())."""
    total = 0
    for value in uList:
        total += value
    return total
def countOfList(uList):
    """Return the number of items in uList (manual version of len())."""
    tally = 0
    for _ in uList:
        tally += 1
    return tally
def average(uList):
    """Return the arithmetic mean of uList, or None for an empty list.

    The original body was truncated (the loop had no body and nothing was
    returned, so the caller always printed None); this computes the mean
    in the same manual-loop style as the helpers above.
    """
    total = 0
    count = 0
    for item in uList:
        total += item
        count += 1
    if count == 0:
        return None
    # float() keeps true division under Python 2 as well.
    return total / float(count)
# Collects the user's numbers for the computations below.
userList = []
#Getting the inputs
t = count
while t > 0:
    uip = raw_input("Enter number:")
    uip = int(uip);
    userList.append(uip)
    t = t - 1
# Results from the hand-rolled helpers above.
print "The maximum no in the list is: " + str(maximumNo(userList))
print "The minimum no in the list is: " + str(minimumNo(userList))
print "The sum of nos in the list is: " + str(sumOfList(userList))
print "The count of nos in the list is: " + str(countOfList(userList))
print "Average of the list: " + str(average(userList))
# Same answers again via the builtins, for comparison.
print "Results from inbuilt functions"
print "The maximum no in the list is: " + str(max(userList))
print "The minimum no in the list is: " + str(min(userList))
print "The sum of nos in the list is: " + str(sum(userList))
print "The count of nos in the list is: " + str(len(userList))
| {
"repo_name": "ashgang/RandomAlgos",
"path": "Python/minMaxSum.py",
"copies": "1",
"size": "1571",
"license": "apache-2.0",
"hash": -180249006893982140,
"line_mean": 26.0862068966,
"line_max": 70,
"alpha_frac": 0.7148313176,
"autogenerated": false,
"ratio": 2.9810246679316887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9032503830000527,
"avg_score": 0.03267043110623222,
"num_lines": 58
} |
#All of this code is awful and needs to be re-written at some point.
#The line drawing functions have been reimplemented in other modules,
#so I'm not entirely sure how often these are called.
class line_diag:
    # Bresenham-style line rasterizer: builds self.path, the list of integer
    # (x, y) cells connecting `start` to `end` (inclusive), ordered from
    # start to end.
    def __init__(self, start, end):
        self.path = []
        if start == end: return None
        self.start = list(start)
        self.end = list(end)
        # "Steep" lines (|dy| > |dx|) are traced with the axes swapped so
        # the main loop can always walk along x.
        self.steep = abs(self.end[1]-self.start[1]) > abs(self.end[0]-self.start[0])
        if self.steep:
            self.start = self.swap(self.start[0],self.start[1])
            self.end = self.swap(self.end[0],self.end[1])
        # Always trace left-to-right; the path is reversed at the end if
        # that flipped the caller's direction.
        if self.start[0] > self.end[0]:
            self.start[0],self.end[0] = self.swap(self.start[0],self.end[0])
            self.start[1],self.end[1] = self.swap(self.start[1],self.end[1])
        dx = self.end[0] - self.start[0]
        dy = abs(self.end[1] - self.start[1])
        error = 0
        try:
            derr = dy/float(dx)
        except:
            # NOTE(review): dx should never be 0 here (equal endpoints exit
            # early, and steep lines were axis-swapped above), so this bare
            # except looks unreachable — kept as-is.
            return None
        ystep = 0
        y = self.start[1]
        if self.start[1] < self.end[1]: ystep = 1
        else: ystep = -1
        for x in range(self.start[0],self.end[0]+1):
            if self.steep:
                # Undo the axis swap when recording the cell.
                self.path.append((y,x))
            else:
                self.path.append((x,y))
            error += derr
            if error >= 0.5:
                y += ystep
                error -= 1.0
        # Ensure the path runs start -> end from the caller's point of view.
        if not self.path[0] == (start[0],start[1]):
            self.path.reverse()

    def swap(self,n1,n2):
        # Returns the two values exchanged; used both for pairwise swaps
        # and for building a swapped [y, x] coordinate.
        return [n2,n1]
def diag_line(start, end):
    """Return the list of grid cells on the 2D line from `start` to `end`."""
    return line_diag(start, end).path
def draw_3d_line(pos1,pos2):
    # Rasterize a 3D line from pos1 to pos2: walk the dominant axis one cell
    # at a time, stepping the other two axes fractionally and rounding.
    # Returns [tuple(pos1), ...intermediate cells..., tuple(pos2)].
    # NOTE(review): pos1/pos2 are always appended, so a zero-length line
    # yields the same point twice — verify callers expect that.
    _xchange = pos2[0]-pos1[0]
    _ychange = pos2[1]-pos1[1]
    _zchange = pos2[2]-pos1[2]
    _line = [tuple(pos1)]
    if abs(_xchange) >= abs(_ychange) and abs(_xchange) >= abs(_zchange):
        # x is the dominant axis.
        _xnegative = False
        _ynegative = False
        _znegative = False
        if _ychange < 0:
            _ynegative = True
        if _xchange < 0:
            _xnegative = True
        if _zchange < 0:
            _znegative = True
        _x = pos1[0]
        _y = pos1[1]
        _z = pos1[2]
        # try/except guards division when the dominant delta is 0
        # (i.e. pos1 == pos2, which only this first branch can see).
        try:
            _ystep = abs(_ychange/float(_xchange))
        except:
            _ystep = abs(_ychange)
        try:
            _zstep = abs(_zchange/float(_xchange))
        except:
            _zstep = abs(_zchange)
        for x in range(1,abs(_xchange)):
            if _xnegative:
                x = -x
            if _ynegative:
                _y -= _ystep
            else:
                _y += _ystep
            if _znegative:
                _z -= _zstep
            else:
                _z += _zstep
            _line.append((_x+x,int(round(_y)),int(round(_z))))
    elif abs(_ychange) >= abs(_xchange) and abs(_ychange) >= abs(_zchange):
        # y is the dominant axis (cannot be zero here: that case is caught
        # by the x branch above, so the divisions below are safe).
        _xnegative = False
        _ynegative = False
        _znegative = False
        if _ychange < 0:
            _ynegative = True
        if _xchange < 0:
            _xnegative = True
        if _zchange < 0:
            _znegative = True
        _x = pos1[0]
        _y = pos1[1]
        _z = pos1[2]
        _xstep = abs(_xchange/float(_ychange))
        _zstep = abs(_zchange/float(_ychange))
        for y in range(1,abs(_ychange)):
            if _ynegative:
                y = -y
            if _xnegative:
                _x -= _xstep
            else:
                _x += _xstep
            if _znegative:
                _z -= _zstep
            else:
                _z += _zstep
            _line.append((int(round(_x)),_y+y,int(round(_z))))
    elif abs(_zchange) > abs(_xchange) and abs(_zchange) > abs(_ychange):
        # z is the dominant axis (strictly greater: ties went to x or y).
        _xnegative = False
        _ynegative = False
        _znegative = False
        if _zchange < 0:
            _znegative = True
        if _xchange < 0:
            _xnegative = True
        if _ychange < 0:
            _ynegative = True
        _x = pos1[0]
        _y = pos1[1]
        _z = pos1[2]
        _xstep = abs(_xchange/float(_zchange))
        _ystep = abs(_ychange/float(_zchange))
        for z in range(1,abs(_zchange)):
            if _znegative:
                z = -z
            if _xnegative:
                _x -= _xstep
            else:
                _x += _xstep
            if _ynegative:
                _y -= _ystep
            else:
                _y += _ystep
            _line.append((int(round(_x)),int(round(_y)),_z+z))
    _line.append(tuple(pos2))
    return _line
def draw_circle(at, size):
    """Return the grid points forming a circle outline of diameter `size`
    centred on `at`; `at` itself is always included in the result."""
    width = size
    height = size
    center_x = width / float(2)
    center_y = height / float(2)
    circle = []
    for row in range(height + 1):
        for col in range(width + 1):
            # Normalised ellipse equation: ~1.0 on the outline.
            value = (((row - center_y) * (row - center_y)) /
                     ((float(height) / 2) * (float(height) / 2))) + \
                    (((col - center_x) * (col - center_x)) /
                     ((float(width) / 2) * (float(width) / 2)))
            if 0 < value < 1.1:
                circle.append((at[0] + (col - (width / 2)),
                               at[1] + (row - (height / 2))))
    if at not in circle:
        circle.append(at)
    return circle
"repo_name": "flags/Reactor-3",
"path": "drawing.py",
"copies": "1",
"size": "4086",
"license": "mit",
"hash": 6114127590774948000,
"line_mean": 18.9365853659,
"line_max": 144,
"alpha_frac": 0.5736661772,
"autogenerated": false,
"ratio": 2.4718693284936477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3545535505693648,
"avg_score": null,
"num_lines": null
} |
# all of us
from cmd import *
from Database import sql_database
from FileManagement.filehandler import *
from graph import *
import sys
class Interpreter(Cmd):
    # Interactive shell (cmd.Cmd) tying together the file handler, the SQL
    # database and the graph builder.  `do_*` methods are user commands;
    # their help text lives in the matching `help_*` methods (kept there,
    # not in docstrings, so Cmd's help lookup behaviour is unchanged).
    # Kris
    def __init__(self, database_name):
        Cmd.__init__(self)
        self.file_handler = FileHandler()
        self.database = sql_database.SQLDatabase(database_name)
        self.graph = Graph(self.database)
        # Graphs created/loaded during this session.
        self.graphs = []

    # Kris
    # Pull data from database
    def do_display_data(self, args):
        self.database.display_data()

    # Kris Little
    # - This function loads and saves data to the database
    def do_load_from_file(self, args):
        args = args.split(' ')
        if len(args) == 1:
            # No option supplied: the lone argument is the file path.
            file_path = args[0]
            data = self.file_handler.load_file(file_path)
            data_to_add = self.file_handler.validate(data)
            print("adding data")
            print(data_to_add)
            self.database.write_to_database(data_to_add)
        elif len(args) == 2:
            file_path = args[1]
            optn = args[0]
            if "-d" in optn:
                data = self.file_handler.load_file(file_path)
                data_to_add = self.file_handler.validate(data)
                self.database.write_to_database(data_to_add)
            elif "-g" in optn:
                print("creating graph")
            else:
                print("Invalid option. Refer to help file")

    # Kris Little
    # backup the database. This could be changed to use the pickle
    # function brendan makes soon
    def do_backup_database(self, args):
        args = args.split(' ')
        msg = ""
        data = self.database.backup_database()
        can_create = True
        if "-o" in args[0]:
            # -o refuses to overwrite an existing file.
            if os.path.isfile(args[1]):
                can_create = False
                msg = "File already exists. Try a different filename."
        else:
            msg = "Incorrect option. Refer to help."
            can_create = False
        print(can_create)
        if can_create:
            if len(args) > 1:
                self.file_handler.write_file(args[1], data)
            else:
                self.file_handler.write_file(args[0], data)
        else:
            print(msg)

    # Kris
    # This gets all data from the database
    def do_get_data(self, sql):
        self.database.execute_sql(sql)
        return self.database.cursor.fetchall()

    # Brendan Holt
    # get data by calling the command execute_sql
    # data should be returned as an array holding tuples, keep this in mind
    # feel free to add other graph commands e.g. def do_display_piechart(self, args)
    # (args being data)
    # default value 'new_graph' is only set when called from creategraph which will return it by reference
    # the default value will be used should the user call the function from the command line
    def do_display_graph(self, args, my_graph=None):
        try:
            argss = []
            args = getopt.getopt(args, "t:o:", ["graph-type=", "option="])
            # if new graph is none then create argss as regular else append args from create_graph
            if my_graph is None:
                argss = args[1].split()
            else:
                argss.append(args[1][0])
                argss.append(args[1][1])
            # Exactly two positional values are expected: <type> <option>.
            if len(argss) > 2 or len(argss) < 2:
                raise TypeError
            if argss[0] == 'pie' and argss[1] != 'gender' and argss[1] != 'bmi' and argss[1] != 'age' \
                    or argss[0] == 'bar' and argss[1] != 'salary-by-gender' and argss[1] != 'salary-by-age':
                raise ValueError
        except TypeError:
            print('This functions takes exactly one parameters')
            return
        except ValueError:
            print('Ensure Graph Value Option Parameter is correctly spelt')
            return
        if my_graph is None:
            # Build a transient graph just for display.
            my_graph = self.graph.build_graph(argss)
            self.graph.print_graph(my_graph)
            del my_graph
        else:
            self.graph.print_graph(my_graph)

    # Brendan Holt
    # Used to create a graph by calling collecting user defined arguments -
    # and passing them to build_graph in the graph class
    def do_create_graph(self, args):
        try:
            args = getopt.getopt(args, "t:o:", ["graph-type=", "option="])
            argss = args[1].split()
            # Raises exception if the incorrect amount of args have been entered
            if len(argss) > 2 or len(argss) < 2:
                raise TypeError
            # Raises exception if the args have been incorrectly typed
            if argss[0] == 'pie' and argss[1] != 'gender' and argss[1] != 'bmi' and argss[1] != 'age' \
                    or argss[0] == 'bar' and argss[1] != 'salary-by-gender' and argss[1] != 'salary-by-age':
                raise ValueError
        except TypeError:
            print('This functions takes exactly two parameters')
            return
        except ValueError:
            print('Ensure Parameters are correctly spelt')
            return
        # new graph is temp and is created to be appended to the graph list then destroyed
        self.graphs.append(self.graph.build_graph(argss))

    # Brendan Holt
    # User called function to list graphs currenltly loaded in the interpreters graph list
    def do_list_graphs(self, args):
        try:
            args = getopt.getopt(args, "t:o:", ["graph-type="])
            argss = args[1].split()
            # If there are arguments
            if len(argss) > 0:
                # Raises exception if the incorrect amount of args have been entered
                if len(argss) > 1 or len(argss) < 0:
                    raise TypeError
                # Raises exception if the args have been incorrectly typed
                if argss[0] != 'pie' and argss[0] != 'bar':
                    raise ValueError
            # Raises exception should there be no graphs inside the iterators graph list
            if len(self.graphs) == 0:
                raise IndexError
        except TypeError:
            print('This functions takes exactly one or no parameters')
            return
        except ValueError:
            print('Ensure Parameters are correctly spelt')
            return
        except IndexError:
            print('There are currently no graphs loaded')
            return
        if len(argss) > 0:
            print(argss[0])
        for g in range(len(self.graphs)):
            # NEW Brendan changed self.graph[0].data to self.graph[g].title
            # Checks args length for type of graph user selects
            if len(argss) > 0:
                if argss[0] == self.graphs[g].type:
                    print(g, self.graphs[g].title + " " + self.graphs[g].time)
            # If not args are found the graph is listed to the output without regardless of type
            else:
                print(g, self.graphs[g].title + " " + self.graphs[g].time)
        # Keep prompting until the user picks a valid index or presses enter.
        while True:
            selection = input("Select graph number to display graph or press enter to continue >>> ")
            try:
                if selection == "":
                    return
                elif int(selection) not in range(len(self.graphs)):
                    raise ValueError
            except ValueError:
                print('Graph selection is outside of range')
                continue
            break
        self.graph.print_graph(self.graphs[int(selection)])

    # Brendan Holt
    # Pickles the currently loaded graphs in the list self.graph by calling the file handlers pack_pickle
    # Args is currently not used but kept to implement user defined files should the need arise
    def do_save_graphs(self, args):
        try:
            if len(self.graphs) < 1:
                raise ValueError
        except ValueError:
            print('There is currently no graphs to be saved')
            return
        self.file_handler.pack_pickle(self.graphs)

    # Brendan Holt
    # Unpickles the default pickle file (see unpack_pickle in the file handler) to the graphs list
    # Args is currently not used but kept to implement user defined files should the need arise
    def do_load_graphs(self, args):
        # Should the file not exist an exception is raised in the file handler
        filepath = os.path.dirname(os.path.realpath(sys.argv[0])) + "\\files\\pickle.dat"
        # Ensure graph list is cleared
        self.graphs = []
        # Reload graph list
        self.graphs = self.file_handler.unpack_pickle(filepath)

    # Brendan Holt
    # Pickles and backs up the entire database
    # Args is currently not used but kept to implement user defined files should the need arise
    def do_pickle(self, args):
        data = self.database.backup_database()
        print('The above has been pickled to a backup file')
        self.file_handler.pickle_all(data)

    # Help Commands - Kate
    # For each of the do_ commands above, print help info about them
    # Following this format: help_function
    # e.g. help_write_data(self):
    # for info on what each function does, check out the help.doc file
    @staticmethod
    def do_about(args):
        """
        This about command shows user some information about this application
        """
        print("Welcome to Interterpreter \n" +
              " This application able to read, store and display data \n" +
              "in a given format \n")

    @staticmethod
    def help_display_data():
        print("Display data is a simple command that shows all "
              "the data from the database in text form.\n" +
              "USAGE: display_data\n" +
              "OPTIONS and ARGUMENTS: This command takes no options or arguments.")

    @staticmethod
    def help_load_from_file():
        print("Load data from a file and save it to the database.\n" +
              "USAGE: load_from_file -option filepath\n" +
              "OPTIONS:\n" +
              " -g: Create a graph with the data\n" +
              " -d: Save data to the database. This is the default option.\n" +
              "ARGUMENTS:\n" +
              " filepath: Supply a filename or file path to the file that you want to load.")

    @staticmethod
    def help_backup_database():
        print("This command saves data to a file.\n" +
              "USAGE: backup_database -option filepath\n" +
              "OPTIONS:\n" +
              " -o: Overwrite existing file\n" +
              "ARGUMENTS:\n" +
              " filepath: Supply a filename or file path to where you want to save the database.")

    @staticmethod
    def help_create_graph():
        print("Create a bar or pie graph that visually represents data.\n" +
              "USAGE: create_graph <chart-type> <data>\n" +
              "OPTIONS: this command takes no options.\n" +
              "ARGUMENTS:\n" +
              " chart-type: the type of graph you want to create. Can be 'pie' or 'graph'\n" +
              " data: the data you want to show. For 'pie' "
              "it can be 'gender, bmi or age', for 'bar' it can be 'salary-by-gender'")

    @staticmethod
    def help_display_graph():
        print("Create a bar or pie graph that visually represents data.\n" +
              "USAGE: display_graph <chart-type> <data>\n" +
              "OPTIONS: this command takes no options.\n" +
              "ARGUMENTS:\n" +
              " chart-type: the type of graph you want to create. Can be 'pie' or 'graph'\n" +
              " data: the data you want to show. For 'pie'"
              " it can be 'gender, bmi or age', for 'bar' it can be 'salary-by-gender'")

    @staticmethod
    def help_list_graphs():
        print("Display a list of graphs. Use this if you need to load a specific "
              "graph that is active in the system.\n" +
              "USAGE: list_graphs <graph-type>\n" +
              "OPTIONS: This function takes no options.\n" +
              "ARGUMENTS:\n" +
              " graph-type: Supply the type of graph you want to list. Can be 'pie' or 'bar'.")

    @staticmethod
    def help_load_graphs():
        print("Load graphs that have been saved.\n" +
              "USAGE: load_graphs\n" +
              "OPTIONS: This function takes no options.\n" +
              "ARGUMENTS:This function takes no arguments\n")

    @staticmethod
    def help_save_graphs():
        print("Save existing graphs to a file so they can be loaded again.\n" +
              "USAGE: save_graphs\n" +
              "OPTIONS: This function takes no options.\n" +
              "ARGUMENTS:This function takes no arguments\n")

    @staticmethod
    def help_pickle():
        print("Encrypt database\n" +
              "USAGE: pickle\n" +
              "OPTIONS: This function takes no options.\n" +
              "ARGUMENTS:This function takes no arguments\n")

    # Swallow empty input lines instead of repeating the last command
    # (Cmd's default behaviour).
    def emptyline(self):
        pass
| {
"repo_name": "Comikris/Assignment_Interpreter",
"path": "interpreter.py",
"copies": "1",
"size": "13219",
"license": "apache-2.0",
"hash": 6336291241495619000,
"line_mean": 40.2332268371,
"line_max": 108,
"alpha_frac": 0.5588168545,
"autogenerated": false,
"ratio": 4.23821737736454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009784125006542497,
"num_lines": 313
} |
#All of visualization code for MapGeist
from time import sleep
import pygraphviz as PG
import os
def _is_tree(G):
    """Return True iff the NetworkX graph G is a tree.

    A graph is a tree exactly when it is connected and has |V| - 1 edges.
    """
    from networkx import (number_of_nodes, number_of_edges,
                          is_connected)
    return (number_of_nodes(G) == number_of_edges(G) + 1) and is_connected(G)
def _construct_subtree_2G(G, tree, node, added_nodes):
"""
Constructs the subtree of 'tree' that begins at
'node'.
G is the pygraphwiz AGraph instance.
Assumes that 'node' is already connected to its parents.
'added_nodes' is a set of strings specifying nodes already
added to the visualized tree.
"""
#Iterating over every neighbour of the root of this subtree
for neighbour in tree.neighbors(node):
#Make sure the neighbour is not a parent
if neighbour not in added_nodes:
#Make a note of the node being added
added_nodes.add(neighbour)
#Add the edge between root and neighbour(child)
G.add_edge(node, neighbour)
#Call this function recursively on the child
_construct_subtree_2G(G, tree, neighbour, added_nodes)
def visualize_tree_2D(tree, root, filepath):
    """Render a NetworkX tree to a PNG image via pygraphviz.

    `tree` must actually be a tree (verified first); `root` is the node the
    layout hangs from; the image is written to `filepath`.
    Raises ValueError when the graph is not a tree.
    """
    if not _is_tree(tree):
        raise ValueError("Given Graph is not a tree!")
    agraph = PG.AGraph(directed=True, strict=True)
    seen = set([root])
    # Recursively mirror the tree structure into the AGraph.
    _construct_subtree_2G(agraph, tree, root, seen)
    # Tree-style layout, then render to PNG.
    agraph.layout(prog='dot')
    agraph.draw(filepath, format='png')
| {
"repo_name": "sachinrjoglekar/MapGeist",
"path": "mapgeist/visualization/functions.py",
"copies": "1",
"size": "1958",
"license": "mit",
"hash": -5994442372613942000,
"line_mean": 30.5806451613,
"line_max": 66,
"alpha_frac": 0.6475995914,
"autogenerated": false,
"ratio": 3.7653846153846153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9892267584019412,
"avg_score": 0.0041433245530406206,
"num_lines": 62
} |
__all__ = ['oldtype2dtype', 'convtypecode', 'convtypecode2', 'oldtypecodes']
import numpy as N
# Maps old Numeric single-character typecodes to numpy dtypes; None maps
# to the default int dtype.  The commented-out entries are codes whose
# spelling did not change between Numeric and numpy.
oldtype2dtype = {'1': N.dtype(N.byte),
                 's': N.dtype(N.short),
# 'i': N.dtype(N.intc),
# 'l': N.dtype(int),
# 'b': N.dtype(N.ubyte),
                 'w': N.dtype(N.ushort),
                 'u': N.dtype(N.uintc),
# 'f': N.dtype(N.single),
# 'd': N.dtype(float),
# 'F': N.dtype(N.csingle),
# 'D': N.dtype(complex),
# 'O': N.dtype(object),
# 'c': N.dtype('c'),
                 None: N.dtype(int)
                 }
# converts typecode=None to int
def convtypecode(typecode, dtype=None):
    """Resolve an old Numeric typecode (or None) to a numpy dtype.

    An explicit `dtype` wins outright.  Otherwise `typecode` is looked up
    in `oldtype2dtype` (None maps to the int dtype) and anything unknown
    is handed to numpy's own dtype constructor.
    """
    if dtype is not None:
        return dtype
    try:
        return oldtype2dtype[typecode]
    # Narrowed from a bare `except:`; the lookup can only fail with an
    # unknown (KeyError) or unhashable (TypeError) typecode.
    except (KeyError, TypeError):
        return N.dtype(typecode)
def convtypecode2(typecode, dtype=None):
    """Like convtypecode, but preserves the "both unset" case.

    Returns None when both `typecode` and `dtype` are None; otherwise
    behaves exactly like convtypecode.
    """
    if dtype is not None:
        return dtype
    if typecode is None:
        return None
    try:
        return oldtype2dtype[typecode]
    # Narrowed from a bare `except:`; see convtypecode.
    except (KeyError, TypeError):
        return N.dtype(typecode)
# numpy char codes whose old Numeric spelling differed
# (e.g. numpy 'b' (byte) was Numeric '1').
_changedtypes = {'B': 'b',
                 'b': '1',
                 'h': 's',
                 'H': 'w',
                 'I': 'u'}
class _oldtypecodes(dict):
    # Mapping-like lookup: index with anything numpy can build a dtype
    # from and get back the old Numeric typecode character.
    def __getitem__(self, obj):
        code = N.dtype(obj).char
        # Codes missing from _changedtypes kept the same spelling.
        return _changedtypes.get(code, code)


oldtypecodes = _oldtypecodes()
| {
"repo_name": "santisiri/popego",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/oldnumeric/typeconv.py",
"copies": "1",
"size": "1595",
"license": "bsd-3-clause",
"hash": -8861997918272358000,
"line_mean": 25.5833333333,
"line_max": 76,
"alpha_frac": 0.4595611285,
"autogenerated": false,
"ratio": 3.6085972850678734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4568158413567873,
"avg_score": null,
"num_lines": null
} |
__all__ = ['OligoAnnealing']
def annealing_score_n(x,y):
    """Score one aligned base pair: A/T pairs score 2, G/C pairs 4, else 0."""
    if (x, y) in (('A', 'T'), ('T', 'A')):
        return 2
    if (x, y) in (('G', 'C'), ('C', 'G')):
        return 4
    return 0
class AnnealingState:
    """One alignment of primer `p` against primer `q`, holding its total
    score, its offset (`index`) and the per-position pair scores."""
    def __init__(self, p, q, score, index, scores):
        self.p = p
        self.q = q
        self.score = score
        self.scores = scores
        self.index = index

    def get_bar(self):
        """Return the three-line ASCII art for this alignment."""
        offset = ' ' * abs(self.index)
        marks = ''.join('|' if s > 0 else ' ' for s in self.scores)
        top = "5'-%s-3'" % self.p
        mid = " <" + marks + ">"
        bottom = "3'-%s-5'" % self.q[::-1]
        # A positive offset shifts the top strand right; negative shifts
        # the bottom strand.
        if self.index > 0:
            return [offset + top, offset + mid, bottom]
        return [top, offset + mid, offset + bottom]

    def write_html(self, w):
        """Emit this alignment into the HTML writer `w`."""
        w.push('div', style='annealing')
        w.insert('p', 'score=%s, index=%s' % (self.score, self.index))
        w.push('pre')
        w.text('\n'.join(self.get_bar()))
        w.pop()
        w.pop()
def annealing_score(p,q):
    """
    >>> fw = 'GAAGGAGACCCAAATTCAAAGTT'
    >>> rv = 'CCTTTCTCCCTTCGTAGGT'
    >>> annealing_score(fw, rv)
    ((18, -7, [2, 4, 4, 0, 2, 0, 0, 0, 0, 0, 0, 0, 2, 4, 0, 0]), (10, -7, [2, 4, 4, 0, 2, 0, 0, 0, 0, 0, 0, 0, 2, 4, 0, 0]))
    >>> annealing_score(fw, fw)
    ((24, 6, [4, 2, 2, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 2, 2, 4]), (4, -18, [2, 2, 0, 2, 2]))
    """
    # Slide primer p (w) against the reverse of q (v) at every offset k and
    # score the overlapping base pairs.  Two maxima are tracked:
    #   a_v  = best total annealing score (sum over the overlap)
    #   ea_v = best *end* annealing score (run of matches anchored at a
    #          primer end), only updated at offsets where an end can pair.
    # Each is a (score, offset, per-pair-scores) triple.
    sv = annealing_score_n
    p = str(p).upper()
    q = str(q).upper()
    w = p
    v = q[::-1]
    n = len(w)
    m = len(v)
    # Sum of the leading run of non-zero pair scores (stops at the first 0).
    def ea(ss):
        ret = 0
        for n in ss:
            if n==0:
                return ret
            ret += n
        return ret
    def ea_l(ss):
        return ea(ss)
    def ea_r(ss):
        return ea(ss[::-1])
    def ea_lr(ss):
        return max(ea_l(ss),ea_r(ss))
    ea_v = (-1, None, None)
    # Keep the best (score, offset, scores) triple seen so far.
    def u_ea(score, index, scores):
        nonlocal ea_v
        os,oi,oss = ea_v
        if os < score:
            ea_v = (score, index, scores)
    a_v = (-1, None, None)
    def u_a(score, index, scores):
        nonlocal a_v
        os,oi,oss = a_v
        if os < score:
            a_v = (score, index, scores)
    # The diagrams below show strand w over strand v at each offset regime.
    if n<=m:
        assert m-n >= 0
        for k in range(-(n-1),m-1 +1):
            if k<=0:
                # 5'- w[0]....w[-k]....w[n-1] -3'
                # 3'- v[0].....v[n+k-1]....v[m-1] -5'
                ss = [sv(w[-k+i],v[i]) for i in range(n+k)]
                u_a(sum(ss),k,ss)
                u_ea(ea_lr(ss),k,ss)
            elif k<=m-n:
                # w[0]....w[n-1]
                # v[0]....v[k]....v[k+n-1].....v[m-1]
                ss = [sv(w[0+i],v[k+i]) for i in range(n)]
                u_a(sum(ss),k,ss)
                u_ea(ea_r(ss),k,ss)
            else:
                # w[0]...w[m-k-1]....w[n-1]
                # v[0]...v[k]...v[m-1]
                ss = [sv(w[i],v[k+i]) for i in range(m-k)]
                u_a(sum(ss),k,ss)
    else:
        assert m-n <= 0
        for k in range(-(n-1),m-1 +1):
            if k<=m-n:
                # w[0]....w[-k]....w[n-1]
                # v[0].....v[n+k-1]....v[m-1]
                ss = [sv(w[-k+i],v[i]) for i in range(n+k)]
                u_a(sum(ss),k,ss)
                u_ea(ea_lr(ss),k,ss)
            elif k<=0:
                # w[0]....w[k]....w[m-k-1].....w[n-1]
                # v[0]....v[m-1]
                ss = [sv(w[k+i],v[0+i]) for i in range(m)]
                u_a(sum(ss),k,ss)
                u_ea(ea_l(ss),k,ss)
            else:
                # w[0]...w[m-k-1]....w[n-1]
                # v[0]...v[k]...v[m-1]
                ss = [sv(w[i],v[k+i]) for i in range(m-k)]
                u_a(sum(ss),k,ss)
    return a_v, ea_v
class OligoAnnealing:
    """Best overall and best end-anchored annealing states for primers p, q."""
    def __init__(self, p, q):
        (score, index, scores), (e_score, e_index, e_scores) = annealing_score(p, q)
        self.max_annealing = AnnealingState(p, q, score, index, scores)
        self.end_annealing = AnnealingState(p, q, e_score, e_index, e_scores)
| {
"repo_name": "mizuy/seqtool",
"path": "seqtool/nucleotide/annealing.py",
"copies": "1",
"size": "4044",
"license": "mit",
"hash": 5311961299929754000,
"line_mean": 29.4060150376,
"line_max": 124,
"alpha_frac": 0.3746290801,
"autogenerated": false,
"ratio": 2.5417976115650536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8328619069462841,
"avg_score": 0.017561524440442556,
"num_lines": 133
} |
__all__ = 'Once', 'For', 'Every', 'Hertz', 'After', 'Continu', 'Model'
from collections import defaultdict
def test():
    """Smoke-test the scheduling patterns against known step counts."""
    m = Model(3*1000)
    trace = []
    record = trace.append
    count = 0
    def add():
        nonlocal count
        count += 1
    m.add_action(lambda: record(m.steps), Once())
    m.add_action(lambda: record(m.steps), Every(3))
    m.add_action(lambda: record(m.steps), After(3))
    m.add_action(lambda: record(m.steps), For(2))
    m.add_action(add, Continu())
    m.simulate_ms(12)
    expected = [0, 0, 1, 2, 3, 4, 5, 9, 9, 18, 27]
    assert trace == expected
    assert count == m.steps
class Once:
    """Pattern: run the action a single time, on the first step."""
    def register_action(self, model, phase, action):
        def fire_once():
            action()
            return False  # False tells the phase to drop this entry
        phase.add_action(fire_once, 0)
class For:
    """Pattern: run the action on every step for `ms` milliseconds, then stop."""
    def __init__(self, ms):
        self.ms = ms

    def register_action(self, model, phase, action):
        remaining = model.ms_to_steps(self.ms)
        def run_for():
            nonlocal remaining
            action()
            remaining -= 1
            if remaining < 1:
                return False  # budget exhausted: drop the action
            return 0          # run again on the very next step
        phase.add_action(run_for, 0)
class Every:
    """Pattern: run the action every `ms` milliseconds; `immediate` chooses
    whether the first run happens on step 0 or after the first interval."""
    def __init__(self, ms, immediate=False):
        self.ms = ms
        self.imm = immediate

    def register_action(self, model, phase, action):
        interval = model.ms_to_steps(self.ms)
        def run_every():
            action()
            return interval  # reschedule after the same interval
        delay = 0 if self.imm else interval
        phase.add_action(run_every, delay)
class Hertz(Every):
    """Pattern: run the action `hz` times per second (sugar over Every)."""
    def __init__(self, hz, imm=False):
        period_ms = 1000 / hz
        super().__init__(period_ms, imm)
class After:
    """Pattern: run the action exactly once, `ms` milliseconds in."""
    def __init__(self, ms):
        self.ms = ms

    def register_action(self, model, phase, action):
        delay = model.ms_to_steps(self.ms)
        def fire_later():
            action()
            return False  # one-shot: drop after firing
        phase.add_action(fire_later, delay)
class Continu:
    """Pattern: run the action on every single step, forever."""
    def register_action(self, model, phase, action):
        def run_always():
            action()
            return 0  # reschedule for the very next step
        phase.add_action(run_always, 0)
def nearest(n):
    """Round `n` to the nearest integer, halves rounding up.

    The original `int(n + .5)` truncates toward zero, which is wrong for
    negative inputs (e.g. nearest(-1.2) gave 0 instead of -1); flooring
    gives the correct result on both sides while leaving all non-negative
    inputs unchanged.
    """
    import math  # local import: keeps the module's dependencies unchanged
    return math.floor(n + 0.5)
class Model:
    """Discrete-time simulation driver.

    Actions are attached with scheduling patterns (Once, Every, After, ...)
    and grouped into named, ordered phases; each `step` runs every phase
    once.  Configure with either a step frequency `f` (steps per second)
    or a step size `dt` (seconds) — not both.
    """
    def __init__(self, f=5000, dt=None):
        self.phases_by_name = {}
        self.phases_sorted = []
        self.steps = 0
        if dt is None:
            self.steps_per_second = f
            self.dt = 1/f
        elif f != 5000:
            # BUG FIX: the original tested `f == 5000`, which rejected the
            # legitimate dt-only call (f still at its default) and raised
            # the undefined name ArgumentException.  Both explicitly set is
            # the real conflict, and ValueError is the conventional type.
            raise ValueError("can't set f and dt")
        else:
            self.dt = dt
            self.steps_per_second = 1/dt
        # name -> list of sampled values, populated by add_log.
        self.logs = defaultdict(list)

    def ms_to_steps(self, ms):
        """Return the number of whole steps closest to `ms` milliseconds."""
        return nearest(ms * self.steps_per_second / 1000)

    def halftime(self, ms):
        """Return the per-step decay factor with a half-life of `ms` ms."""
        return 0.5 ** (1/self.ms_to_steps(ms))

    def add_action(self, action, pattern=Once(), phase="default"):
        """Attach `action` to `phase`, scheduled according to `pattern`."""
        if phase not in self.phases_by_name:
            self.add_phase(phase)
        # (The unused `action_ =` binding of the original was dropped;
        # register_action works by side effect on the phase.)
        pattern.register_action(self, self.phases_by_name[phase], action)

    def add_log(self, name, get_val):
        """Sample get_val() on every step into self.logs[name]."""
        log = self.logs[name]
        self.add_action(lambda: log.append(get_val()), Continu(), "log")

    def add_phase(self, *names):
        """Create the named phases (idempotently), in the given order."""
        for name in names:
            if name in self.phases_by_name:
                continue
            phase = Phase(name)
            self.phases_sorted.append(phase)
            self.phases_by_name[name] = phase

    def remove_phase(self, *names):
        """Remove the named phases (missing names are ignored)."""
        names = set(names)
        for name in names:
            if name in self.phases_by_name:
                del self.phases_by_name[name]
        self.phases_sorted[:] = [phase for phase in self.phases_sorted
                                 if phase.name not in names]

    def step(self):
        """Advance the simulation by one tick: run each phase in order."""
        for phase in self.phases_sorted:
            phase.execute()
        self.steps += 1

    def simulate_ms(self, ms):
        """Run the simulation for `ms` milliseconds of model time."""
        steps = nearest(self.steps_per_second * ms / 1000)
        for _ in range(steps):
            self.step()

    def simulate_seconds(self, seconds):
        """Run the simulation for `seconds` seconds of model time."""
        steps = nearest(self.steps_per_second * seconds)
        for _ in range(steps):
            self.step()

    def show(self):
        """Plot every recorded log against simulated time.

        matplotlib is imported lazily so it stays an optional dependency.
        """
        import matplotlib.pyplot as plt
        xs = [x/self.steps_per_second for x in range(self.steps)]
        for name in sorted(self.logs.keys()):
            plt.plot(xs, self.logs[name], label=name)
        plt.legend()
        plt.show()
class Phase:
    """An ordered bucket of scheduled actions, run once per model step.

    Each entry is a mutable [steps_left, callback] pair.  A callback
    returning False is removed; any other return value is the number of
    steps to wait before it fires again.
    """
    def __init__(self, name):
        self.actions = []  # list of [steps_left, callback]
        self.name = name

    def add_action(self, action, after=0):
        """Schedule `action` to first fire after `after` steps."""
        self.actions.append([after, action])

    def execute(self):
        """Run every due action once and age all the counters by one."""
        expired = []
        for idx, entry in enumerate(self.actions):
            if entry[0] < 1:
                reschedule = entry[1]()
                if reschedule is False:
                    expired.append(idx)
                else:
                    entry[0] = reschedule
            entry[0] -= 1
        # Pop in reverse so earlier indices stay valid.
        for idx in reversed(expired):
            self.actions.pop(idx)
| {
"repo_name": "lennart96/neurons",
"path": "model.py",
"copies": "1",
"size": "5109",
"license": "isc",
"hash": -2665846652795409400,
"line_mean": 27.3833333333,
"line_max": 71,
"alpha_frac": 0.5325895479,
"autogenerated": false,
"ratio": 3.6649928263988523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4697582374298852,
"avg_score": null,
"num_lines": null
} |
__all__ = ['on', 'when', 'decode_escapes', 'get_dict_path']
import re
import codecs
import copy
ESCAPE_SEQUENCE_RE = re.compile(r'''
    ( \\U........      # 8-digit hex escapes
    | \\u....          # 4-digit hex escapes
    | \\x..            # 2-digit hex escapes
    | \\[0-7]{1,3}     # Octal escapes
    | \\N\{[^}]+\}     # Unicode characters by name
    | \\[\\'"abfnrtv]  # Single-character escapes
    )''', re.UNICODE | re.VERBOSE)


def decode_escapes(s):
    """Expand backslash escape sequences in *s* (e.g. ``\\n``, ``\\x41``,
    ``\\N{...}``) the way a Python string literal would."""
    def replace(match):
        return codecs.decode(match.group(0), 'unicode-escape')
    return ESCAPE_SEQUENCE_RE.sub(replace, s)
def get_dict_path(dic, path):
    """Look up a nested value in *dic* by a '.'/'/'-separated *path*.

    Returns a ``(value, error)`` pair: ``(shallow copy of value, None)`` on
    success, ``(dic, None)`` for an empty path, or ``(None, message)`` when
    the path cannot be resolved.
    """
    # reduce is a builtin in Python 2 but lives in functools in Python 3;
    # importing from functools works on both (2.6+).
    from functools import reduce

    def callback(accumulator, key):
        # accumulator carries (current object, keys resolved so far)
        obj, keys = accumulator
        if isinstance(obj, dict):
            if key in obj:
                keys.append(key)
                return obj[key], keys
        path_string = '/'.join(keys)
        raise Exception('Object "./{}" has no key "{}"'.format(path_string, key))

    try:
        # normalise: trim surrounding separators and treat '.' like '/'
        path = path.strip(' ./').replace('.', '/')
        if not path:
            return dic, None
        result, _ = reduce(callback, path.split('/'), (dic, []))
        return copy.copy(result), None
    except Exception as e:
        # ``except Exception, e`` / ``e.message`` were Python-2-only
        # spellings; str(e) yields the same message text.
        return None, str(e)
def get_position(string, index):
    """Translate flat *index* into 1-based (line, column) coordinates."""
    line = string.count('\n', 0, index) + 1
    if line > 1:
        # Column counts from the last newline before *index*.
        column = index - string.rindex('\n', 0, index)
    else:
        column = index + 1
    return line, column
| {
"repo_name": "AlexYukikaze/JSONx",
"path": "JSONx/utils.py",
"copies": "1",
"size": "1442",
"license": "mit",
"hash": 4844074744746173000,
"line_mean": 27.2745098039,
"line_max": 81,
"alpha_frac": 0.5353675451,
"autogenerated": false,
"ratio": 3.534313725490196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45696812705901957,
"avg_score": null,
"num_lines": null
} |
"""AlloPred - Predict allosteric pockets on proteins
Usage:
python $ALLOPRED_DIR/run_allopred.py in_file act_res.txt
Arguments are the input file prefix and the active site residue file.
The working directory must contain the input PDB file (in_file.pdb) and the fpocket output directory (in_file_out).
The environmental variables $ALLOPRED_DIR and $SVM_LIGHT_DIR have to be defined - they refer to the AlloPred and SVM-light directories respectively.
See the AlloPred README for more information.
"""
# PARAMETERS
#
# these can be adjusted, but bear in mind the SVM is optimised on the default parameters
#
# proportion to increase the spring constant by at a chosen pocket - default is 1.5
# this parameter represents the reduction in flexibility of a pocket on modulator binding
spring_frac = 1.5
# atoms to use for normal mode calculation - default is 'calpha' but other options are 'backbone' and 'all'
# choosing 'backbone' will increase computation time; choosing 'all' will increase computation time significantly
nma_atoms = 'calpha'
# cutoff in Angstrom for ANM normal mode calculation; default is 15
nma_cutoff = 15
# check whether to print instructions and exit
import sys
if len(sys.argv) != 3:
print __doc__
sys.exit()
# check whether environmental variables are set
import os
try:
allopred_dir = os.environ['ALLOPRED_DIR']
except KeyError:
print 'Environmental variable ALLOPRED_DIR cannot be found - see the AlloPred README'
sys.exit()
try:
svm_light_dir = os.environ['SVM_LIGHT_DIR']
except KeyError:
print 'Environmental variable SVM_LIGHT_DIR cannot be found - see the AlloPred README'
sys.exit()
import re
from operator import itemgetter
from prody import *
# import custom functions
from functions.pocket_methods import *
from functions.nma_methods import *
# read arguments
input_prefix, act_res_filepath = sys.argv[1:]
pdb_file = input_prefix+'.pdb'
pocket_folder = input_prefix+'_out'
print '- Running AlloPred -'
print 'PDB file:', pdb_file
print 'Active site residue file:', act_res_filepath
print 'Pocket directory:', pocket_folder
# read PDB file
print 'Reading PDB file'
pro = parsePDB(pdb_file)
calphas = pro.select('calpha')
nma_selection = pro.select(nma_atoms)
# read active site residues from file
print 'Reading active site residue file'
act_res_file = open(act_res_filepath)
act_res_lines = act_res_file.readlines()
act_res_file.close()
# the first line is expected to hold comma-separated resnum:chain pairs
active_res = re.split(r',',act_res_lines[0])
print 'Active site residues:', act_res_lines[0].rstrip()
# extract active site residue indices
act_sel_list = []
for res in active_res:
    info = re.split(r':',res)
    if len(info) == 2:
        act_sel_list.append('(chain '+info[1].strip()+' and resnum '+info[0].strip()+')')
act_selector = str(' or '.join(act_sel_list))
no_act_res = len(act_sel_list)
if no_act_res > 0:
    try:
        act_res_indices = calphas.select(act_selector).getResindices()
    except AttributeError:
        # select() yields None when nothing matches, so .getResindices() fails
        print 'No residues found matching your active residues'
        sys.exit()
else:
    print 'No active residues could be detected - please ensure that the format is correct, including chain labels'
    sys.exit()
# check all active residues are included
# NOTE(review): this compares against all comma-separated entries, including
# malformed ones filtered out above - confirm act_sel_list was intended
if len(act_res_indices) != len(active_res):
    print 'One or more active site residues could not be read, or is missing in the PDB file; continuing anyway'
# fpocket information
# (labels of the fields parsed out of the fpocket info output)
pocket_info_list = [
    'Score :',
    'Druggability Score :',
    'Number of Alpha Spheres :',
    'Total SASA :',
    'Polar SASA :',
    'Apolar SASA :',
    'Volume :',
    'Mean local hydrophobic density :',
    'Mean alpha sphere radius :',
    'Mean alp. sph. solvent access :',
    'Apolar alpha sphere proportion :',
    'Hydrophobicity score:',
    'Volume score:',
    'Polarity score:',
    'Charge score :',
    'Proportion of polar atoms:',
    'Alpha sphere density :',
    'Cent. of mass - Alpha Sphere max dist:',
    'Flexibility :'
]
print 'Extracting pocket information'
# extract pocket information into a dictionary
pockets_info = extract_info('',input_prefix,pocket_info_list)
# extract pocket residues into a dictionary
pockets_res, pockets_res_name = extract_pockets('',input_prefix,calphas)
# extract distances of each pocket to the active site into a dictionary
pockets_dist = dist_to_active(calphas,act_res_indices,pockets_res)
# RUN NMA
# calculate normal modes without perturbation
print 'Calculating NMA without perturbation'
anm = ANM('normal')
anm.buildHessian(nma_selection,cutoff=nma_cutoff,gamma=1)
anm.calcModes(n_modes=None)
anm_used, sel_active = sliceModel(anm,nma_selection,act_selector)
# iterate over each pocket and perturb the normal modes
print 'Calculating NMA with perturbation'
results = {}
for pocket_no in pockets_res:
    # calculate normal modes with perturbation to residues in pocket
    anm_mod = ANM('modified')
    # do not perturb active residues directly
    indices_to_perturb = [i for i in pockets_res[pocket_no] if i not in act_res_indices]
    anm_mod.buildHessian(nma_selection,cutoff=nma_cutoff,gamma=GammaResidue(nma_selection,res_nos=indices_to_perturb,frac_change=spring_frac))
    anm_mod.calcModes(n_modes=None)
    anm_mod_used, sel_mod_active = sliceModel(anm_mod,nma_selection,act_selector)
    # measure the change in the normal modes
    diff_100 = quantify_difference(anm_used,anm_mod_used,no_modes=100)
    diff_200 = quantify_difference(anm_used,anm_mod_used,no_modes=200)
    diff_all = quantify_difference(anm_used,anm_mod_used,no_modes=len(anm))
    # add results to results dictionary
    results[pocket_no] = [diff_100,diff_200,diff_all]
# form title line
# title_string keeps the raw fpocket labels; clean_string strips punctuation
# to give machine-friendly column names for the final output header
title_string = 'fpocket_rank\tpocket_size\tdist_to_active\tno_pockets\tC_100\tE_100\tC_200\tE_200\tC_all\tE_all'
clean_string = title_string
for info_id in pocket_info_list:
    title_string += '\t'+info_id
    clean_string += '\tfpocket_'+re.sub('[:.\-\s+]','',info_id)
title_string += '\tresidues'
clean_string += '\tresidues'
# form output line for each pocket
lines = []
for pocket_no in range(len(pockets_res)):
    # write fpocket rank
    line = str(pocket_no)
    # write pocket size
    line += '\t'+str(len(pockets_res[pocket_no]))
    # write distance to active site
    line += '\t'+str(pockets_dist[pocket_no])
    # write number of pockets
    line += '\t'+str(len(pockets_res))
    # write combined NM output for 100 modes (C) and per-residue effect (E)
    line += '\t'+str(results[pocket_no][0])
    line += '\t'+str(results[pocket_no][0]/len(pockets_res[pocket_no]))
    # write combined NM output for 200 modes
    line += '\t'+str(results[pocket_no][1])
    line += '\t'+str(results[pocket_no][1]/len(pockets_res[pocket_no]))
    # write combined NM output for all modes
    line += '\t'+str(results[pocket_no][2])
    line += '\t'+str(results[pocket_no][2]/len(pockets_res[pocket_no]))
    # write pocket information
    for info_id in pocket_info_list:
        line += '\t'+str(pockets_info[pocket_no][info_id])
    # write residues in pocket
    line += '\t'+','.join(pockets_res_name[pocket_no])
    lines.append(line)
# FORM SVM INPUT FILE
# optimal SVM features and the ranges in the training set
# '_r' suffix marks raw (min/max-scaled) features; 'ranked' features use the
# pocket's fractional rank when pockets are sorted by that column
svm_features = [
    ['Number of Alpha Spheres :_r','raw'],
    ['E_200','ranked'],
    ['Score :_r','raw'],
    ['E_all','ranked'],
    ['dist_to_active_r','raw'],
    ['pocket_size_r','raw'],
    ['fpocket_rank_r','raw']
]
svm_features_range = {
    'Number of Alpha Spheres :_r':(36.0, 2276.0),
    'Score :_r':(-6.782, 70.118),
    'dist_to_active_r':(0.1, 110.6),
    'pocket_size_r':(2.0, 275.0),
    'fpocket_rank_r':(0.0, 83.0),
}
# split output lines for SVM sorting
print 'Forming SVM input file'
breaks = []
for line in lines:
    split = re.split(r'\t+',line.rstrip())
    # convert to float for sorting; ignore residue names
    split = [float(i) for i in split[:-1]]
    # convert to int for readability
    split[0] = int(split[0])
    breaks.append(split)
title_break = re.split(r'\t+',title_string.rstrip())
# form dictionary for SVM features, keyed by fpocket rank as a string
pocket_info = {}
for i in range(len(breaks)):
    pocket_info[str(breaks[i][0])] = {}
for feature in svm_features:
    # find column to sort by
    index = 'not_found'
    for column in title_break:
        if feature[1] == 'ranked' and column == feature[0]:
            index = title_break.index(column)
            break
        elif feature[1] == 'raw' and column+'_r' == feature[0]:
            index = title_break.index(column)
            break
    if index != 'not_found':
        # sort by correct column
        breaks.sort(key=itemgetter(index),reverse=True)
        # cycle through pockets
        for i in range(len(breaks)):
            if feature[1] == 'ranked':
                # append fractional ranking
                pocket_info[str(breaks[i][0])][feature[0]] = round((i + 1) / float(len(breaks)),4)
            elif feature[1] == 'raw':
                # scale value using min and max to be between 0 and 1
                scaled_value = (breaks[i][index] - svm_features_range[feature[0]][0]) / float(svm_features_range[feature[0]][1] - svm_features_range[feature[0]][0])
                pocket_info[str(breaks[i][0])][feature[0]] = round(scaled_value,4)
# write SVM input file: one line per pocket, dummy label '0', then id:value pairs
svm_in_file = open(input_prefix+'.svm','w')
for j in range(len(pocket_info)):
    line = '0 '
    for i in range(len(svm_features)):
        if svm_features[i][0] in pocket_info[str(j)]:
            # feature ID is an integer for SVMlight input
            line += ' '+str(i+1)+':'+str(pocket_info[str(j)][svm_features[i][0]])
    svm_in_file.write(line+'\n')
svm_in_file.close()
# RUN SVM
print 'Running SVM'
svm_classify_cmd = svm_light_dir+'/svm_classify '+input_prefix+'.svm '+allopred_dir+'/svm_model.txt '+input_prefix+'.pred'
os.system(svm_classify_cmd)
# FORM OUTPUT FILE
# read and sort prediction file
print 'Forming output file'
svm_pred_file = open(input_prefix+'.pred')
svm_pred_lines = svm_pred_file.readlines()
svm_pred_file.close()
ordering = []
for i in range(len(svm_pred_lines)):
    ordering.append([i,svm_pred_lines[i].rstrip()])
# NOTE(review): the SVM scores are compared as strings here, which orders
# correctly only for uniformly-formatted values of the same sign - confirm
ordering.sort(key=itemgetter(1),reverse=True)
# remove prediction file
os.system('rm '+input_prefix+'.pred')
# write tab-delimited output file
out_file = open(input_prefix+'.out','w')
out_file.write('# - AlloPred output -\n')
out_file.write('#\n')
out_file.write('# PDB file: '+pdb_file+'\n')
out_file.write('# Active site residue file: '+act_res_filepath+'\n')
out_file.write('# Active site residues: '+act_res_lines[0].rstrip()+'\n')
out_file.write('# Pocket directory: '+pocket_folder+'\n')
out_file.write('#\n')
out_file.write('# C_n is the NMA effect over n modes; E_n is the NMA effect per perturbed residue over n modes\n')
out_file.write('#\n')
out_file.write('# AlloPred_rank\t'+clean_string+'\n')
out_file.write('#\n')
# write pocket lines in order of AlloPred ranking
for i in range(len(ordering)):
    line = str(i)+'\t'+lines[ordering[i][0]]
    out_file.write(line+'\n')
out_file.close()
print 'Output file written'
# finish
print 'AlloPred finished successfully'
| {
"repo_name": "jgreener64/allopred",
"path": "run_allopred.py",
"copies": "1",
"size": "11049",
"license": "mit",
"hash": 3582375095768818700,
"line_mean": 33.1018518519,
"line_max": 164,
"alpha_frac": 0.6766223188,
"autogenerated": false,
"ratio": 3.1027801179443975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4279402436744397,
"avg_score": null,
"num_lines": null
} |
__all__ = ['optimalSVHT']
import numpy as np
def optimalSVHT(matrix):
    """Optimal hard threshold for the singular values of a noisy matrix.

    Implements Gavish & Donoho (2014): given Y = X + sigma*Z, with Z an iid
    standard-Gaussian noise matrix, this returns the singular-value
    threshold that optimally recovers the low-rank X by truncated SVD.
    Valid for matrices whose size is large compared with their rank.

    Input:
        matrix: 2D array whose effective rank is to be estimated
    Output:
        thrKnownNoise: multiply by sigma to get the threshold when sigma is known
        thrUnknownNoise: threshold to apply directly when sigma is unknown
        noiseEstimation: estimate of the noise standard deviation
        sv: the singular values of the matrix
    """
    rows, cols = matrix.shape
    beta = float(rows) / cols  # aspect ratio of the matrix

    # lambda*(beta): coefficient for the known-noise threshold (exact form)
    w = (8.0 * beta) / (beta + 1 + np.sqrt(beta ** 2 + 14 * beta + 1))
    lambda_star = np.sqrt(2.0 * (beta + 1) + w)

    # omega(beta): cubic fit used for the unknown-noise threshold
    omega = 0.56 * beta ** 3 - 0.95 * beta ** 2 + 1.82 * beta + 1.43

    singular_values = np.linalg.svd(matrix)[1]
    median_sv = np.median(singular_values)

    known_noise_thr = lambda_star * np.sqrt(cols)
    unknown_noise_thr = omega * median_sv
    noise_estimate = median_sv / (np.sqrt(cols) * (lambda_star / omega))

    return known_noise_thr, unknown_noise_thr, noise_estimate, singular_values
if __name__ == "__main__":
    # Demo: generate a random rank-3 matrix plus iid Gaussian noise and
    # check that the thresholds recover the true rank.
    rank = 3
    m = 80
    n = 80
    sigma = 0.02
    X1 = np.random.randn(m,rank)
    X2 = np.random.randn(rank,n)
    Y = np.dot(X1, X2) + sigma * np.random.randn(m,n)
    thrKnownNoise, thrUnknownNoise, noiseEstimation, wSVD = optimalSVHT(Y)
    # the number of singular values above each threshold is the estimated rank
    rankKnownNoise = len(np.where(wSVD > thrKnownNoise*sigma)[0])
    rankUnknownNoise = len(np.where(wSVD > thrUnknownNoise)[0])
    print "Known sigma : {0} - rank={1}".format(thrKnownNoise * sigma, rankKnownNoise)
print "Unknown sigma : {0} - rank={1} - sigmaNoise={2}".format(thrUnknownNoise, rankUnknownNoise, noiseEstimation) | {
"repo_name": "aasensio/pyAndres",
"path": "optimalSVHT.py",
"copies": "1",
"size": "2312",
"license": "mit",
"hash": 5771860972406984000,
"line_mean": 38.2033898305,
"line_max": 135,
"alpha_frac": 0.7197231834,
"autogenerated": false,
"ratio": 3.1327913279132793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4352514511313279,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Option', 'Configuration', 'option']

from typing import Type, FrozenSet, Dict, Any

import attr
import logging

from . import exceptions

# Module logger is pinned to DEBUG so the option discovery performed by
# ConfigurationMeta (below) can always be traced.
logger = logging.getLogger(__name__)  # type: logging.Logger
logger.setLevel(logging.DEBUG)
@attr.s(frozen=True)
class Option(object):
    """A single configuration option: an attribute name paired with its type.

    Instances are immutable (attrs ``frozen=True``).
    """
    name = attr.ib(type=str)
    typ = attr.ib(type=Type)

    @property
    def _field(self) -> str:
        """Name of the private attribute backing this option on a
        Configuration instance."""
        return "__{}".format(self.name)

    @attr.s(frozen=True)
    class Builder(object):
        """Partial Option: holds the type until its attribute name is known."""
        typ = attr.ib(type=Type)

        def build(self, name: str) -> 'Option':
            """Finalise this builder into an Option called *name*."""
            return Option(name, self.typ)
def option(*args, **kwargs) -> Option.Builder:
    """Declarative shorthand for defining an Option inside a Configuration body."""
    builder = Option.Builder(*args, **kwargs)
    return builder
class ConfigurationMeta(type):
    """Metaclass for Configuration subclasses.

    Harvests Option.Builder class attributes from the class body, builds
    them into Options, stores the set in the ``options`` class attribute and
    replaces each declaration with a read-only property.
    """
    def __new__(mcl,
                cls_name: str,
                bases,  # FIXME
                ns: Dict[str, Any]
                ):
        # collect and build option definitions
        builders = {}  # type: Dict[str, Option.Builder]
        for name in ns:
            if isinstance(ns[name], Option.Builder):
                logger.debug("found option: %s", name)
                builders[name] = ns[name]

        logger.debug("building options")
        options = frozenset(
            b.build(name) for (name, b) in builders.items()
        )  # type: FrozenSet[Option]
        logger.debug("built options: %s", options)

        logger.debug("storing options in options property")
        ns['options'] = options
        logger.debug("stored options in options property")

        logger.debug("constructing options")
        for option in options:
            # The backing-field name is bound as a default argument so each
            # generated property captures its own option (avoids the
            # late-binding closure pitfall).
            getter = lambda self, f=option._field: getattr(self, f)
            ns[option.name] = property(getter)
        logger.debug("constructed options")

        return super().__new__(mcl, cls_name, bases, ns)
class Configuration(object, metaclass=ConfigurationMeta):
    """Immutable-ish bag of named option values.

    Concrete subclasses declare options with :func:`option`; the metaclass
    turns each declaration into a read-only property backed by a
    ``__<name>`` instance attribute.
    """

    @classmethod
    def from_dict(cls: Type['Configuration'],
                  dkt: Dict[str, Any]
                  ) -> 'Configuration':
        """Build a configuration from a mapping of option name to value."""
        return cls(**dkt)

    def __init__(self, *args, **kwargs) -> None:
        """Accepts exactly one keyword argument per declared option.

        Raises TypeError for positional arguments, missing options, or
        unexpected keyword arguments.
        """
        cls_name = self.__class__.__name__
        options = self.__class__.options  # type: FrozenSet[Option]

        # were any positional arguments passed to the constructor?
        if args:
            msg = "constructor [{}] accepts no positional arguments but {} {} given"  # noqa: pycodestyle
            msg = msg.format(cls_name,
                             "was" if len(args) == 1 else "were",
                             len(args))
            raise TypeError(msg)

        # set values for each option
        for opt in options:
            try:
                val = kwargs[opt.name]
            except KeyError:
                msg = "missing keyword argument [{}] to constructor [{}]"
                msg = msg.format(opt.name, cls_name)
                raise TypeError(msg)
            setattr(self, opt._field, val)

        # were any unexpected keyword arguments provided?
        if len(kwargs) > len(options):
            actual_args = set(n for n in kwargs)
            expected_args = set(opt.name for opt in options)
            unexpected_arguments = list(actual_args - expected_args)
            msg = "unexpected keyword arguments [{}] supplied to constructor [{}]"  # noqa: pycodestyle
            msg = msg.format('; '.join(unexpected_arguments), cls_name)
            raise TypeError(msg)

    def __getitem__(self, name: str) -> Any:
        """Dictionary-style access to an option value by name."""
        # FIXME use frozendict
        try:
            options = self.__class__.options
            opt = next(opt for opt in options if opt.name == name)
        except StopIteration:
            msg = "no option [{}] in state [{}]"
            # BUG FIX: the result of format() was previously discarded, so
            # the KeyError carried the raw, unformatted template.
            msg = msg.format(name, self.__class__.__name__)
            raise KeyError(msg)
        return getattr(self, opt._field)

    def __hash__(self) -> int:
        all_opts = [self[opt.name] for opt in self.__class__.options]
        all_opts.insert(0, self.__class__.__name__)
        return hash(tuple(all_opts))

    def __eq__(self, other: 'Configuration') -> bool:
        # Comparing configurations of different concrete types is a
        # programming error in this codebase, not a "not equal" outcome.
        if type(self) != type(other):
            msg = "illegal comparison of configurations: [{}] vs. [{}]"
            msg = msg.format(self.__class__.__name__, other.__class__.__name__)
            raise exceptions.HoustonException(msg)
        return self.__dict__ == other.__dict__

    def to_dict(self) -> Dict[str, Any]:
        """Return a fresh dict of option name -> value for all options."""
        fields = {}  # type: Dict[str, Any]
        for opt in self.__class__.options:
            fields[opt.name] = getattr(self, opt._field)
        return fields

    def __repr__(self) -> str:
        fields = self.to_dict()
        for (name, val) in fields.items():
            if isinstance(val, float):
                formatted = "{:.3f}".format(val)
            else:
                formatted = str(val)
            # BUG FIX: the original assigned the raw value back
            # (fields[name] = val), silently discarding the formatting.
            fields[name] = formatted
        body = '; '.join(["{}: {}".format(k, v) for (k, v) in fields.items()])
        return "{}({})".format(self.__class__.__name__, body)
| {
"repo_name": "squaresLab/Houston",
"path": "houston/configuration.py",
"copies": "1",
"size": "5071",
"license": "mit",
"hash": 2108550771146111500,
"line_mean": 33.7328767123,
"line_max": 105,
"alpha_frac": 0.5450601459,
"autogenerated": false,
"ratio": 4.073092369477911,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5118152515377912,
"avg_score": null,
"num_lines": null
} |
__all__ = ['OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from operator import itemgetter as _itemgetter, eq as _eq
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap, \
ifilter as _ifilter, imap as _imap
try:
from thread import get_ident
except ImportError:
from dummy_thread import get_ident
def _recursive_repr(user_function):
    'Decorator to make a repr function return "..." for a recursive call'
    # Keys currently being repr'd, per (object id, thread id).
    active = set()

    def wrapper(self):
        marker = id(self), get_ident()
        if marker in active:
            return '...'
        active.add(marker)
        try:
            return user_function(self)
        finally:
            active.discard(marker)

    # Can't use functools.wraps() here because of bootstrap issues
    wrapper.__module__ = getattr(user_function, '__module__')
    wrapper.__doc__ = getattr(user_function, '__doc__')
    wrapper.__name__ = getattr(user_function, '__name__')
    return wrapper
################################################################################
### OrderedDict
################################################################################
class OrderedDict(dict, MutableMapping):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    # NOTE(review): globalNodeList is never referenced anywhere in this file
    # and is shared by all instances - looks vestigial; confirm before use.
    globalNodeList = []

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: create the circular sentinel node and
            # the key -> link map.
            self.__root = root = [None, None, None]  # sentinel node
            PREV = 0
            NEXT = 1
            root[PREV] = root[NEXT] = root
            self.__map = {}
        self.update(*args, **kwds)

    def __setitem__(self, key, value, PREV=0, NEXT=1, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        # (PREV/NEXT/dict_setitem defaults are a speed hack: locals are faster
        # to look up than globals or attributes.)
        if key not in self:
            root = self.__root
            last = root[PREV]
            last[NEXT] = root[PREV] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, PREV=0, NEXT=1, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link[PREV]
        link_next = link[NEXT]
        link_prev[NEXT] = link_next
        link_next[PREV] = link_prev

    def __iter__(self, NEXT=1, KEY=2):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root[NEXT]
        while curr is not root:
            yield curr[KEY]
            curr = curr[NEXT]

    def __reversed__(self, PREV=0, KEY=2):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root[PREV]
        while curr is not root:
            yield curr[KEY]
            curr = curr[PREV]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        # Temporarily strip the linked-list machinery so vars() captures only
        # user-added instance attributes; it is rebuilt on unpickling.
        tmp = self.__map, self.__root
        del self.__map, self.__root
        inst_dict = vars(self).copy()
        self.__map, self.__root = tmp
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break the per-key links explicitly to help break reference cycles.
            for node in self.__map.itervalues():
                del node[:]
            self.__root[:] = [self.__root, self.__root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    # Generic mapping behaviour is delegated to the MutableMapping mixins.
    setdefault = MutableMapping.setdefault
    update = MutableMapping.update
    pop = MutableMapping.pop
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    iterkeys = MutableMapping.iterkeys
    itervalues = MutableMapping.itervalues
    iteritems = MutableMapping.iteritems
    __ne__ = MutableMapping.__ne__

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        key = next(reversed(self) if last else iter(self))
        value = self.pop(key)
        return key, value

    @_recursive_repr
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).

        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            # Order-sensitive: compare the item streams lazily and pairwise.
            return len(self)==len(other) and \
                   all(_imap(_eq, self.iteritems(), other.iteritems()))
        return dict.__eq__(self, other)
| {
"repo_name": "baseblack/ReproWeb",
"path": "3rdParty/python/ordereddict.py",
"copies": "1",
"size": "7155",
"license": "bsd-2-clause",
"hash": -5961520905938475000,
"line_mean": 34.9547738693,
"line_max": 85,
"alpha_frac": 0.5693920335,
"autogenerated": false,
"ratio": 4.198943661971831,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007527596076596198,
"num_lines": 199
} |
__all__ = ['OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
################################################################################
### OrderedDict
################################################################################
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries, but keyword arguments are not recommended because
their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, PREV=0, NEXT=1, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[PREV]
last[NEXT] = root[PREV] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, PREV=0, NEXT=1, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
NEXT, KEY = 1, 2
root = self.__root
curr = root[NEXT]
while curr is not root:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
PREV, KEY = 0, 2
root = self.__root
curr = root[PREV]
while curr is not root:
yield curr[KEY]
curr = curr[PREV]
def clear(self):
'od.clear() -> None. Remove all items from od.'
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
dict.clear(self)
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) pairs in od'
for k in self:
yield (k, self[k])
update = MutableMapping.update
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
key = next(reversed(self) if last else iter(self))
value = self.pop(key)
return key, value
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
If not specified, the value defaults to None.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
    def __ne__(self, other):
        'od.__ne__(y) <==> od!=y'
        # Defined via == so it inherits __eq__'s order sensitivity.
        return not self == other
    # -- the following methods support python 3.x style dictionary views --
    # The views are built on the collections ABCs and stay live: they
    # reflect later mutations of the dictionary.
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
| {
"repo_name": "enthought/depsolver",
"path": "depsolver/compat/_collections.py",
"copies": "1",
"size": "7765",
"license": "bsd-3-clause",
"hash": 1244417524683258400,
"line_mean": 33.2070484581,
"line_max": 85,
"alpha_frac": 0.5577591758,
"autogenerated": false,
"ratio": 4.099788806758184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009598452064689465,
"num_lines": 227
} |
__all__ = ('OrderedDict',)
try:
from collections import OrderedDict
except ImportError:
# http://code.activestate.com/recipes/576693/
from UserDict import DictMixin
    class OrderedDict(dict, DictMixin):
        # Python 2.4-2.6 backport (ActiveState recipe 576693): a dict that
        # remembers insertion order using a circular doubly linked list of
        # [key, prev, next] cells anchored at a sentinel node.
        def __init__(self, *args, **kwds):
            # Same signature as dict(): at most one positional mapping/iterable.
            if len(args) > 1:
                raise TypeError('expected at most 1 arguments, got %d' % len(args))
            try:
                self.__end
            except AttributeError:
                # First construction (not a re-__init__): set up the list.
                self.clear()
            self.update(*args, **kwds)
        def clear(self):
            self.__end = end = []
            end += [None, end, end] # sentinel node for doubly linked list
            self.__map = {} # key --> [key, prev, next]
            dict.clear(self)
        def __setitem__(self, key, value):
            # New keys are appended at the tail of the linked list; existing
            # keys keep their position.
            if key not in self:
                end = self.__end
                curr = end[1]
                curr[2] = end[1] = self.__map[key] = [key, curr, end]
            dict.__setitem__(self, key, value)
        def __delitem__(self, key):
            dict.__delitem__(self, key)
            # Unlink the removed key's list cell.
            key, prev, next = self.__map.pop(key)
            prev[2] = next
            next[1] = prev
        def __iter__(self):
            # Walk the linked list forward from the sentinel.
            end = self.__end
            curr = end[2]
            while curr is not end:
                yield curr[0]
                curr = curr[2]
        def __reversed__(self):
            # Walk the linked list backward from the sentinel.
            end = self.__end
            curr = end[1]
            while curr is not end:
                yield curr[0]
                curr = curr[1]
        def popitem(self, last=True):
            # LIFO when last is true, FIFO otherwise (py2 iterator .next()).
            if not self:
                raise KeyError('dictionary is empty')
            key = reversed(self).next() if last else iter(self).next()
            value = self.pop(key)
            return key, value
        def __reduce__(self):
            # Pickle support: strip the linked-list internals from the
            # instance dict; order is restored through the items argument.
            items = [[k, self[k]] for k in self]
            tmp = self.__map, self.__end
            del self.__map, self.__end
            inst_dict = vars(self).copy()
            self.__map, self.__end = tmp
            if inst_dict:
                return (self.__class__, (items,), inst_dict)
            return self.__class__, (items,)
        def keys(self):
            return list(self)
        # Generic implementations supplied by DictMixin, all routed through
        # the ordered __iter__/__setitem__/__delitem__ above.
        setdefault = DictMixin.setdefault
        update = DictMixin.update
        pop = DictMixin.pop
        values = DictMixin.values
        items = DictMixin.items
        iterkeys = DictMixin.iterkeys
        itervalues = DictMixin.itervalues
        iteritems = DictMixin.iteritems
        def __repr__(self):
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        def copy(self):
            return self.__class__(self)
        @classmethod
        def fromkeys(cls, iterable, value=None):
            # New ordered dict with keys from iterable, all mapped to value.
            d = cls()
            for key in iterable:
                d[key] = value
            return d
        def __eq__(self, other):
            # Order-sensitive against another OrderedDict, order-insensitive
            # against a plain mapping.
            if isinstance(other, OrderedDict):
                return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
            return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other | {
"repo_name": "r3gis3r/android2po",
"path": "android2po/compat.py",
"copies": "1",
"size": "3293",
"license": "bsd-2-clause",
"hash": 3718322584942820000,
"line_mean": 30.6730769231,
"line_max": 83,
"alpha_frac": 0.4691770422,
"autogenerated": false,
"ratio": 4.355820105820106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009293753008373216,
"num_lines": 104
} |
__all__ = ('override_settings',)
try:
from django.test.utils import override_settings
except ImportError:
# we are in Django 1.3
from django.conf import settings, UserSettingsHolder
from django.utils.functional import wraps
    class override_settings(object):
        """
        Acts as either a decorator, or a context manager. If it's a decorator
        it takes a function and returns a wrapped function. If it's a
        contextmanager it's used with the ``with`` statement. In either event
        entering/exiting are called before and after, respectively,
        the function/block is executed.
        This class was backported from Django 1.5
        As django.test.signals.setting_changed is not supported in 1.3,
        it's not sent on changing settings.
        """
        def __init__(self, **kwargs):
            # kwargs: setting name -> override value; also capture the
            # current settings object so disable() can restore it.
            self.options = kwargs
            self.wrapped = settings._wrapped
        def __enter__(self):
            self.enable()
        def __exit__(self, exc_type, exc_value, traceback):
            self.disable()
        def __call__(self, test_func):
            # Decorator mode: patch a TransactionTestCase subclass's
            # setup/teardown hooks, or wrap a plain callable.
            from django.test import TransactionTestCase
            if isinstance(test_func, type):
                if not issubclass(test_func, TransactionTestCase):
                    raise Exception(
                        "Only subclasses of Django SimpleTestCase "
                        "can be decorated with override_settings")
                original_pre_setup = test_func._pre_setup
                original_post_teardown = test_func._post_teardown
                def _pre_setup(innerself):
                    self.enable()
                    original_pre_setup(innerself)
                def _post_teardown(innerself):
                    original_post_teardown(innerself)
                    self.disable()
                test_func._pre_setup = _pre_setup
                test_func._post_teardown = _post_teardown
                return test_func
            else:
                @wraps(test_func)
                def inner(*args, **kwargs):
                    with self:
                        return test_func(*args, **kwargs)
                return inner
        def enable(self):
            # Layer the overrides on top of the current settings.
            override = UserSettingsHolder(settings._wrapped)
            for key, new_value in self.options.items():
                setattr(override, key, new_value)
            settings._wrapped = override
        def disable(self):
            # Restore the settings object captured at construction time.
            settings._wrapped = self.wrapped
| {
"repo_name": "treyhunner/django-crispy-forms",
"path": "crispy_forms/tests/utils.py",
"copies": "25",
"size": "2494",
"license": "mit",
"hash": -4779961259839020000,
"line_mean": 35.6764705882,
"line_max": 77,
"alpha_frac": 0.5609462711,
"autogenerated": false,
"ratio": 4.899803536345776,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 68
} |
__all__ = ('override_settings', 'setup')
try:
    from django import setup
except ImportError:
    # Older Django has no django.setup(); provide a no-op stand-in so
    # callers can invoke it unconditionally.
    def setup():
        pass
try:
    from django.test.runner import DiscoverRunner
except ImportError:
    # Django 1.5 or earlier. Fallback to a patched version of the old runner.
    # Should be removed when dropping 1.4 and 1.5 support.
    from django.test.simple import DjangoTestSuiteRunner
    class DiscoverRunner(DjangoTestSuiteRunner):
        def run_tests(self, tests, *args, **kwargs):
            # The old runner expects app-relative labels; strip the
            # intermediate 'tests.' package segment before delegating.
            tests = [
                test.replace('crispy_forms.tests.', 'crispy_forms.')
                for test in tests
            ]
            return super(DiscoverRunner, self).run_tests(tests, *args, **kwargs)
try:
    from django.template import engines
    # Django >= 1.8: compile through the configured 'django' engine.
    def get_template_from_string(s):
        return engines['django'].from_string(s)
except ImportError:
    # Old template loading private API in Django < 1.8.
    # Remove this when dropping 1.4 and 1.7 support.
    from django.template import loader
    def get_template_from_string(s):
        return loader.get_template_from_string(s)
try:
    from django.test.utils import override_settings
except ImportError:
    # we are in Django 1.3
    from django.conf import settings, UserSettingsHolder
    from django.utils.functional import wraps
    class override_settings(object):
        """
        Acts as either a decorator, or a context manager. If it's a decorator
        it takes a function and returns a wrapped function. If it's a
        contextmanager it's used with the ``with`` statement. In either event
        entering/exiting are called before and after, respectively,
        the function/block is executed.
        This class was backported from Django 1.5
        As django.test.signals.setting_changed is not supported in 1.3,
        it's not sent on changing settings.
        """
        def __init__(self, **kwargs):
            # kwargs: setting name -> override value; capture the current
            # settings object so disable() can restore it.
            self.options = kwargs
            self.wrapped = settings._wrapped
        def __enter__(self):
            self.enable()
        def __exit__(self, exc_type, exc_value, traceback):
            self.disable()
        def __call__(self, test_func):
            # Decorator mode: patch a TransactionTestCase subclass's
            # setup/teardown hooks, or wrap a plain callable.
            from django.test import TransactionTestCase
            if isinstance(test_func, type):
                if not issubclass(test_func, TransactionTestCase):
                    raise Exception(
                        "Only subclasses of Django SimpleTestCase "
                        "can be decorated with override_settings")
                original_pre_setup = test_func._pre_setup
                original_post_teardown = test_func._post_teardown
                def _pre_setup(innerself):
                    self.enable()
                    original_pre_setup(innerself)
                def _post_teardown(innerself):
                    original_post_teardown(innerself)
                    self.disable()
                test_func._pre_setup = _pre_setup
                test_func._post_teardown = _post_teardown
                return test_func
            else:
                @wraps(test_func)
                def inner(*args, **kwargs):
                    with self:
                        return test_func(*args, **kwargs)
                return inner
        def enable(self):
            # Layer the overrides on top of the current settings.
            override = UserSettingsHolder(settings._wrapped)
            for key, new_value in self.options.items():
                setattr(override, key, new_value)
            settings._wrapped = override
        def disable(self):
            # Restore the settings object captured at construction time.
            settings._wrapped = self.wrapped
| {
"repo_name": "uranusjr/django-crispy-forms-ng",
"path": "crispy_forms/tests/utils.py",
"copies": "1",
"size": "3572",
"license": "mit",
"hash": 6399384676496187000,
"line_mean": 32.6981132075,
"line_max": 80,
"alpha_frac": 0.5904255319,
"autogenerated": false,
"ratio": 4.591259640102828,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5681685172002827,
"avg_score": null,
"num_lines": null
} |
# allowable multiple choice node and edge features
# Most atom-feature lists end with 'misc' so safe_index() can map any
# unseen value onto the final bucket instead of failing.
allowable_features = {
    'possible_atomic_num_list' : list(range(1, 119)) + ['misc'],
    'possible_chirality_list' : [
        'CHI_UNSPECIFIED',
        'CHI_TETRAHEDRAL_CW',
        'CHI_TETRAHEDRAL_CCW',
        'CHI_OTHER'
    ],
    'possible_degree_list' : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],
    'possible_formal_charge_list' : [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 'misc'],
    'possible_numH_list' : [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],
    'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'],
    'possible_hybridization_list' : [
        'SP', 'SP2', 'SP3', 'SP3D', 'SP3D2', 'misc'
    ],
    'possible_is_aromatic_list': [False, True],
    'possible_is_in_ring_list': [False, True],
    # Bond (edge) features below.
    'possible_bond_type_list' : [
        'SINGLE',
        'DOUBLE',
        'TRIPLE',
        'AROMATIC',
        'misc'
    ],
    'possible_bond_stereo_list': [
        'STEREONONE',
        'STEREOZ',
        'STEREOE',
        'STEREOCIS',
        'STEREOTRANS',
        'STEREOANY',
    ],
    'possible_is_conjugated_list': [False, True],
}
def safe_index(l, e):
    """Return the index of element *e* in list *l*.

    When *e* is not present, return the last index — by convention the
    'misc' bucket in the feature lists above.
    """
    try:
        return l.index(e)
    except ValueError:
        # list.index raises ValueError for a missing element; a bare
        # except here would also swallow KeyboardInterrupt and friends.
        return len(l) - 1
# # miscellaneous case
# i = safe_index(allowable_features['possible_atomic_num_list'], 'asdf')
# assert allowable_features['possible_atomic_num_list'][i] == 'misc'
# # normal case
# i = safe_index(allowable_features['possible_atomic_num_list'], 2)
# assert allowable_features['possible_atomic_num_list'][i] == 2
def atom_to_feature_vector(atom):
    """Convert an rdkit atom object into a list of feature indices.

    :param atom: rdkit atom object
    :return: list of 9 integer indices into allowable_features
    """
    feats = allowable_features
    return [
        safe_index(feats['possible_atomic_num_list'], atom.GetAtomicNum()),
        feats['possible_chirality_list'].index(str(atom.GetChiralTag())),
        safe_index(feats['possible_degree_list'], atom.GetTotalDegree()),
        safe_index(feats['possible_formal_charge_list'], atom.GetFormalCharge()),
        safe_index(feats['possible_numH_list'], atom.GetTotalNumHs()),
        safe_index(feats['possible_number_radical_e_list'], atom.GetNumRadicalElectrons()),
        safe_index(feats['possible_hybridization_list'], str(atom.GetHybridization())),
        feats['possible_is_aromatic_list'].index(atom.GetIsAromatic()),
        feats['possible_is_in_ring_list'].index(atom.IsInRing()),
    ]
# from rdkit import Chem
# mol = Chem.MolFromSmiles('Cl[C@H](/C=C/C)Br')
# atom = mol.GetAtomWithIdx(1) # chiral carbon
# atom_feature = atom_to_feature_vector(atom)
# assert atom_feature == [5, 2, 4, 5, 1, 0, 2, 0, 0]
def get_atom_feature_dims():
    """Return the cardinality of each atom feature list, in feature order."""
    atom_feature_keys = (
        'possible_atomic_num_list',
        'possible_chirality_list',
        'possible_degree_list',
        'possible_formal_charge_list',
        'possible_numH_list',
        'possible_number_radical_e_list',
        'possible_hybridization_list',
        'possible_is_aromatic_list',
        'possible_is_in_ring_list',
    )
    return [len(allowable_features[key]) for key in atom_feature_keys]
def bond_to_feature_vector(bond):
    """Convert an rdkit bond object into a list of feature indices.

    :param bond: rdkit bond object
    :return: list of 3 integer indices into allowable_features
    """
    feats = allowable_features
    return [
        safe_index(feats['possible_bond_type_list'], str(bond.GetBondType())),
        feats['possible_bond_stereo_list'].index(str(bond.GetStereo())),
        feats['possible_is_conjugated_list'].index(bond.GetIsConjugated()),
    ]
# uses same molecule as atom_to_feature_vector test
# bond = mol.GetBondWithIdx(2) # double bond with stereochem
# bond_feature = bond_to_feature_vector(bond)
# assert bond_feature == [1, 2, 0]
def get_bond_feature_dims():
    """Return the cardinality of each bond feature list, in feature order."""
    bond_feature_keys = (
        'possible_bond_type_list',
        'possible_bond_stereo_list',
        'possible_is_conjugated_list',
    )
    return [len(allowable_features[key]) for key in bond_feature_keys]
def atom_feature_vector_to_dict(atom_feature):
    """Map a 9-element atom feature-index vector back to a readable dict."""
    (atomic_num_idx, chirality_idx, degree_idx, formal_charge_idx,
     num_h_idx, number_radical_e_idx, hybridization_idx,
     is_aromatic_idx, is_in_ring_idx) = atom_feature
    feats = allowable_features
    return {
        'atomic_num': feats['possible_atomic_num_list'][atomic_num_idx],
        'chirality': feats['possible_chirality_list'][chirality_idx],
        'degree': feats['possible_degree_list'][degree_idx],
        'formal_charge': feats['possible_formal_charge_list'][formal_charge_idx],
        'num_h': feats['possible_numH_list'][num_h_idx],
        'num_rad_e': feats['possible_number_radical_e_list'][number_radical_e_idx],
        'hybridization': feats['possible_hybridization_list'][hybridization_idx],
        'is_aromatic': feats['possible_is_aromatic_list'][is_aromatic_idx],
        'is_in_ring': feats['possible_is_in_ring_list'][is_in_ring_idx],
    }
# # uses same atom_feature as atom_to_feature_vector test
# atom_feature_dict = atom_feature_vector_to_dict(atom_feature)
# assert atom_feature_dict['atomic_num'] == 6
# assert atom_feature_dict['chirality'] == 'CHI_TETRAHEDRAL_CCW'
# assert atom_feature_dict['degree'] == 4
# assert atom_feature_dict['formal_charge'] == 0
# assert atom_feature_dict['num_h'] == 1
# assert atom_feature_dict['num_rad_e'] == 0
# assert atom_feature_dict['hybridization'] == 'SP3'
# assert atom_feature_dict['is_aromatic'] == False
# assert atom_feature_dict['is_in_ring'] == False
def bond_feature_vector_to_dict(bond_feature):
    """Map a 3-element bond feature-index vector back to a readable dict."""
    bond_type_idx, bond_stereo_idx, is_conjugated_idx = bond_feature
    feats = allowable_features
    return {
        'bond_type': feats['possible_bond_type_list'][bond_type_idx],
        'bond_stereo': feats['possible_bond_stereo_list'][bond_stereo_idx],
        'is_conjugated': feats['possible_is_conjugated_list'][is_conjugated_idx],
    }
# # uses same bond as bond_to_feature_vector test
# bond_feature_dict = bond_feature_vector_to_dict(bond_feature)
# assert bond_feature_dict['bond_type'] == 'DOUBLE'
# assert bond_feature_dict['bond_stereo'] == 'STEREOE'
# assert bond_feature_dict['is_conjugated'] == False | {
"repo_name": "snap-stanford/ogb",
"path": "ogb/utils/features.py",
"copies": "1",
"size": "6795",
"license": "mit",
"hash": 8965827276664335000,
"line_mean": 39.9397590361,
"line_max": 108,
"alpha_frac": 0.6441501104,
"autogenerated": false,
"ratio": 3.181179775280899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4325329885680899,
"avg_score": null,
"num_lines": null
} |
# Allow access to command-line arguments
import sys
import pyrow
# Import the core and GUI elements of Qt
from PyQt4.Qt import *
class PM3(QWidget):
    # Widget that paints the rowing-monitor text readout and a row of
    # "buoys" whose horizontal offset follows the distance rowed.
    def __init__(self):
        super(PM3,self).__init__()
        self.pmCount = 0       # number of monitor refreshes received
        self.strokeCount = 0   # number of stroke-timer ticks
        self.display = ""      # multi-line text block painted top-left
        self.distance =0       # last distance reading pushed by the main window
    def paintEvent(self, event):
        # Repaint: background rectangle, monitor text, then the buoy row.
        qp = QPainter()
        font = QFont("times")
        fm = QFontMetrics(font)
        qp.begin(self)
        wnd = qp.window()
        w = wnd.width()
        h = wnd.height()
        qp.setBrush(QColor(127,127,192 ))
        qp.drawRect(0,0,w-1,h-1)
        qp.drawText(10, 10, self.display)
        bw = w /6 # 6 buoys on the screen
        dw = self.distance % 10
        # NOTE(review): this file is Python 2 (print statements below), so
        # dw/10 is integer division and offset is always 0 for dw < 10 —
        # presumably a fractional scroll offset was intended; confirm.
        offset = (dw/10) *bw
        for ix in range(0,10):
            qp.drawEllipse(ix * bw +offset, h/4, 5,5)
            qp.drawEllipse(ix * bw +offset, h/2, 5,5)
            qp.drawText(ix * bw +offset, h/4-20, str(10* ((ix -3) + int(self.distance/10))))
        qp.end()
class RaspiRowMain(QMainWindow):
def __init__(self, parent = None):
super(RaspiRowMain, self).__init__(parent)
ergs = pyrow.find()
if len(ergs) == 0: exit("No ergs found.")
self.erg = pyrow.pyrow(ergs[0])
self.initUI()
def onStrokeTime(self):
self.pm3.strokeCount = self.pm3.strokeCount +1
def onPmRefresh(self):
results = self.erg.getMonitor(True)
cstate = results['strokestate'] & 0xF
s = "Stroke : " + str(results['spm']) + "\r\n"
s = s+ "Distance : " + str(results['distance']) + "\r\n"
s = s + "Time : " + str(results['time']) + "\r\n"
s = s + "Power : " + str(results['power']) +"\r\n"
s = s + "Calories/hour : " + str(results['calhr'])+"\r\n"
s = s + "Calories : " + str(results['calories'])+"\r\n"
s = s + "Pace : " + str(results['pace'])+"\r\n"
self.pm3.distance = results['distance']
self.pm3.display = s
self.pm3.pmCount = self.pm3.pmCount +1
self.pm3.update()
def initUI(self):
openUserAction = QAction('&Open user', self)
openUserAction.setShortcut('Ctrl+O')
openUserAction.setStatusTip('CHoose or create a user')
# openUserAction.triggered.connect(qApp.quit)
exitAction = QAction('E&xit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(qApp.quit)
menuBar = self.menuBar()
fileMenu = menuBar.addMenu('&File')
fileMenu.addAction(openUserAction)
fileMenu.addSeparator()
fileMenu.addAction(exitAction)
self.toolbar = self.addToolBar('Main')
self.toolbar.setFloatable(False)
self.toolbar.addAction(openUserAction)
self.toolbar.addSeparator()
self.toolbar.addAction(exitAction)
self.statusBar().showMessage('Ready')
self.mainBody = QVBoxLayout()
self.pm3 = PM3()
self.pm3.setGeometry(10,10,100,100)
self.mainBody.addWidget(self.pm3)
centralWidget = QWidget()
centralWidget.setLayout(self.mainBody)
self.setCentralWidget(centralWidget)
self.setGeometry(50,50,640,480)
self.setWindowTitle('RaspiRow')
# Add some timers
self.strokeTime = QTimer(self)
self.strokeTime.timeout.connect(self.onStrokeTime)
self.strokeTime.start(2000)
self.pmRefresh = QTimer(self)
self.pmRefresh.timeout.connect(self.onPmRefresh)
self.pmRefresh.start(50)
self.show()
print "after the show"
print self.rect()
def main():
    """Create the Qt application and main window, then run the event loop."""
    app = QApplication(sys.argv)
    mw = RaspiRowMain()
    # exec_() blocks until the window closes; its status becomes the exit code.
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
| {
"repo_name": "chrisreynolds/RaspiRow",
"path": "RaspiRowMain.py",
"copies": "1",
"size": "3782",
"license": "bsd-2-clause",
"hash": -5584557144017902000,
"line_mean": 32.7678571429,
"line_max": 92,
"alpha_frac": 0.5780010576,
"autogenerated": false,
"ratio": 3.300174520069808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9298143132158658,
"avg_score": 0.016006489102229775,
"num_lines": 112
} |
"""Allow a simple way to ensure execution is confined to one thread.
This module defines the Affinity data type that runs code on a single thread.
An instance of the class will execute functions only on the thread that made
the object in the first place. The class is useful in a GUI's main loop."""
__author__ = 'Stephen "Zero" Chappell <Noctis.Skytower@gmail.com>'
__date__ = '4 June 2012'
__version__ = 1, 0, 0
################################################################################
import sys
import _thread
import queue
################################################################################
def slots(names=''):
    "Sets the __slots__ variable in the calling context with private names."
    # Writes into the caller's frame locals — a class body when used as
    # intended; each name gets a '__' prefix so it is name-mangled private.
    sys._getframe(1).f_locals['__slots__'] = \
        tuple('__' + name for name in names.replace(',', ' ').split())
################################################################################
class Affinity:
    "Affinity() -> Affinity instance"
    # Executes submitted functions only on the thread that created the
    # instance; other threads' calls are queued and awaited.
    slots('thread, action')
    def __init__(self):
        "Initializes instance with thread identity and job queue."
        self.__thread = _thread.get_ident()
        self.__action = queue.Queue()
    def __call__(self, func, *args, **kwargs):
        "Executes function on creating thread and returns result."
        if _thread.get_ident() == self.__thread:
            # Owning thread: drain any pending delegates queued by other
            # threads, then run the requested function directly.
            while not self.__action.empty():
                self.__action.get_nowait()()
            return func(*args, **kwargs)
        # Foreign thread: queue the call and block in .value until the
        # owning thread executes it.
        delegate = _Delegate(func, args, kwargs)
        self.__action.put_nowait(delegate)
        return delegate.value
################################################################################
class _Delegate:
    "_Delegate(func, args, kwargs) -> _Delegate instance"
    # One queued call: stores the callable plus arguments, and a lock that
    # stays held until the call has run, so readers of .value block.
    slots('func, args, kwargs, mutex, value, error')
    def __init__(self, func, args, kwargs):
        "Initializes instance from arguments and prepares to run."
        self.__func = func
        self.__args = args
        self.__kwargs = kwargs
        self.__mutex = _thread.allocate_lock()
        self.__mutex.acquire()
    def __call__(self):
        "Executes code with arguments and allows value retrieval."
        try:
            self.__value = self.__func(*self.__args, **self.__kwargs)
            self.__error = False
        except:
            # Capture the exception instance so .value can re-raise it on
            # the waiting thread.
            self.__value = sys.exc_info()[1]
            self.__error = True
        self.__mutex.release()
    @property
    def value(self):
        "Waits for value availability and raises or returns data."
        self.__mutex.acquire()
        if self.__error:
            raise self.__value
        return self.__value
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578151_affinitypy/recipe-578151.py",
"copies": "1",
"size": "2633",
"license": "mit",
"hash": -7013793165825529000,
"line_mean": 32.3291139241,
"line_max": 80,
"alpha_frac": 0.5267755412,
"autogenerated": false,
"ratio": 4.603146853146853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5629922394346853,
"avg_score": null,
"num_lines": null
} |
"""allow bash-completion for argparse with argcomplete if installed
needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
to find the magic string, so _ARGCOMPLETE env. var is never set, and
this does not need special code.
Function try_argcomplete(parser) should be called directly before
the call to ArgumentParser.parse_args().
The filescompleter is what you normally would use on the positional
arguments specification, in order to get "dirname/" after "dirn<TAB>"
instead of the default "dirname ":
optparser.add_argument(Config._file_or_dir, nargs='*'
).completer=filescompleter
Other, application specific, completers should go in the file
doing the add_argument calls as they need to be specified as .completer
attributes as well. (If argcomplete is not installed, the function the
attribute points to will not be used).
SPEEDUP
=======
The generic argcomplete script for bash-completion
(/etc/bash_completion.d/python-argcomplete.sh )
uses a python program to determine startup script generated by pip.
You can speed up completion somewhat by changing this script to include
# PYTHON_ARGCOMPLETE_OK
so the python-argcomplete-check-easy-install-script does not
need to be called to find the entry point of the code and see if that is
marked with PYTHON_ARGCOMPLETE_OK
INSTALL/DEBUGGING
=================
To include this support in another application that has setup.py generated
scripts:
- add the line:
# PYTHON_ARGCOMPLETE_OK
near the top of the main python entry point
- include in the file calling parse_args():
from _argcomplete import try_argcomplete, filescompleter
, call try_argcomplete just before parse_args(), and optionally add
filescompleter to the positional arguments' add_argument()
If things do not work right away:
- switch on argcomplete debugging with (also helpful when doing custom
completers):
export _ARC_DEBUG=1
- run:
python-argcomplete-check-easy-install-script $(which appname)
echo $?
will echo 0 if the magic line has been found, 1 if not
- sometimes it helps to find early on errors using:
_ARGCOMPLETE=1 _ARC_DEBUG=1 appname
which should throw a KeyError: 'COMPLINE' (which is properly set by the
global argcomplete script).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from glob import glob
class FastFilesCompleter(object):
    "Fast file completer class"

    def __init__(self, directories=True):
        self.directories = directories

    def __call__(self, prefix, **kwargs):
        """Return filesystem completions for prefix (non-option args only)."""
        if os.path.sep in prefix[1:]:
            strip = len(os.path.dirname(prefix) + os.path.sep)
        else:
            strip = 0
        matches = []
        if "*" not in prefix and "?" not in prefix:
            # Plain prefix (unix/bash): inside a directory also offer dotfiles.
            if not prefix or prefix[-1] == os.path.sep:
                matches.extend(glob(prefix + ".*"))
            prefix += "*"
        matches.extend(glob(prefix))
        completions = []
        for path in sorted(matches):
            if os.path.isdir(path):
                path += "/"
            # Strip the directory part, like bash (not like compgen).
            completions.append(path[strip:])
        return completions
if os.environ.get("_ARGCOMPLETE"):
    # The argcomplete global completion hook sets _ARGCOMPLETE; only then
    # do we pay the import cost and wire up real completion.
    try:
        import argcomplete.completers
    except ImportError:
        sys.exit(-1)
    filescompleter = FastFilesCompleter()
    def try_argcomplete(parser):
        argcomplete.autocomplete(parser, always_complete_options=False)
else:
    # Not running under bash completion: provide inert stand-ins.
    def try_argcomplete(parser):
        pass
    filescompleter = None
| {
"repo_name": "pfctdayelise/pytest",
"path": "src/_pytest/_argcomplete.py",
"copies": "4",
"size": "3724",
"license": "mit",
"hash": -1166359100735830500,
"line_mean": 33.1651376147,
"line_max": 74,
"alpha_frac": 0.6922663802,
"autogenerated": false,
"ratio": 4.1331853496115425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 109
} |
"""Allow bash-completion for argparse with argcomplete if installed.
Needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
to find the magic string, so _ARGCOMPLETE env. var is never set, and
this does not need special code).
Function try_argcomplete(parser) should be called directly before
the call to ArgumentParser.parse_args().
The filescompleter is what you normally would use on the positional
arguments specification, in order to get "dirname/" after "dirn<TAB>"
instead of the default "dirname ":
optparser.add_argument(Config._file_or_dir, nargs='*').completer=filescompleter
Other, application specific, completers should go in the file
doing the add_argument calls as they need to be specified as .completer
attributes as well. (If argcomplete is not installed, the function the
attribute points to will not be used).
SPEEDUP
=======
The generic argcomplete script for bash-completion
(/etc/bash_completion.d/python-argcomplete.sh)
uses a python program to determine startup script generated by pip.
You can speed up completion somewhat by changing this script to include
# PYTHON_ARGCOMPLETE_OK
so the python-argcomplete-check-easy-install-script does not
need to be called to find the entry point of the code and see if that is
marked with PYTHON_ARGCOMPLETE_OK.
INSTALL/DEBUGGING
=================
To include this support in another application that has setup.py generated
scripts:
- Add the line:
# PYTHON_ARGCOMPLETE_OK
near the top of the main python entry point.
- Include in the file calling parse_args():
from _argcomplete import try_argcomplete, filescompleter
Call try_argcomplete just before parse_args(), and optionally add
filescompleter to the positional arguments' add_argument().
If things do not work right away:
- Switch on argcomplete debugging with (also helpful when doing custom
completers):
export _ARC_DEBUG=1
- Run:
python-argcomplete-check-easy-install-script $(which appname)
echo $?
will echo 0 if the magic line has been found, 1 if not.
- Sometimes it helps to find early on errors using:
_ARGCOMPLETE=1 _ARC_DEBUG=1 appname
which should throw a KeyError: 'COMPLINE' (which is properly set by the
global argcomplete script).
"""
import argparse
import os
import sys
from glob import glob
from typing import Any
from typing import List
from typing import Optional
class FastFilesCompleter:
    """Fast file completer class."""

    def __init__(self, directories: bool = True) -> None:
        self.directories = directories

    def __call__(self, prefix: str, **kwargs: Any) -> List[str]:
        """Return filesystem completions for prefix (non-option args only)."""
        if os.path.sep in prefix[1:]:
            strip = len(os.path.dirname(prefix) + os.path.sep)
        else:
            strip = 0
        matches = []
        if "*" not in prefix and "?" not in prefix:
            # Plain prefix (unix/bash): inside a directory also offer dotfiles.
            if not prefix or prefix[-1] == os.path.sep:
                matches.extend(glob(prefix + ".*"))
            prefix += "*"
        matches.extend(glob(prefix))
        completions = []
        for path in sorted(matches):
            if os.path.isdir(path):
                path += "/"
            # Strip the directory part, like bash (not like compgen).
            completions.append(path[strip:])
        return completions
if os.environ.get("_ARGCOMPLETE"):
    # The argcomplete global completion hook sets _ARGCOMPLETE; only then
    # do we pay the import cost and wire up real completion.
    try:
        import argcomplete.completers
    except ImportError:
        sys.exit(-1)
    filescompleter: Optional[FastFilesCompleter] = FastFilesCompleter()
    def try_argcomplete(parser: argparse.ArgumentParser) -> None:
        argcomplete.autocomplete(parser, always_complete_options=False)
else:
    # Not running under bash completion: provide inert stand-ins.
    def try_argcomplete(parser: argparse.ArgumentParser) -> None:
        pass
    filescompleter = None
| {
"repo_name": "RonnyPfannschmidt/pytest",
"path": "src/_pytest/_argcomplete.py",
"copies": "4",
"size": "3810",
"license": "mit",
"hash": 6918198904568441000,
"line_mean": 31.5641025641,
"line_max": 82,
"alpha_frac": 0.698687664,
"autogenerated": false,
"ratio": 4.061833688699361,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.676052135269936,
"avg_score": null,
"num_lines": null
} |
"""Allow basic operations with XML"""
import xml.etree.ElementTree as ET
import os
COLLADA_SCHEMA_TEXT = "{http://www.collada.org/2005/11/COLLADASchema}"
INPUT_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) + "/input/"
OUTPUT_DIRECTORY = os.path.dirname(os.path.realpath(__file__)) + "/output/"
TREE = 0
ROOT = 0
def generate_skeletons_blender(input_dir, output_dir):
    """Generate a skeleton file for every .dae file found in *input_dir*.

    Results are written to *output_dir* via save_skeleton_file().
    """
    global TREE
    global ROOT
    global INPUT_DIRECTORY
    global OUTPUT_DIRECTORY
    # Fix: the globals must be set BEFORE get_dae_files() runs; the
    # original assigned them inside the loop, so files were listed from
    # the old INPUT_DIRECTORY but parsed from input_dir.
    INPUT_DIRECTORY = input_dir
    OUTPUT_DIRECTORY = output_dir
    for dae_file in get_dae_files():
        TREE = ET.parse(INPUT_DIRECTORY + dae_file)
        ROOT = TREE.getroot()
        save_skeleton_file()
def generate_skeletons():
    """Generate a skeleton for every file in the input folder."""
    global TREE
    global ROOT
    for dae_file in get_dae_files():
        TREE = ET.parse(INPUT_DIRECTORY + dae_file)
        ROOT = TREE.getroot()
        save_skeleton_file()
def get_dae_files():
    """Return the list of dae files in the input directory."""
    return [name for name in os.listdir(INPUT_DIRECTORY) if ".dae" in name]
def get_bone_names():
    """Return the names of the bones inside the dae file."""
    source_path = (COLLADA_SCHEMA_TEXT + "library_controllers/" +
                   COLLADA_SCHEMA_TEXT + "controller/" +
                   COLLADA_SCHEMA_TEXT + "skin/"+
                   COLLADA_SCHEMA_TEXT + "source/")
    source_node = ROOT.find(source_path)
    return source_node.text.split()
def get_armature_name():
    """Return the name attribute of the first visual-scene child (armature)."""
    scene_node = ROOT.find(COLLADA_SCHEMA_TEXT + "library_visual_scenes/" +
                           COLLADA_SCHEMA_TEXT + "visual_scene/")
    return scene_node.attrib.get("name")
def is_a_skeleton_node(xml_node):
    """Return True when the element is NOT a COLLADA <node> element.

    Callers use this to skip non-node children while walking the scene.
    """
    return xml_node.tag != COLLADA_SCHEMA_TEXT + "node"
def get_skeleton_bones(recursive_function):
    """Return a string in 0ad format containing all bones with their hierarchy."""
    scene_nodes_path = (COLLADA_SCHEMA_TEXT + "library_visual_scenes/"
                        + COLLADA_SCHEMA_TEXT + "visual_scene/"
                        + COLLADA_SCHEMA_TEXT + "node/")
    pieces = []
    for subnode in ROOT.findall(scene_nodes_path):
        if not is_a_skeleton_node(subnode):
            pieces.append(recursive_function(subnode.findall("./"), subnode))
    return "".join(pieces)
def get_sub_nodes(xml_node, xml_node_path):
    """Return a string containing the child nodes, loaded recursively."""
    return "".join(
        recursive_load(child.findall(xml_node_path), child)
        for child in xml_node
    )
def get_sub_nodes_target(xml_node, xml_node_path):
    """Return a string containing the child nodes (with targets), loaded recursively."""
    return "".join(
        recursive_load_target(child.findall(xml_node_path), child)
        for child in xml_node
    )
def recursive_load(xml_node, xml_node_root):
    """Recursively serialize a bone node and its children as <bone> XML."""
    if is_a_skeleton_node(xml_node_root):
        return ""
    node_id = xml_node_root.attrib['id']
    # Prop points and IK helpers are not real bones.
    if "prop" in node_id or "IK" in node_id:
        return ""
    bone_name = xml_node_root.attrib["name"].replace(get_armature_name() + '_', '')
    return ("<bone name=\"" + bone_name
            + "\">" + get_sub_nodes(xml_node, "./")
            + "</bone>")
def recursive_load_target(xml_node, xml_node_root):
    """Recursively serialize a bone node (with <target>) and its children."""
    if is_a_skeleton_node(xml_node_root):
        return ""
    node_id = xml_node_root.attrib['id']
    # Prop points and IK helpers are not real bones.
    if "prop" in node_id or "IK" in node_id:
        return ""
    bone_name = xml_node_root.attrib["name"].replace(get_armature_name() + '_', '')
    return ("<bone name=\"" + bone_name
            + "\"><target>" + bone_name + "</target>"
            + get_sub_nodes_target(xml_node, "./")
            + "</bone>")
def write_xml():
    """Build and return the full skeletons XML document as a string."""
    # The armature name is stable for the currently loaded file, so read it once.
    armature = get_armature_name()
    parts = [
        "<?xml version='1.0' encoding='utf8'?>",
        "<skeletons>",
        "<standard_skeleton title=\"" + armature.replace("_", " ") + "\" id=\"",
        armature.replace(" ", "_") + "\">",
        get_skeleton_bones(recursive_load),
        "</standard_skeleton>",
        "<skeleton title=\"" + armature.replace('_', ' ') + "\" target=\"",
        armature.replace(" ", "_") + "\">",
        "<identifier><root>" + get_root_bone().replace(armature + '_', '') + "</root></identifier>",
        get_skeleton_bones(recursive_load_target),
        "</skeleton>",
        "</skeletons>",
    ]
    return "".join(parts)
def save_skeleton_file():
    """Save the final skeleton file."""
    skeleton_root = ET.fromstring(write_xml())
    indent(skeleton_root)
    output_path = OUTPUT_DIRECTORY + get_armature_name() + ".xml"
    document = ET.ElementTree(skeleton_root)
    document.write(output_path, encoding='utf-8', xml_declaration=True, short_empty_elements=True)
    print("Done generating file: " + output_path)
def get_root_bone():
    """Return the name of the root bone, or "" when none is found."""
    node_path = (COLLADA_SCHEMA_TEXT + "library_visual_scenes/" +
                 COLLADA_SCHEMA_TEXT + "visual_scene/" +
                 COLLADA_SCHEMA_TEXT + "node/")
    for candidate in ROOT.findall(node_path):
        # The first scene child that is a real <node> element is the root bone.
        if not is_a_skeleton_node(candidate):
            return candidate.attrib["name"]
    return ""
def indent(elem, level=0):
    """Pretty-print helper: recursively add newline/indent text to *elem*."""
    pad = "\n" + level*" "
    children = list(elem)
    if children:
        # Open the element on its own line and indent its first child.
        if not elem.text or not elem.text.strip():
            elem.text = pad + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = pad
        for child in children:
            indent(child, level+1)
        # The last child's tail closes the parent at the parent's level.
        last = children[-1]
        if not last.tail or not last.tail.strip():
            last.tail = pad
    elif level and (not elem.tail or not elem.tail.strip()):
        elem.tail = pad
generate_skeletons()
| {
"repo_name": "StanleySweet/0AD-Skeleton-Generator",
"path": "skeleton_generator.py",
"copies": "1",
"size": "6313",
"license": "mit",
"hash": -2284567141942363400,
"line_mean": 34.8693181818,
"line_max": 134,
"alpha_frac": 0.5930619357,
"autogenerated": false,
"ratio": 3.550618672665917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46436806083659166,
"avg_score": null,
"num_lines": null
} |
"""allow deleting of rooms
Revision ID: f8f1883f81af
Revises: ff700db83195
Create Date: 2019-10-10 15:24:33.797124
"""
from alembic import op
import sqlalchemy as sa
import pycroft
# revision identifiers, used by Alembic.
revision = 'f8f1883f81af'
down_revision = 'ff700db83195'
branch_labels = None
depends_on = None
def upgrade():
    # Recreate the room foreign keys with ON DELETE behaviour so rooms can be
    # deleted: hosts keep existing (room set to NULL), patch ports and room log
    # entries are removed along with the room.
    fk_specs = [
        ('host', 'SET NULL'),
        ('patch_port', 'CASCADE'),
        ('room_log_entry', 'CASCADE'),
    ]
    for table, ondelete in fk_specs:
        constraint = table + '_room_id_fkey'
        op.drop_constraint(constraint, table, type_='foreignkey')
        op.create_foreign_key(constraint, table, 'room', ['room_id'], ['id'], ondelete=ondelete)
def downgrade():
    # Restore the original foreign keys without any ON DELETE behaviour,
    # in the reverse order of upgrade().
    for table in ('room_log_entry', 'patch_port', 'host'):
        constraint = table + '_room_id_fkey'
        op.drop_constraint(constraint, table, type_='foreignkey')
        op.create_foreign_key(constraint, table, 'room', ['room_id'], ['id'])
| {
"repo_name": "agdsn/pycroft",
"path": "pycroft/model/alembic/versions/f8f1883f81af_allow_deleting_of_rooms.py",
"copies": "1",
"size": "1685",
"license": "apache-2.0",
"hash": -9169940880612791000,
"line_mean": 42.2051282051,
"line_max": 123,
"alpha_frac": 0.6617210682,
"autogenerated": false,
"ratio": 3.0143112701252237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.916273817387149,
"avg_score": 0.0026588328907470207,
"num_lines": 39
} |
"""Allow dragging tabs or pressing keys to change their order."""
from __future__ import annotations
import tkinter
from functools import partial
from porcupine import get_main_window, get_tab_manager, tabs
def on_drag(event: tkinter.Event[tabs.TabManager]) -> str | None:
    """Reorder tabs while the user drags a tab label with the mouse."""
    manager = event.widget
    if manager.identify(event.x, event.y) != "label":  # type: ignore[no-untyped-call]
        return None
    destination_index = manager.index(f"@{event.x},{event.y}")  # type: ignore[no-untyped-call]
    manager.insert(destination_index, manager.select())  # type: ignore[no-untyped-call]
    return "break"
def select_tab_n(n: int, event: tkinter.Event[tkinter.Misc]) -> str | None:
    """Select the n'th tab (1-based); do nothing when out of range."""
    try:
        get_tab_manager().select(n - 1)
    except tkinter.TclError:  # index out of bounds
        return None
    else:
        return "break"
def select_left_or_right(diff: int) -> str | None:
    """Move the tab selection by *diff* positions (-1 left, +1 right)."""
    current = get_tab_manager().select()
    if current is None:
        return None
    new_index = get_tab_manager().index(current) + diff  # type: ignore[no-untyped-call]
    try:
        get_tab_manager().select(new_index)
    except tkinter.TclError:  # index out of bounds
        return None
    else:
        return "break"
def move_left_or_right(diff: int) -> str | None:
    """Move the selected tab itself by *diff* positions (-1 left, +1 right)."""
    current = get_tab_manager().select()
    if current is None:
        return None
    destination = get_tab_manager().index(current) + diff  # type: ignore[no-untyped-call]
    try:
        get_tab_manager().insert(destination, current)  # type: ignore[no-untyped-call]
    except tkinter.TclError:  # index out of bounds
        return None
    else:
        return "break"
# Smooth-scroll threshold for macOS wheel deltas: bigger value --> less sensitive.
MACOS_WHEEL_STEP = 2.5
# Ignore mouse wheeling when the mouse is below this height (pixels from the
# top of the tab manager, i.e. outside the tab label row).
WHEEL_Y_MAX = 50
def wheel_callback(diff: int, event: tkinter.Event[tkinter.Misc]) -> None:
    """Switch tabs when the wheel event happens near the top of the tab manager.

    It's possible to trigger this somewhere else than at the top of the tab
    manager, so events below the tab label row are ignored.
    """
    # Bug fix: use the named WHEEL_Y_MAX constant instead of a hardcoded 50,
    # so the comment on the constant and the behaviour can't drift apart.
    if event.y < WHEEL_Y_MAX:
        select_left_or_right(diff)
def switch_tabs_on_mouse_wheel() -> None:
    """Bind mouse-wheel events so that wheeling over the tab bar switches tabs.

    Each windowing system reports wheel events differently, hence the three
    platform branches below.
    """
    tabmanager = get_tab_manager()
    if tabmanager.tk.call("tk", "windowingsystem") == "x11":
        # X11 delivers wheel up/down as Button-4/Button-5 press events.
        tabmanager.bind("<Button-4>", partial(wheel_callback, -1), add=True)
        tabmanager.bind("<Button-5>", partial(wheel_callback, 1), add=True)
    elif tabmanager.tk.call("tk", "windowingsystem") == "aqua":
        # Handle smooth scrolling
        accumulator = 0.0

        def reset(event: tkinter.Event[tkinter.Misc]) -> None:
            nonlocal accumulator
            accumulator = 0

        def scroll(event: tkinter.Event[tkinter.Misc]) -> None:
            # Accumulate fractional deltas and switch one tab per
            # MACOS_WHEEL_STEP worth of scrolling, in either direction.
            nonlocal accumulator
            accumulator += event.delta
            if accumulator > MACOS_WHEEL_STEP:
                accumulator -= MACOS_WHEEL_STEP
                wheel_callback(-1, event)
            elif accumulator < -MACOS_WHEEL_STEP:
                accumulator += MACOS_WHEEL_STEP
                wheel_callback(1, event)

        tabmanager.bind("<MouseWheel>", scroll, add=True)
        # Drop any partial scroll when the pointer leaves the tab manager.
        tabmanager.bind("<Leave>", reset, add=True)
    else:  # Windows
        def real_callback(event: tkinter.Event[tkinter.Misc]) -> None:
            # On Windows event.delta is positive for wheel-up.
            if event.delta > 0:
                wheel_callback(-1, event)
            else:
                wheel_callback(1, event)

        tabmanager.bind("<MouseWheel>", real_callback, add=True)
def setup() -> None:
    """Register drag, keyboard, and mouse-wheel tab-switching bindings."""
    get_tab_manager().bind("<Button1-Motion>", on_drag, add=True)

    # This doesn't use enable_traversal() because we want more bindings than it
    # creates. The bindings also need to be configurable.
    window = get_main_window()

    def bind_diff(event_name: str, callback, diff: int) -> None:
        # Capture callback and diff in this call's scope to avoid the
        # late-binding lambda pitfall.
        window.bind(event_name, (lambda event: callback(diff)), add=True)

    bind_diff("<<TabOrder:SelectLeft>>", select_left_or_right, -1)
    bind_diff("<<TabOrder:SelectRight>>", select_left_or_right, 1)
    bind_diff("<<TabOrder:MoveLeft>>", move_left_or_right, -1)
    bind_diff("<<TabOrder:MoveRight>>", move_left_or_right, 1)

    for n in range(1, 10):
        window.bind(f"<<TabOrder:SelectTab{n}>>", partial(select_tab_n, n), add=True)
    switch_tabs_on_mouse_wheel()
| {
"repo_name": "Akuli/porcupine",
"path": "porcupine/plugins/tab_order.py",
"copies": "1",
"size": "4244",
"license": "mit",
"hash": 7622730780100572000,
"line_mean": 33.2258064516,
"line_max": 104,
"alpha_frac": 0.6267672008,
"autogenerated": false,
"ratio": 3.5396163469557966,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9661148199634431,
"avg_score": 0.0010470696242732187,
"num_lines": 124
} |
"""Allow dragging tabs out of the Porcupine window."""
from __future__ import annotations
import logging
import os
import pickle
import subprocess
import sys
import tempfile
import threading
import tkinter
from typing import Any, Tuple, Union
from porcupine import get_main_window, get_parsed_args, get_tab_manager, menubar, pluginloader, settings, tabs
log = logging.getLogger(__name__)
class SpecialState:
    """Marker type for sentinel drag states; instances are compared by identity."""
    pass
# Sentinels distinguishing the possible drag states from an actual
# (tab, state) tuple; always compared with `is`.
NO_TABS = SpecialState()
NOT_POPPABLE = SpecialState()
NOT_DRAGGING = SpecialState()
def is_on_window(event: tkinter.Event[tkinter.Misc]) -> bool:
    """Return True when the event's screen coordinates fall inside the
    toplevel window containing the event's widget (with a 50px allowance
    above the window for the menu bar and window border)."""
    window = event.widget.winfo_toplevel()
    left = window.winfo_x()
    right = left + window.winfo_width()
    y = window.winfo_y()
    top = y - 50  # menu bar and window border
    bottom = y + window.winfo_height()
    return left < event.x_root < right and top < event.y_root < bottom
class PopManager:
    """Tracks a tab drag and pops tabs into a new Porcupine process on drop."""

    def __init__(self) -> None:
        # Borderless tooltip window shown while dragging outside the main window.
        self._window = tkinter.Toplevel()
        self._window.withdraw()
        self._window.overrideredirect(True)

        # this is not ttk because i want it to look yellowish
        self._label = tkinter.Label(self._window, fg='#000', bg='#ffc')
        self._label.pack()

        # One of the SpecialState sentinels, or a (tab, state) tuple for the
        # tab currently being dragged.
        self._dragged_state: Union[SpecialState, Tuple[tabs.Tab, Any]] = NOT_DRAGGING

    def _show_tooltip(self, event: tkinter.Event[tkinter.Misc]) -> None:
        """Place the tooltip window centered above the mouse cursor."""
        if self._window.state() == 'withdrawn':
            self._window.deiconify()

        left = event.x_root - (self._label.winfo_reqwidth() // 2)  # centered
        top = event.y_root - self._label.winfo_reqheight()  # above cursor
        self._window.geometry(f'+{left}+{top}')

    # no need to return 'break' imo, other plugins are free to follow
    # drags and drops
    def on_drag(self, event: tkinter.Event[tkinter.Misc]) -> None:
        """Update the drag state and the tooltip as the mouse moves."""
        if is_on_window(event):
            self._window.withdraw()
            return

        if self._dragged_state is NOT_DRAGGING:
            tab = get_tab_manager().select()
            if tab is None:
                # no tabs to pop up
                self._dragged_state = NO_TABS
                self._window.withdraw()
                return

            state = tab.get_state()
            if state is None:
                self._dragged_state = NOT_POPPABLE
                self._label.config(text="This tab cannot\nbe popped up.")
            else:
                self._dragged_state = (tab, state)
                self._label.config(text="Drop the tab here\nto pop it up...")

        self._show_tooltip(event)

    def on_drop(self, event: tkinter.Event[tkinter.Misc]) -> None:
        """Pop the dragged tab into a new window when dropped outside the main window."""
        self._window.withdraw()
        if not (is_on_window(event) or isinstance(self._dragged_state, SpecialState)):
            log.info("popping off a tab")
            tab, state = self._dragged_state

            # At least 600x400, bigger if necessary. Can't use
            # get_main_window.winfo_reqwidth because that's huge
            # when there's a lot of tabs.
            width = max(600, tab.winfo_reqwidth())
            height = max(400, get_main_window().winfo_reqheight())

            # Center the window
            x = event.x_root - round(width/2)
            y = event.y_root - round(height/2)

            # Make sure it's not off screen
            screen_width = get_main_window().winfo_screenwidth()
            screen_height = get_main_window().winfo_screenheight()
            width = min(width, screen_width)
            height = min(height, screen_height)
            x = min(x, screen_width - width)
            y = min(y, screen_height - height)
            x = max(0, x)
            y = max(0, y)

            self.pop(tab, state, f'{width}x{height}+{x}+{y}')

        self._dragged_state = NOT_DRAGGING

    def pop(self, tab: tabs.Tab, state: Any, geometry: str) -> None:
        """Close *tab* here and reopen it in a new Porcupine process at *geometry*.

        The tab's pickled (type, state, geometry) is handed to the child
        process through a temp file named in an environment variable.
        """
        log.info(f"Popping {repr(tab)} to {geometry} begins")
        message = (type(tab), state, geometry)
        with tempfile.NamedTemporaryFile(delete=False) as file:
            log.info(f"writing pickled state to {file.name}")
            pickle.dump(message, file)

        settings.save()  # let the new process use up-to-date settings

        # The subprocess must be called so that it has a sane sys.path.
        # In particular, import or don't import from current working
        # directory exactly like the porcupine that is currently running.
        # Importing from current working directory is bad if it contains
        # e.g. queue.py (#31), but good when that's where porcupine is
        # meant to be imported from (#230).
        code = f'import sys; sys.path[:] = {sys.path}; from porcupine.__main__ import main; main()'
        args = [sys.executable, '-c', code]
        args.append('--without-plugins')
        args.append(','.join({
            info.name
            for info in pluginloader.plugin_infos
            if info.status == pluginloader.Status.DISABLED_ON_COMMAND_LINE
        } | {
            # these plugins are not suitable for popups
            # TODO: geometry and restart stuff don't get saved
            'restart',
            'geometry',
        }))

        if get_parsed_args().verbose_logger is not None:
            args.append('--verbose-logger')
            args.append(get_parsed_args().verbose_logger)

        process = subprocess.Popen(
            args,
            env={**os.environ, 'PORCUPINE_POPPINGTABS_STATE_FILE': file.name})
        log.debug(f"started subprocess with PID {process.pid}")
        get_tab_manager().close_tab(tab)

        # don't exit python until the subprocess exits, also log stuff
        threading.Thread(target=self._waiting_thread, args=[process]).start()

    def pop_next_to_current_window(self) -> None:
        """Pop the selected tab onto whichever half of the screen the main window isn't on."""
        tab = get_tab_manager().select()
        assert tab is not None
        state = tab.get_state()
        assert state is not None

        # Popup goes on the half of screen where the current main window is not
        window_center = get_main_window().winfo_rootx() + get_main_window().winfo_width()/2
        half_screen_width = round(get_main_window().winfo_screenwidth() / 2)
        screen_height = get_main_window().winfo_screenheight()
        if window_center > half_screen_width:
            geometry = f'{half_screen_width}x{screen_height}+0+0'
        else:
            geometry = f'{half_screen_width}x{screen_height}+{half_screen_width}+0'
        self.pop(tab, state, geometry)

    def _waiting_thread(self, process: subprocess.Popen[bytes]) -> None:
        """Block until the popped-off process exits, logging the outcome."""
        status = process.wait()
        if status == 0:
            log.debug(f"subprocess with PID {process.pid} exited successfully")
        else:
            log.warning(f"subprocess with PID {process.pid} exited with status {status}")
def open_tab_from_state_file() -> None:
    """Restore a popped-off tab from the state file named in the environment.

    Does nothing when PORCUPINE_POPPINGTABS_STATE_FILE is not set.
    """
    path = os.environ.pop('PORCUPINE_POPPINGTABS_STATE_FILE', None)
    if path is None:
        return

    with open(path, 'rb') as file:
        tabtype, state, geometry = pickle.load(file)
    get_main_window().geometry(geometry)
    get_tab_manager().add_tab(tabtype.from_state(get_tab_manager(), state))

    # the state file is not removed earlier because if anything above
    # fails, it still exists and can be recovered somehow
    #
    # most of the time this should "just work", so user-unfriendy recovery
    # is not a huge problem
    os.remove(path)
def setup() -> None:
    """Hook the drag/drop handlers and the "Pop Tab" menu entry into Porcupine."""
    manager = PopManager()
    tab_manager = get_tab_manager()
    tab_manager.bind('<Button1-Motion>', manager.on_drag, add=True)
    tab_manager.bind('<ButtonRelease-1>', manager.on_drop, add=True)

    view_menu = menubar.get_menu("View")
    view_menu.add_command(label="Pop Tab", command=manager.pop_next_to_current_window)
    menubar.set_enabled_based_on_tab(
        "View/Pop Tab",
        (lambda tab: tab is not None and tab.get_state() is not None),
    )
    open_tab_from_state_file()
| {
"repo_name": "Akuli/editor",
"path": "porcupine/plugins/poppingtabs.py",
"copies": "1",
"size": "7905",
"license": "mit",
"hash": -1896532285347874800,
"line_mean": 37.0048076923,
"line_max": 115,
"alpha_frac": 0.6107526882,
"autogenerated": false,
"ratio": 3.7446707721459025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48554234603459023,
"avg_score": null,
"num_lines": null
} |
# Allow flashing of the RPi LED
# Provided the following have been run as root on the command line:
# echo none > /sys/class/leds/led0/trigger
# chmod a+w /sys/class/leds/led0/brightness
import os.path
import time
def has_led_control():
    """Return True when the RPi LED brightness file exists and can be used."""
    led_path = '/sys/class/leds/led0/brightness'
    return os.path.isfile(led_path)
def led_on():
    """Turn the LED on (no-op when brightness control is unavailable)."""
    if not has_led_control():
        return
    with open('/sys/class/leds/led0/brightness', 'w') as f:
        print('1', file=f)
def led_off():
    """Turn the LED off (no-op when brightness control is unavailable)."""
    if not has_led_control():
        return
    with open('/sys/class/leds/led0/brightness', 'w') as f:
        print('0', file=f)
def set_led(b):
    """Turn the LED on or off according to the truthiness of *b*."""
    (led_on if b else led_off)()
def is_led_on():
    """Return True when the LED brightness file reports a non-zero value.

    Returns False when LED control is unavailable (previously an implicit
    None was returned, which is equally falsy for callers).
    """
    if not has_led_control():
        return False
    with open('/sys/class/leds/led0/brightness', 'r') as f:
        status = f.readline().strip()
    print("STATUS IS " + status)
    # Bug fix: the original used `status is not '0'`, an *identity* comparison
    # with a string literal — implementation-dependent and a SyntaxWarning on
    # modern Python. Value comparison is what was meant.
    return status != '0'
def blink_led(sec: int, frequency: float, onTime: float, numBlinks: int):
    """Blink the LED in groups of *numBlinks* flashes for roughly *sec* seconds.

    Each cycle lasts *frequency* seconds: an off pause followed by *numBlinks*
    flashes of *onTime* seconds separated by *onTime* gaps.
    """
    offTime = frequency - onTime * (2 * numBlinks - 1)
    cycles = int(float(sec) / float(frequency))
    for _ in range(cycles):
        led_off()
        time.sleep(offTime)
        for blink in range(numBlinks):
            led_on()
            time.sleep(onTime)
            # Leave the LED on after the last flash; the next cycle (or the
            # final led_off below) turns it off.
            if blink != numBlinks - 1:
                led_off()
                time.sleep(onTime)
    led_off()
def blink_led_updating(sec: int):
    """While we're updating status: give a few quick blinks."""
    blink_led(sec, numBlinks=1, onTime=0.1, frequency=0.2)
def blink_led_internet_down(sec: int):
    """Called when the internet is down: slow flash."""
    blink_led(sec, numBlinks=1, onTime=1, frequency=1)
def blink_led_router_down(sec: int):
    """Called when the router is down: two quick blinks."""
    blink_led(sec, numBlinks=2, onTime=0.1, frequency=0.5)
def blink_led_modem_down(sec: int):
    """Called when the modem is unreachable: three quick blinks."""
    blink_led(sec, numBlinks=3, onTime=0.1, frequency=0.5)
# keep LED on for this long, then turn it back off
def show_led(sec: int):
    # Toggle off briefly first since the LED doesn't reliably turn on for long
    # durations after certain flash patterns.
    # NOTE(review): the original comment said "retry turning on every second",
    # but the code only toggles once — confirm which behaviour is intended.
    led_off()
    time.sleep(0.5)
    led_on()
    time.sleep(sec)
    led_off()
| {
"repo_name": "jhclark/tattletale",
"path": "tattletaleled.py",
"copies": "1",
"size": "2230",
"license": "apache-2.0",
"hash": -7881620281091023000,
"line_mean": 29.9722222222,
"line_max": 118,
"alpha_frac": 0.6255605381,
"autogenerated": false,
"ratio": 3.0801104972375692,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4205671035337569,
"avg_score": null,
"num_lines": null
} |
"""Allow for Spring and Intro eval relationships
Revision ID: 5615d58892a1
Revises: 6f6b843e2b8f
Create Date: 2017-04-27 15:40:40.640402
"""
# revision identifiers, used by Alembic.
revision = '5615d58892a1'
down_revision = '6f6b843e2b8f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Replace the single `evaluation` FK with separate Spring/Intro eval FKs."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('conditional', sa.Column('i_evaluation', sa.Integer(), nullable=True))
    op.add_column('conditional', sa.Column('s_evaluation', sa.Integer(), nullable=True))
    op.drop_constraint('conditional_evaluation_fkey', 'conditional', type_='foreignkey')
    # Passing None as the name lets the database generate the constraint names
    # (on PostgreSQL: conditional_s_evaluation_fkey / conditional_i_evaluation_fkey).
    op.create_foreign_key(None, 'conditional', 'spring_evals', ['s_evaluation'], ['id'])
    op.create_foreign_key(None, 'conditional', 'freshman_eval_data', ['i_evaluation'], ['id'])
    op.drop_column('conditional', 'evaluation')
    ### end Alembic commands ###
def downgrade():
    """Revert to the single `evaluation` FK, dropping the Spring/Intro eval FKs."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('conditional', sa.Column('evaluation', sa.INTEGER(), autoincrement=False, nullable=True))
    # Bug fix: op.drop_constraint(None, ...) fails at runtime because Alembic
    # requires a constraint name. Use the names PostgreSQL auto-generated for
    # the FKs created in upgrade() (<table>_<column>_fkey).
    op.drop_constraint('conditional_s_evaluation_fkey', 'conditional', type_='foreignkey')
    op.drop_constraint('conditional_i_evaluation_fkey', 'conditional', type_='foreignkey')
    op.create_foreign_key('conditional_evaluation_fkey', 'conditional', 'spring_evals', ['evaluation'], ['id'])
    op.drop_column('conditional', 's_evaluation')
    op.drop_column('conditional', 'i_evaluation')
    ### end Alembic commands ###
| {
"repo_name": "RamZallan/conditional",
"path": "migrations/versions/5615d58892a1_.py",
"copies": "2",
"size": "1467",
"license": "mit",
"hash": 6487796911301676000,
"line_mean": 39.75,
"line_max": 111,
"alpha_frac": 0.6973415133,
"autogenerated": false,
"ratio": 3.4845605700712587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5181902083371259,
"avg_score": null,
"num_lines": null
} |
"""Allow FRB populations to be explored interactively.
Can be run with:
$ bokeh serve --show code/plot.py --args <pop_example.csv>
in which all csv-files with populations can be given after ``--args``, and as
well as the optional arguments of ``-noshow`` and ``-notns``, to
respectively not show the resulting plot, and to not overplot tns
"""
import numpy as np
import os
import pandas as pd
import sys
from bokeh.io import curdoc
from bokeh.layouts import layout, column
from bokeh.models import ColumnDataSource, HoverTool, Div, Panel, Tabs
from bokeh.models.widgets import Select
from bokeh.palettes import Category10, viridis
from bokeh.plotting import figure
from frbpoppy.tns import TNS
from frbpoppy.do_hist import histogram
from frbpoppy.misc import pprint
from frbpoppy import unpickle
class Tab():
    """Gather all elements needed for a plot."""

    def __init__(self):
        """Start with an empty figure, source list and name."""
        self.fig = None    # Bokeh figure for this tab
        self.sources = []  # ColumnDataSources backing the glyphs
        self.name = ''     # Title shown on the panel
class Plot():
    """Gather plotting options and build the interactive Bokeh document."""

    def __init__(self, files=[], tns=True):
        """Initializing.

        files: paths to pickled frbpoppy populations to plot.
        tns: True/False to toggle the TNS overlay, or a survey/telescope name
             string to filter the TNS population.
        NOTE(review): mutable default argument `files=[]` is shared across
        calls — harmless here because it is only read, never mutated in place.
        """
        # From arguments
        self.files = files
        self.tns = tns

        # Predefined
        self.height = 700  # Plot height
        self.width = 700  # Plot width

        # Initializing arguments set later in the code
        self.x_axis = None
        self.y_axis = None
        self.n_df = 0
        self.dfs = []
        self.tabs = []
        self.labels = []

        # Set parameters: axis label shown in the UI -> dataframe column name
        self.params = {'Comoving Distance (Gpc)': 'dist_co',
                       'Declination (°)': 'dec',
                       'Dispersion Measure - Host (pc/cm^3)': 'dm_host',
                       'Dispersion Measure - IGM (pc/cm^3)': 'dm_igm',
                       'Dispersion Measure - Milky Way (pc/cm^3)': 'dm_mw',
                       'Dispersion Measure (pc/cm^3)': 'dm',
                       'Fluence (Jy*ms)': 'fluence',
                       'Galactic Latitude (°)': 'gb',
                       'Galactic Longitude (°)': 'gl',
                       'Galactic X (Gpc)': 'gx',
                       'Galactic Y (Gpc)': 'gy',
                       'Galactic Z (Gpc)': 'gz',
                       'Luminosity - Bolometric (10^30 ergs/s)': 'lum_bol',
                       'Peak Flux Density (Jy)': 's_peak',
                       'Pulse Width - Effective (ms)': 'w_eff',
                       'Pulse Width - Intrinsic (ms)': 'w_int',
                       'Redshift': 'z',
                       'Right Ascension (°)': 'ra',
                       'Spectral Index': 'si',
                       'Signal to Noise Ratio': 'snr',
                       'Time (days)': 'time'}

        # Running plotting
        self.set_colours()
        self.set_widgets()
        self.get_data()
        self.make_scatter()
        self.make_histogram(kind='lin')
        self.make_histogram(kind='log')
        self.make_histogram(kind='cum')
        self.set_layout()

    def set_colours(self):
        """Determine which colours need to be used."""
        # Ensure number of overplots is known
        n = len(self.files)
        if self.tns:
            n += 1
        # Category10 only has ten distinct colours; fall back to viridis.
        if n > 10:
            self.colours = viridis(n)
        else:
            self.colours = Category10[10][:n]

    def get_data(self):
        """Read in populations."""
        # Read in files
        for f in self.files:
            # Check whether file exists
            if os.path.isfile(f):
                try:
                    df = unpickle(f).frbs.to_df()
                except ValueError:
                    pprint(f'Unpacking {f} seemed to have failed.')
                    continue
            # NOTE(review): when the file does not exist, `df`/`name` keep
            # their value from the previous loop iteration (or are unbound on
            # the first iteration) — confirm whether missing files should be
            # skipped explicitly.
            if '.' in f:
                # Derive a short legend label from the file name.
                name = '.'.join(f.split('/')[-1].split('.')[:-1])
                if '_for_plotting' in name:
                    name = name.split('_for_plotting')[0]
                if len(name) > 15:
                    name = name.split('_')[-1]
            else:
                name = f
            # If things haven't worked
            if df is None:
                m = 'Skipping population {} - contains no sources'.format(f)
                pprint(m)
                continue
            # Downsample population size if it's too large
            if df.shape[0] > 10000:
                pprint(f'Downsampling population {f} (else too big to plot)')
                df = df.sample(n=10000)
            df['color'] = self.colours[self.n_df]
            df['lum_bol'] = df['lum_bol'] / 1e30  # Sidestepping Bokeh issue
            if df.empty:
                m = 'Skipping population {} - contains no sources'.format(f)
                pprint(m)
                continue
            else:
                self.dfs.append(df)
                self.labels.append(name)
                self.n_df += 1

        # Add on tns
        if self.tns:
            df = TNS(frbpoppy=True).df
            # Filter by survey if wished
            if isinstance(self.tns, str):
                # Try matching a survey name first, then a telescope name.
                if not df[df.survey == self.tns].empty:
                    df = df[df.survey == self.tns]
                elif not df[df.telescope == self.tns].empty:
                    df = df[df.telescope == self.tns]
                else:
                    m = 'Your chosen input for tns is not found.'
                    raise ValueError(m)
            df['color'] = self.colours[len(self.dfs)]
            self.dfs.append(df)
            self.labels.append(f'tns {self.tns}')

    def set_widgets(self):
        """Set up the axis-selection dropdown widgets."""
        self.x_axis = Select(title='',
                             options=sorted(self.params.keys()),
                             value='Right Ascension (°)')
        self.y_axis = Select(title='',
                             options=sorted(self.params.keys()),
                             value='Declination (°)')

    def make_scatter(self):
        """Set up a scatter plot."""
        # Initializing plot
        tab = Tab()
        tab.name = 'Scatter'

        # Set up interactive tools
        props = [("x", "@x"), ("y", "@y")]  # ("pop", "@label")
        hover = HoverTool(tooltips=props)
        tools = ['box_zoom', 'pan', 'save', hover, 'reset', 'wheel_zoom']

        # Create scatter plot
        tab.fig = figure(plot_height=self.height,
                         plot_width=self.width,
                         active_scroll='wheel_zoom',
                         toolbar_location='right',
                         tools=tools)

        # Stop labels falling off
        tab.fig.min_border_left = 80

        # Create Column Data Sources for interacting with the plot
        props = dict(x=[], y=[], color=[])
        tab.sources = [ColumnDataSource(props) for df in self.dfs]

        # Plot scatter plot of FRB populations
        for i, source in enumerate(tab.sources):
            tab.fig.circle(x='x',
                           y='y',
                           source=source,
                           size=7,
                           alpha=0.6,
                           color='color',
                           legend_label=self.labels[i])
        self.tabs.append(tab)

    def make_histogram(self, kind='lin'):
        """Set up a histogram plot.

        kind: 'lin' (linear x), 'log' (log x) or 'cum' (log x, cumulative).
        """
        # Initializing plot
        tab = Tab()
        if kind == 'lin':
            tab.name = 'Hist (Lin)'
            axis_type = 'linear'
            log = False
            cum = False
        elif kind == 'log':
            tab.name = 'Hist (Log)'
            axis_type = 'log'
            log = True
            cum = False
        elif kind == 'cum':
            tab.name = 'Hist (Cum)'
            axis_type = 'log'
            log = True
            cum = True

        # Set up interactive tools
        tools = ['box_zoom', 'pan', 'save', 'reset', 'wheel_zoom']

        # Create histogram plot
        tab.fig = figure(plot_height=self.height,
                         plot_width=self.width,
                         active_scroll='wheel_zoom',
                         toolbar_location='right',
                         tools=tools,
                         x_axis_type=axis_type,
                         y_axis_type="log")

        # Create Column Data Sources for interacting with the plot
        hists = histogram(self.dfs, log=log, cum=cum)
        props = dict(x=[], y=[])
        tab.sources = [ColumnDataSource(props) for hist in hists]

        # Plot histogram values
        for i, source in enumerate(tab.sources):
            tab.fig.step(x='x',
                         y='y',
                         color=self.colours[i],
                         legend_label=self.labels[i],
                         alpha=0.8,
                         line_width=2.5,
                         source=source,
                         mode='before')

        # Keep the binned data around so update() can re-slice it per axis.
        if kind == 'lin':
            self.hists_lin = hists
        elif kind == 'log':
            self.hists_log = hists
        elif kind == 'cum':
            self.hists_cum = hists
        self.tabs.append(tab)

    def update(self):
        """Update plots when interacted with."""
        for tab in self.tabs:
            x_abr = self.params[self.x_axis.value]
            y_abr = self.params[self.y_axis.value]
            tab.fig.xaxis.axis_label = self.x_axis.value
            tab.fig.yaxis.axis_label = self.y_axis.value
            if tab.name.startswith('Hist'):
                tab.fig.yaxis.axis_label = 'Fraction'
            for i, source in enumerate(tab.sources):
                # Histogram frames store bin edges as '<col>_x' and counts as '<col>'.
                cols = [f'{x_abr}_x', f'{x_abr}']
                if tab.name == 'Scatter':
                    cols = [x_abr, y_abr, 'color']
                    dfs = self.dfs
                elif tab.name == "Hist (Lin)":
                    dfs = self.hists_lin
                elif tab.name == "Hist (Log)":
                    dfs = self.hists_log
                elif tab.name == "Hist (Cum)":
                    dfs = self.hists_cum
                # Ensure columns are present in each population
                if (x_abr not in dfs[i] or
                        (tab.name == 'Scatter' and y_abr not in dfs[i])):
                    df = pd.DataFrame(np.nan, index=[0], columns=cols)
                else:
                    # Clean up data
                    df = dfs[i][cols]
                    df = df.replace('None', np.nan)
                    df = df.dropna()
                # Update data
                if tab.name == 'Scatter':
                    source.data = dict(x=df[x_abr],
                                       y=df[y_abr],
                                       color=df['color'])
                else:
                    source.data = dict(x=df[f'{x_abr}_x'],
                                       y=df[f'{x_abr}'])

    def set_layout(self):
        """Create the plot layout."""
        # What to interact with
        for control in [self.x_axis, self.y_axis]:
            control.on_change('value', lambda attr, old, new: self.update())

        # Set up sidebar
        cwd = os.path.dirname(__file__)

        def path(p):
            # Read a sidebar HTML fragment from the plot_config directory.
            d = os.path.join(cwd, 'plot_config/{}.html'.format(p))
            return open(d).read()

        text_top = Div(text=path('text_top'))
        text_bottom = Div(text=path('text_bottom'))

        sidebar = [text_top, self.x_axis, self.y_axis, text_bottom]
        s = column(sidebar, width=380)

        # Set up tabs
        panels = []
        for tab in self.tabs:
            panels.append(Panel(child=tab.fig, title=tab.name))
            tab.fig.legend.click_policy = 'hide'
        tabs = Tabs(tabs=panels, width=self.width)

        # Add sidebar and tabs
        L = layout([[s, tabs]])

        # Initial load of data
        self.update()

        # Showtime
        curdoc().title = 'frbpoppy'
        curdoc().add_root(L)
# Parse system arguments
# (I know ArgumentParser is nicer, but bokeh only works with argv)
args = sys.argv

# Whether to plot the tns population
if '-tns' in args:
    tns = args[args.index('-tns') + 1]
    if tns == 'True':
        tns = True
    elif tns == 'False':
        tns = False
    # any other value stays a string and is used as a survey/telescope filter
else:
    # Bug fix: this branch used to set a stray `frcat` variable and left `tns`
    # undefined, so Plot(...) below crashed with a NameError whenever -tns was
    # not given. Default to overplotting the TNS population.
    tns = True

# Which files to plot (quoted paths are unquoted first)
files = [a.strip('"') for a in args if a.strip('"').endswith('.p')]

# Check whether populations have been given as input
if not files:
    pprint('Nothing to plot: plot arguments are empty')
else:
    Plot(files=files, tns=tns)
| {
"repo_name": "davidgardenier/frbpoppy",
"path": "frbpoppy/plot.py",
"copies": "1",
"size": "12466",
"license": "mit",
"hash": 7143589314570015000,
"line_mean": 31.8759894459,
"line_max": 77,
"alpha_frac": 0.4737560193,
"autogenerated": false,
"ratio": 3.991031390134529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9964787409434529,
"avg_score": 0,
"num_lines": 379
} |
"""Allow index to be used for comment sorting
Revision ID: 8c2635571571
Revises: 664a23bbc217
Create Date: 2018-10-21 17:47:05.349531
"""
# revision identifiers, used by Alembic.
revision = '8c2635571571'
down_revision = '664a23bbc217'
from alembic import op # lgtm[py/unused-import]
import sqlalchemy as sa # lgtm[py/unused-import]
def upgrade():
    """Replace single-column comment indexes with composite (target, commentid)
    indexes so the planner can use them for comment sorting."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_index('ind_charcomment_targetid_commentid', 'charcomment', ['targetid', 'commentid'], unique=False)
    op.drop_index('ind_charcomment_targetid', table_name='charcomment')
    # Partial indexes: only index rows where the respective target is set.
    op.create_index('ind_comments_target_sub_commentid', 'comments', ['target_sub', 'commentid'], unique=False, postgresql_where=sa.text(u'target_sub IS NOT NULL'))
    op.create_index('ind_comments_target_user_commentid', 'comments', ['target_user', 'commentid'], unique=False, postgresql_where=sa.text(u'target_user IS NOT NULL'))
    op.drop_index('ix_comments_target_sub', table_name='comments')
    op.drop_index('ix_comments_target_user', table_name='comments')
    op.create_index('ind_journalcomment_targetid_commentid', 'journalcomment', ['targetid', 'commentid'], unique=False)
    op.drop_index('ind_journalcomment_settings', table_name='journalcomment')
    op.drop_index('ind_journalcomment_targetid', table_name='journalcomment')
    op.drop_index('ind_journalcomment_targetid_settings', table_name='journalcomment')
    # ### end Alembic commands ###
def downgrade():
    """Restore the original single-column indexes and drop the composite ones."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_index('ind_journalcomment_targetid_settings', 'journalcomment', ['targetid', 'settings'], unique=False)
    op.create_index('ind_journalcomment_targetid', 'journalcomment', ['targetid'], unique=False)
    op.create_index('ind_journalcomment_settings', 'journalcomment', ['settings'], unique=False)
    op.drop_index('ind_journalcomment_targetid_commentid', table_name='journalcomment')
    op.create_index('ix_comments_target_user', 'comments', ['target_user'], unique=False)
    op.create_index('ix_comments_target_sub', 'comments', ['target_sub'], unique=False)
    op.drop_index('ind_comments_target_user_commentid', table_name='comments')
    op.drop_index('ind_comments_target_sub_commentid', table_name='comments')
    op.create_index('ind_charcomment_targetid', 'charcomment', ['targetid'], unique=False)
    op.drop_index('ind_charcomment_targetid_commentid', table_name='charcomment')
    # ### end Alembic commands ###
| {
"repo_name": "Weasyl/weasyl",
"path": "libweasyl/libweasyl/alembic/versions/8c2635571571_allow_index_to_be_used_for_comment_.py",
"copies": "1",
"size": "2503",
"license": "apache-2.0",
"hash": -5556658209213680000,
"line_mean": 55.8863636364,
"line_max": 167,
"alpha_frac": 0.721534159,
"autogenerated": false,
"ratio": 3.319628647214854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4541162806214854,
"avg_score": null,
"num_lines": null
} |
"""Allowing reporting_start_end_dates to be null
Revision ID: 9acf8cc4e454
Revises: df2f541291a5
Create Date: 2017-07-11 14:14:03.356183
"""
# revision identifiers, used by Alembic.
revision = '9acf8cc4e454'
down_revision = 'df2f541291a5'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function, upgrade_<engine_name>."""
    globals()["upgrade_{}".format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function, downgrade_<engine_name>."""
    globals()["downgrade_{}".format(engine_name)]()
def upgrade_data_broker():
    """Make submission reporting dates nullable and NULL out the epoch sentinel values."""
    ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('submission', 'reporting_end_date',
               existing_type=sa.DATE(),
               nullable=True,
               server_default=None)
    op.alter_column('submission', 'reporting_start_date',
               existing_type=sa.DATE(),
               nullable=True,
               server_default=None)
    ### end Alembic commands ###
    # '1970-01-01' was the old NOT NULL placeholder; convert it to NULL now
    # that the columns allow it.
    op.execute("UPDATE submission SET reporting_start_date = NULL "
               "WHERE reporting_start_date = '1970-01-01'")
    op.execute("UPDATE submission SET reporting_end_date = NULL "
               "WHERE reporting_end_date = '1970-01-01'")
def downgrade_data_broker():
    """Backfill NULL reporting dates with the epoch sentinel and restore NOT NULL."""
    # Backfill must happen before the NOT NULL constraint is reinstated.
    op.execute("UPDATE submission SET reporting_start_date = '1970-01-01' "
               "WHERE reporting_start_date IS NULL")
    op.execute("UPDATE submission SET reporting_end_date = '1970-01-01' "
               "WHERE reporting_end_date IS NULL")
    ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('submission', 'reporting_start_date',
               existing_type=sa.DATE(),
               nullable=False,
               server_default="1970-01-01")
    op.alter_column('submission', 'reporting_end_date',
               existing_type=sa.DATE(),
               nullable=False,
               server_default="1970-01-01")
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/9acf8cc4e454_allowing_reporting_start_end_dates_to_.py",
"copies": "1",
"size": "1912",
"license": "cc0-1.0",
"hash": 3374500906245179000,
"line_mean": 29.3492063492,
"line_max": 75,
"alpha_frac": 0.6208158996,
"autogenerated": false,
"ratio": 3.763779527559055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4884595427159055,
"avg_score": null,
"num_lines": null
} |
"""allowlist rename
Revision ID: eeb702f77d7d
Revises: 8a44a4364f5a
Create Date: 2020-10-15 13:29:38.853574
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'eeb702f77d7d'
down_revision = '8a44a4364f5a'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function, upgrade_<engine_name>."""
    globals()["upgrade_{}".format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function, downgrade_<engine_name>."""
    globals()["downgrade_{}".format(engine_name)]()
def upgrade_registrar():
    """No registrar schema changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_registrar():
    """No registrar schema changes to revert in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def upgrade_cloud_verifier():
    """Rename verifiermain.ima_whitelist to allowlist (type and nullability unchanged)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('verifiermain', 'ima_whitelist', new_column_name='allowlist', existing_type=sa.Text(length=429400000), existing_nullable=True)
    # ### end Alembic commands ###
def downgrade_cloud_verifier():
    """Rename verifiermain.allowlist back to ima_whitelist."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('verifiermain', 'allowlist', new_column_name='ima_whitelist', existing_type=sa.Text(length=429400000), existing_nullable=True)
    # ### end Alembic commands ###
| {
"repo_name": "mit-ll/python-keylime",
"path": "keylime/migrations/versions/eeb702f77d7d_allowlist_rename.py",
"copies": "1",
"size": "1286",
"license": "bsd-2-clause",
"hash": -5445349523617915000,
"line_mean": 25.7916666667,
"line_max": 146,
"alpha_frac": 0.6874027994,
"autogenerated": false,
"ratio": 3.447721179624665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46351239790246646,
"avg_score": null,
"num_lines": null
} |
"""Allow parameters when extending.
Based on https://djangosnippets.org/snippets/447/. Modified for use
with variables.
"""
from django import template
from django.template.loader_tags import do_extends
import tokenize
import io
register = template.Library()
class XExtendsNode(template.Node):
    """Template node wrapping an ExtendsNode, rendered with extra context parameters."""
    def __init__(self, node, kwargs):
        """Store the wrapped ExtendsNode and the parameter mapping.

        `kwargs` maps parameter names to template.Variable instances, as
        produced by do_xextends.
        """
        self.node = node
        self.kwargs = kwargs
    def render(self, context):
        """Push the parameters onto the context, render the wrapped node, then pop.

        NOTE(review): the kwargs are pushed as template.Variable objects;
        the commented-out block below would resolve them against the
        context first -- confirm whether pushing them unresolved is intended.
        """
        # for k in list(self.kwargs.keys()):
        #     self.kwargs[k] = self.kwargs[k].resolve(context)
        context.update(self.kwargs)
        try:
            return self.node.render(context)
        finally:
            # Always pop, even if rendering raises, to keep the context balanced.
            context.pop()
def do_xextends(parser, token):
    """Parse an ``{% xextends %}`` tag.

    Behaves like ``{% extends %}`` but accepts optional parameters after a
    ``with`` keyword (``{% xextends "base.html" with title=page.title %}``),
    which are pushed onto the context while the parent renders.

    Raises:
        template.TemplateSyntaxError: if a parameter is not of the form
            name=value with a valid identifier on the left-hand side.
    """
    bits = token.contents.split()
    kwargs = {}
    if 'with' in bits:
        pos = bits.index('with')
        argslist = bits[pos+1:]
        bits = bits[:pos]
        for i in argslist:
            try:
                a, b = i.split('=', 1)
                a = a.strip()
                b = b.strip()
                # Tokenize the left-hand side to ensure it is a valid name.
                keys = list(
                    tokenize.generate_tokens(io.StringIO(a).readline))
                if keys[0][0] == tokenize.NAME:
                    kwargs[str(a)] = template.Variable(b)
                else:
                    raise ValueError
            except ValueError:
                # Bug fix: the original raised the bare exception class with
                # no message (the module TODO asks for descriptions).
                raise template.TemplateSyntaxError(
                    "'xextends' parameters must be of the form name=value; "
                    "got %r" % i)
    token.contents = " ".join(bits)
    # let the original do_extends parse the tag, and wrap the ExtendsNode
    return XExtendsNode(do_extends(parser, token), kwargs)
register.tag('xextends', do_xextends)
| {
"repo_name": "jscott1989/happening",
"path": "src/happening/templatetags/xextends.py",
"copies": "2",
"size": "1752",
"license": "mit",
"hash": -8312010145151559000,
"line_mean": 26.8095238095,
"line_max": 72,
"alpha_frac": 0.573630137,
"autogenerated": false,
"ratio": 4.141843971631205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5715474108631206,
"avg_score": null,
"num_lines": null
} |
"""Allow safety to be executable through `python -m safety`."""
from __future__ import absolute_import
import os
import sys
import sysconfig
LIBPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "lib")
def get_site_packages():
    """Return the set of site-packages paths for every known prefix.

    Considers sys.prefix, sysconfig's install prefix and -- when running
    inside a legacy virtualenv -- sys.real_prefix.
    """
    bases = {sys.prefix, sysconfig.get_config_var('prefix')}
    if hasattr(sys, 'real_prefix'):  # set only by legacy virtualenv
        bases.add(sys.real_prefix)
    template = sysconfig.get_path('purelib', expand=False)
    short_version = '{0[0]}.{0[1]}'.format(sys.version_info)
    return {
        template.format(base=base, py_version_short=short_version)
        for base in bases
    }
def insert_before_site_packages(*paths):
    """Splice *paths* into sys.path just before the first site-packages entry.

    If no site-packages entry is present, the paths are appended instead.
    """
    site_dirs = get_site_packages()
    insert_at = next(
        (pos for pos, entry in enumerate(sys.path) if entry in site_dirs),
        None,
    )
    if insert_at is None:
        sys.path += list(paths)
    else:
        sys.path = sys.path[:insert_at] + list(paths) + sys.path[insert_at:]
if __name__ == "__main__":
    # Make the vendored libraries importable ahead of site-packages copies.
    insert_before_site_packages(LIBPATH)
    # Pick the vendored yaml matching the running major version (yaml2/yaml3)
    # and alias it so `import yaml` inside safety resolves to the vendored copy.
    yaml_lib = 'yaml{0}'.format(sys.version_info[0])
    locals()[yaml_lib] = __import__(yaml_lib)
    sys.modules['yaml'] = sys.modules[yaml_lib]
    from safety.cli import cli
    cli(prog_name="safety")
| {
"repo_name": "kennethreitz/pipenv",
"path": "tasks/vendoring/safety/__main__.py",
"copies": "1",
"size": "1256",
"license": "mit",
"hash": -8426330673509959000,
"line_mean": 26.9111111111,
"line_max": 73,
"alpha_frac": 0.6265923567,
"autogenerated": false,
"ratio": 3.4600550964187327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4586647453118733,
"avg_score": null,
"num_lines": null
} |
"""Allow safety to be executable through `python -m safety`."""
from __future__ import absolute_import
import os
import sys
import sysconfig
# Directory layout: this file lives in pipenv/patched/safety/, so
#   PATCHED_DIR  -> .../pipenv/patched
#   PIPENV_DIR   -> .../pipenv
#   VENDORED_DIR -> .../pipenv/vendor
PATCHED_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PIPENV_DIR = os.path.dirname(PATCHED_DIR)
# Bug fix: the original joined the string literal "PIPENV_DIR" instead of the
# PIPENV_DIR variable, yielding the bogus relative path "PIPENV_DIR/vendor".
VENDORED_DIR = os.path.join(PIPENV_DIR, "vendor")
def get_site_packages():
    """Return the site-packages directory for every relevant prefix.

    Covers sys.prefix, sysconfig's install prefix and, under a legacy
    virtualenv, sys.real_prefix.
    """
    bases = {sys.prefix, sysconfig.get_config_var('prefix')}
    if hasattr(sys, 'real_prefix'):  # legacy virtualenv only
        bases.add(sys.real_prefix)
    template = sysconfig.get_path('purelib', expand=False)
    short_version = '{0[0]}.{0[1]}'.format(sys.version_info)
    return {template.format(base=b, py_version_short=short_version) for b in bases}
def insert_before_site_packages(*paths):
    """Insert *paths* into sys.path ahead of the first site-packages entry,
    appending them instead when no site-packages entry exists."""
    site_dirs = get_site_packages()
    for position, entry in enumerate(sys.path):
        if entry in site_dirs:
            sys.path = sys.path[:position] + list(paths) + sys.path[position:]
            return
    sys.path += list(paths)
def insert_pipenv_dirs():
    """Make the pipenv package, its patched tree and its vendored tree
    importable ahead of any site-packages copies."""
    insert_before_site_packages(os.path.dirname(PIPENV_DIR), PATCHED_DIR, VENDORED_DIR)
if __name__ == "__main__":  # pragma: no cover
    insert_pipenv_dirs()
    # Pick the patched yaml matching the running major version and alias it
    # so `import yaml` inside safety resolves to the patched copy.
    yaml_lib = "pipenv.patched.yaml{0}".format(sys.version_info[0])
    locals()[yaml_lib] = __import__(yaml_lib)
    sys.modules["yaml"] = sys.modules[yaml_lib]
    from safety.cli import cli
    cli(prog_name="safety")
| {
"repo_name": "kennethreitz/pipenv",
"path": "pipenv/patched/safety/__main__.py",
"copies": "1",
"size": "1485",
"license": "mit",
"hash": -5411513497470724000,
"line_mean": 28.1176470588,
"line_max": 87,
"alpha_frac": 0.6430976431,
"autogenerated": false,
"ratio": 3.2781456953642385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44212433384642386,
"avg_score": null,
"num_lines": null
} |
# Allows a mentor to ssh into a DigitalOcean droplet. This is designed to be
# executed on the target machine.
#
# This script takes the username of the mentor as an argument:
#
# $ python3 add_mentor.py <mentor's username>
#
# Alternatively you can pass in --remove to remove their SSH key from the
# machine:
#
# $ python3 add_mentor.py --remove <mentor's username>
import os
import re
import socket
import sys
from argparse import ArgumentParser
from typing import List
import requests
# CLI: a required GitHub username, plus --remove to revoke access instead.
parser = ArgumentParser(description="Give a mentor ssh access to this machine.")
parser.add_argument("username", help="GitHub username of the mentor.")
parser.add_argument("--remove", help="Remove his/her key from the machine.", action="store_true")
# Wrap keys with line comments for easier key removal.
append_key = """\
#<{username}>{{{{
{key}
#}}}}<{username}>
"""
def get_mentor_keys(username: str) -> List[str]:
    """Fetch the mentor's public SSH keys from the GitHub API.

    Exits the process when GitHub is unreachable or the user has no keys.
    """
    response = requests.get(f"https://api.github.com/users/{username}/keys")
    if response.status_code != 200:
        print("Cannot connect to GitHub...")
        sys.exit(1)
    key_records = response.json()
    if not key_records:
        print(f'Mentor "{username}" has no public key.')
        sys.exit(1)
    return [record["key"] for record in key_records]
if __name__ == "__main__":
    args = parser.parse_args()
    authorized_keys = os.path.expanduser("~/.ssh/authorized_keys")
    if args.remove:
        # Match the '#<user>{{ ... }}<user>' block written by append_key,
        # including its trailing newline if present.
        remove_re = re.compile(
            "#<{0}>{{{{.+}}}}<{0}>(\n)?".format(args.username), re.DOTALL | re.MULTILINE
        )
        with open(authorized_keys, "r+") as f:
            old_content = f.read()
            new_content = re.sub(remove_re, "", old_content)
            # Rewrite in place and truncate leftovers from the old content.
            f.seek(0)
            f.write(new_content)
            f.truncate()
        print(f"Successfully removed {args.username}' SSH key!")
    else:
        keys = get_mentor_keys(args.username)
        with open(authorized_keys, "a") as f:
            for key in keys:
                f.write(append_key.format(username=args.username, key=key))
        print(f"Successfully added {args.username}'s SSH key!")
        print("Can you let your mentor know that they can connect to this machine with:\n")
        print(f"  $ ssh zulipdev@{socket.gethostname()}\n")
| {
"repo_name": "eeshangarg/zulip",
"path": "tools/droplets/add_mentor.py",
"copies": "6",
"size": "2243",
"license": "apache-2.0",
"hash": -5965207919732918000,
"line_mean": 28.9066666667,
"line_max": 97,
"alpha_frac": 0.6272848863,
"autogenerated": false,
"ratio": 3.488335925349922,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7115620811649922,
"avg_score": null,
"num_lines": null
} |
# Allows a mentor to ssh into a Digital Ocean droplet. This is designed to be
# executed on the target machine.
#
# This script takes the username of the mentor as an argument:
#
# $ python3 add_mentor.py <mentor's username>
#
# Alternatively you can pass in --remove to remove their ssh key from the
# machine:
#
# $ python3 add_mentor.py --remove <mentor's username>
import os
import re
import socket
import sys
from argparse import ArgumentParser
from typing import List
import requests
# CLI: a required GitHub username, plus --remove to revoke access instead.
parser = ArgumentParser(description='Give a mentor ssh access to this machine.')
parser.add_argument('username', help='Github username of the mentor.')
parser.add_argument('--remove', help='Remove his/her key from the machine.',
                    action='store_true', default=False)
# Wrap keys with line comments for easier key removal.
append_key = """\
#<{username}>{{{{
{key}
#}}}}<{username}>
"""
def get_mentor_keys(username: str) -> List[str]:
    """Fetch the mentor's public SSH keys from the GitHub API.

    Exits the process when GitHub is unreachable or the user has no keys.
    """
    response = requests.get(f'https://api.github.com/users/{username}/keys')
    if response.status_code != 200:
        print('Cannot connect to Github...')
        sys.exit(1)
    key_records = response.json()
    if not key_records:
        print(f'Mentor "{username}" has no public key.')
        sys.exit(1)
    return [record['key'] for record in key_records]
if __name__ == '__main__':
    args = parser.parse_args()
    authorized_keys = os.path.expanduser('~/.ssh/authorized_keys')
    if args.remove:
        # Match the '#<user>{{ ... }}<user>' block written by append_key.
        remove_re = re.compile('#<{0}>{{{{.+}}}}<{0}>(\n)?'.format(args.username),
                               re.DOTALL | re.MULTILINE)
        with open(authorized_keys, 'r+') as f:
            old_content = f.read()
            new_content = re.sub(remove_re, '', old_content)
            # Rewrite in place and truncate leftovers from the old content.
            f.seek(0)
            f.write(new_content)
            f.truncate()
        print(f'Successfully removed {args.username}\' SSH key!')
    else:
        keys = get_mentor_keys(args.username)
        with open(authorized_keys, 'a') as f:
            for key in keys:
                f.write(append_key.format(username=args.username, key=key))
        print(f'Successfully added {args.username}\'s SSH key!')
        print('Can you let your mentor know that they can connect to this machine with:\n')
        print(f'  $ ssh zulipdev@{socket.gethostname()}\n')
| {
"repo_name": "shubhamdhama/zulip",
"path": "tools/droplets/add_mentor.py",
"copies": "3",
"size": "2289",
"license": "apache-2.0",
"hash": -7705882464026787000,
"line_mean": 29.9324324324,
"line_max": 91,
"alpha_frac": 0.619921363,
"autogenerated": false,
"ratio": 3.5324074074074074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007523189225152357,
"num_lines": 74
} |
# Allows a mentor to ssh into a DigitalOcean droplet. This is designed to be
# executed on the target machine.
#
# This script takes the username of the mentor as an argument:
#
# $ python3 add_mentor.py <mentor's username>
#
# Alternatively you can pass in --remove to remove their SSH key from the
# machine:
#
# $ python3 add_mentor.py --remove <mentor's username>
import os
import re
import socket
import sys
from argparse import ArgumentParser
from typing import List
import requests
# CLI: a required GitHub username, plus --remove to revoke access instead.
parser = ArgumentParser(description='Give a mentor ssh access to this machine.')
parser.add_argument('username', help='GitHub username of the mentor.')
parser.add_argument('--remove', help='Remove his/her key from the machine.',
                    action='store_true')
# Wrap keys with line comments for easier key removal.
append_key = """\
#<{username}>{{{{
{key}
#}}}}<{username}>
"""
def get_mentor_keys(username: str) -> List[str]:
    """Fetch the mentor's public SSH keys from the GitHub API.

    Exits the process when GitHub is unreachable or the user has no keys.
    """
    response = requests.get(f'https://api.github.com/users/{username}/keys')
    if response.status_code != 200:
        print('Cannot connect to GitHub...')
        sys.exit(1)
    key_records = response.json()
    if not key_records:
        print(f'Mentor "{username}" has no public key.')
        sys.exit(1)
    return [record['key'] for record in key_records]
if __name__ == '__main__':
    args = parser.parse_args()
    authorized_keys = os.path.expanduser('~/.ssh/authorized_keys')
    if args.remove:
        # Match the '#<user>{{ ... }}<user>' block written by append_key.
        remove_re = re.compile('#<{0}>{{{{.+}}}}<{0}>(\n)?'.format(args.username),
                               re.DOTALL | re.MULTILINE)
        with open(authorized_keys, 'r+') as f:
            old_content = f.read()
            new_content = re.sub(remove_re, '', old_content)
            # Rewrite in place and truncate leftovers from the old content.
            f.seek(0)
            f.write(new_content)
            f.truncate()
        print(f'Successfully removed {args.username}\' SSH key!')
    else:
        keys = get_mentor_keys(args.username)
        with open(authorized_keys, 'a') as f:
            for key in keys:
                f.write(append_key.format(username=args.username, key=key))
        print(f'Successfully added {args.username}\'s SSH key!')
        print('Can you let your mentor know that they can connect to this machine with:\n')
        print(f'  $ ssh zulipdev@{socket.gethostname()}\n')
| {
"repo_name": "showell/zulip",
"path": "tools/droplets/add_mentor.py",
"copies": "2",
"size": "2273",
"license": "apache-2.0",
"hash": 5640861156219217000,
"line_mean": 29.7162162162,
"line_max": 91,
"alpha_frac": 0.6190057193,
"autogenerated": false,
"ratio": 3.529503105590062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5148508824890062,
"avg_score": null,
"num_lines": null
} |
# Allows a mentor to ssh into a Digital Ocean droplet. This is designed to be
# executed on the target machine.
#
# This script takes the username of the mentor as an argument:
#
# $ python3 add_mentor.py <mentor's username>
#
# Alternatively you can pass in --remove to remove their ssh key from the
# machine:
#
# $ python3 add_mentor.py --remove <mentor's username>
import os
import sys
from argparse import ArgumentParser
from typing import List
import socket
import re
import requests
# CLI: a required GitHub username, plus --remove to revoke access instead.
parser = ArgumentParser(description='Give a mentor ssh access to this machine.')
parser.add_argument('username', help='Github username of the mentor.')
parser.add_argument('--remove', help='Remove his/her key from the machine.',
                    action='store_true', default=False)
# Wrap keys with line comments for easier key removal.
append_key = """\
#<{username}>{{{{
{key}
#}}}}<{username}>
"""
def get_mentor_keys(username: str) -> List[str]:
    """Fetch the mentor's public SSH keys from the GitHub API.

    Exits the process when GitHub is unreachable or the user has no keys.
    """
    response = requests.get('https://api.github.com/users/{}/keys'.format(username))
    if response.status_code != 200:
        print('Cannot connect to Github...')
        sys.exit(1)
    key_records = response.json()
    if not key_records:
        print('Mentor "{}" has no public key.'.format(username))
        sys.exit(1)
    return [record['key'] for record in key_records]
if __name__ == '__main__':
    args = parser.parse_args()
    authorized_keys = os.path.expanduser('~/.ssh/authorized_keys')
    if args.remove:
        # Match the '#<user>{{ ... }}<user>' block written by append_key.
        remove_re = re.compile('#<{0}>{{{{.+}}}}<{0}>(\n)?'.format(args.username),
                               re.DOTALL | re.MULTILINE)
        with open(authorized_keys, 'r+') as f:
            old_content = f.read()
            new_content = re.sub(remove_re, '', old_content)
            # Rewrite in place and truncate leftovers from the old content.
            f.seek(0)
            f.write(new_content)
            f.truncate()
        print('Successfully removed {}\' SSH key!'.format(args.username))
    else:
        keys = get_mentor_keys(args.username)
        with open(authorized_keys, 'a') as f:
            for key in keys:
                f.write(append_key.format(username=args.username, key=key))
        print('Successfully added {}\'s SSH key!'.format(args.username))
        print('Can you let your mentor know that they can connect to this machine with:\n')
        print('  $ ssh zulipdev@{}\n'.format(socket.gethostname()))
| {
"repo_name": "mahim97/zulip",
"path": "tools/droplets/add_mentor.py",
"copies": "16",
"size": "2330",
"license": "apache-2.0",
"hash": 5087274952683895000,
"line_mean": 30.0666666667,
"line_max": 91,
"alpha_frac": 0.6197424893,
"autogenerated": false,
"ratio": 3.579109062980031,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007422880035483658,
"num_lines": 75
} |
"""Allows creation and manipulation of python objects representing html, css and javascript elements.
Available predefined objects for html elements:
<html>, <head>, <body>, <title>, <meta>, <link>,
<p>, <h1> to <h6>, <a>, <div>, <script>, <img>,
<ul>, <ol>, <li>, <table>, <tr>, <th>, <td>, <hr>,
<br>, <style>, <center>
Available predefined objects for css elements:
element selector, id selector, class selector
Available predefined objects for javascript elements:
Todo:
* Add js element support
* Expand list of predefined html elements (video, )
* Add a way to parse dicts
* Add descriptions to ValueErrors
* Add custom defined attributes for some html tags
(meta (charset, ),
div (id, class, ),
video (dimensions(width, height), ),
)
* Create a TextNode class that replaces html special symbols with their representation
        e.g. '<' to '&lt;'
*
"""
import platform
import webbrowser
import os.path
class List:
    """Minimal list-like container; calling the instance appends items."""
    def __init__(self, *args):
        # Backing store for the contained items.
        self.contents = list(args)
    def __getitem__(self, index):
        return self.contents[index]
    def __setitem__(self, index, value):
        self.contents[index] = value
    def __delitem__(self, index):
        del self.contents[index]
    def __len__(self):
        return len(self.contents)
    def __call__(self, *args):
        # Appending via call keeps element-building code terse: el(child1, child2)
        self.contents.extend(args)
class Dict:
    """Minimal dict-like container; calling the instance merges keyword args."""
    def __init__(self, **kwargs):
        # Backing store for the contained mapping.
        self.contents = kwargs
    def __getitem__(self, key):
        return self.contents[key]
    def __setitem__(self, key, value):
        self.contents[key] = value
    def __delitem__(self, key):
        del self.contents[key]
    def __len__(self):
        return len(self.contents)
    def __call__(self, **updates):
        # Later keyword arguments win over existing entries.
        self.contents = dict(self.contents, **updates)
class File:
    """A file on disk identified by directory path, base name and extension.

    Subclasses provide __str__ with the file's textual contents.
    """
    def __init__(self, path, filename, extension):
        self.filename = filename
        self.path = path
        self.extension = extension
    def build(self):
        """Write str(self) to disk at filepath().

        Bug fix: use a context manager so the handle is closed even when
        writing raises (the original open/write/close leaked on error).
        """
        with open(self.filepath(), 'w') as f:
            f.write(str(self))
    def filepath(self):
        """Returns the formatted file path with filename and extension."""
        return f'{self.path}{self.filename}.{self.extension}'
class FileHTML(File):
    """Class representing an HTML file"""
    def __init__(self, path, filename, html, doctype='<!doctype html>'):
        # `html` is the root element (typically an HTML instance);
        # `doctype` is emitted verbatim as the first line of the file.
        File.__init__(self, path, filename, 'html')
        self.doctype = doctype
        self.html = html
    def openhtml(self):
        # Open the generated file in the default web browser (new window).
        webbrowser.open_new(self.filepath())
    def parse(self, text):
        # Placeholder -- parsing HTML text into elements is not implemented yet.
        pass
    def __str__(self):
        # The file's own path is passed down so child elements (Link, A,
        # Script, Img) can rewrite File-valued href/src attributes into
        # paths relative to this file.
        return f'{self.doctype}\n' \
               f'{self.html.__str__(self.filepath())}\n'
class FileCSS(File):
    """A CSS file composed of one rule or a list of rules."""
    def __init__(self, path, filename, css, *args):
        File.__init__(self, path, filename, 'css')
        self.css = css
    def __str__(self):
        # A list of rules renders each rule framed by newlines; anything
        # else is rendered via its own str().
        if isinstance(self.css, list):
            return ''.join(f'\n{rule}\n' for rule in self.css)
        return str(self.css)
class FileJS(File):
    """A Javascript file composed of one element or a list of elements."""
    def __init__(self, path, filename, js, *args):
        File.__init__(self, path, filename, 'js')
        self.js = js
    def __str__(self):
        # A list of elements renders each one framed by newlines; anything
        # else is rendered via its own str().
        if isinstance(self.js, list):
            return ''.join(f'\n{element}\n' for element in self.js)
        return str(self.js)
def compare_path(path_main, path_other):
    """Return path_other expressed relative to the directory containing path_main."""
    base_dir = os.path.normpath(os.path.dirname(os.path.abspath(path_main)))
    target = os.path.normpath(os.path.abspath(path_other))
    return os.path.relpath(target, base_dir)
# ========== HTML related code ========== #
class ElementHTML(List):
    """Class that handles html elements.

    Children are held in self.contents (inherited from List); calling the
    instance appends more children. Attributes are kept in self._attributes.
    """
    def __init__(self, tag, *args, end_tag=True, **kwargs):
        super().__init__(*args)
        self.tag = tag
        self.newlines = True   # frame child elements with newlines when rendering
        self.end_tag = end_tag  # False for void elements (<br>, <img>, ...)
        self._attributes = kwargs
    def attributes(self, **kwargs):
        """Replace the element's attribute mapping with `kwargs`."""
        self._attributes = kwargs
    def __str__(self, holder_filepath=''):
        """Return html valid representation of self.

        holder_filepath is propagated through the chain of elements
        and only those that need it use it.
        """
        nl = '\n' if self.newlines else ''
        text_inline = ''
        for element in self.contents:
            if isinstance(element, str):
                text_inline += element
            else:
                text_inline += nl + element.__str__(holder_filepath=holder_filepath) + nl
        # Bug fix: the original discarded the result of str.replace, making
        # the intended collapsing of doubled newlines a no-op.
        text_inline = text_inline.replace('\n\n', '\n')
        if self._attributes:
            # Bug fix: separate attribute pairs with spaces; the original
            # concatenated them (e.g. a="1"b="2"), producing invalid HTML.
            text_attributes = ' ' + ' '.join(
                f'{key}="{value}"' for key, value in self._attributes.items()
            )
        else:
            text_attributes = ''
        if self.end_tag:
            return f'<{self.tag}{text_attributes}>{text_inline}</{self.tag}>'
        return f'<{self.tag}{text_attributes}>'
def custom_tag(tagtxt):
    """Build and return an ElementHTML subclass bound to the tag *tagtxt*."""
    class CustomTag(ElementHTML):
        def __init__(self, *args, **kwargs):
            ElementHTML.__init__(self, tagtxt, *args, **kwargs)
    return CustomTag
class HTML(ElementHTML):
    """The <html> root element."""
    def __init__(self, *args, **kwargs):
        ElementHTML.__init__(self, 'html', *args, **kwargs)
class Head(ElementHTML):
    """The <head> document-metadata element."""
    def __init__(self, *args, **kwargs):
        ElementHTML.__init__(self, 'head', *args, **kwargs)
class Body(ElementHTML):
    """The <body> document-content element."""
    def __init__(self, *args, **kwargs):
        ElementHTML.__init__(self, 'body', *args, **kwargs)
class Title(ElementHTML):
    """The <title> element."""
    def __init__(self, *args, **kwargs):
        ElementHTML.__init__(self, 'title', *args, **kwargs)
class Meta(ElementHTML):
    """The <meta> void element (no closing tag)."""
    def __init__(self, *args, **kwargs):
        ElementHTML.__init__(self, 'meta', *args, end_tag=False, **kwargs)
class Link(ElementHTML):
    """Wrapper class of ElementHTML for creating link html element.

    A File-valued ``href`` attribute is rewritten into a path relative to
    the HTML file being rendered (supplied via *holder_filepath*).
    """
    def __init__(self, *args, **kwargs):
        super().__init__('link', *args, end_tag=False, **kwargs)
        self.oldstr = ElementHTML.__str__
    def __str__(self, holder_filepath=''):  # holder_filepath is passed in FileHTML.__str__
        if 'href' in self._attributes.keys() and \
                isinstance(self._attributes['href'], File):
            if holder_filepath == '':
                # Bug fix: give the ValueError a description (module TODO).
                raise ValueError(
                    'Link with a File-valued href must be rendered through a '
                    'FileHTML so the relative path can be computed.')
            new_href = compare_path(holder_filepath, self._attributes['href'].filepath())
            self._attributes['href'] = new_href
        # Bug fix: forward holder_filepath so child elements can resolve
        # their own File-valued attributes (it was previously dropped).
        return self.oldstr(self, holder_filepath)
class P(ElementHTML):
    """The <p> paragraph element."""
    def __init__(self, *args, **kwargs):
        ElementHTML.__init__(self, 'p', *args, **kwargs)
class H(ElementHTML):
    """A header element; *size* selects the level, <h1> through <h6> (default 1)."""
    def __init__(self, *args, size=1, **kwargs):
        ElementHTML.__init__(self, f'h{size}', *args, **kwargs)
class A(ElementHTML):
    """Wrapper class of ElementHTML for creating anchor html element.

    A FileHTML-valued ``href`` attribute is rewritten into a path relative
    to the HTML file being rendered (supplied via *holder_filepath*).
    """
    def __init__(self, *args, **kwargs):
        super().__init__('a', *args, **kwargs)
        self.oldstr = ElementHTML.__str__
    def __str__(self, holder_filepath=''):  # holder_filepath is passed in FileHTML.__str__
        if 'href' in self._attributes.keys() and \
                isinstance(self._attributes['href'], FileHTML):
            if holder_filepath == '':
                # Bug fix: give the ValueError a description (module TODO).
                raise ValueError(
                    'A with a FileHTML-valued href must be rendered through a '
                    'FileHTML so the relative path can be computed.')
            new_href = compare_path(holder_filepath, self._attributes['href'].filepath())
            self._attributes['href'] = new_href
        # Bug fix: forward holder_filepath so child elements can resolve
        # their own File-valued attributes (it was previously dropped).
        return self.oldstr(self, holder_filepath)
class Div(ElementHTML):
    """The <div> division element."""
    def __init__(self, *args, **kwargs):
        ElementHTML.__init__(self, 'div', *args, **kwargs)
class Script(ElementHTML):
    """Wrapper class of ElementHTML for creating script html element.

    A File-valued ``src`` attribute is rewritten into a path relative to
    the HTML file being rendered (supplied via *holder_filepath*).
    """
    def __init__(self, *args, **kwargs):
        super().__init__('script', *args, **kwargs)
        self.oldstr = ElementHTML.__str__
    def __str__(self, holder_filepath=''):  # holder_filepath is passed in FileHTML.__str__
        if 'src' in self._attributes.keys() and \
                isinstance(self._attributes['src'], File):
            if holder_filepath == '':
                # Bug fix: give the ValueError a description (module TODO).
                raise ValueError(
                    'Script with a File-valued src must be rendered through a '
                    'FileHTML so the relative path can be computed.')
            new_src = compare_path(holder_filepath, self._attributes['src'].filepath())
            self._attributes['src'] = new_src
        # Bug fix: forward holder_filepath so child elements can resolve
        # their own File-valued attributes (it was previously dropped).
        return self.oldstr(self, holder_filepath)
class Img(ElementHTML):
    """Wrapper class of ElementHTML for creating image html element.

    A File-valued ``src`` attribute is rewritten into a path relative to
    the HTML file being rendered (supplied via *holder_filepath*).
    """
    def __init__(self, *args, **kwargs):
        super().__init__('img', *args, end_tag=False, **kwargs)
        self.oldstr = ElementHTML.__str__
    def __str__(self, holder_filepath=''):  # holder_filepath is passed in FileHTML.__str__
        if 'src' in self._attributes.keys() and \
                isinstance(self._attributes['src'], File):
            if holder_filepath == '':
                # Bug fix: give the ValueError a description (module TODO).
                raise ValueError(
                    'Img with a File-valued src must be rendered through a '
                    'FileHTML so the relative path can be computed.')
            new_src = compare_path(holder_filepath, self._attributes['src'].filepath())
            self._attributes['src'] = new_src
        # Bug fix: forward holder_filepath so child elements can resolve
        # their own File-valued attributes (it was previously dropped).
        return self.oldstr(self, holder_filepath)
class Ul(ElementHTML):
    """Wrapper class of ElementHTML for creating unordered list html element."""
    def __init__(self, *args, **kwargs):
        super().__init__('ul', *args, **kwargs)
    @classmethod
    def from_list(cls, list_, _no_ul_tag=False):
        """Converts a python list into `Ul` object.

        Nested lists become nested lists, dicts are expanded via from_dict,
        and every other item is wrapped in an `Li`. `_no_ul_tag` is internal:
        it returns the bare items so a parent can splice them in without an
        extra <ul>.
        """
        if _no_ul_tag:
            # Returns a python list containing `Li` class objects
            items = []
            for item in list_:
                if isinstance(item, list):
                    items.append(cls.from_list(item))
                elif isinstance(item, dict):
                    # Bug fix: list.append takes exactly one argument, so the
                    # original append(*...) raised TypeError for any dict;
                    # extend splices all produced items in.
                    items.extend(cls.from_dict(item, _part_of_list=True))
                else:
                    items.append(Li(item))
            return items
        else:
            # Returns a `Ul` class object
            temp_ul = cls()
            for item in list_:
                if isinstance(item, list):
                    temp_ul(cls.from_list(item))
                elif isinstance(item, dict):
                    temp_ul(*cls.from_dict(item, _part_of_list=True))
                else:
                    temp_ul(Li(item))
            return temp_ul
    @classmethod
    def from_dict(cls, dict_, _part_of_list=False):
        """Converts a python dict into a `Ul`: each key is followed by a
        nested list of its value(s), with keys emitted in sorted order.
        """
        keys = list(dict_.keys())
        keys.sort()
        items = []
        for key in keys:
            items.append(key)
            if isinstance(dict_[key], list):
                items.append(dict_[key])
            else:
                items.append([dict_[key]])
        return cls.from_list(items, _no_ul_tag=_part_of_list)
class Ol(ElementHTML):
    """Wrapper class of ElementHTML for creating ordered list html element."""
    def __init__(self, *args, **kwargs):
        super().__init__('ol', *args, **kwargs)
    @classmethod
    def from_list(cls, list_, _no_ul_tag=False):
        """Converts a python list into `Ol` object.

        Nested lists become nested lists, dicts are expanded via from_dict,
        and every other item is wrapped in an `Li`. `_no_ul_tag` is internal:
        it returns the bare items so a parent can splice them in without an
        extra <ol>.
        """
        if _no_ul_tag:
            # Returns a python list containing `Li` class objects
            items = []
            for item in list_:
                if isinstance(item, list):
                    items.append(cls.from_list(item))
                elif isinstance(item, dict):
                    # Bug fix: list.append takes exactly one argument, so the
                    # original append(*...) raised TypeError for any dict;
                    # extend splices all produced items in.
                    items.extend(cls.from_dict(item, _part_of_list=True))
                else:
                    items.append(Li(item))
            return items
        else:
            # Returns a `Ol` class object
            temp_ul = cls()
            for item in list_:
                if isinstance(item, list):
                    temp_ul(cls.from_list(item))
                elif isinstance(item, dict):
                    temp_ul(*cls.from_dict(item, _part_of_list=True))
                else:
                    temp_ul(Li(item))
            return temp_ul
    @classmethod
    def from_dict(cls, dict_, _part_of_list=False):
        """Converts a python dict into an `Ol`: each key is followed by a
        nested list of its value(s), with keys emitted in sorted order.
        """
        keys = list(dict_.keys())
        keys.sort()
        items = []
        for key in keys:
            items.append(key)
            if isinstance(dict_[key], list):
                items.append(dict_[key])
            else:
                items.append([dict_[key]])
        return cls.from_list(items, _no_ul_tag=_part_of_list)
class Li(ElementHTML):
    """The <li> list-item element, rendered without internal newlines."""
    def __init__(self, *args, **kwargs):
        ElementHTML.__init__(self, 'li', *args, **kwargs)
        self.newlines = False
class Table(ElementHTML):
    """Wrapper class of ElementHTML for creating table html element."""
    def __init__(self, *args, **kwargs):
        super().__init__('table', *args, **kwargs)
    @classmethod
    def from_list(cls, list_):
        """Converts a python list into `Table` object.

        The first row supplies the headers and fixes the column count.
        Scalar rows are wrapped into single-cell rows right-padded with
        empty strings.

        Raises:
            ValueError: if the first row is not a list, or a list row has a
                different length than the header row.
        """
        if isinstance(list_[0], list):
            cols = len(list_[0])
        else:
            raise ValueError('Insert only nested lists.')
        rows = []
        for row in list_:
            if isinstance(row, list):
                if len(row) != cols:
                    raise ValueError('All rows in the table must be of same length.')
                rows.append(row)
            else:
                # Bug fix: the original padded a rebound loop variable and
                # discarded the result, so the raw scalar reached the cell
                # loop below; keep the padded copy instead.
                rows.append([row] + [''] * (cols - 1))
        temp_table = cls()
        header_row = Tr()
        temp_table(header_row)
        for header in rows[0]:
            header_row(Th(header))
        for row in rows[1:]:
            temp_row = Tr()
            temp_table(temp_row)
            for cell in row:
                temp_row(Td(cell))
        return temp_table
class Tr(ElementHTML):
    """The <tr> table-row element, rendered without internal newlines."""
    def __init__(self, *args, **kwargs):
        ElementHTML.__init__(self, 'tr', *args, **kwargs)
        self.newlines = False
class Th(ElementHTML):
    """ElementHTML wrapper representing a ``<th>`` (header cell) element."""

    def __init__(self, *args, **kwargs):
        super().__init__('th', *args, **kwargs)
        # Keep the cell's markup on a single line.
        self.newlines = False
class Td(ElementHTML):
    """ElementHTML wrapper representing a ``<td>`` (data cell) element."""

    def __init__(self, *args, **kwargs):
        super().__init__('td', *args, **kwargs)
        # Keep the cell's markup on a single line.
        self.newlines = False
class Hr(ElementHTML):
    """ElementHTML wrapper for the ``<hr>`` (thematic break) element."""

    def __init__(self, *args, **kwargs):
        # <hr> is a void element, so suppress the closing tag.
        super().__init__('hr', *args, end_tag=False, **kwargs)
class Br(ElementHTML):
    """Wrapper class of ElementHTML for creating line break html element."""
    def __init__(self, *args, **kwargs):
        # <br> is a void element, so no closing tag is emitted.
        super().__init__('br', *args, end_tag=False, **kwargs)
class Style(ElementHTML):
    """ElementHTML wrapper representing a ``<style>`` element."""

    def __init__(self, *args, **kwargs):
        super().__init__('style', *args, **kwargs)
class Center(ElementHTML):
    """ElementHTML wrapper representing a ``<center>`` element."""

    def __init__(self, *args, **kwargs):
        super().__init__('center', *args, **kwargs)
# - - - - -
# ========== CSS related code ========== #
def custom_attr_setter(attr_text):
    """Return a setter method that assigns ``self[attr_text] = value``.

    Used by ``_CSSDeclarations`` to generate one wrapper method per
    pre-defined CSS property name.
    """
    def attr_setter(self, new_value: str):
        self[attr_text] = new_value

    return attr_setter
class _CSSDeclarations:
    """Class that contains custom wrapper methods for adding pre-defined declarations to ElementCSS instances."""
    # Text
    color = custom_attr_setter('color')
    direction = custom_attr_setter('direction')
    letter_spacing = custom_attr_setter('letter-spacing')
    word_spacing = custom_attr_setter('word-spacing')
    line_height = custom_attr_setter('line-height')
    txt_align = custom_attr_setter('text-align')
    vertical_align = custom_attr_setter('vertical-align')
    txt_deco = custom_attr_setter('text-decoration')
    txt_indent = custom_attr_setter('text-indent')
    txt_shadow = custom_attr_setter('text-shadow')
    txt_transform = custom_attr_setter('text-transform')
    txt_overflow = custom_attr_setter('text-overflow')
    unicode = custom_attr_setter('unicode-bidi')
    whitespace = custom_attr_setter('white-space')
    # Font
    font = custom_attr_setter('font')
    font_family = custom_attr_setter('font-family')
    font_size = custom_attr_setter('font-size')
    font_style = custom_attr_setter('font-style')
    font_variant = custom_attr_setter('font-variant')
    font_weight = custom_attr_setter('font-weight')
    # Background
    bg = custom_attr_setter('background')
    bg_color = custom_attr_setter('background-color')
    bg_img = custom_attr_setter('background-image')
    bg_repeat = custom_attr_setter('background-repeat')
    bg_attachment = custom_attr_setter('background-attachment')
    bg_pos = custom_attr_setter('background-position')
    # Border
    border = custom_attr_setter('border')
    border_b = custom_attr_setter('border-bottom')
    border_l = custom_attr_setter('border-left')
    border_r = custom_attr_setter('border-right')
    border_t = custom_attr_setter('border-top')
    border_color = custom_attr_setter('border-color')
    border_radius = custom_attr_setter('border-radius')
    border_style = custom_attr_setter('border-style')
    border_width = custom_attr_setter('border-width')
    # Table
    border_collapse = custom_attr_setter('border-collapse')
    border_spacing = custom_attr_setter('border-spacing')
    caption_side = custom_attr_setter('caption-side')
    empty_cells = custom_attr_setter('empty-cells')
    table_layout = custom_attr_setter('table-layout')
    # Margin / padding
    margin = custom_attr_setter('margin')
    margin_b = custom_attr_setter('margin-bottom')
    margin_l = custom_attr_setter('margin-left')
    margin_t = custom_attr_setter('margin-top')
    margin_r = custom_attr_setter('margin-right')
    padding = custom_attr_setter('padding')
    padding_b = custom_attr_setter('padding-bottom')
    padding_l = custom_attr_setter('padding-left')
    padding_t = custom_attr_setter('padding-top')
    padding_r = custom_attr_setter('padding-right')
    # Sizing
    height = custom_attr_setter('height')
    max_height = custom_attr_setter('max-height')
    min_height = custom_attr_setter('min-height')
    width = custom_attr_setter('width')
    max_width = custom_attr_setter('max-width')
    min_width = custom_attr_setter('min-width')
    # Outline
    outline = custom_attr_setter('outline')
    outline_color = custom_attr_setter('outline-color')
    outline_off = custom_attr_setter('outline-offset')
    outline_style = custom_attr_setter('outline-style')
    outline_width = custom_attr_setter('outline-width')
    # Lists
    list_style = custom_attr_setter('list-style')
    list_style_img = custom_attr_setter('list-style-image')
    list_style_pos = custom_attr_setter('list-style-position')
    list_style_type = custom_attr_setter('list-style-type')
    # Layout / positioning
    display = custom_attr_setter('display')
    visible = custom_attr_setter('visibility')
    pos = custom_attr_setter('position')
    bottom = custom_attr_setter('bottom')
    left = custom_attr_setter('left')
    top = custom_attr_setter('top')
    right = custom_attr_setter('right')
    clip = custom_attr_setter('clip')
    z_ind = custom_attr_setter('z-index')
    # Overflow / flow
    overflow = custom_attr_setter('overflow')
    overflowX = custom_attr_setter('overflow-x')  # NOTE(review): camelCase is inconsistent with the rest
    overflowY = custom_attr_setter('overflow-y')
    clear = custom_attr_setter('clear')
    # NOTE(review): `float` intentionally mirrors the CSS property name, but
    # it shadows the builtin inside this class namespace.
    float = custom_attr_setter('float')
class ElementCSS(Dict, _CSSDeclarations):
    """Class that handles css elements."""

    def __init__(self, selector, **kwargs):
        # Initialize both bases explicitly: Dict stores declarations,
        # _CSSDeclarations provides the property-setter methods.
        Dict.__init__(self, **kwargs)
        _CSSDeclarations.__init__(self)
        self.selector = selector

    def __str__(self):
        # Render "selector {\n\tprop: value;\n...}".
        body = ''
        for prop in self.contents.keys():
            body += f'\t{prop}: {self.contents[prop]};\n'
        return f'{self.selector} {{\n{body}}}'
class SelectorElement(ElementCSS):
    """ElementCSS wrapper for a plain element (tag name) selector."""

    def __init__(self, selector, **kwargs):
        super().__init__(selector, **kwargs)
class SelectorId(ElementCSS):
    """ElementCSS wrapper for an id selector (``#name``)."""

    def __init__(self, selector, **kwargs):
        # Prefix with '#' to form a CSS id selector.
        super().__init__(f'#{selector}', **kwargs)
class SelectorClass(ElementCSS):
    """ElementCSS wrapper for a class selector (``.name``)."""

    def __init__(self, selector, **kwargs):
        # Prefix with '.' to form a CSS class selector.
        super().__init__(f'.{selector}', **kwargs)
# - - - - -
# ========== Javascript related code ========== #
class ElementJS(List):
    """Class that handles javascript elements."""

    def __init__(self, *args):
        # No JS-specific behavior yet; behaves exactly like List.
        super().__init__(*args)
# - - - - -
| {
"repo_name": "mikister/webgen",
"path": "webgen/dom.py",
"copies": "1",
"size": "21958",
"license": "mit",
"hash": 7141873671371173000,
"line_mean": 30.5488505747,
"line_max": 113,
"alpha_frac": 0.5828399672,
"autogenerated": false,
"ratio": 3.9699873440607485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5052827311260748,
"avg_score": null,
"num_lines": null
} |
"""Allow selecting multiple lines and indenting them all at once."""
from porcupine import get_tab_manager, tabs, utils
setup_before = ['tabs2spaces'] # see tabs2spaces.py
def on_tab_key(event, shifted):
    """Indent (or dedent, when *shifted*) every line touched by the selection.

    Args:
        event: the Tk event; ``event.widget`` is the text widget.
        shifted: True when Shift was held, i.e. dedent instead of indent.

    Returns:
        None always; when nothing is selected this lets other bindings run.
    """
    try:
        start_index, end_index = map(str, event.widget.tag_ranges('sel'))
    except ValueError:
        # nothing selected, allow doing other stuff
        return None
    start = int(start_index.split('.')[0])
    end = int(end_index.split('.')[0])
    if end_index.split('.')[1] != '0':
        # something's selected on the end line, let's indent/dedent it too
        end += 1
    for lineno in range(start, end):
        if shifted:
            event.widget.dedent('%d.0' % lineno)
        else:
            # skip lines that are empty or contain only whitespace
            content = event.widget.get(
                '%d.0' % lineno, '%d.0 lineend' % lineno)
            if content and not content.isspace():
                event.widget.indent('%d.0' % lineno)
    # select only the lines we indented but everything on them
    event.widget.tag_remove('sel', '1.0', 'end')
    event.widget.tag_add('sel', '%d.0' % start, '%d.0' % end)
def on_new_tab(event):
    """Attach the tab-key handler to the text widget of each new file tab."""
    tab = event.data_widget
    if isinstance(tab, tabs.FileTab):
        utils.bind_tab_key(tab.textwidget, on_tab_key, add=True)
def setup():
    """Register the plugin: listen for new tabs on the tab manager."""
    utils.bind_with_data(get_tab_manager(), '<<NewTab>>', on_new_tab, add=True)
| {
"repo_name": "PurpleMyst/porcupine",
"path": "porcupine/plugins/indent_block.py",
"copies": "1",
"size": "1466",
"license": "mit",
"hash": -3346402221056442400,
"line_mean": 33.0930232558,
"line_max": 79,
"alpha_frac": 0.6002728513,
"autogenerated": false,
"ratio": 3.4988066825775657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45990795338775653,
"avg_score": null,
"num_lines": null
} |
'''Allows executing and formatting ReQL queries and JSON responses'''
from __future__ import print_function
import json
import datetime
import traceback
import os
import sys
import termios
import tty
import re
import functools
import base64
from pygments import highlight
from pygments.lexers import JsonLexer, PythonLexer
from pygments.formatters import Terminal256Formatter
from pygments.styles import STYLE_MAP
import rethinkdb as r
class ReQLExecution(object):
    """Compiles a ReQL query string, runs it, and routes results to an Output.

    Attributes:
        querystring: the raw Python-syntax ReQL expression to evaluate.
        conn: an open rethinkdb connection.
        output: an Output instance that formats/prints results and errors.
        environment: globals used when eval'ing the query string.
    """
    def __init__(self, querystring, files, connection, output):
        self.querystring = querystring
        self.conn = connection
        self.output = output
        self.results = None
        self._query = None  # lazily-compiled query cache (see `query`)
        self.environment = files
        self.environment.update({
            'r': r,
            # Restrict builtins to the literals needed in query expressions.
            # NOTE(review): eval of user input is still inherently unsafe —
            # this limits convenience access, not a real sandbox.
            '__builtins__': {
                'True': True,
                'False': False,
                'None': None,
            }
        })
    @property
    def query(self):
        '''The compiled query from the input query string'''
        if self._query is None:
            self._query = r.expr(eval(self.querystring, self.environment))
        return self._query
    def __call__(self):
        '''Executes the query and sends it to the output'''
        try:
            self.results = self.query.run(
                self.conn,
                binary_format=self.output.binary_format,
                time_format=self.output.time_format)
            self.output(self.results, self.query)
        except r.RqlError as e:
            self.output.error(e)
        except NameError as ne:
            # NOTE(review): `.message` is Python 2 only — the module appears
            # to target py2 (see `basestring` elsewhere in this file).
            self.output.error(ne.message)
        except SyntaxError as se:
            # Drop the first line of the formatted exception, colorize the rest.
            exc_list = traceback.format_exception_only(type(se), se)[1:]
            exc_list[0] = self.output.python_format(exc_list[0])
            self.output.error('\n', ''.join(exc_list))
        except AttributeError as ae:
            self.output.error(ae.message)
        except KeyboardInterrupt:
            # Ctrl-C cancels a long-running query quietly.
            pass
def filename_to_var(filename):
    '''Transforms a filename into a usable variable name'''
    # Keep only the part before the first dot of the basename, then replace
    # every non-word character with an underscore.
    stem = os.path.basename(filename).split('.', 1)[0]
    return re.sub(r'\W', '_', stem)
def binary_patch(func):
    '''decorator to monkey patch the json encoder so it doesn't
    accidentally try to print out binaries (which may have null bytes
    in them)'''
    # Captured once at decoration time; used as the fallback encoder.
    real_encoder = json.encoder.encode_basestring
    def reql_encode_basestring(s):
        # Emit RqlBinary values as quoted base64 text instead of raw bytes.
        # NOTE(review): '"' + b64encode(s) assumes Python 2 (str result);
        # on Python 3 b64encode returns bytes and this concatenation raises.
        if isinstance(s, r.ast.RqlBinary):
            return '"' + base64.b64encode(s) + '"'
        else:
            return real_encoder(s)
    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
        # Install the patched encoder only for the duration of the call and
        # restore whatever was installed at call time (not decoration time).
        orig = json.encoder.encode_basestring
        json.encoder.encode_basestring = reql_encode_basestring
        try:
            return func(*args, **kwargs)
        finally:
            json.encoder.encode_basestring = orig
    return _wrapper
class DateJSONEncoder(json.JSONEncoder):
    '''Will format datetimes as iso8601'''
    def default(self, obj):
        """Serialize datetimes as ISO-8601 strings; defer everything else.

        Delegating to the base class makes genuinely unsupported types raise
        TypeError, instead of being silently encoded as null (the original
        implicitly returned None here, masking serialization bugs).
        """
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        return super(DateJSONEncoder, self).default(obj)
class Output(object):
    '''Centralizes output behavior'''
    # Streams are class attributes so callers/tests can swap them wholesale.
    input_stream = sys.stdin
    output_stream = sys.stdout
    error_stream = sys.stderr
    @staticmethod
    def make(format, style, pagesize):
        '''Factory method to create the appropriate output'''
        # 'auto' picks color for interactive terminals, newline for pipes.
        is_atty = os.isatty(sys.stdout.fileno())
        if format == 'color' or format == 'auto' and is_atty:
            return ColorOutput(style, pagesize)
        elif format == 'newline' or format == 'auto' and not is_atty:
            return NewlineOutput()
        elif format == 'array':
            return ArrayOutput()
        else:
            raise Exception('{} {} {} is illegal!'.format(
                format, style, pagesize))
    @binary_patch
    def format(self, doc):
        '''Dumps a json value according to the current format'''
        # NOTE(review): relies on `self.compact`, which only subclasses
        # define — Output itself is effectively abstract.
        return json.dumps(
            doc,
            indent=None if self.compact else 4,
            sort_keys=not self.compact,
            separators=(',', ':') if self.compact else (', ', ': '),
            cls=json.JSONEncoder if self.compact else DateJSONEncoder,
            ensure_ascii=False,
        )
    def python_format(self, obj):
        # Hook for subclasses to colorize python source; identity by default.
        return obj
    def print(self, *args, **kwargs):
        '''Print a value to stdout'''
        kwargs.setdefault('file', self.output_stream)
        print(*args, **kwargs)
    def fprint(self, value, **kwargs):
        '''Format string equivalent of printf'''
        kwargs.setdefault('file', self.output_stream)
        print(self.format(value), **kwargs)
    def error(self, value, *args, **kwargs):
        '''Print a value to stderr'''
        kwargs.setdefault('file', self.error_stream)
        print(value, *args, **kwargs)
    def getch(self):
        """getch() -> key character
        Read a single keypress from stdin and return the resulting character.
        Nothing is echoed to the console. This call will block if a keypress
        is not already available, but will not wait for Enter to be pressed.
        If the pressed key was a modifier key, nothing will be detected; if
        it were a special function key, it may return the first character of
        of an escape sequence, leaving additional characters in the buffer.
        From http://code.activestate.com/recipes/577977-get-single-keypress/
        """
        # Put the terminal into raw mode just long enough to read one byte,
        # then restore the saved settings even if the read fails.
        fd = self.input_stream.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = self.input_stream.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
class ColorOutput(Output):
    '''User friendly output'''
    # 'native' formats let the driver hand back real datetime/bytes objects.
    time_format = 'native'
    binary_format = 'native'
    compact = False
    def __init__(self, style, pagesize):
        # Fall back to monokai when an unknown pygments style is requested.
        self.style = style if style in STYLE_MAP else 'monokai'
        self.pagesize = pagesize
    def format(self, doc):
        # JSON-format first, then syntax-highlight for the terminal.
        doc = super(ColorOutput, self).format(doc)
        return highlight(
            doc, JsonLexer(), Terminal256Formatter(style=self.style))
    def primitive_array(self, docs):
        '''Whether a document is an array of primitives'''
        # NOTE(review): `basestring` is Python 2 only.
        primitives = (int, float, bool, basestring)
        return isinstance(docs, list) and \
            all(isinstance(x, primitives) for x in docs)
    def python_format(self, obj):
        '''Colorizes python strings'''
        return highlight(str(obj),
                         PythonLexer(),
                         Terminal256Formatter(style=self.style))
    def __call__(self, docs, query):
        if isinstance(docs, dict) and 'first_error' in docs:
            # Detect errors that don't raise exceptions
            self.error(docs['first_error'].replace('\t', ' '))
            return
        if self.primitive_array(docs):
            self.compact = True
        if isinstance(docs, (dict, int, float, bool, basestring)) or \
                self.primitive_array(docs):
            # Print small things directly
            self.fprint(docs)
            self.print('Ran:\n', self.python_format(query))
            return
        i = 0  # in case no results
        # Page through cursor results, prompting every `pagesize` documents.
        for i, doc in enumerate(docs, start=1):
            self.fprint(doc)
            if i % self.pagesize == 0:
                self.print('Running:', self.python_format(query))
                self.print('[%s] Hit any key to continue (or q to quit)...' % i)
                char = self.getch()
                if char.lower() == 'q':
                    raise SystemExit()
        self.print('Total docs:', i)
        self.print('Ran:\n', self.python_format(query))
class NewlineOutput(Output):
    '''Newline separated compact json document output'''
    time_format = 'raw'
    binary_format = 'raw'
    compact = True

    def __call__(self, docs, _):
        # A single document prints as one line; a sequence prints one
        # document per line.
        if isinstance(docs, dict):
            docs = [docs]
        for doc in docs:
            self.fprint(doc)
class ArrayOutput(Output):
    '''JSON array output. Can be parsed by any JSON interpreter'''
    time_format = 'raw'
    binary_format = 'raw'
    compact = True

    def print(self, *args, **kwargs):
        # Flush after every write so consumers see output immediately.
        super(ArrayOutput, self).print(*args, **kwargs)
        self.output_stream.flush()

    def __call__(self, docs, _):
        # Non-cursor values print as-is; cursors stream into a JSON array.
        if not isinstance(docs, r.Cursor):
            self.fprint(docs)
            return
        self.print('[', end='')
        try:
            for index, doc in enumerate(docs):
                if index:
                    self.print(',', sep='', end='')
                self.fprint(doc, sep='', end='')
        finally:
            # Always close the array, even if the cursor raises mid-stream.
            self.print(']')
| {
"repo_name": "deontologician/reql_cli",
"path": "reqlcli/__init__.py",
"copies": "1",
"size": "8840",
"license": "mit",
"hash": -6249369954303568000,
"line_mean": 30.9133574007,
"line_max": 80,
"alpha_frac": 0.5771493213,
"autogenerated": false,
"ratio": 4.227642276422764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5304791597722763,
"avg_score": null,
"num_lines": null
} |
#Allows for creation of new inputs and outputs
from mongoobject import RecursiveMongoObject
import uuid
from bson.objectid import ObjectId
class IO(RecursiveMongoObject):
    """Mongo-backed document holding a set of io "registers" plus metadata."""
    @staticmethod
    def create():
        # Build a fresh document skeleton with a random 12-byte ObjectId.
        return {
            "_id": ObjectId(uuid.uuid4().hex[:24]),
            "meta": {}, #Metadata of the entire io
            "io": {} #The document containing all registers
        }
    #Allows for modification of the metadata associated with an io
    def getMeta(self):
        return self.getChild("meta")
    def setMeta(self,m):
        self["meta"] = m
    meta = property(getMeta,setMeta)
    def getIo(self):
        # NOTE(review): the second argument to getChild is presumably a
        # depth/nesting flag — confirm against RecursiveMongoObject.
        return self.getChild("io",1)
    def setIo(self,i):
        self["io"] = i
    io = property(getIo,setIo)
if (__name__=="__main__"):
    # Smoke-test IO against a throwaway local Mongo collection.
    # NOTE(review): db.remove()/db.insert() are pre-pymongo-3 APIs.
    from pymongo import MongoClient
    db = MongoClient().testing.test
    #Clear the database
    db.remove()
    v = IO.create()
    val = v["_id"]
    db.insert(v)
    v = IO(db,{"_id": val},autocommit=False)
    # The "(committed)[pending]" reprs below come from RecursiveMongoObject.
    # NOTE(review): the exact-string asserts depend on dict ordering and the
    # Python 2 u'' repr — they are interpreter-version sensitive.
    assert str(v.meta)=="({})[{}]"
    v.meta = {"word":"up","gg":"game"}
    #WARNING: if autocommit is off, need to manually commit after setting meta
    v.commit()
    assert str(v.meta)=="({'gg': 'game', 'word': 'up'})[{}]"
    v.meta["hi"]=5
    v.meta["word"] = "down"
    assert str(v.meta) == "({'gg': 'game', 'word': 'up'})[{'hi': 5, 'word': 'down'}]"
    v.commit()
    assert str(v.meta) == "({'gg': 'game', 'hi': 5, 'word': 'down'})[{}]"
    assert str(db.find_one()["meta"]) == "{u'gg': u'game', u'hi': 5, u'word': u'down'}"
    assert len(v.io)==0
    v.io["light1"]={"type": "bool"}
    v.io["light2"]={"type": "bool"}
    v.io["light3"]={"type": "bool"}
    # Writes are pending until commit, so len is still 0 here.
    assert len(v.io)==0
    v.commit()
    assert len(v.io)==3
    assert "light2" in v.io
    assert not "light8" in v.io
    v.io.delete("light2")
    v.io.commit()
    assert not "light2" in v.io
    assert str(db.find_one()["io"]) == "{u'light3': {u'type': u'bool'}, u'light1': {u'type': u'bool'}}"
    assert v.io["light1"]["type"] == "bool"
    v.io["light1"]["type"] = "int"
    # Pending value shadows the committed one until commit() merges them.
    assert str(v.io["light1"]) == "({'type': 'bool'})[{'type': 'int'}]"
    v.io.commit()
    assert str(v.io["light1"]) == "({'type': 'int'})[{}]"
| {
"repo_name": "dkumor/meDB",
"path": "datastore/users/inputoutput.py",
"copies": "1",
"size": "2228",
"license": "mit",
"hash": -4258496223690594000,
"line_mean": 25.843373494,
"line_max": 103,
"alpha_frac": 0.5408438061,
"autogenerated": false,
"ratio": 3.111731843575419,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8994345980234272,
"avg_score": 0.031645933888229616,
"num_lines": 83
} |
# Allows for Note objects to initialized via a given frequency and volume
# Imported for use in the module Note_Library.py
# Taken from < https://gist.github.com/ohsqueezy/6540433 >
##############################################################
# Generate a 440 Hz square waveform in Pygame by building an array of samples and play
# it for 5 seconds. Change the hard-coded 440 to another value to generate a different
# pitch.
#
# Run with the following command:
# python pygame-play-tone.py
from array import array
from time import sleep
import pygame
import pygame.mixer as pm
from pygame.mixer import Sound
class Note(Sound):
    """A square-wave tone playable through pygame's mixer."""

    def __init__(self, frequency, volume=0.1):
        self.frequency = frequency
        # Build one period of samples and hand them to the Sound base class.
        Sound.__init__(self, buffer=self.build_samples())
        self.set_volume(volume)

    def build_samples(self):
        """Return one period of a square wave as signed 16-bit samples."""
        mixer_rate, mixer_format = pm.get_init()[0], pm.get_init()[1]
        period = int(round(mixer_rate / self.frequency))
        # Maximum amplitude representable by the mixer's sample format.
        amplitude = 2 ** (abs(mixer_format) - 1) - 1
        return array(
            "h",
            [amplitude if t < period / 2 else -amplitude
             for t in range(period)])
#An example of the Note object being used
if __name__ == "__main__":
    # 44.1 kHz, signed 16-bit, mono, small buffer — must run before Note()
    # so pm.get_init() returns valid values inside build_samples().
    pm.pre_init(44100, -16, 1, 1024) #Required init for Windows 10
    # pm.init() #Required init for other OS's? #TODO Research this further to confirm
    test = Note(440)
    test.play(-1)  # -1 loops the tone indefinitely
    sleep(5)  # let it sound for 5 seconds before the program exits
| {
"repo_name": "cornell-cup/cs-minibot-platform",
"path": "python-interface/src/MiniBotFramework/Sound/pygame_play_tone.py",
"copies": "1",
"size": "1497",
"license": "apache-2.0",
"hash": 6682653252137785000,
"line_mean": 32.2666666667,
"line_max": 87,
"alpha_frac": 0.6205744823,
"autogenerated": false,
"ratio": 3.770780856423174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48913553387231734,
"avg_score": null,
"num_lines": null
} |
"""Allows for quick movement between git branches, for long branch names."""
import argparse
import subprocess
import sys
from contextlib import contextmanager
from enum import Enum
from typing import List
from typing import Optional
from typing import Tuple
from ..core import color
from ..core import git
from ..exceptions import GitfuException
class BranchNotFoundError(GitfuException):
    """Raised when no local branch matches the given query."""
    pass
class ExcessivelyBroadQueryError(GitfuException):
    """Raised when a query matches more than one branch; args[0] lists them."""
    pass
class BranchChangeStrategy(Enum):
    """How to handle uncommitted local changes when switching branches."""
    DISCARD = 1         # throw away local changes / untracked files
    OVERWRITE_DEST = 2  # stash, switch, then overwrite on the destination
    SAVE = 3            # commit changes as a WIP commit on the old branch
def main(*argv: str) -> int:
    """Entry point: switch to the branch matching the query in ``argv``.

    With no query, prints the available branches. Returns 0 on success,
    1 on any user-facing error.
    """
    args = parse_args(*argv)
    if not args.name:
        print(show_git_branches())
        return 0
    try:
        dest_branch = get_branch(args.name)
    except BranchNotFoundError:
        print(
            (
                f'{color.colorize("ERROR", color.AnsiColor.RED)}: '
                'No branch found with that query.'
            ),
            file=sys.stderr,
        )
        return 1
    except ExcessivelyBroadQueryError as e:
        error = f'{color.colorize("ERROR", color.AnsiColor.RED)}: '
        error += 'Multiple git branches found:\n - '
        error += '\n - '.join(e.args[0])
        error += '\n\nTry a different query.'
        print(error, file=sys.stderr)
        return 1
    # Map the mutually-exclusive CLI flags onto a strategy (None means a
    # plain checkout with no conflict resolution). The original also built an
    # `options` dict here that was never used; it has been removed.
    branch_change_strategy = None
    if args.force:
        branch_change_strategy = BranchChangeStrategy.DISCARD
    elif args.stash:
        branch_change_strategy = BranchChangeStrategy.OVERWRITE_DEST
    elif args.commit:
        branch_change_strategy = BranchChangeStrategy.SAVE
    try:
        switch_branch(dest_branch, strategy=branch_change_strategy)
    except subprocess.CalledProcessError as e:
        print(e.stderr, file=sys.stderr)
        return 1
    return 0
def parse_args(*argv: str) -> argparse.Namespace:
    """Parse CLI arguments for the branch switcher.

    Accepts an optional branch query plus exactly one of the mutually
    exclusive conflict-resolution flags.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        'name',
        nargs='?',
        help='Branch identifier to switch to.',
    )
    exclusive = parser.add_mutually_exclusive_group()
    flag_specs = (
        (
            '-f', '--force',
            'Forcefully changes branches, by **discarding** any local changes and '
            'removing any untracked files.',
        ),
        (
            '-s', '--stash',
            'Forcefully changes branches, but **preserves** changes by stashing them '
            'and overwriting the files after the branch change.',
        ),
        (
            '-c', '--commit',
            'Forcefully changes branches, by **committing** them to the current branch '
            'as a WIP commit, so it can be restored when you come back to this branch.',
        ),
    )
    for short_flag, long_flag, help_text in flag_specs:
        exclusive.add_argument(
            short_flag, long_flag, action='store_true', help=help_text)
    # NOTE: `None` is needed to print the help string.
    return parser.parse_args(argv or None)
def show_git_branches() -> str:
    """Return a human-readable listing of the local branches."""
    return 'These are the branches you can switch to:\n' + git.run('branch')
def get_branch(name: str) -> str:
    """Resolve *name* (a substring query) to exactly one local branch name.

    Raises:
        BranchNotFoundError: no branch line contains the query.
        ExcessivelyBroadQueryError: more than one branch matches.
    """
    matches = []
    for line in git.run('branch', colorize=False).splitlines():
        # Match against the raw line (as the original did), but store the
        # name with the '* ' current-branch marker stripped.
        if name in line:
            matches.append(line.strip('* '))
    if not matches:
        raise BranchNotFoundError
    if len(matches) > 1:
        raise ExcessivelyBroadQueryError(matches)
    return matches[0]
def switch_branch(name: str, *, strategy: Optional[BranchChangeStrategy] = None) -> None:
    """Check out *name*, resolving blocking local changes per *strategy*.

    With no strategy, a failing checkout propagates CalledProcessError.
    """
    try:
        git.run('checkout', name)
    except subprocess.CalledProcessError as e:
        if not strategy:
            raise
        error = e.stderr
        # Each handler is a context manager that clears the blockage before
        # the retried checkout and optionally restores state afterwards.
        handler = {
            BranchChangeStrategy.DISCARD: resolve_errors_through_discard,
            BranchChangeStrategy.OVERWRITE_DEST: resolve_errors_through_preservation,
            BranchChangeStrategy.SAVE: resolve_errors_through_commit,
        }.get(strategy)
        with handler(error):
            git.run('checkout', name)
    # If the destination branch tip is a WIP cache commit created by the
    # SAVE strategy on a previous switch, unpack it back into the worktree.
    last_commit_message = git.run(
        'log', '--pretty=format:"%s"', '-1', colorize=False,
    )
    if last_commit_message == 'WIP: switch-branch-cache':
        git.run('reset', 'HEAD~1')
@contextmanager
def resolve_errors_through_discard(error: str):
    """Strategy: throw away the local changes blocking a checkout.

    Currently a stub — the TODOs below describe the intended behavior.
    """
    tracked_files, untracked_files = _get_blocking_files(error)
    # TODO: git reset all tracked files (unstage them, if staged)
    # TODO: git checkout all tracked files (discard changes)
    # TODO: remove all untracked files (discard changes)
    yield
@contextmanager
def resolve_errors_through_preservation(error: str):
    """Strategy: stash/backup blocking changes, then restore them after
    the checkout completes. Currently a stub (see TODOs).
    """
    tracked_files, untracked_files = _get_blocking_files(error)
    # TODO: git stash (and see what that resolves)
    # TODO: rename untracked files to `.bak`
    yield
    # TODO: rename untracked files back to original
@contextmanager
def resolve_errors_through_commit(error: str):
    """Strategy: park blocking changes in a WIP commit on the old branch.

    switch_branch pops the 'WIP: switch-branch-cache' commit when the user
    later returns to that branch.
    """
    tracked_files, untracked_files = _get_blocking_files(error)
    git.run('add', *tracked_files, *untracked_files)
    git.run('commit', '-m', 'WIP: switch-branch-cache')
    yield
def _get_blocking_files(error: str) -> Tuple[List[str], List[str]]:
# NOTE: From trial and error, there are several error messages that may appear at once.
# These are the scenarios I've encountered:
# 1. staged file that would be overwritten by checkout
# 2. modified files (but not staged) that would be overwritten by checkout
# 3. untracked files that would be overwritten by checkout
tracked_files = []
untracked_files = []
collection = None
for line in error.splitlines():
if line == (
'error: Your local changes to the following files would be overwritten by checkout:'
):
collection = tracked_files
elif line == (
'error: The following untracked working tree files would be overwritten by checkout:'
):
collection = untracked_files
elif line not in {
'Please commit your changes or stash them before you switch branches.',
'Please move or remove them before you switch branches.',
'Aborting',
}:
collection.append(line.strip())
return tracked_files, untracked_files
if __name__ == '__main__':
    # Propagate main()'s integer return code as the process exit status.
    sys.exit(main())
| {
"repo_name": "domanchi/gitfu",
"path": "gitfu/standalone/switch_git_branch.py",
"copies": "1",
"size": "6411",
"license": "mit",
"hash": -7860603707468189000,
"line_mean": 27.6205357143,
"line_max": 97,
"alpha_frac": 0.6268912806,
"autogenerated": false,
"ratio": 4.099104859335038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5225996139935039,
"avg_score": null,
"num_lines": null
} |
"""Allows for triggering GPIO lines on FTDI based boards."""
import time
import pylibftdi
class FtdiButtons:
    """Allows for triggering GPIO lines on FTDI based boards."""
    def __init__(self, ftdi_serial_number, button_map, polarity=False):
        """Initializes the FtdiButtons with the properties provided.
        The ftdi_serial_number is needed to unique identify the FTDI device
        to control as there may be more than one FTDI device available. The
        button map defines the interfaces and pin (bit) to set for each button
        and should look like this:
        some_button_map = {"a_button": {"interface": 1, "pin": 3},
                           "b_button": {"interface": 2, "pin": 5}}
        Args:
            ftdi_serial_number (str): FTDI serial number to use
            button_map (dict): button to pin (bit) mapping.
            polarity (bool): GPIO pin polarity to use (True to push button).
        """
        self._button_map = button_map
        self._button_down = {}  # per-button pressed/released bookkeeping
        self._ftdi_serial_number = ftdi_serial_number
        self._polarity = polarity
        self._sub_process = None
        self._bb_map = {}  # interface number -> pylibftdi.BitBangDevice
        self._bitbang_device_init = False
        self.bitbang_device_init()
    def __del__(self):
        # Best-effort cleanup so buttons aren't left held on GC.
        self.close()
    def close(self):
        """Release any held buttons on close."""
        for button in list(self._button_down):
            self.release(button)
        for interface in self._bb_map:
            ftdi_device = self._bb_map[interface]
            ftdi_device.close()
        self._bb_map.clear()
        self._bitbang_device_init = False
    def is_valid(self, button):
        """Return True if button specified is in the list of valid buttons.
        Args:
            button (str): button identifier.
        Returns:
            bool: True if button specified is in button map provided
        """
        return button in list(self._button_map.keys())
    def bitbang_device_init(self):
        """Creates BitBangDevices for each button and initializes to 'off'."""
        for button in list(self._button_map.keys()):
            interface = self._button_map[button]["interface"]
            pin = self._button_map[button]["pin"]
            if interface not in self._bb_map:
                # First button on this interface: open the device with only
                # this pin configured as an output.
                ftdi_device = pylibftdi.BitBangDevice(
                    device_id=self._ftdi_serial_number,
                    interface_select=interface,
                    direction=(1 << pin))
                # Drive the whole port to the released level for the polarity.
                ftdi_device.port = 0 if self._polarity else 255
                self._bb_map[interface] = ftdi_device
            else:
                # Device already open: just add this pin to the output mask.
                self._bb_map[interface].direction |= (1 << pin)
            self._button_down[button] = False
        self._bitbang_device_init = True
    def press(self, button, wait=0.0):
        """Presses button and waits for the time specified.
        Note: You must call release or close to release the button later.
        Args:
            button (str): identify which button to press
            wait (float): seconds to wait before returning
        Raises:
            ValueError: invalid button or wait given.
        """
        if not self._bitbang_device_init:
            self.bitbang_device_init()
        if button not in self._button_map:
            raise ValueError("Invalid button {} specified".format(button))
        elif wait < 0.0:
            raise ValueError("Invalid wait {} specified".format(wait))
        elif not self._button_down[button]:
            # duration=-1 means a single toggle (press without release).
            self._gpio(button, -1, active_high=self._polarity)
            time.sleep(wait)
    def click(self, button, duration=.5):
        """Presses the button specified and holds it for the specified duration.
        Args:
            button (str): button to click.
            duration (float): seconds to wait before releasing the button.
        Raises:
            ValueError: invalid button or duration given.
        """
        if not self._bitbang_device_init:
            self.bitbang_device_init()
        if button not in self._button_map:
            raise ValueError("Invalid button {} specified".format(button))
        elif duration <= 0.0:
            raise ValueError("Invalid duration {} specified".format(duration))
        else:
            self._gpio(button, duration, active_high=self._polarity)
    def release(self, button):
        """Releases the button specified if it was previously pressed.
        Args:
            button (str): button to click.
        Raises:
            ValueError: invalid button given.
        """
        if not self._bitbang_device_init:
            self.bitbang_device_init()
        if button not in self._button_map:
            raise ValueError("Invalid button {} specified".format(button))
        elif self._button_down[button]:
            self._gpio(button, -1, active_high=self._polarity)
    def read_pin(self, interface, pin):
        """Returns True if the bit is high for the given pin, False otherwise.
        Args:
            interface (int): the port on the FTDI chip containing the GPIO
            pin (int): the bit representing the GPIO to read
        Return:
            bool: True if pin is high, False if low.
        """
        if not self._bitbang_device_init:
            self.bitbang_device_init()
        return (self._bb_map[interface].port & (1 << pin)) != 0
    def valid_buttons(self):
        """Returns a list of valid button names.
        Returns:
            list: A list of valid button name strings
        """
        return list(self._button_map.keys())
    def _gpio(self, button, duration, active_high):
        """Drive the GPIO of the button specified for the duration provided.
        Args:
            button (str): button to be toggled
            duration (float): seconds to wait between the two toggles. If
                negative, only one toggle will be performed (either a press or
                release).
            active_high (bool): drive the pin high if True otherwise drive
                active_low.
        """
        if not self._bitbang_device_init:
            self.bitbang_device_init()
        interface = self._button_map[button]["interface"]
        bb = self._bb_map[interface]
        self.__toggle(button, bb, active_high)
        if duration >= 0.0:
            # Hold, then toggle back (press+release cycle).
            time.sleep(duration)
            self.__toggle(button, bb, active_high)
    def __toggle(self, button, bb, active_high):
        """Toggles the button specified based on its previous state.
        Args:
            button (str): button to be toggled
            bb (BitBangDevice): performs the toggle.
            active_high (bool): drive the pin high if True, otherwise drive
                active_low.
        """
        if not self._bitbang_device_init:
            self.bitbang_device_init()
        pin = self._button_map[button]["pin"]
        if self._button_down[button]:
            # Release button
            if active_high:
                self.__clear_pin(bb, pin)
            else:
                self.__set_pin(bb, pin)
            self._button_down[button] = False
        else:
            # Set button
            if active_high:
                self.__set_pin(bb, pin)
            else:
                self.__clear_pin(bb, pin)
            self._button_down[button] = True
    @staticmethod
    def __clear_pin(bb, pin):
        """Clear the bit of the given pin.
        Args:
            bb (BitBangDevice): performs the work
            pin (int): the bit representing the GPIO to clear
        """
        # XOR against 255 builds an 8-bit mask with only `pin` low.
        bb.port &= 255 ^ (1 << pin)
    @staticmethod
    def __set_pin(bb, pin):
        """Sets the bit for the given pin.
        Args:
            bb (BitBangDevice): performs the work
            pin (int): the bit representing the GPIO to clear
        """
        bb.port |= (1 << pin)
| {
"repo_name": "google/gazoo-device",
"path": "gazoo_device/switchboard/ftdi_buttons.py",
"copies": "1",
"size": "7041",
"license": "apache-2.0",
"hash": 7502745569071379000,
"line_mean": 30.5739910314,
"line_max": 76,
"alpha_frac": 0.6321545235,
"autogenerated": false,
"ratio": 3.8224755700325734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4954630093532574,
"avg_score": null,
"num_lines": null
} |
"""Allows livesettings to be "locked down" and no longer use the settings page or the database
for settings retrieval.
"""
import logging
from django.conf import settings as djangosettings
from django.contrib.sites.models import Site
__all__ = ['get_overrides']
def _safe_get_siteid(site):
    """Return the id of *site*, falling back to the configured site.

    If no site is given, try the current ``Site``; if that lookup fails
    (e.g. the sites table is missing or empty), fall back to
    ``settings.SITE_ID``.
    """
    if site:
        return site.id
    try:
        return Site.objects.get_current().id
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        return djangosettings.SITE_ID
def get_overrides(siteid=-1):
    """Check to see if livesettings is allowed to use the database.

    If not, it will only use the values in the dictionary
    LIVESETTINGS_OPTIONS[SITEID]['SETTINGS'], which allows 'lockdown' of a
    live site.

    The LIVESETTINGS dict must be formatted as follows::

        LIVESETTINGS_OPTIONS = {
            1 : {
                'DB' : False, # or True
                'SETTINGS' : {
                    'GROUPKEY' : {'KEY', val, 'KEY2', val},
                    'GROUPKEY2' : {'KEY', val, 'KEY2', val},
                }
            }
        }

    In the settings dict above, the "val" entries must exactly match the
    format stored in the database for a setting, which is a string
    representation of the value.  Do not use e.g. a literal True or an
    integer.  The easiest way to get a rightly formatted expression is by
    the URL http://your.site/settings/export/

    Args:
        siteid: the site id to look up; -1 means "detect the current site".

    Returns:
        tuple: (DB_ALLOWED, SETTINGS)
    """
    overrides = (True, {})
    if hasattr(djangosettings, 'LIVESETTINGS_OPTIONS'):
        if siteid == -1:
            siteid = _safe_get_siteid(None)
        opts = djangosettings.LIVESETTINGS_OPTIONS
        # ``in`` instead of the Python-2-only ``dict.has_key``.
        if siteid in opts:
            opts = opts[siteid]
            overrides = (opts.get('DB', True), opts['SETTINGS'])
    return overrides
| {
"repo_name": "oblalex/django-xlivesettings",
"path": "xlivesettings/overrides.py",
"copies": "1",
"size": "1864",
"license": "bsd-3-clause",
"hash": -6474655173163482000,
"line_mean": 31.701754386,
"line_max": 94,
"alpha_frac": 0.5971030043,
"autogenerated": false,
"ratio": 3.899581589958159,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49966845942581595,
"avg_score": null,
"num_lines": null
} |
"""Allows livesettings to be "locked down" and no longer use the settings page or the database
for settings retrieval.
"""
from django.conf import settings as djangosettings
from django.contrib.sites.models import Site
import logging
__all__ = ['get_overrides']
def _safe_get_siteid(site):
    """Return the id of *site*, falling back to the configured site.

    If no site is given, try the current ``Site``; if that lookup fails
    (e.g. the sites table is missing or empty), fall back to
    ``settings.SITE_ID``.
    """
    if site:
        return site.id
    try:
        return Site.objects.get_current().id
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        return djangosettings.SITE_ID
def get_overrides(siteid=-1):
    """Check to see if livesettings is allowed to use the database.

    If not, it will only use the values in the dictionary
    LIVESETTINGS_OPTIONS[SITEID]['SETTINGS'], which allows 'lockdown' of a
    live site.

    The LIVESETTINGS dict must be formatted as follows::

        LIVESETTINGS_OPTIONS = {
            1 : {
                'DB' : False, # or True
                'SETTINGS' : {
                    'GROUPKEY' : {'KEY', val, 'KEY2', val},
                    'GROUPKEY2' : {'KEY', val, 'KEY2', val},
                }
            }
        }

    In the settings dict above, the "val" entries must exactly match the
    format stored in the database for a setting, which is a string
    representation of the value.  Do not use e.g. a literal True or an
    integer.  The easiest way to get a rightly formatted expression is by
    the URL http://your.site/settings/export/

    Args:
        siteid: the site id to look up; -1 means "detect the current site".

    Returns:
        tuple: (DB_ALLOWED, SETTINGS)
    """
    overrides = (True, {})
    if hasattr(djangosettings, 'LIVESETTINGS_OPTIONS'):
        if siteid == -1:
            siteid = _safe_get_siteid(None)
        opts = djangosettings.LIVESETTINGS_OPTIONS
        # ``in`` instead of the Python-2-only ``dict.has_key``.
        if siteid in opts:
            opts = opts[siteid]
            overrides = (opts.get('DB', True), opts['SETTINGS'])
    return overrides
| {
"repo_name": "pombredanne/django-livesettings",
"path": "livesettings/overrides.py",
"copies": "3",
"size": "1873",
"license": "bsd-3-clause",
"hash": -4129011167181125600,
"line_mean": 31.8596491228,
"line_max": 94,
"alpha_frac": 0.5942338494,
"autogenerated": false,
"ratio": 3.918410041841004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005806608549790559,
"num_lines": 57
} |
# allows modules to be imported from the ./dependencies directory
import sys
sys.path.insert(0, './dependencies')
# for calling external APIs
import requests
import json
def lambda_handler(event, context):
    """ Route the incoming request based on type (LaunchRequest, IntentRequest,
    etc.) The JSON body of the request is provided in the event parameter.
    """
    print("event.session.application.applicationId=" +
          event['session']['application']['applicationId'])
    request = event['request']
    session = event['session']
    # A brand-new session gets the session-start hook first.
    if session['new']:
        on_session_started({'requestId': request['requestId']}, session)
    request_type = request['type']
    if request_type == "LaunchRequest":
        return on_launch(request, session)
    if request_type == "IntentRequest":
        return on_intent(request, session)
    if request_type == "SessionEndedRequest":
        return on_session_ended(request, session)
def on_session_started(session_started_request, session):
    """ Called when the session starts """
    request_id = session_started_request['requestId']
    session_id = session['sessionId']
    print("on_session_started requestId=" + request_id
          + ", sessionId=" + session_id)
def on_launch(launch_request, session):
    """ Called when the user launches the skill without specifying what they
    want
    """
    request_id = launch_request['requestId']
    print("on_launch requestId=" + request_id +
          ", sessionId=" + session['sessionId'])
    # Hand off to the skill's welcome flow.
    return get_welcome_response()
def on_intent(intent_request, session):
    """ Called when the user specifies an intent for this skill.

    Dispatches to the handler matching the intent name.

    Raises:
        ValueError: if the intent name is not recognized.
    """
    print("on_intent requestId=" + intent_request['requestId'] + ", sessionId=" + session['sessionId'])
    # The unused local ``intent`` from the original was removed.
    intent_name = intent_request['intent']['name']
    print("intent name was: " + intent_name)
    if intent_name == "streamers":
        return get_response()
    elif intent_name == "AMAZON.HelpIntent":
        return get_welcome_response()
    else:
        raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
    """ Called when the user ends the session.

    Is not called when the skill returns should_end_session=true
    """
    request_id = session_ended_request['requestId']
    print("on_session_ended requestId=" + request_id +
          ", sessionId=" + session['sessionId'])
    # add cleanup logic here
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
    """ If we wanted to initialize the session to have some attributes we could
    add those here
    """
    speech_output = "To hear what's streaming on Twitch, say 'what is live right now?'"
    # If the user either does not reply to the welcome message or says something
    # that is not understood, they will be prompted again with this text.
    reprompt_text = "Sorry, I didn't understand what you said."
    speechlet = build_speechlet_response(
        "Welcome", speech_output, reprompt_text, False)
    return build_response({}, speechlet)
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Assemble the Alexa 'speechlet' response dict for a single turn."""
    speech = {'type': 'PlainText', 'text': output}
    card = {
        'type': 'Simple',
        'title': 'SessionSpeechlet - ' + title,
        'content': 'SessionSpeechlet - ' + output,
    }
    reprompt = {'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}}
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': reprompt,
        'shouldEndSession': should_end_session,
    }
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet dict in the top-level Alexa response envelope."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
def get_response():
    """Build a spoken response listing which followed streamers are live.

    Two defects fixed from the original:
    * the empty check tested the *followed* list instead of the *live*
      names, so a non-empty follow list with nobody live produced the
      output " are streaming right now.";
    * the joiner was ' and' (no trailing space), producing e.g. "A andB".
    """
    streamers = get_followed_streamers()
    names = keep_live_users(streamers)
    if not names:
        speech_output = 'No one is streaming right now.'
    else:
        speech_output = ' and '.join(names) + ' are streaming right now.'
    return build_response({}, build_speechlet_response(title='Streamers',
                          output=speech_output,
                          reprompt_text="Please try again.",
                          should_end_session=True))
# takes in [(user id, display name)]
# returns list of users currently streaming
def keep_live_users(streamers):
    """Filter followed streamers down to those currently live on Twitch.

    Args:
        streamers: list of (user id, display name) tuples.

    Returns:
        list: display names of the users currently streaming.
    """
    ids = [streamer[0] for streamer in streamers]
    headers = {'Client-ID': 'fb8brzjsgp1lsl7vnj6clt4ozvuz8c'}
    url = 'https://api.twitch.tv/helix/streams?user_id=' + '&user_id='.join(ids)
    # Fix: the Client-ID header was built but never sent, so the Helix API
    # rejected every call and this function always returned [].
    response = requests.get(url, headers=headers)
    live_ids = []
    if response.status_code == 200:
        json_response = json.loads(response.content)
        # Fix: json.loads yields dicts, so index by key; attribute access
        # (stream.user_id) raised AttributeError.
        live_ids = [stream['user_id'] for stream in json_response['data']]
    return [streamer[1] for streamer in streamers if streamer[0] in live_ids]
# returns list of tuples of people we follow
def get_followed_streamers():
    """Return (id, display name) tuples for the users this account follows."""
    headers = {'Client-ID': 'fb8brzjsgp1lsl7vnj6clt4ozvuz8c'}
    url = 'https://api.twitch.tv/helix/users/follows?from_id=167211639'
    # Fix: the Client-ID header was built but never sent, so the Helix API
    # rejected every call and this function always returned [].
    response = requests.get(url, headers=headers)
    usernames = []
    if response.status_code == 200:
        json_response = json.loads(response.content)
        # Fix: json.loads yields dicts, so index by key; attribute access
        # (follow.id) raised AttributeError.
        # NOTE(review): Helix 'users/follows' entries expose 'to_id'/'to_name'
        # style fields rather than 'id'/'display_name' — confirm these keys
        # against a real API response.
        usernames = [(follow['id'], follow['display_name'])
                     for follow in json_response['data']]
    return usernames
"repo_name": "ikottman/alexa-skills",
"path": "is_streaming/main.py",
"copies": "1",
"size": "5663",
"license": "unlicense",
"hash": -664783094762002200,
"line_mean": 33.962962963,
"line_max": 103,
"alpha_frac": 0.6208723292,
"autogenerated": false,
"ratio": 3.9490934449093444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008748516059265429,
"num_lines": 162
} |
# allows modules to be imported from the ./dependencies directory
import sys
sys.path.insert(0, './dependencies')
# for calling external APIs
import requests
import json
# for handling KMS secret decryption
import os
import boto3
from base64 import b64decode
# get and decode secrets from KMS
# The environment variables hold base64-encoded KMS ciphertext.
ENCRYPTED_KEY = os.environ['google_api_key']
# NOTE(review): "ENCRYPTEED" is a typo but is referenced below; renaming
# would touch module-level names, so it is kept as-is.
ENCRYPTEED_LOCATION = os.environ['location']
GOOGLE_API_KEY = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTED_KEY))['Plaintext']
# NOTE(review): KMS decrypt returns bytes under 'Plaintext'; the URL built in
# Maps.is_place_open concatenates these with str — confirm whether a .decode()
# is needed on Python 3.
LOCATION = boto3.client('kms').decrypt(CiphertextBlob=b64decode(ENCRYPTEED_LOCATION))['Plaintext']
def lambda_handler(event, context):
    """ Route the incoming request based on type (LaunchRequest, IntentRequest,
    etc.) The JSON body of the request is provided in the event parameter.
    """
    print("event.session.application.applicationId=" +
          event['session']['application']['applicationId'])
    # Uncomment and populate with your skill's application ID to prevent
    # someone else from configuring a skill that sends requests to this
    # function:
    # if (event['session']['application']['applicationId'] !=
    #         "amzn1.echo-sdk-ams.app.[unique-value-here]"):
    #     raise ValueError("Invalid Application ID")
    request = event['request']
    session = event['session']
    if session['new']:
        on_session_started({'requestId': request['requestId']}, session)
    # Dispatch table keyed by request type; unknown types fall through to None.
    handlers = {
        "LaunchRequest": on_launch,
        "IntentRequest": on_intent,
        "SessionEndedRequest": on_session_ended,
    }
    handler = handlers.get(request['type'])
    if handler is not None:
        return handler(request, session)
def on_session_started(session_started_request, session):
    """ Called when the session starts """
    request_id = session_started_request['requestId']
    session_id = session['sessionId']
    print("on_session_started requestId=" + request_id
          + ", sessionId=" + session_id)
def on_launch(launch_request, session):
    """ Called when the user launches the skill without specifying what they
    want
    """
    request_id = launch_request['requestId']
    print("on_launch requestId=" + request_id +
          ", sessionId=" + session['sessionId'])
    # Hand off to the skill's welcome flow.
    return get_welcome_response()
def on_intent(intent_request, session):
    """ Called when the user specifies an intent for this skill.

    Raises:
        ValueError: if the intent name is not recognized.
    """
    print("on_intent requestId=" + intent_request['requestId'] + ", sessionId=" + session['sessionId'])
    intent = intent_request['intent']
    intent_name = intent['name']
    print("intent name was: " + intent_name)
    if intent_name == "AMAZON.HelpIntent":
        return get_welcome_response()
    if intent_name == "Place":
        return Maps.is_place_open(intent)
    raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
    """ Called when the user ends the session.

    Is not called when the skill returns should_end_session=true
    """
    request_id = session_ended_request['requestId']
    print("on_session_ended requestId=" + request_id +
          ", sessionId=" + session['sessionId'])
    # add cleanup logic here
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
    """ If we wanted to initialize the session to have some attributes we could
    add those here
    """
    speech_output = "To see if a business is open, say: ask office hours if Taco Bell is open."
    # If the user either does not reply to the welcome message or says something
    # that is not understood, they will be prompted again with this text.
    reprompt_text = "Sorry, I didn't understand what you said."
    speechlet = build_speechlet_response(
        "Welcome", speech_output, reprompt_text, False)
    return build_response({}, speechlet)
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Assemble the Alexa 'speechlet' response dict for a single turn."""
    response = {}
    response['outputSpeech'] = {'type': 'PlainText', 'text': output}
    response['card'] = {
        'type': 'Simple',
        'title': 'SessionSpeechlet - ' + title,
        'content': 'SessionSpeechlet - ' + output,
    }
    response['reprompt'] = {
        'outputSpeech': {'type': 'PlainText', 'text': reprompt_text},
    }
    response['shouldEndSession'] = should_end_session
    return response
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet dict in the top-level Alexa response envelope."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
class Maps:
    """Thin wrapper around the Google Places 'nearby search' API."""

    @staticmethod
    def is_place_open(intent):
        """Look up the nearest match for the requested place and say whether
        it is currently open.

        Args:
            intent (dict): Alexa intent with the place name in
                ``slots.name.value``.

        Returns:
            dict: Alexa response envelope with the spoken answer.
        """
        print(intent)
        place = intent['slots']['name']['value']
        # search for the place name within a 50,000 meter (roughly 31 mile)
        # radius of the specified gps coordinates.
        api_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=" + \
                  LOCATION + "&name=" + place + "&rankby=distance&key=" + GOOGLE_API_KEY
        response = requests.get(api_url)
        speech_output = "Sorry, couldn't find anything for " + place
        # The redundant ``response.content is not None`` check was dropped:
        # Response.content is bytes, never None.
        if response is not None and response.status_code == 200:
            json_response = json.loads(response.content)
            results = json_response.get('results', [])
            if results:
                result = results[0]
                # Fix: Places omits 'opening_hours' when hours are unknown;
                # the original raised KeyError in that case.
                open_now = result.get('opening_hours', {}).get('open_now')
                if open_now is None:
                    speech_output = ("Sorry, I don't know the hours for "
                                     + result['name'] + " at " + result['vicinity'])
                elif open_now:
                    speech_output = result['name'] + " at " + result['vicinity'] + " is open."
                else:
                    speech_output = result['name'] + " at " + result['vicinity'] + " is closed."
        return build_response({}, build_speechlet_response(title=intent['name'],
                              output=speech_output,
                              reprompt_text="Please try again.",
                              should_end_session=True))
| {
"repo_name": "ikottman/alexa-skills",
"path": "office_hours/office_hours.py",
"copies": "1",
"size": "6237",
"license": "unlicense",
"hash": -4367586624101451000,
"line_mean": 36.8,
"line_max": 116,
"alpha_frac": 0.6185666186,
"autogenerated": false,
"ratio": 4.152463382157124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004038850814947601,
"num_lines": 165
} |
"""Allows mouse wheel to work within matplotlib
http://stackoverflow.com/questions/11551049/matplotlib-plot-zooming-with-scroll-wheel
"""
from matplotlib.pyplot import figure, show
import numpy
class ZoomPan:
    """Attach scroll-wheel zoom and drag-to-pan behaviour to a Matplotlib Axes.

    Based on the Stack Overflow answer linked in the module docstring.
    """
    def __init__(self):
        # Pan-in-progress state: ``press`` is None when no drag is active.
        self.press = None
        self.cur_xlim = None
        self.cur_ylim = None
        self.x0 = None
        self.y0 = None
        self.x1 = None
        self.y1 = None
        self.xpress = None
        self.ypress = None

    def zoom_factory(self, ax, base_scale = 2.):
        """Connect wheel-zoom to *ax* and return the zoom callback.

        Args:
            ax: the Axes to attach to.
            base_scale (float): zoom ratio applied per wheel click.
        """
        def zoom(event):
            cur_xlim = ax.get_xlim()
            cur_ylim = ax.get_ylim()
            xdata = event.xdata # get event x location
            ydata = event.ydata # get event y location
            if event.button == 'down':
                # deal with zoom in
                scale_factor = 1 / base_scale
            elif event.button == 'up':
                # deal with zoom out
                scale_factor = base_scale
            else:
                # deal with something that should never happen
                scale_factor = 1
                print(event.button)
            new_width = (cur_xlim[1] - cur_xlim[0]) * scale_factor
            new_height = (cur_ylim[1] - cur_ylim[0]) * scale_factor
            # Keep the point under the cursor fixed: relx/rely is the cursor's
            # relative position inside the current view.
            relx = (cur_xlim[1] - xdata)/(cur_xlim[1] - cur_xlim[0])
            rely = (cur_ylim[1] - ydata)/(cur_ylim[1] - cur_ylim[0])
            ax.set_xlim([xdata - new_width * (1-relx), xdata + new_width * (relx)])
            ax.set_ylim([ydata - new_height * (1-rely), ydata + new_height * (rely)])
            ax.figure.canvas.draw()
        fig = ax.get_figure() # get the figure of interest
        fig.canvas.mpl_connect('scroll_event', zoom)
        return zoom

    def pan_factory(self, ax):
        """Connect press/drag/release panning to *ax*; returns the motion callback."""
        def onPress(event):
            if event.inaxes != ax: return
            self.cur_xlim = ax.get_xlim()
            self.cur_ylim = ax.get_ylim()
            # x0/y0 hold stale values here; they are refreshed by the unpack
            # on the next line.
            self.press = self.x0, self.y0, event.xdata, event.ydata
            self.x0, self.y0, self.xpress, self.ypress = self.press
        def onRelease(event):
            self.press = None
            ax.figure.canvas.draw()
        def onMotion(event):
            if self.press is None: return
            if event.inaxes != ax: return
            dx = event.xdata - self.xpress
            dy = event.ydata - self.ypress
            # NOTE(review): the in-place subtraction assumes get_xlim()
            # returned an array-like supporting ``-=``; recent Matplotlib
            # returns a tuple, for which this raises TypeError — confirm
            # against the target Matplotlib version.
            self.cur_xlim -= dx
            self.cur_ylim -= dy
            ax.set_xlim(self.cur_xlim)
            ax.set_ylim(self.cur_ylim)
            ax.figure.canvas.draw()
        fig = ax.get_figure() # get the figure of interest
        # attach the call back
        fig.canvas.mpl_connect('button_press_event',onPress)
        fig.canvas.mpl_connect('button_release_event',onRelease)
        fig.canvas.mpl_connect('motion_notify_event',onMotion)
        #return the function
        return onMotion
def example():
    """Demo: a random scatter plot with wheel-zoom and drag-pan enabled."""
    fig = figure()
    ax = fig.add_subplot(111, xlim=(0, 1), ylim=(0, 1), autoscale_on=False)
    ax.set_title('Click to zoom')
    x, y, s, c = numpy.random.rand(4, 200)
    s *= 200
    ax.scatter(x, y, s, c)
    zp = ZoomPan()
    # Keep references to the callbacks for the lifetime of the plot.
    zoom_cb = zp.zoom_factory(ax, base_scale=1.1)
    pan_cb = zp.pan_factory(ax)
    show()
| {
"repo_name": "biokit/biokit",
"path": "biokit/dev/mpl_focus.py",
"copies": "1",
"size": "3225",
"license": "bsd-2-clause",
"hash": -7794963521827931000,
"line_mean": 29.7142857143,
"line_max": 85,
"alpha_frac": 0.5457364341,
"autogenerated": false,
"ratio": 3.5092491838955384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45549856179955384,
"avg_score": null,
"num_lines": null
} |
# Allows player to choose their next move
from show import *
def chooser(x_or_o, board):
    """Prompt the player for a board spot and place their mark.

    Args:
        x_or_o: the current player's mark ('x' or 'o').
        board: mutable sequence of cells; updated in place.

    Loops until a free, in-range spot is chosen.
    """
    while True:
        show(board)
        try:
            spot = int(input("Select a spot[%s]> " % x_or_o))
            # Subtract 1 to accomodate for indexing vs regular
            # human sensibility
            spot -= 1
            # Fix: an explicit bounds check. The original let negative
            # results (e.g. input 0) silently index from the end of the
            # board, and its duplicated IndexError handlers were dead code.
            if not 0 <= spot < len(board):
                print("That is not a number on the board.")
                continue
            if board[spot] == 'x' or board[spot] == 'o':
                cleanup()
                print("This spot has already been taken:")
                continue
            board[spot] = x_or_o
            break
        except (ValueError, TypeError, SyntaxError, NameError):
            # NameError/SyntaxError can come from Python 2's eval-ing input().
            print("That is not a number on the board.")
            continue
def _test():
    """Run this module's doctests."""
    import doctest
    doctest.testmod()
| {
"repo_name": "jessebikman/Tictactoe-colorama",
"path": "chooser.py",
"copies": "1",
"size": "1263",
"license": "mit",
"hash": 5985769104496948000,
"line_mean": 30.575,
"line_max": 72,
"alpha_frac": 0.5106888361,
"autogenerated": false,
"ratio": 4.370242214532872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.027815934065934068,
"num_lines": 40
} |
""" Allows production of cutlists for a given set of required pieces, given
a set of available stock sizes.
"""
import collections
from .stock import Stock
# Lightweight record for one required piece: its index and its length.
Piece = collections.namedtuple('Piece', ['id', 'length'])
class Planner(object):
    """ Object that can produce a cutlist (plan) for cutting stock. """

    def __init__(self, sizes, needed, loss=0.25):
        self.stock = []
        self.stock_sizes = sorted(sizes)
        # Queue pieces in reverse so pop() hands them out in original order.
        self.pieces_needed = [Piece(i, s) for i, s in enumerate(needed)]
        self.pieces_needed.reverse()
        self.cut_loss = loss
        self.cur_stock = None
        # set the algorithm to use, hard code for now
        self.apply_algo = self.apply_next_fit

    @property
    def largest_stock(self):
        """ Returns the size of the largest available stock."""
        return self.stock_sizes[-1]

    def cut_piece(self, piece):
        """ Record the cut for the given piece """
        self.cur_stock.cut(piece, self.cut_loss)

    def finalize_stock(self):
        """ Takes current stock out of use, attempts to shrink """
        # Try progressively smaller sizes until one no longer fits.
        for smaller in self.stock_sizes[-2::-1]:
            if self.cur_stock.shrink(smaller) is None:
                break
        self.stock.append(self.cur_stock)

    def apply_next_fit(self, piece):
        """ Cut from current stock until unable, then move to new stock """
        required = piece.length + self.cut_loss
        if self.cur_stock.remaining_length < required:
            # finalize current stock and get fresh stock
            self.finalize_stock()
            self.cur_stock = Stock(self.largest_stock)
        self.cur_stock.cut(piece, self.cut_loss)

    def make_cuts(self):
        """ Apply the cutting algorithm to generate a cut list."""
        self.cur_stock = Stock(self.largest_stock)
        while self.pieces_needed:
            self.apply_algo(self.pieces_needed.pop())
        self.finalize_stock()
| {
"repo_name": "alanc10n/py-cutplanner",
"path": "cutplanner/planner.py",
"copies": "1",
"size": "2004",
"license": "mit",
"hash": 2501005921870436400,
"line_mean": 31.8524590164,
"line_max": 75,
"alpha_frac": 0.621257485,
"autogenerated": false,
"ratio": 3.9140625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5035319985,
"avg_score": null,
"num_lines": null
} |
"""Allows reading temperatures from ecoal/esterownik.pl controller."""
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import TEMP_CELSIUS
from . import AVAILABLE_SENSORS, DATA_ECOAL_BOILER
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the ecoal sensors."""
    if discovery_info is None:
        return
    controller = hass.data[DATA_ECOAL_BOILER]
    # One entity per discovered sensor id, named from the lookup table.
    entities = [
        EcoalTempSensor(controller, AVAILABLE_SENSORS[sensor_id], sensor_id)
        for sensor_id in discovery_info
    ]
    add_entities(entities, True)
class EcoalTempSensor(SensorEntity):
    """Representation of a temperature sensor using ecoal status data."""

    def __init__(self, ecoal_contr, name, status_attr):
        """Initialize the sensor."""
        self._state = None
        self._name = name
        self._status_attr = status_attr
        self._ecoal_contr = ecoal_contr

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    def update(self):
        """Fetch new state data for the sensor.

        This is the only method that should fetch new data for Home Assistant.
        """
        # Values cached up to 0.5 back by the controller are still acceptable.
        status = self._ecoal_contr.get_cached_status()
        self._state = getattr(status, self._status_attr)
| {
"repo_name": "w1ll1am23/home-assistant",
"path": "homeassistant/components/ecoal_boiler/sensor.py",
"copies": "5",
"size": "1632",
"license": "apache-2.0",
"hash": -8282250350400701000,
"line_mean": 30.3846153846,
"line_max": 78,
"alpha_frac": 0.6507352941,
"autogenerated": false,
"ratio": 3.76036866359447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 52
} |
"""Allows reading temperatures from ecoal/esterownik.pl controller."""
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from . import AVAILABLE_SENSORS, DATA_ECOAL_BOILER
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the ecoal sensors."""
    if discovery_info is None:
        return
    controller = hass.data[DATA_ECOAL_BOILER]
    # One entity per discovered sensor id, named from the lookup table.
    entities = [
        EcoalTempSensor(controller, AVAILABLE_SENSORS[sensor_id], sensor_id)
        for sensor_id in discovery_info
    ]
    add_entities(entities, True)
class EcoalTempSensor(Entity):
    """Representation of a temperature sensor using ecoal status data."""

    def __init__(self, ecoal_contr, name, status_attr):
        """Initialize the sensor."""
        self._state = None
        self._name = name
        self._status_attr = status_attr
        self._ecoal_contr = ecoal_contr

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    def update(self):
        """Fetch new state data for the sensor.

        This is the only method that should fetch new data for Home Assistant.
        """
        # Values cached up to 0.5 back by the controller are still acceptable.
        status = self._ecoal_contr.get_cached_status()
        self._state = getattr(status, self._status_attr)
| {
"repo_name": "tboyce021/home-assistant",
"path": "homeassistant/components/ecoal_boiler/sensor.py",
"copies": "9",
"size": "1617",
"license": "apache-2.0",
"hash": -8390926505367633000,
"line_mean": 30.0961538462,
"line_max": 78,
"alpha_frac": 0.6474953618,
"autogenerated": false,
"ratio": 3.7430555555555554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8890550917355556,
"avg_score": null,
"num_lines": null
} |
"""Allows reading temperatures from ecoal/esterownik.pl controller."""
import logging
from homeassistant.components.ecoal_boiler import (
DATA_ECOAL_BOILER, AVAILABLE_SENSORS, )
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)
# Home Assistant component dependency declaration (legacy style).
DEPENDENCIES = ['ecoal_boiler']
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the ecoal sensors."""
    if discovery_info is None:
        return
    controller = hass.data[DATA_ECOAL_BOILER]
    # One entity per discovered sensor id, named from the lookup table.
    entities = [
        EcoalTempSensor(controller, AVAILABLE_SENSORS[sensor_id], sensor_id)
        for sensor_id in discovery_info
    ]
    add_entities(entities, True)
class EcoalTempSensor(Entity):
    """Representation of a temperature sensor using ecoal status data."""

    def __init__(self, ecoal_contr, name, status_attr):
        """Initialize the sensor."""
        self._state = None
        self._name = name
        self._status_attr = status_attr
        self._ecoal_contr = ecoal_contr

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    def update(self):
        """Fetch new state data for the sensor.

        This is the only method that should fetch new data for Home Assistant.
        """
        # Values cached up to 0.5 back by the controller are still acceptable.
        status = self._ecoal_contr.get_cached_status()
        self._state = getattr(status, self._status_attr)
| {
"repo_name": "HydrelioxGitHub/home-assistant",
"path": "homeassistant/components/ecoal_boiler/sensor.py",
"copies": "2",
"size": "1749",
"license": "apache-2.0",
"hash": 7123816952604687000,
"line_mean": 29.1551724138,
"line_max": 78,
"alpha_frac": 0.653516295,
"autogenerated": false,
"ratio": 3.697674418604651,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 58
} |
"""Allows reading temperatures from ecoal/esterownik.pl controller."""
import logging
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from . import AVAILABLE_SENSORS, DATA_ECOAL_BOILER
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)
# Home Assistant component dependency declaration (legacy style).
DEPENDENCIES = ['ecoal_boiler']
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the ecoal sensors."""
    if discovery_info is None:
        return
    controller = hass.data[DATA_ECOAL_BOILER]
    # One entity per discovered sensor id, named from the lookup table.
    entities = [
        EcoalTempSensor(controller, AVAILABLE_SENSORS[sensor_id], sensor_id)
        for sensor_id in discovery_info
    ]
    add_entities(entities, True)
class EcoalTempSensor(Entity):
    """Representation of a temperature sensor using ecoal status data."""

    def __init__(self, ecoal_contr, name, status_attr):
        """Initialize the sensor."""
        self._state = None
        self._name = name
        self._status_attr = status_attr
        self._ecoal_contr = ecoal_contr

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    def update(self):
        """Fetch new state data for the sensor.

        This is the only method that should fetch new data for Home Assistant.
        """
        # Values cached up to 0.5 back by the controller are still acceptable.
        status = self._ecoal_contr.get_cached_status()
        self._state = getattr(status, self._status_attr)
| {
"repo_name": "jamespcole/home-assistant",
"path": "homeassistant/components/ecoal_boiler/sensor.py",
"copies": "1",
"size": "1705",
"license": "apache-2.0",
"hash": -2432014744900283000,
"line_mean": 28.3965517241,
"line_max": 78,
"alpha_frac": 0.6504398827,
"autogenerated": false,
"ratio": 3.698481561822126,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9848921444522125,
"avg_score": 0,
"num_lines": 58
} |
# Allows rendering a given tilespec
from multiple_tiles_affine_renderer import MultipleTilesAffineRenderer
from single_tile_affine_renderer import SingleTileAffineRenderer
import json
import numpy as np
import sys
sys.path.append('../')
import models
class TilespecAffineRenderer:
    """Renders the tiles described by a tilespec, applying their affine transforms."""

    def __init__(self, tilespec):
        self.single_tiles = []
        for tile_ts in tilespec:
            renderer = SingleTileAffineRenderer(
                tile_ts["mipmapLevels"]["0"]["imageUrl"].replace("file://", ""),
                tile_ts["width"], tile_ts["height"], compute_distances=True)
            # Apply every transform listed for this tile, in order; only the
            # top 2x3 part of each matrix is used (affine).
            for t in tile_ts["transforms"]:
                transform = models.Transforms.from_tilespec(t)
                renderer.add_transformation(transform.get_matrix()[:2])
            self.single_tiles.append(renderer)
        self.multi_renderer = MultipleTilesAffineRenderer(self.single_tiles, blend_type="LINEAR")

    def render(self):
        return self.multi_renderer.render()

    def crop(self, from_x, from_y, to_x, to_y):
        return self.multi_renderer.crop(from_x, from_y, to_x, to_y)

    def add_transformation(self, transform_matrix):
        """Adds a transformation to all tiles"""
        for single_tile in self.single_tiles:
            single_tile.add_transformation(transform_matrix[:2])
| {
"repo_name": "Rhoana/rh_aligner",
"path": "old/renderer/tilespec_affine_renderer.py",
"copies": "1",
"size": "1375",
"license": "mit",
"hash": -1797301617322527500,
"line_mean": 37.1944444444,
"line_max": 157,
"alpha_frac": 0.6516363636,
"autogenerated": false,
"ratio": 3.9173789173789175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5069015280978917,
"avg_score": null,
"num_lines": null
} |
# Allows rendering a given tilespec
from multiple_tiles_renderer import MultipleTilesRenderer
from single_tile_renderer import SingleTileRenderer
import json
import numpy as np
import sys
sys.path.append('../')
import models
class TilespecRenderer:
    """Renders the tiles described by a tilespec, applying their transforms."""

    def __init__(self, tilespec):
        self.single_tiles = []
        for tile_ts in tilespec:
            renderer = SingleTileRenderer(
                tile_ts["mipmapLevels"]["0"]["imageUrl"].replace("file://", ""),
                tile_ts["width"], tile_ts["height"], compute_distances=True)
            # Apply every transform listed for this tile, in order.
            for t in tile_ts["transforms"]:
                model = models.Transforms.from_tilespec(t)
                renderer.add_transformation(model)
            self.single_tiles.append(renderer)
        self.multi_renderer = MultipleTilesRenderer(self.single_tiles, blend_type="LINEAR")

    def render(self):
        return self.multi_renderer.render()

    def crop(self, from_x, from_y, to_x, to_y):
        return self.multi_renderer.crop(from_x, from_y, to_x, to_y)

    def add_transformation(self, model):
        """Adds a transformation to all tiles"""
        for single_tile in self.single_tiles:
            single_tile.add_transformation(model)
| {
"repo_name": "Rhoana/rh_aligner",
"path": "old/renderer/tilespec_renderer.py",
"copies": "1",
"size": "1280",
"license": "mit",
"hash": 3278240346951653000,
"line_mean": 34.5555555556,
"line_max": 157,
"alpha_frac": 0.63671875,
"autogenerated": false,
"ratio": 3.9628482972136223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5099567047213622,
"avg_score": null,
"num_lines": null
} |
"""Allows running Celery tasks asynchronously using the Tornado IOLoop."""
from tornado import concurrent
from tornado import ioloop
from tornado import stack_context
import logging
import time
logger = logging.getLogger(__name__)
class CeleryPoller(object):
    """Polls Celery task results on the Tornado IOLoop and fires callbacks."""

    def __init__(self, poll_freq):
        # poll_freq: delay between result polls, passed to add_timeout.
        self._poll_freq = poll_freq
        # Pending (AsyncResult, wrapped-callback) pairs awaiting completion.
        self._results_callbacks = []

    @concurrent.return_future
    def run_task(self, task, *args, **kwargs):
        """Start a Celery task; the returned future resolves with its result."""
        callback = kwargs.pop("callback")

        # Wrap the callback so the result is unpacked (and any task exception
        # re-raised) in the caller's stack context.
        def result_callback(result):
            callback(result.get())

        wrapped = stack_context.wrap(result_callback)
        logger.info("Starting task (should be fast)...")
        async_result = task.delay(*args, **kwargs)
        self._results_callbacks.append((async_result, wrapped))
        logger.info("... task started")
        self._poll_tasks()

    def _poll_tasks(self):
        """Check every pending task once; reschedule a poll if any remain."""
        logger.debug("Polling Celery tasks")
        still_pending = []
        start = time.time()
        logger.info("Starting task poll (should be fast)...")
        for result, callback in self._results_callbacks:
            if not result.ready():
                logger.debug("Task is still pending")
                still_pending.append((result, callback))
                continue
            logger.info("Finished task")
            # Exception should never be raised here or bad things will
            # happen.
            callback(result)
        elapsed = time.time() - start
        logger.info("... polled {} task(s) in {}ms"
                    .format(len(self._results_callbacks), int(elapsed * 1000)))
        self._results_callbacks = still_pending
        if len(self._results_callbacks) > 0:
            logger.debug("Tasks are still pending, scheduling next poll")
            ioloop.IOLoop.instance().add_timeout(self._poll_freq,
                                                 self._poll_tasks)
        else:
            logger.debug("All tasks are complete, no polling necessary")
| {
"repo_name": "tdryer/feeder",
"path": "feedreader/celery_poller.py",
"copies": "1",
"size": "2139",
"license": "mit",
"hash": 3463264437872493600,
"line_mean": 36.5263157895,
"line_max": 76,
"alpha_frac": 0.5848527349,
"autogenerated": false,
"ratio": 4.590128755364807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 57
} |
# Allows the admin to close individual opened ports from django
# Delete rule from table.filter chain input
# Add the deny rule
import iptc
# Delete the input rule from table
def delete_identical_rule(client_ip, service_port):
    """Remove ACCEPT rules in filter/INPUT for this client IP and port.

    Iterates over a snapshot (``list(chain.rules)``) because deleting from
    the chain while walking its live rule list can skip entries.  Each rule
    is deleted at most once, even if several of its matches carry the same
    destination port (the original could call delete_rule twice on one rule).
    """
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    for rule in list(chain.rules):
        # rule.src is CIDR-formatted ("1.2.3.4/255.255.255.255"); compare the
        # address part only, as the original did.
        src_ip = rule.src.split('/')[0]
        if src_ip != client_ip or rule.target.name != "ACCEPT":
            continue
        for match in rule.matches:
            # NOTE(review): assumes every match exposes a dport (udp/tcp
            # matches do) — same assumption as the original code.
            if int(match.dport) == service_port:
                chain.delete_rule(rule)
                break
# Delete any forms of drop from input table
def delete_existing_drop(client_ip, service_port):
    """Remove DROP rules in filter/INPUT for this client IP and port.

    Iterates over a snapshot (``list(chain.rules)``) because deleting from
    the chain while walking its live rule list can skip entries.  Each rule
    is deleted at most once, even if several of its matches carry the same
    destination port (the original could call delete_rule twice on one rule).
    """
    chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    for rule in list(chain.rules):
        # rule.src is CIDR-formatted; compare the address part only.
        src_ip = rule.src.split('/')[0]
        if src_ip != client_ip or rule.target.name != "DROP":
            continue
        for match in rule.matches:
            # NOTE(review): assumes every match exposes a dport (udp/tcp
            # matches do) — same assumption as the original code.
            if int(match.dport) == service_port:
                chain.delete_rule(rule)
                break
def add_deny_rule(client_ip, service_port):
    """Insert a DROP rule in filter/INPUT for UDP from client_ip to service_port."""
    deny = iptc.Rule()
    # Traffic arrives on the wireless interface and would be forwarded to eth0.
    deny.in_interface = "wlan0"
    deny.out_interface = "eth0"
    deny.src = client_ip
    deny.protocol = "udp"
    udp_match = iptc.Match(deny, "udp")
    udp_match.dport = "%d" % service_port
    deny.add_match(udp_match)
    deny.target = iptc.Target(deny, "DROP")
    input_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    # Insert at the top so the DROP takes precedence over later ACCEPTs.
    input_chain.insert_rule(deny)
    return
def close_service_port(client_ip, service_port):
    """Close an opened port for a client: purge old rules, then install DROP."""
    # Remove any stale DROP first so duplicates never stack, then remove the
    # matching ACCEPT before finally installing the deny rule.
    delete_existing_drop(client_ip, service_port)
    delete_identical_rule(client_ip, service_port)
    add_deny_rule(client_ip, service_port)
    return
| {
"repo_name": "bb111189/CryptoKnocker",
"path": "CryptoKnocker/libs/port_operations/closeIndividualPort.py",
"copies": "2",
"size": "1798",
"license": "mit",
"hash": -1553334561083069200,
"line_mean": 31.1071428571,
"line_max": 97,
"alpha_frac": 0.6568409344,
"autogenerated": false,
"ratio": 3.431297709923664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9997011791796939,
"avg_score": 0.018225370505344996,
"num_lines": 56
} |
"""Allows the creation of a sensor that breaks out state_attributes."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASSES_SCHEMA,
DOMAIN as SENSOR_DOMAIN,
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SensorEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_CLASS,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_FRIENDLY_NAME,
CONF_FRIENDLY_NAME_TEMPLATE,
CONF_ICON,
CONF_ICON_TEMPLATE,
CONF_NAME,
CONF_SENSORS,
CONF_STATE,
CONF_UNIQUE_ID,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.entity import async_generate_entity_id
from .const import (
CONF_ATTRIBUTE_TEMPLATES,
CONF_ATTRIBUTES,
CONF_AVAILABILITY,
CONF_AVAILABILITY_TEMPLATE,
CONF_OBJECT_ID,
CONF_PICTURE,
CONF_TRIGGER,
)
from .template_entity import TemplateEntity
from .trigger_entity import TriggerEntity
# Mapping from legacy (platform-style) option names to their modern
# equivalents under `template:`; used by rewrite_legacy_to_modern_conf.
LEGACY_FIELDS = {
    CONF_ICON_TEMPLATE: CONF_ICON,
    CONF_ENTITY_PICTURE_TEMPLATE: CONF_PICTURE,
    CONF_AVAILABILITY_TEMPLATE: CONF_AVAILABILITY,
    CONF_ATTRIBUTE_TEMPLATES: CONF_ATTRIBUTES,
    CONF_FRIENDLY_NAME_TEMPLATE: CONF_NAME,
    CONF_FRIENDLY_NAME: CONF_NAME,
    CONF_VALUE_TEMPLATE: CONF_STATE,
}
# Modern sensor configuration schema: only `state` is required.
SENSOR_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_NAME): cv.template,
        vol.Required(CONF_STATE): cv.template,
        vol.Optional(CONF_ICON): cv.template,
        vol.Optional(CONF_PICTURE): cv.template,
        vol.Optional(CONF_AVAILABILITY): cv.template,
        vol.Optional(CONF_ATTRIBUTES): vol.Schema({cv.string: cv.template}),
        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
        vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
        vol.Optional(CONF_UNIQUE_ID): cv.string,
    }
)
# Legacy per-sensor schema; `entity_id` tracking lists are deprecated.
LEGACY_SENSOR_SCHEMA = vol.All(
    cv.deprecated(ATTR_ENTITY_ID),
    vol.Schema(
        {
            vol.Required(CONF_VALUE_TEMPLATE): cv.template,
            vol.Optional(CONF_ICON_TEMPLATE): cv.template,
            vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
            vol.Optional(CONF_FRIENDLY_NAME_TEMPLATE): cv.template,
            vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
            vol.Optional(CONF_ATTRIBUTE_TEMPLATES, default={}): vol.Schema(
                {cv.string: cv.template}
            ),
            vol.Optional(CONF_FRIENDLY_NAME): cv.string,
            vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
            vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
            vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
            vol.Optional(CONF_UNIQUE_ID): cv.string,
        }
    ),
)
def extra_validation_checks(val):
    """Run extra validation checks.

    Rejects `trigger:` under the legacy platform config and requires the
    `sensors:` mapping to be present.  Returns the config unchanged.
    """
    # Triggers are only valid in the modern `template:` integration config.
    if CONF_TRIGGER in val:
        raise vol.Invalid(
            "You can only add triggers to template entities if they are defined under `template:`. "
            "See the template documentation for more information: https://www.home-assistant.io/integrations/template/"
        )
    if not (CONF_SENSORS in val or SENSOR_DOMAIN in val):
        raise vol.Invalid(f"Required key {SENSOR_DOMAIN} not defined")
    return val
def rewrite_legacy_to_modern_conf(cfg: dict[str, dict]) -> list[dict]:
    """Rewrite a legacy sensor definitions to modern ones."""
    modern_sensors = []
    for object_id, legacy_cfg in cfg.items():
        converted = {**legacy_cfg, CONF_OBJECT_ID: object_id}
        for old_key, new_key in LEGACY_FIELDS.items():
            # Skip options that are absent or already present in modern form.
            if old_key not in converted or new_key in converted:
                continue
            value = converted.pop(old_key)
            # Legacy configs allow plain strings where templates are expected.
            if isinstance(value, str):
                value = template.Template(value)
            converted[new_key] = value
        # Fall back to the slug as the entity name, as legacy configs did.
        if CONF_NAME not in converted:
            converted[CONF_NAME] = template.Template(object_id)
        modern_sensors.append(converted)
    return modern_sensors
# Legacy platform schema: accepts the old `sensors:` mapping and lets
# extra_validation_checks raise a helpful error when `trigger:` is misplaced.
PLATFORM_SCHEMA = vol.All(
    PLATFORM_SCHEMA.extend(
        {
            vol.Optional(CONF_TRIGGER): cv.match_all,  # to raise custom warning
            vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(LEGACY_SENSOR_SCHEMA),
        }
    ),
    extra_validation_checks,
)
@callback
def _async_create_template_tracking_entities(
    async_add_entities, hass, definitions: list[dict], unique_id_prefix: str | None
):
    """Create the template sensors."""
    entities = []
    for conf in definitions:
        unique_id = conf.get(CONF_UNIQUE_ID)
        # Namespace the unique_id under the config entry's prefix when given.
        if unique_id and unique_id_prefix:
            unique_id = f"{unique_id_prefix}-{unique_id}"
        entities.append(
            SensorTemplate(
                hass,
                conf.get(CONF_OBJECT_ID),  # still available on legacy configs
                conf.get(CONF_NAME),
                conf.get(CONF_UNIT_OF_MEASUREMENT),
                conf[CONF_STATE],
                conf.get(CONF_ICON),
                conf.get(CONF_PICTURE),
                conf.get(CONF_AVAILABILITY),
                conf.get(CONF_DEVICE_CLASS),
                conf.get(CONF_ATTRIBUTES, {}),
                unique_id,
            )
        )
    async_add_entities(entities)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the template sensors."""
    if discovery_info is None:
        # Direct platform config: rewrite the legacy `sensors:` mapping.
        _async_create_template_tracking_entities(
            async_add_entities,
            hass,
            rewrite_legacy_to_modern_conf(config[CONF_SENSORS]),
            None,
        )
    elif "coordinator" in discovery_info:
        # Trigger-based entities share one update coordinator.
        async_add_entities(
            TriggerSensorEntity(hass, discovery_info["coordinator"], config)
            for config in discovery_info["entities"]
        )
    else:
        # Discovered via the modern `template:` integration config.
        _async_create_template_tracking_entities(
            async_add_entities,
            hass,
            discovery_info["entities"],
            discovery_info["unique_id"],
        )
class SensorTemplate(TemplateEntity, SensorEntity):
    """Representation of a Template Sensor."""

    def __init__(
        self,
        hass: HomeAssistant,
        object_id: str | None,
        friendly_name_template: template.Template | None,
        unit_of_measurement: str | None,
        state_template: template.Template,
        icon_template: template.Template | None,
        entity_picture_template: template.Template | None,
        availability_template: template.Template | None,
        device_class: str | None,
        attribute_templates: dict[str, template.Template],
        unique_id: str | None,
    ) -> None:
        """Initialize the sensor."""
        # Icon/picture/availability/attribute templates are tracked by the
        # TemplateEntity base class.
        super().__init__(
            attribute_templates=attribute_templates,
            availability_template=availability_template,
            icon_template=icon_template,
            entity_picture_template=entity_picture_template,
        )
        # object_id is only set for legacy (platform-style) configs; it pins
        # the entity_id to sensor.<object_id>.
        if object_id is not None:
            self.entity_id = async_generate_entity_id(
                ENTITY_ID_FORMAT, object_id, hass=hass
            )
        self._name: str | None = None
        self._friendly_name_template = friendly_name_template
        # Try to render the name as it can influence the entity ID
        if friendly_name_template:
            friendly_name_template.hass = hass
            try:
                self._name = friendly_name_template.async_render(parse_result=False)
            except template.TemplateError:
                # Name may reference entities not yet loaded; it is rendered
                # again once template tracking starts.
                pass
        self._unit_of_measurement = unit_of_measurement
        self._template = state_template
        self._state = None
        self._device_class = device_class
        self._unique_id = unique_id

    async def async_added_to_hass(self):
        """Register callbacks."""
        # "_state" updates route through _update_state so TemplateErrors can
        # be intercepted; the string names the instance attribute to set.
        self.add_template_attribute("_state", self._template, None, self._update_state)
        # Static name templates were already rendered in __init__.
        if self._friendly_name_template and not self._friendly_name_template.is_static:
            self.add_template_attribute("_name", self._friendly_name_template)
        await super().async_added_to_hass()

    @callback
    def _update_state(self, result):
        """Store the rendered state; a TemplateError leaves it unknown."""
        super()._update_state(result)
        self._state = None if isinstance(result, TemplateError) else result

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unique_id(self):
        """Return the unique id of this sensor."""
        return self._unique_id

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def device_class(self) -> str | None:
        """Return the device class of the sensor."""
        return self._device_class

    @property
    def unit_of_measurement(self):
        """Return the unit_of_measurement of the device."""
        return self._unit_of_measurement
class TriggerSensorEntity(TriggerEntity, SensorEntity):
    """Sensor entity based on trigger data."""

    # TriggerEntity uses these to build entity ids and render the state key.
    domain = SENSOR_DOMAIN
    extra_template_keys = (CONF_STATE,)

    @property
    def state(self) -> str | None:
        """Return state of the sensor."""
        rendered = self._rendered
        return rendered.get(CONF_STATE)
| {
"repo_name": "home-assistant/home-assistant",
"path": "homeassistant/components/template/sensor.py",
"copies": "2",
"size": "9782",
"license": "apache-2.0",
"hash": 6994978403340724000,
"line_mean": 30.9673202614,
"line_max": 119,
"alpha_frac": 0.6274790431,
"autogenerated": false,
"ratio": 3.9926530612244897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00032625696796168653,
"num_lines": 306
} |
"""Allows the creation of a sensor that breaks out state_attributes."""
import logging
from typing import Optional
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASSES_SCHEMA,
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_UNIT_OF_MEASUREMENT,
CONF_DEVICE_CLASS,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_FRIENDLY_NAME_TEMPLATE,
CONF_ICON_TEMPLATE,
CONF_SENSORS,
CONF_VALUE_TEMPLATE,
EVENT_HOMEASSISTANT_START,
MATCH_ALL,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.event import async_track_state_change
from . import extract_entities, initialise_templates
from .const import CONF_AVAILABILITY_TEMPLATE
# Option name for the per-sensor attribute templates mapping.
CONF_ATTRIBUTE_TEMPLATES = "attribute_templates"
_LOGGER = logging.getLogger(__name__)
# Schema for one template sensor definition; only value_template is required.
SENSOR_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_ICON_TEMPLATE): cv.template,
        vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
        vol.Optional(CONF_FRIENDLY_NAME_TEMPLATE): cv.template,
        vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
        vol.Optional(CONF_ATTRIBUTE_TEMPLATES, default={}): vol.Schema(
            {cv.string: cv.template}
        ),
        vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
        vol.Optional(ATTR_UNIT_OF_MEASUREMENT): cv.string,
        vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
        vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    }
)
# Platform schema: a slug-keyed mapping of sensor definitions.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA)}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the template sensors."""
    sensors = []
    for object_id, cfg in config[CONF_SENSORS].items():
        # Collect the per-entity templates under their config keys so they
        # can be initialised and have tracked entities extracted together.
        templates = {
            CONF_VALUE_TEMPLATE: cfg[CONF_VALUE_TEMPLATE],
            CONF_ICON_TEMPLATE: cfg.get(CONF_ICON_TEMPLATE),
            CONF_ENTITY_PICTURE_TEMPLATE: cfg.get(CONF_ENTITY_PICTURE_TEMPLATE),
            CONF_FRIENDLY_NAME_TEMPLATE: cfg.get(CONF_FRIENDLY_NAME_TEMPLATE),
            CONF_AVAILABILITY_TEMPLATE: cfg.get(CONF_AVAILABILITY_TEMPLATE),
        }
        attribute_templates = cfg[CONF_ATTRIBUTE_TEMPLATES]
        # Attach hass to every template before extracting tracked entities.
        initialise_templates(hass, templates, attribute_templates)
        entity_ids = extract_entities(
            object_id,
            "sensor",
            cfg.get(ATTR_ENTITY_ID),
            templates,
            attribute_templates,
        )
        sensors.append(
            SensorTemplate(
                hass,
                object_id,
                cfg.get(ATTR_FRIENDLY_NAME, object_id),
                templates[CONF_FRIENDLY_NAME_TEMPLATE],
                cfg.get(ATTR_UNIT_OF_MEASUREMENT),
                templates[CONF_VALUE_TEMPLATE],
                templates[CONF_ICON_TEMPLATE],
                templates[CONF_ENTITY_PICTURE_TEMPLATE],
                templates[CONF_AVAILABILITY_TEMPLATE],
                entity_ids,
                cfg.get(CONF_DEVICE_CLASS),
                attribute_templates,
            )
        )
    async_add_entities(sensors)
    return True
class SensorTemplate(Entity):
    """Representation of a Template Sensor."""

    def __init__(
        self,
        hass,
        device_id,
        friendly_name,
        friendly_name_template,
        unit_of_measurement,
        state_template,
        icon_template,
        entity_picture_template,
        availability_template,
        entity_ids,
        device_class,
        attribute_templates,
    ):
        """Initialize the sensor."""
        self.hass = hass
        # Entity id is derived from the config slug: sensor.<device_id>.
        self.entity_id = async_generate_entity_id(
            ENTITY_ID_FORMAT, device_id, hass=hass
        )
        self._name = friendly_name
        self._friendly_name_template = friendly_name_template
        self._unit_of_measurement = unit_of_measurement
        self._template = state_template
        self._state = None
        self._icon_template = icon_template
        self._entity_picture_template = entity_picture_template
        self._availability_template = availability_template
        self._icon = None
        self._entity_picture = None
        # Entities whose state changes trigger a re-render (or MATCH_ALL).
        self._entities = entity_ids
        self._device_class = device_class
        self._available = True
        self._attribute_templates = attribute_templates
        self._attributes = {}

    async def async_added_to_hass(self):
        """Register callbacks."""

        @callback
        def template_sensor_state_listener(entity, old_state, new_state):
            """Handle device state changes."""
            self.async_schedule_update_ha_state(True)

        @callback
        def template_sensor_startup(event):
            """Update template on startup."""
            if self._entities != MATCH_ALL:
                # Track state change only for valid templates
                async_track_state_change(
                    self.hass, self._entities, template_sensor_state_listener
                )
            self.async_schedule_update_ha_state(True)

        # Defer tracking setup until HA has started so all entities exist.
        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, template_sensor_startup
        )

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return self._icon

    @property
    def device_class(self) -> Optional[str]:
        """Return the device class of the sensor."""
        return self._device_class

    @property
    def entity_picture(self):
        """Return the entity_picture to use in the frontend, if any."""
        return self._entity_picture

    @property
    def unit_of_measurement(self):
        """Return the unit_of_measurement of the device."""
        return self._unit_of_measurement

    @property
    def available(self) -> bool:
        """Return if the device is available."""
        return self._available

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    async def async_update(self):
        """Update the state from the template."""
        try:
            self._state = self._template.async_render()
            self._available = True
        except TemplateError as ex:
            self._available = False
            if ex.args and ex.args[0].startswith(
                "UndefinedError: 'None' has no attribute"
            ):
                # Common during HA startup - so just a warning
                _LOGGER.warning(
                    "Could not render template %s, the state is unknown.", self._name
                )
            else:
                self._state = None
                _LOGGER.error("Could not render template %s: %s", self._name, ex)
        # Re-render every attribute template; failures keep the attribute out
        # of the new dict rather than leaving a stale value.
        attrs = {}
        for key, value in self._attribute_templates.items():
            try:
                attrs[key] = value.async_render()
            except TemplateError as err:
                _LOGGER.error("Error rendering attribute %s: %s", key, err)
        self._attributes = attrs
        # Keys are instance-attribute names written via setattr below.
        templates = {
            "_icon": self._icon_template,
            "_entity_picture": self._entity_picture_template,
            "_name": self._friendly_name_template,
            "_available": self._availability_template,
        }
        for property_name, template in templates.items():
            if template is None:
                continue
            try:
                value = template.async_render()
                if property_name == "_available":
                    # Availability templates must render the string "true".
                    value = value.lower() == "true"
                setattr(self, property_name, value)
            except TemplateError as ex:
                friendly_property_name = property_name[1:].replace("_", " ")
                if ex.args and ex.args[0].startswith(
                    "UndefinedError: 'None' has no attribute"
                ):
                    # Common during HA startup - so just a warning
                    _LOGGER.warning(
                        "Could not render %s template %s, the state is unknown.",
                        friendly_property_name,
                        self._name,
                    )
                    continue
                # NOTE(review): falls back to the base-class attribute of the
                # same name; Entity defines no such attributes, so this is
                # expected to hit AttributeError and log — confirm intent.
                try:
                    setattr(self, property_name, getattr(super(), property_name))
                except AttributeError:
                    _LOGGER.error(
                        "Could not render %s template %s: %s",
                        friendly_property_name,
                        self._name,
                        ex,
                    )
| {
"repo_name": "postlund/home-assistant",
"path": "homeassistant/components/template/sensor.py",
"copies": "4",
"size": "9527",
"license": "apache-2.0",
"hash": 8923949257607735000,
"line_mean": 32.4280701754,
"line_max": 86,
"alpha_frac": 0.5847591057,
"autogenerated": false,
"ratio": 4.447712418300654,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7032471524000654,
"avg_score": null,
"num_lines": null
} |
"""Allows the creation of a sensor that breaks out state_attributes."""
import logging
from typing import Optional
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.sensor import ENTITY_ID_FORMAT, \
PLATFORM_SCHEMA, DEVICE_CLASSES_SCHEMA
from homeassistant.const import (
ATTR_FRIENDLY_NAME, ATTR_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE,
CONF_ICON_TEMPLATE, CONF_ENTITY_PICTURE_TEMPLATE, ATTR_ENTITY_ID,
CONF_SENSORS, EVENT_HOMEASSISTANT_START, CONF_FRIENDLY_NAME_TEMPLATE,
MATCH_ALL, CONF_DEVICE_CLASS)
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.event import async_track_state_change
_LOGGER = logging.getLogger(__name__)
# Schema for one template sensor definition; only value_template is required.
SENSOR_SCHEMA = vol.Schema({
    vol.Required(CONF_VALUE_TEMPLATE): cv.template,
    vol.Optional(CONF_ICON_TEMPLATE): cv.template,
    vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
    vol.Optional(CONF_FRIENDLY_NAME_TEMPLATE): cv.template,
    vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
    vol.Optional(ATTR_UNIT_OF_MEASUREMENT): cv.string,
    vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids
})
# Platform schema: a slug-keyed mapping of sensor definitions.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA),
})
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the template sensors."""
    sensors = []
    for device, device_config in config[CONF_SENSORS].items():
        state_template = device_config[CONF_VALUE_TEMPLATE]
        icon_template = device_config.get(CONF_ICON_TEMPLATE)
        entity_picture_template = device_config.get(
            CONF_ENTITY_PICTURE_TEMPLATE)
        friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device)
        friendly_name_template = device_config.get(CONF_FRIENDLY_NAME_TEMPLATE)
        unit_of_measurement = device_config.get(ATTR_UNIT_OF_MEASUREMENT)
        device_class = device_config.get(CONF_DEVICE_CLASS)
        # Entities to track; grows as templates are inspected below, or
        # collapses to the MATCH_ALL sentinel when extraction fails.
        entity_ids = set()
        manual_entity_ids = device_config.get(ATTR_ENTITY_ID)
        invalid_templates = []
        for tpl_name, template in (
            (CONF_VALUE_TEMPLATE, state_template),
            (CONF_ICON_TEMPLATE, icon_template),
            (CONF_ENTITY_PICTURE_TEMPLATE, entity_picture_template),
            (CONF_FRIENDLY_NAME_TEMPLATE, friendly_name_template),
        ):
            if template is None:
                continue
            # Templates always need hass attached, even when entity ids are
            # supplied manually and extraction is skipped.
            template.hass = hass
            if manual_entity_ids is not None:
                continue
            template_entity_ids = template.extract_entities()
            if template_entity_ids == MATCH_ALL:
                entity_ids = MATCH_ALL
                # Cut off _template from name
                invalid_templates.append(tpl_name[:-9])
            elif entity_ids != MATCH_ALL:
                entity_ids |= set(template_entity_ids)
        if invalid_templates:
            _LOGGER.warning(
                'Template sensor %s has no entity ids configured to track nor'
                ' were we able to extract the entities to track from the %s '
                'template(s). This entity will only be able to be updated '
                'manually.', device, ', '.join(invalid_templates))
        # Manual entity ids override extraction entirely.
        if manual_entity_ids is not None:
            entity_ids = manual_entity_ids
        elif entity_ids != MATCH_ALL:
            entity_ids = list(entity_ids)
        sensors.append(
            SensorTemplate(
                hass,
                device,
                friendly_name,
                friendly_name_template,
                unit_of_measurement,
                state_template,
                icon_template,
                entity_picture_template,
                entity_ids,
                device_class)
        )
    if not sensors:
        _LOGGER.error("No sensors added")
        return False
    async_add_entities(sensors)
    return True
class SensorTemplate(Entity):
    """Representation of a Template Sensor."""

    def __init__(self, hass, device_id, friendly_name, friendly_name_template,
                 unit_of_measurement, state_template, icon_template,
                 entity_picture_template, entity_ids, device_class):
        """Initialize the sensor."""
        self.hass = hass
        # Entity id is derived from the config slug: sensor.<device_id>.
        self.entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, device_id,
                                                  hass=hass)
        self._name = friendly_name
        self._friendly_name_template = friendly_name_template
        self._unit_of_measurement = unit_of_measurement
        self._template = state_template
        self._state = None
        self._icon_template = icon_template
        self._entity_picture_template = entity_picture_template
        self._icon = None
        self._entity_picture = None
        # Entities whose state changes trigger a re-render (or MATCH_ALL).
        self._entities = entity_ids
        self._device_class = device_class

    async def async_added_to_hass(self):
        """Register callbacks."""

        @callback
        def template_sensor_state_listener(entity, old_state, new_state):
            """Handle device state changes."""
            self.async_schedule_update_ha_state(True)

        @callback
        def template_sensor_startup(event):
            """Update template on startup."""
            if self._entities != MATCH_ALL:
                # Track state change only for valid templates
                async_track_state_change(
                    self.hass, self._entities, template_sensor_state_listener)
            self.async_schedule_update_ha_state(True)

        # Defer tracking setup until HA has started so all entities exist.
        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, template_sensor_startup)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return self._icon

    @property
    def device_class(self) -> Optional[str]:
        """Return the device class of the sensor."""
        return self._device_class

    @property
    def entity_picture(self):
        """Return the entity_picture to use in the frontend, if any."""
        return self._entity_picture

    @property
    def unit_of_measurement(self):
        """Return the unit_of_measurement of the device."""
        return self._unit_of_measurement

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    async def async_update(self):
        """Update the state from the template."""
        try:
            self._state = self._template.async_render()
        except TemplateError as ex:
            if ex.args and ex.args[0].startswith(
                    "UndefinedError: 'None' has no attribute"):
                # Common during HA startup - so just a warning
                _LOGGER.warning('Could not render template %s,'
                                ' the state is unknown.', self._name)
            else:
                self._state = None
                _LOGGER.error('Could not render template %s: %s', self._name,
                              ex)
        # First elements are instance-attribute names written via setattr.
        for property_name, template in (
                ('_icon', self._icon_template),
                ('_entity_picture', self._entity_picture_template),
                ('_name', self._friendly_name_template)):
            if template is None:
                continue
            try:
                setattr(self, property_name, template.async_render())
            except TemplateError as ex:
                friendly_property_name = property_name[1:].replace('_', ' ')
                if ex.args and ex.args[0].startswith(
                        "UndefinedError: 'None' has no attribute"):
                    # Common during HA startup - so just a warning
                    _LOGGER.warning('Could not render %s template %s,'
                                    ' the state is unknown.',
                                    friendly_property_name, self._name)
                    continue
                # NOTE(review): falls back to the base-class attribute of the
                # same name; Entity defines no such attributes, so this is
                # expected to hit AttributeError and log — confirm intent.
                try:
                    setattr(self, property_name,
                            getattr(super(), property_name))
                except AttributeError:
                    _LOGGER.error('Could not render %s template %s: %s',
                                  friendly_property_name, self._name, ex)
| {
"repo_name": "molobrakos/home-assistant",
"path": "homeassistant/components/template/sensor.py",
"copies": "7",
"size": "8662",
"license": "apache-2.0",
"hash": 4834357881655330000,
"line_mean": 37.3274336283,
"line_max": 79,
"alpha_frac": 0.5864696375,
"autogenerated": false,
"ratio": 4.388044579533942,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8474514217033942,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet.
The top public SQL queries from the community will appear here once available.