text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def rotate(x, y, degree):
"""Rotate a coordinate around point (0,0).
- x and y specify the coordinate.
- degree is a number from 0 to 360.
Returns a new coordinate.
"""
radian = float(degree) * 2 * math.pi / 360.0
newx = math.cos(radian) * x - math.sin(radian) * y
newy = math.sin(radian) * x + math.cos(radian) * y
return (newx, newy) | [
"def",
"rotate",
"(",
"x",
",",
"y",
",",
"degree",
")",
":",
"radian",
"=",
"float",
"(",
"degree",
")",
"*",
"2",
"*",
"math",
".",
"pi",
"/",
"360.0",
"newx",
"=",
"math",
".",
"cos",
"(",
"radian",
")",
"*",
"x",
"-",
"math",
".",
"sin",
"(",
"radian",
")",
"*",
"y",
"newy",
"=",
"math",
".",
"sin",
"(",
"radian",
")",
"*",
"x",
"+",
"math",
".",
"cos",
"(",
"radian",
")",
"*",
"y",
"return",
"(",
"newx",
",",
"newy",
")"
] | 36.2 | 8.3 |
def packet2chain(packet):
"""Fetch Scapy packet protocol chain."""
if scapy_all is None:
raise ModuleNotFound("No module named 'scapy'", name='scapy')
chain = [packet.name]
payload = packet.payload
while not isinstance(payload, scapy_all.packet.NoPayload):
chain.append(payload.name)
payload = payload.payload
return ':'.join(chain) | [
"def",
"packet2chain",
"(",
"packet",
")",
":",
"if",
"scapy_all",
"is",
"None",
":",
"raise",
"ModuleNotFound",
"(",
"\"No module named 'scapy'\"",
",",
"name",
"=",
"'scapy'",
")",
"chain",
"=",
"[",
"packet",
".",
"name",
"]",
"payload",
"=",
"packet",
".",
"payload",
"while",
"not",
"isinstance",
"(",
"payload",
",",
"scapy_all",
".",
"packet",
".",
"NoPayload",
")",
":",
"chain",
".",
"append",
"(",
"payload",
".",
"name",
")",
"payload",
"=",
"payload",
".",
"payload",
"return",
"':'",
".",
"join",
"(",
"chain",
")"
] | 37.1 | 13.5 |
def mswe(w, v):
"""
Calculate mean squared weight error between estimated and true filter
coefficients, in respect to iterations.
Parameters
----------
v : array-like
True coefficients used to generate desired signal, must be a
one-dimensional array.
w : array-like
Estimated coefficients from adaptive filtering algorithm. Must be an
N x M matrix where N is the number of iterations, and M is the number
of filter coefficients.
Returns
-------
mswe : numpy.array
One-dimensional array containing the mean-squared weight error for
every iteration.
Raises
------
TypeError
If inputs have wrong dimensions
Note
----
To use this function with the adaptive filter functions set the optional
parameter returnCoeffs to True. This will return a coefficient matrix w
corresponding with the input-parameter w.
"""
# Ensure inputs are numpy arrays
w = np.array(w)
v = np.array(v)
# Check dimensions
if(len(w.shape) != 2):
raise TypeError('Estimated coefficients must be in NxM matrix')
if(len(v.shape) != 1):
raise TypeError('Real coefficients must be in 1d array')
# Ensure equal length between estimated and real coeffs
N, M = w.shape
L = v.size
if(M < L):
v = v[:-(L-M)]
elif(M > L):
v = np.concatenate((v, np.zeros(M-L)))
# Calculate and return MSWE
mswe = np.mean((w - v)**2, axis=1)
return mswe | [
"def",
"mswe",
"(",
"w",
",",
"v",
")",
":",
"# Ensure inputs are numpy arrays",
"w",
"=",
"np",
".",
"array",
"(",
"w",
")",
"v",
"=",
"np",
".",
"array",
"(",
"v",
")",
"# Check dimensions",
"if",
"(",
"len",
"(",
"w",
".",
"shape",
")",
"!=",
"2",
")",
":",
"raise",
"TypeError",
"(",
"'Estimated coefficients must be in NxM matrix'",
")",
"if",
"(",
"len",
"(",
"v",
".",
"shape",
")",
"!=",
"1",
")",
":",
"raise",
"TypeError",
"(",
"'Real coefficients must be in 1d array'",
")",
"# Ensure equal length between estimated and real coeffs",
"N",
",",
"M",
"=",
"w",
".",
"shape",
"L",
"=",
"v",
".",
"size",
"if",
"(",
"M",
"<",
"L",
")",
":",
"v",
"=",
"v",
"[",
":",
"-",
"(",
"L",
"-",
"M",
")",
"]",
"elif",
"(",
"M",
">",
"L",
")",
":",
"v",
"=",
"np",
".",
"concatenate",
"(",
"(",
"v",
",",
"np",
".",
"zeros",
"(",
"M",
"-",
"L",
")",
")",
")",
"# Calculate and return MSWE",
"mswe",
"=",
"np",
".",
"mean",
"(",
"(",
"w",
"-",
"v",
")",
"**",
"2",
",",
"axis",
"=",
"1",
")",
"return",
"mswe"
] | 28.882353 | 22.647059 |
def _flatten_mesh(self, Xs, term):
"""flatten the mesh and distribute into a feature matrix"""
n = Xs[0].size
if self.terms[term].istensor:
terms = self.terms[term]
else:
terms = [self.terms[term]]
X = np.zeros((n, self.statistics_['m_features']))
for term_, x in zip(terms, Xs):
X[:, term_.feature] = x.ravel()
return X | [
"def",
"_flatten_mesh",
"(",
"self",
",",
"Xs",
",",
"term",
")",
":",
"n",
"=",
"Xs",
"[",
"0",
"]",
".",
"size",
"if",
"self",
".",
"terms",
"[",
"term",
"]",
".",
"istensor",
":",
"terms",
"=",
"self",
".",
"terms",
"[",
"term",
"]",
"else",
":",
"terms",
"=",
"[",
"self",
".",
"terms",
"[",
"term",
"]",
"]",
"X",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"self",
".",
"statistics_",
"[",
"'m_features'",
"]",
")",
")",
"for",
"term_",
",",
"x",
"in",
"zip",
"(",
"terms",
",",
"Xs",
")",
":",
"X",
"[",
":",
",",
"term_",
".",
"feature",
"]",
"=",
"x",
".",
"ravel",
"(",
")",
"return",
"X"
] | 30.923077 | 14.230769 |
def list_permissions(vhost, runas=None):
'''
Lists permissions for vhost via rabbitmqctl list_permissions
CLI Example:
.. code-block:: bash
salt '*' rabbitmq.list_permissions /myvhost
'''
if runas is None and not salt.utils.platform.is_windows():
runas = salt.utils.user.get_user()
res = __salt__['cmd.run_all'](
[RABBITMQCTL, 'list_permissions', '-q', '-p', vhost],
reset_system_locale=False,
runas=runas,
python_shell=False)
return _output_to_dict(res) | [
"def",
"list_permissions",
"(",
"vhost",
",",
"runas",
"=",
"None",
")",
":",
"if",
"runas",
"is",
"None",
"and",
"not",
"salt",
".",
"utils",
".",
"platform",
".",
"is_windows",
"(",
")",
":",
"runas",
"=",
"salt",
".",
"utils",
".",
"user",
".",
"get_user",
"(",
")",
"res",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"[",
"RABBITMQCTL",
",",
"'list_permissions'",
",",
"'-q'",
",",
"'-p'",
",",
"vhost",
"]",
",",
"reset_system_locale",
"=",
"False",
",",
"runas",
"=",
"runas",
",",
"python_shell",
"=",
"False",
")",
"return",
"_output_to_dict",
"(",
"res",
")"
] | 27.368421 | 21.052632 |
def p_decl_arr(p):
""" var_arr_decl : DIM idlist LP bound_list RP typedef
"""
if len(p[2]) != 1:
syntax_error(p.lineno(1), "Array declaration only allows one variable name at a time")
else:
id_, lineno = p[2][0]
SYMBOL_TABLE.declare_array(id_, lineno, p[6], p[4])
p[0] = p[2][0] | [
"def",
"p_decl_arr",
"(",
"p",
")",
":",
"if",
"len",
"(",
"p",
"[",
"2",
"]",
")",
"!=",
"1",
":",
"syntax_error",
"(",
"p",
".",
"lineno",
"(",
"1",
")",
",",
"\"Array declaration only allows one variable name at a time\"",
")",
"else",
":",
"id_",
",",
"lineno",
"=",
"p",
"[",
"2",
"]",
"[",
"0",
"]",
"SYMBOL_TABLE",
".",
"declare_array",
"(",
"id_",
",",
"lineno",
",",
"p",
"[",
"6",
"]",
",",
"p",
"[",
"4",
"]",
")",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"2",
"]",
"[",
"0",
"]"
] | 34.888889 | 19.666667 |
def _create_latent_variables(self):
""" Creates model latent variables
Returns
----------
None (changes model attributes)
"""
self.latent_variables.add_z('Constant', fam.Normal(0,10,transform=None), fam.Normal(0,3))
self.latent_variables.add_z('Ability Scale', fam.Normal(0,1,transform=None), fam.Normal(0,3)) | [
"def",
"_create_latent_variables",
"(",
"self",
")",
":",
"self",
".",
"latent_variables",
".",
"add_z",
"(",
"'Constant'",
",",
"fam",
".",
"Normal",
"(",
"0",
",",
"10",
",",
"transform",
"=",
"None",
")",
",",
"fam",
".",
"Normal",
"(",
"0",
",",
"3",
")",
")",
"self",
".",
"latent_variables",
".",
"add_z",
"(",
"'Ability Scale'",
",",
"fam",
".",
"Normal",
"(",
"0",
",",
"1",
",",
"transform",
"=",
"None",
")",
",",
"fam",
".",
"Normal",
"(",
"0",
",",
"3",
")",
")"
] | 35.8 | 25.1 |
def find1(self, kw: YangIdentifier, arg: str = None,
pref: YangIdentifier = None,
required: bool = False) -> Optional["Statement"]:
"""Return first substatement with the given parameters.
Args:
kw: Statement keyword (local part for extensions).
arg: Argument (all arguments will match if ``None``).
pref: Keyword prefix (``None`` for built-in statements).
required: Should an exception be raised on failure?
Raises:
StatementNotFound: If `required` is ``True`` and the
statement is not found.
"""
for sub in self.substatements:
if (sub.keyword == kw and sub.prefix == pref and
(arg is None or sub.argument == arg)):
return sub
if required:
raise StatementNotFound(str(self), kw) | [
"def",
"find1",
"(",
"self",
",",
"kw",
":",
"YangIdentifier",
",",
"arg",
":",
"str",
"=",
"None",
",",
"pref",
":",
"YangIdentifier",
"=",
"None",
",",
"required",
":",
"bool",
"=",
"False",
")",
"->",
"Optional",
"[",
"\"Statement\"",
"]",
":",
"for",
"sub",
"in",
"self",
".",
"substatements",
":",
"if",
"(",
"sub",
".",
"keyword",
"==",
"kw",
"and",
"sub",
".",
"prefix",
"==",
"pref",
"and",
"(",
"arg",
"is",
"None",
"or",
"sub",
".",
"argument",
"==",
"arg",
")",
")",
":",
"return",
"sub",
"if",
"required",
":",
"raise",
"StatementNotFound",
"(",
"str",
"(",
"self",
")",
",",
"kw",
")"
] | 41.52381 | 17.904762 |
def _get_memory_contents(self):
"""Runs the scheduler to determine memory contents at every point in time.
Returns:
a list of frozenset of strings, where the ith entry describes the tensors
in memory when executing operation i (where schedule[i] is an index into
GetAllOperationNames()).
"""
if self._memory_contents is not None:
return self._memory_contents
schedule = scheduler.minimize_peak_memory(self._graph, self._scheduler_alg)
self._memory_contents = self._graph.compute_memory_contents_under_schedule(
schedule)
return self._memory_contents | [
"def",
"_get_memory_contents",
"(",
"self",
")",
":",
"if",
"self",
".",
"_memory_contents",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_memory_contents",
"schedule",
"=",
"scheduler",
".",
"minimize_peak_memory",
"(",
"self",
".",
"_graph",
",",
"self",
".",
"_scheduler_alg",
")",
"self",
".",
"_memory_contents",
"=",
"self",
".",
"_graph",
".",
"compute_memory_contents_under_schedule",
"(",
"schedule",
")",
"return",
"self",
".",
"_memory_contents"
] | 37.3125 | 22.5 |
def fix_pyqt5_QGraphicsItem_itemChange():
"""
Attempt to remedy:
https://www.riverbankcomputing.com/pipermail/pyqt/2016-February/037015.html
"""
from PyQt5.QtWidgets import QGraphicsObject, QGraphicsItem
class Obj(QGraphicsObject):
def itemChange(self, change, value):
return QGraphicsObject.itemChange(self, change, value)
obj = Obj()
parent = Obj()
obj.setParentItem(parent)
if obj.parentItem() is None:
# There was probably already some signal defined using QObject's
# subclass from QtWidgets.
# We will monkey patch the QGraphicsItem.itemChange and explicitly
# sip.cast all input and output QGraphicsItem instances
import sip
QGraphicsItem_itemChange_old = QGraphicsItem.itemChange
# All the QGraphicsItem.ItemChange flags which accept/return
# a QGraphicsItem
changeset = {
QGraphicsItem.ItemParentChange,
QGraphicsItem.ItemParentHasChanged,
QGraphicsItem.ItemChildAddedChange,
QGraphicsItem.ItemChildRemovedChange,
}
def QGraphicsItem_itemChange(self, change, value):
if change in changeset:
if isinstance(value, QGraphicsItem):
value = sip.cast(value, QGraphicsItem)
rval = QGraphicsItem_itemChange_old(self, change, value)
if isinstance(rval, QGraphicsItem):
rval = sip.cast(rval, QGraphicsItem)
return rval
else:
return QGraphicsItem_itemChange_old(self, change, value)
QGraphicsItem.itemChange = QGraphicsItem_itemChange
warnings.warn("Monkey patching QGraphicsItem.itemChange",
RuntimeWarning) | [
"def",
"fix_pyqt5_QGraphicsItem_itemChange",
"(",
")",
":",
"from",
"PyQt5",
".",
"QtWidgets",
"import",
"QGraphicsObject",
",",
"QGraphicsItem",
"class",
"Obj",
"(",
"QGraphicsObject",
")",
":",
"def",
"itemChange",
"(",
"self",
",",
"change",
",",
"value",
")",
":",
"return",
"QGraphicsObject",
".",
"itemChange",
"(",
"self",
",",
"change",
",",
"value",
")",
"obj",
"=",
"Obj",
"(",
")",
"parent",
"=",
"Obj",
"(",
")",
"obj",
".",
"setParentItem",
"(",
"parent",
")",
"if",
"obj",
".",
"parentItem",
"(",
")",
"is",
"None",
":",
"# There was probably already some signal defined using QObject's",
"# subclass from QtWidgets.",
"# We will monkey patch the QGraphicsItem.itemChange and explicitly",
"# sip.cast all input and output QGraphicsItem instances",
"import",
"sip",
"QGraphicsItem_itemChange_old",
"=",
"QGraphicsItem",
".",
"itemChange",
"# All the QGraphicsItem.ItemChange flags which accept/return",
"# a QGraphicsItem",
"changeset",
"=",
"{",
"QGraphicsItem",
".",
"ItemParentChange",
",",
"QGraphicsItem",
".",
"ItemParentHasChanged",
",",
"QGraphicsItem",
".",
"ItemChildAddedChange",
",",
"QGraphicsItem",
".",
"ItemChildRemovedChange",
",",
"}",
"def",
"QGraphicsItem_itemChange",
"(",
"self",
",",
"change",
",",
"value",
")",
":",
"if",
"change",
"in",
"changeset",
":",
"if",
"isinstance",
"(",
"value",
",",
"QGraphicsItem",
")",
":",
"value",
"=",
"sip",
".",
"cast",
"(",
"value",
",",
"QGraphicsItem",
")",
"rval",
"=",
"QGraphicsItem_itemChange_old",
"(",
"self",
",",
"change",
",",
"value",
")",
"if",
"isinstance",
"(",
"rval",
",",
"QGraphicsItem",
")",
":",
"rval",
"=",
"sip",
".",
"cast",
"(",
"rval",
",",
"QGraphicsItem",
")",
"return",
"rval",
"else",
":",
"return",
"QGraphicsItem_itemChange_old",
"(",
"self",
",",
"change",
",",
"value",
")",
"QGraphicsItem",
".",
"itemChange",
"=",
"QGraphicsItem_itemChange",
"warnings",
".",
"warn",
"(",
"\"Monkey patching QGraphicsItem.itemChange\"",
",",
"RuntimeWarning",
")"
] | 37.934783 | 19.804348 |
async def get_windows_kms_host(cls) -> typing.Optional[str]:
"""Windows KMS activation host.
FQDN or IP address of the host that provides the KMS Windows
activation service. (Only needed for Windows deployments using KMS
activation.)
"""
data = await cls.get_config("windows_kms_host")
return None if data is None or data == "" else data | [
"async",
"def",
"get_windows_kms_host",
"(",
"cls",
")",
"->",
"typing",
".",
"Optional",
"[",
"str",
"]",
":",
"data",
"=",
"await",
"cls",
".",
"get_config",
"(",
"\"windows_kms_host\"",
")",
"return",
"None",
"if",
"data",
"is",
"None",
"or",
"data",
"==",
"\"\"",
"else",
"data"
] | 42.888889 | 19.555556 |
def sys_names(self):
"""
Return a list of unique systematic names from OverallSys and HistoSys
"""
names = {}
for osys in self.overall_sys:
names[osys.name] = None
for hsys in self.histo_sys:
names[hsys.name] = None
return names.keys() | [
"def",
"sys_names",
"(",
"self",
")",
":",
"names",
"=",
"{",
"}",
"for",
"osys",
"in",
"self",
".",
"overall_sys",
":",
"names",
"[",
"osys",
".",
"name",
"]",
"=",
"None",
"for",
"hsys",
"in",
"self",
".",
"histo_sys",
":",
"names",
"[",
"hsys",
".",
"name",
"]",
"=",
"None",
"return",
"names",
".",
"keys",
"(",
")"
] | 30.6 | 11 |
def _build_scalar_declarations(self, with_init=True):
"""Build and return scalar variable declarations"""
# copy scalar declarations from from kernel ast
scalar_declarations = [deepcopy(d) for d in self.kernel_ast.block_items
if type(d) is c_ast.Decl and type(d.type) is c_ast.TypeDecl]
# add init values to declarations
if with_init:
random.seed(2342) # we want reproducible random numbers
for d in scalar_declarations:
if d.type.type.names[0] in ['double', 'float']:
d.init = c_ast.Constant('float', str(random.uniform(1.0, 0.1)))
elif d.type.type.names[0] in ['int', 'long', 'long long',
'unsigned int', 'unsigned long', 'unsigned long long']:
d.init = c_ast.Constant('int', 2)
return scalar_declarations | [
"def",
"_build_scalar_declarations",
"(",
"self",
",",
"with_init",
"=",
"True",
")",
":",
"# copy scalar declarations from from kernel ast",
"scalar_declarations",
"=",
"[",
"deepcopy",
"(",
"d",
")",
"for",
"d",
"in",
"self",
".",
"kernel_ast",
".",
"block_items",
"if",
"type",
"(",
"d",
")",
"is",
"c_ast",
".",
"Decl",
"and",
"type",
"(",
"d",
".",
"type",
")",
"is",
"c_ast",
".",
"TypeDecl",
"]",
"# add init values to declarations",
"if",
"with_init",
":",
"random",
".",
"seed",
"(",
"2342",
")",
"# we want reproducible random numbers",
"for",
"d",
"in",
"scalar_declarations",
":",
"if",
"d",
".",
"type",
".",
"type",
".",
"names",
"[",
"0",
"]",
"in",
"[",
"'double'",
",",
"'float'",
"]",
":",
"d",
".",
"init",
"=",
"c_ast",
".",
"Constant",
"(",
"'float'",
",",
"str",
"(",
"random",
".",
"uniform",
"(",
"1.0",
",",
"0.1",
")",
")",
")",
"elif",
"d",
".",
"type",
".",
"type",
".",
"names",
"[",
"0",
"]",
"in",
"[",
"'int'",
",",
"'long'",
",",
"'long long'",
",",
"'unsigned int'",
",",
"'unsigned long'",
",",
"'unsigned long long'",
"]",
":",
"d",
".",
"init",
"=",
"c_ast",
".",
"Constant",
"(",
"'int'",
",",
"2",
")",
"return",
"scalar_declarations"
] | 57.1875 | 24.125 |
def nltides_coefs(amplitude, n, m1, m2):
"""Calculate the coefficents needed to compute the
shift in t(f) and phi(f) due to non-linear tides.
Parameters
----------
amplitude: float
Amplitude of effect
n: float
Growth dependence of effect
m1: float
Mass of component 1
m2: float
Mass of component 2
Returns
-------
f_ref : float
Reference frequency used to define A and n
t_of_f_factor: float
The constant factor needed to compute t(f)
phi_of_f_factor: float
The constant factor needed to compute phi(f)
"""
# Use 100.0 Hz as a reference frequency
f_ref = 100.0
# Calculate chirp mass
mc = mchirp_from_mass1_mass2(m1, m2)
mc *= lal.lal.MSUN_SI
# Calculate constants in phasing
a = (96./5.) * \
(lal.lal.G_SI * lal.lal.PI * mc * f_ref / lal.lal.C_SI**3.)**(5./3.)
b = 6. * amplitude
t_of_f_factor = -1./(lal.lal.PI*f_ref) * b/(a*a * (n-4.))
phi_of_f_factor = -2.*b / (a*a * (n-3.))
return f_ref, t_of_f_factor, phi_of_f_factor | [
"def",
"nltides_coefs",
"(",
"amplitude",
",",
"n",
",",
"m1",
",",
"m2",
")",
":",
"# Use 100.0 Hz as a reference frequency",
"f_ref",
"=",
"100.0",
"# Calculate chirp mass",
"mc",
"=",
"mchirp_from_mass1_mass2",
"(",
"m1",
",",
"m2",
")",
"mc",
"*=",
"lal",
".",
"lal",
".",
"MSUN_SI",
"# Calculate constants in phasing",
"a",
"=",
"(",
"96.",
"/",
"5.",
")",
"*",
"(",
"lal",
".",
"lal",
".",
"G_SI",
"*",
"lal",
".",
"lal",
".",
"PI",
"*",
"mc",
"*",
"f_ref",
"/",
"lal",
".",
"lal",
".",
"C_SI",
"**",
"3.",
")",
"**",
"(",
"5.",
"/",
"3.",
")",
"b",
"=",
"6.",
"*",
"amplitude",
"t_of_f_factor",
"=",
"-",
"1.",
"/",
"(",
"lal",
".",
"lal",
".",
"PI",
"*",
"f_ref",
")",
"*",
"b",
"/",
"(",
"a",
"*",
"a",
"*",
"(",
"n",
"-",
"4.",
")",
")",
"phi_of_f_factor",
"=",
"-",
"2.",
"*",
"b",
"/",
"(",
"a",
"*",
"a",
"*",
"(",
"n",
"-",
"3.",
")",
")",
"return",
"f_ref",
",",
"t_of_f_factor",
",",
"phi_of_f_factor"
] | 26.375 | 19 |
def avail_images(conn=None, call=None):
'''
Return a list of the server appliances that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not conn:
conn = get_conn()
ret = {}
for appliance in conn.list_appliances():
ret[appliance['name']] = appliance
return ret | [
"def",
"avail_images",
"(",
"conn",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_images function must be called with '",
"'-f or --function, or with the --list-images option'",
")",
"if",
"not",
"conn",
":",
"conn",
"=",
"get_conn",
"(",
")",
"ret",
"=",
"{",
"}",
"for",
"appliance",
"in",
"conn",
".",
"list_appliances",
"(",
")",
":",
"ret",
"[",
"appliance",
"[",
"'name'",
"]",
"]",
"=",
"appliance",
"return",
"ret"
] | 24.421053 | 23.684211 |
def merge_overlaps(self, data_reducer=None, data_initializer=None, strict=True):
"""
Finds all intervals with overlapping ranges and merges them
into a single interval. If provided, uses data_reducer and
data_initializer with similar semantics to Python's built-in
reduce(reducer_func[, initializer]), as follows:
If data_reducer is set to a function, combines the data
fields of the Intervals with
current_reduced_data = data_reducer(current_reduced_data, new_data)
If data_reducer is None, the merged Interval's data
field will be set to None, ignoring all the data fields
of the merged Intervals.
On encountering the first Interval to merge, if
data_initializer is None (default), uses the first
Interval's data field as the first value for
current_reduced_data. If data_initializer is not None,
current_reduced_data is set to a shallow copy of
data_initializer created with copy.copy(data_initializer).
If strict is True (default), intervals are only merged if
their ranges actually overlap; adjacent, touching intervals
will not be merged. If strict is False, intervals are merged
even if they are only end-to-end adjacent.
Completes in O(n*logn).
"""
if not self:
return
sorted_intervals = sorted(self.all_intervals) # get sorted intervals
merged = []
# use mutable object to allow new_series() to modify it
current_reduced = [None]
higher = None # iterating variable, which new_series() needs access to
def new_series():
if data_initializer is None:
current_reduced[0] = higher.data
merged.append(higher)
return
else: # data_initializer is not None
current_reduced[0] = copy(data_initializer)
current_reduced[0] = data_reducer(current_reduced[0], higher.data)
merged.append(Interval(higher.begin, higher.end, current_reduced[0]))
for higher in sorted_intervals:
if merged: # series already begun
lower = merged[-1]
if (higher.begin < lower.end or
not strict and higher.begin == lower.end): # should merge
upper_bound = max(lower.end, higher.end)
if data_reducer is not None:
current_reduced[0] = data_reducer(current_reduced[0], higher.data)
else: # annihilate the data, since we don't know how to merge it
current_reduced[0] = None
merged[-1] = Interval(lower.begin, upper_bound, current_reduced[0])
else:
new_series()
else: # not merged; is first of Intervals to merge
new_series()
self.__init__(merged) | [
"def",
"merge_overlaps",
"(",
"self",
",",
"data_reducer",
"=",
"None",
",",
"data_initializer",
"=",
"None",
",",
"strict",
"=",
"True",
")",
":",
"if",
"not",
"self",
":",
"return",
"sorted_intervals",
"=",
"sorted",
"(",
"self",
".",
"all_intervals",
")",
"# get sorted intervals",
"merged",
"=",
"[",
"]",
"# use mutable object to allow new_series() to modify it",
"current_reduced",
"=",
"[",
"None",
"]",
"higher",
"=",
"None",
"# iterating variable, which new_series() needs access to",
"def",
"new_series",
"(",
")",
":",
"if",
"data_initializer",
"is",
"None",
":",
"current_reduced",
"[",
"0",
"]",
"=",
"higher",
".",
"data",
"merged",
".",
"append",
"(",
"higher",
")",
"return",
"else",
":",
"# data_initializer is not None",
"current_reduced",
"[",
"0",
"]",
"=",
"copy",
"(",
"data_initializer",
")",
"current_reduced",
"[",
"0",
"]",
"=",
"data_reducer",
"(",
"current_reduced",
"[",
"0",
"]",
",",
"higher",
".",
"data",
")",
"merged",
".",
"append",
"(",
"Interval",
"(",
"higher",
".",
"begin",
",",
"higher",
".",
"end",
",",
"current_reduced",
"[",
"0",
"]",
")",
")",
"for",
"higher",
"in",
"sorted_intervals",
":",
"if",
"merged",
":",
"# series already begun",
"lower",
"=",
"merged",
"[",
"-",
"1",
"]",
"if",
"(",
"higher",
".",
"begin",
"<",
"lower",
".",
"end",
"or",
"not",
"strict",
"and",
"higher",
".",
"begin",
"==",
"lower",
".",
"end",
")",
":",
"# should merge",
"upper_bound",
"=",
"max",
"(",
"lower",
".",
"end",
",",
"higher",
".",
"end",
")",
"if",
"data_reducer",
"is",
"not",
"None",
":",
"current_reduced",
"[",
"0",
"]",
"=",
"data_reducer",
"(",
"current_reduced",
"[",
"0",
"]",
",",
"higher",
".",
"data",
")",
"else",
":",
"# annihilate the data, since we don't know how to merge it",
"current_reduced",
"[",
"0",
"]",
"=",
"None",
"merged",
"[",
"-",
"1",
"]",
"=",
"Interval",
"(",
"lower",
".",
"begin",
",",
"upper_bound",
",",
"current_reduced",
"[",
"0",
"]",
")",
"else",
":",
"new_series",
"(",
")",
"else",
":",
"# not merged; is first of Intervals to merge",
"new_series",
"(",
")",
"self",
".",
"__init__",
"(",
"merged",
")"
] | 45.5 | 22.1875 |
def _add_data(self, plotter_cls, filename_or_obj, fmt={}, make_plot=True,
draw=False, mf_mode=False, ax=None, engine=None, delete=True,
share=False, clear=False, enable_post=None,
concat_dim=_concat_dim_default, load=False,
*args, **kwargs):
"""
Extract data from a dataset and visualize it with the given plotter
Parameters
----------
plotter_cls: type
The subclass of :class:`psyplot.plotter.Plotter` to use for
visualization
filename_or_obj: filename, :class:`xarray.Dataset` or data store
The object (or file name) to open. If not a dataset, the
:func:`psyplot.data.open_dataset` will be used to open a dataset
fmt: dict
Formatoptions that shall be when initializing the plot (you can
however also specify them as extra keyword arguments)
make_plot: bool
If True, the data is plotted at the end. Otherwise you have to
call the :meth:`psyplot.plotter.Plotter.initialize_plot` method or
the :meth:`psyplot.plotter.Plotter.reinit` method by yourself
%(InteractiveBase.start_update.parameters.draw)s
mf_mode: bool
If True, the :func:`psyplot.open_mfdataset` method is used.
Otherwise we use the :func:`psyplot.open_dataset` method which can
open only one single dataset
ax: None, tuple (x, y[, z]) or (list of) matplotlib.axes.Axes
Specifies the subplots on which to plot the new data objects.
- If None, a new figure will be created for each created plotter
- If tuple (x, y[, z]), `x` specifies the number of rows, `y` the
number of columns and the optional third parameter `z` the
maximal number of subplots per figure.
- If :class:`matplotlib.axes.Axes` (or list of those, e.g. created
by the :func:`matplotlib.pyplot.subplots` function), the data
will be plotted on these subplots
%(open_dataset.parameters.engine)s
%(multiple_subplots.parameters.delete)s
share: bool, fmt key or list of fmt keys
Determines whether the first created plotter shares it's
formatoptions with the others. If True, all formatoptions are
shared. Strings or list of strings specify the keys to share.
clear: bool
If True, axes are cleared before making the plot. This is only
necessary if the `ax` keyword consists of subplots with projection
that differs from the one that is needed
enable_post: bool
If True, the :attr:`~psyplot.plotter.Plotter.post` formatoption is
enabled and post processing scripts are allowed. If ``None``, this
parameter is set to True if there is a value given for the `post`
formatoption in `fmt` or `kwargs`
%(xarray.open_mfdataset.parameters.concat_dim)s
This parameter only does have an effect if `mf_mode` is True.
load: bool
If True, load the complete dataset into memory before plotting.
This might be useful if the data of other variables in the dataset
has to be accessed multiple times, e.g. for unstructured grids.
%(ArrayList.from_dataset.parameters.no_base)s
Other Parameters
----------------
%(ArrayList.from_dataset.other_parameters.no_args_kwargs)s
``**kwargs``
Any other dimension or formatoption that shall be passed to `dims`
or `fmt` respectively.
Returns
-------
Project
The subproject that contains the new (visualized) data array"""
if not isinstance(filename_or_obj, xarray.Dataset):
if mf_mode:
filename_or_obj = open_mfdataset(filename_or_obj,
engine=engine,
concat_dim=concat_dim)
else:
filename_or_obj = open_dataset(filename_or_obj,
engine=engine)
if load:
old = filename_or_obj
filename_or_obj = filename_or_obj.load()
old.close()
fmt = dict(fmt)
possible_fmts = list(plotter_cls._get_formatoptions())
additional_fmt, kwargs = utils.sort_kwargs(
kwargs, possible_fmts)
fmt.update(additional_fmt)
if enable_post is None:
enable_post = bool(fmt.get('post'))
# create the subproject
sub_project = self.from_dataset(filename_or_obj, **kwargs)
sub_project.main = self
sub_project.no_auto_update = not (
not sub_project.no_auto_update or not self.no_auto_update)
# create the subplots
proj = plotter_cls._get_sample_projection()
if isinstance(ax, tuple):
axes = iter(multiple_subplots(
*ax, n=len(sub_project), subplot_kw={'projection': proj}))
elif ax is None or isinstance(ax, (mpl.axes.SubplotBase,
mpl.axes.Axes)):
axes = repeat(ax)
else:
axes = iter(ax)
clear = clear or (isinstance(ax, tuple) and proj is not None)
for arr in sub_project:
plotter_cls(arr, make_plot=(not bool(share) and make_plot),
draw=False, ax=next(axes), clear=clear,
project=self, enable_post=enable_post, **fmt)
if share:
if share is True:
share = possible_fmts
elif isinstance(share, six.string_types):
share = [share]
else:
share = list(share)
sub_project[0].psy.plotter.share(
[arr.psy.plotter for arr in sub_project[1:]], keys=share,
draw=False)
if make_plot:
for arr in sub_project:
arr.psy.plotter.reinit(draw=False, clear=clear)
if draw is None:
draw = rcParams['auto_draw']
if draw:
sub_project.draw()
if rcParams['auto_show']:
self.show()
self.extend(sub_project, new_name=True)
if self is gcp(True):
scp(sub_project)
return sub_project | [
"def",
"_add_data",
"(",
"self",
",",
"plotter_cls",
",",
"filename_or_obj",
",",
"fmt",
"=",
"{",
"}",
",",
"make_plot",
"=",
"True",
",",
"draw",
"=",
"False",
",",
"mf_mode",
"=",
"False",
",",
"ax",
"=",
"None",
",",
"engine",
"=",
"None",
",",
"delete",
"=",
"True",
",",
"share",
"=",
"False",
",",
"clear",
"=",
"False",
",",
"enable_post",
"=",
"None",
",",
"concat_dim",
"=",
"_concat_dim_default",
",",
"load",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"filename_or_obj",
",",
"xarray",
".",
"Dataset",
")",
":",
"if",
"mf_mode",
":",
"filename_or_obj",
"=",
"open_mfdataset",
"(",
"filename_or_obj",
",",
"engine",
"=",
"engine",
",",
"concat_dim",
"=",
"concat_dim",
")",
"else",
":",
"filename_or_obj",
"=",
"open_dataset",
"(",
"filename_or_obj",
",",
"engine",
"=",
"engine",
")",
"if",
"load",
":",
"old",
"=",
"filename_or_obj",
"filename_or_obj",
"=",
"filename_or_obj",
".",
"load",
"(",
")",
"old",
".",
"close",
"(",
")",
"fmt",
"=",
"dict",
"(",
"fmt",
")",
"possible_fmts",
"=",
"list",
"(",
"plotter_cls",
".",
"_get_formatoptions",
"(",
")",
")",
"additional_fmt",
",",
"kwargs",
"=",
"utils",
".",
"sort_kwargs",
"(",
"kwargs",
",",
"possible_fmts",
")",
"fmt",
".",
"update",
"(",
"additional_fmt",
")",
"if",
"enable_post",
"is",
"None",
":",
"enable_post",
"=",
"bool",
"(",
"fmt",
".",
"get",
"(",
"'post'",
")",
")",
"# create the subproject",
"sub_project",
"=",
"self",
".",
"from_dataset",
"(",
"filename_or_obj",
",",
"*",
"*",
"kwargs",
")",
"sub_project",
".",
"main",
"=",
"self",
"sub_project",
".",
"no_auto_update",
"=",
"not",
"(",
"not",
"sub_project",
".",
"no_auto_update",
"or",
"not",
"self",
".",
"no_auto_update",
")",
"# create the subplots",
"proj",
"=",
"plotter_cls",
".",
"_get_sample_projection",
"(",
")",
"if",
"isinstance",
"(",
"ax",
",",
"tuple",
")",
":",
"axes",
"=",
"iter",
"(",
"multiple_subplots",
"(",
"*",
"ax",
",",
"n",
"=",
"len",
"(",
"sub_project",
")",
",",
"subplot_kw",
"=",
"{",
"'projection'",
":",
"proj",
"}",
")",
")",
"elif",
"ax",
"is",
"None",
"or",
"isinstance",
"(",
"ax",
",",
"(",
"mpl",
".",
"axes",
".",
"SubplotBase",
",",
"mpl",
".",
"axes",
".",
"Axes",
")",
")",
":",
"axes",
"=",
"repeat",
"(",
"ax",
")",
"else",
":",
"axes",
"=",
"iter",
"(",
"ax",
")",
"clear",
"=",
"clear",
"or",
"(",
"isinstance",
"(",
"ax",
",",
"tuple",
")",
"and",
"proj",
"is",
"not",
"None",
")",
"for",
"arr",
"in",
"sub_project",
":",
"plotter_cls",
"(",
"arr",
",",
"make_plot",
"=",
"(",
"not",
"bool",
"(",
"share",
")",
"and",
"make_plot",
")",
",",
"draw",
"=",
"False",
",",
"ax",
"=",
"next",
"(",
"axes",
")",
",",
"clear",
"=",
"clear",
",",
"project",
"=",
"self",
",",
"enable_post",
"=",
"enable_post",
",",
"*",
"*",
"fmt",
")",
"if",
"share",
":",
"if",
"share",
"is",
"True",
":",
"share",
"=",
"possible_fmts",
"elif",
"isinstance",
"(",
"share",
",",
"six",
".",
"string_types",
")",
":",
"share",
"=",
"[",
"share",
"]",
"else",
":",
"share",
"=",
"list",
"(",
"share",
")",
"sub_project",
"[",
"0",
"]",
".",
"psy",
".",
"plotter",
".",
"share",
"(",
"[",
"arr",
".",
"psy",
".",
"plotter",
"for",
"arr",
"in",
"sub_project",
"[",
"1",
":",
"]",
"]",
",",
"keys",
"=",
"share",
",",
"draw",
"=",
"False",
")",
"if",
"make_plot",
":",
"for",
"arr",
"in",
"sub_project",
":",
"arr",
".",
"psy",
".",
"plotter",
".",
"reinit",
"(",
"draw",
"=",
"False",
",",
"clear",
"=",
"clear",
")",
"if",
"draw",
"is",
"None",
":",
"draw",
"=",
"rcParams",
"[",
"'auto_draw'",
"]",
"if",
"draw",
":",
"sub_project",
".",
"draw",
"(",
")",
"if",
"rcParams",
"[",
"'auto_show'",
"]",
":",
"self",
".",
"show",
"(",
")",
"self",
".",
"extend",
"(",
"sub_project",
",",
"new_name",
"=",
"True",
")",
"if",
"self",
"is",
"gcp",
"(",
"True",
")",
":",
"scp",
"(",
"sub_project",
")",
"return",
"sub_project"
] | 46.807407 | 20.940741 |
def progress_bar(**kwargs):
    """Build a `tqdm.tqdm` progress bar with updated defaults.

    Any keyword argument accepted by `tqdm.tqdm` may be supplied and will
    override the defaults chosen here (description, output stream, and
    bar format).
    """
    options = {
        'desc': 'Processing',
        'file': sys.stdout,
        'bar_format': TQDM_BAR_FORMAT,
    }
    options.update(kwargs)
    bar = tqdm(**options)
    if not bar.disable:
        # tqdm appends ': ' to the description; trim it for a cleaner label.
        bar.desc = bar.desc.rstrip(': ')
        bar.refresh()
    return bar
"def",
"progress_bar",
"(",
"*",
"*",
"kwargs",
")",
":",
"tqdm_kw",
"=",
"{",
"'desc'",
":",
"'Processing'",
",",
"'file'",
":",
"sys",
".",
"stdout",
",",
"'bar_format'",
":",
"TQDM_BAR_FORMAT",
",",
"}",
"tqdm_kw",
".",
"update",
"(",
"kwargs",
")",
"pbar",
"=",
"tqdm",
"(",
"*",
"*",
"tqdm_kw",
")",
"if",
"not",
"pbar",
".",
"disable",
":",
"pbar",
".",
"desc",
"=",
"pbar",
".",
"desc",
".",
"rstrip",
"(",
"': '",
")",
"pbar",
".",
"refresh",
"(",
")",
"return",
"pbar"
] | 26.375 | 16.6875 |
def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1):
    '''Apply a bottleneck moving-window func blockwise on a dask array.

    The array is padded with overlapping halo regions so each block can
    evaluate the rolling window independently; the halos are trimmed from
    the result afterwards.
    '''
    dtype, fill_value = dtypes.maybe_promote(a.dtype)
    a = a.astype(dtype)
    # Normalise a negative axis index to its positive equivalent.
    if axis < 0:
        axis += a.ndim
    # Overlap only along the rolling axis; other axes get zero depth.
    depth = dict.fromkeys(range(a.ndim), 0)
    depth[axis] = (window + 1) // 2
    boundary = {dim: fill_value for dim in range(a.ndim)}
    # Create the overlapped (haloed) array.
    padded = overlap(a, depth=depth, boundary=boundary)
    # Evaluate the moving function on each block.
    rolled = padded.map_blocks(moving_func, window,
                               min_count=min_count, axis=axis, dtype=a.dtype)
    # Drop the halo cells added by the overlap step.
    return trim_internal(rolled, depth)
"def",
"dask_rolling_wrapper",
"(",
"moving_func",
",",
"a",
",",
"window",
",",
"min_count",
"=",
"None",
",",
"axis",
"=",
"-",
"1",
")",
":",
"dtype",
",",
"fill_value",
"=",
"dtypes",
".",
"maybe_promote",
"(",
"a",
".",
"dtype",
")",
"a",
"=",
"a",
".",
"astype",
"(",
"dtype",
")",
"# inputs for overlap",
"if",
"axis",
"<",
"0",
":",
"axis",
"=",
"a",
".",
"ndim",
"+",
"axis",
"depth",
"=",
"{",
"d",
":",
"0",
"for",
"d",
"in",
"range",
"(",
"a",
".",
"ndim",
")",
"}",
"depth",
"[",
"axis",
"]",
"=",
"(",
"window",
"+",
"1",
")",
"//",
"2",
"boundary",
"=",
"{",
"d",
":",
"fill_value",
"for",
"d",
"in",
"range",
"(",
"a",
".",
"ndim",
")",
"}",
"# Create overlap array.",
"ag",
"=",
"overlap",
"(",
"a",
",",
"depth",
"=",
"depth",
",",
"boundary",
"=",
"boundary",
")",
"# apply rolling func",
"out",
"=",
"ag",
".",
"map_blocks",
"(",
"moving_func",
",",
"window",
",",
"min_count",
"=",
"min_count",
",",
"axis",
"=",
"axis",
",",
"dtype",
"=",
"a",
".",
"dtype",
")",
"# trim array",
"result",
"=",
"trim_internal",
"(",
"out",
",",
"depth",
")",
"return",
"result"
] | 39.222222 | 16.111111 |
def choose_samples(samples, force):
    """ filter out samples that are already done with this step, unless force"""
    ## samples that survive the filtering
    kept = []
    for sample in samples:
        ## unless forcing, skip samples that already completed this step
        if not force and sample.stats.state >= 2:
            print("""\
    Skipping Sample {}; Already filtered. Use force argument to overwrite.\
    """.format(sample.name))
        ## samples with no raw reads are always skipped
        elif not sample.stats.reads_raw:
            print("""\
    Skipping Sample {}; No reads found in file {}\
    """.format(sample.name, sample.files.fastqs))
        else:
            kept.append(sample)
    return kept
"def",
"choose_samples",
"(",
"samples",
",",
"force",
")",
":",
"## hold samples that pass",
"subsamples",
"=",
"[",
"]",
"## filter the samples again",
"if",
"not",
"force",
":",
"for",
"sample",
"in",
"samples",
":",
"if",
"sample",
".",
"stats",
".",
"state",
">=",
"2",
":",
"print",
"(",
"\"\"\"\\\n Skipping Sample {}; Already filtered. Use force argument to overwrite.\\\n \"\"\"",
".",
"format",
"(",
"sample",
".",
"name",
")",
")",
"elif",
"not",
"sample",
".",
"stats",
".",
"reads_raw",
":",
"print",
"(",
"\"\"\"\\\n Skipping Sample {}; No reads found in file {}\\\n \"\"\"",
".",
"format",
"(",
"sample",
".",
"name",
",",
"sample",
".",
"files",
".",
"fastqs",
")",
")",
"else",
":",
"subsamples",
".",
"append",
"(",
"sample",
")",
"else",
":",
"for",
"sample",
"in",
"samples",
":",
"if",
"not",
"sample",
".",
"stats",
".",
"reads_raw",
":",
"print",
"(",
"\"\"\"\\\n Skipping Sample {}; No reads found in file {}\\\n \"\"\"",
".",
"format",
"(",
"sample",
".",
"name",
",",
"sample",
".",
"files",
".",
"fastqs",
")",
")",
"else",
":",
"subsamples",
".",
"append",
"(",
"sample",
")",
"return",
"subsamples"
] | 32.892857 | 11.75 |
def memsize(self):
    """ Total array cell + indexes size

    Computed as the raw cell storage (self.size), plus one extra slot,
    plus one bound descriptor (of size TYPE.size(gl.BOUND_TYPE)) per
    dimension in self.bounds.
    """
    # NOTE(review): the meaning of the "+ 1" slot is not visible from this
    # method -- presumably a header word for the array; confirm against
    # the code that emits the array layout.
    return self.size + 1 + TYPE.size(gl.BOUND_TYPE) * len(self.bounds)
"def",
"memsize",
"(",
"self",
")",
":",
"return",
"self",
".",
"size",
"+",
"1",
"+",
"TYPE",
".",
"size",
"(",
"gl",
".",
"BOUND_TYPE",
")",
"*",
"len",
"(",
"self",
".",
"bounds",
")"
] | 36.5 | 14 |
def get(self, template_name):
    """Fetch a single template by name.

    Responds with the template payload when found, or a 404 response
    when no template with the given name exists.
    """
    found = db.Template.find_one(template_name=template_name)
    if found:
        return self.make_response({'template': found})
    return self.make_response('No such template found', HTTP.NOT_FOUND)
"def",
"get",
"(",
"self",
",",
"template_name",
")",
":",
"template",
"=",
"db",
".",
"Template",
".",
"find_one",
"(",
"template_name",
"=",
"template_name",
")",
"if",
"not",
"template",
":",
"return",
"self",
".",
"make_response",
"(",
"'No such template found'",
",",
"HTTP",
".",
"NOT_FOUND",
")",
"return",
"self",
".",
"make_response",
"(",
"{",
"'template'",
":",
"template",
"}",
")"
] | 36.75 | 23.875 |
def _put_text(irods_path, text):
    """Upload raw text to iRODS at ``irods_path``.

    The text is staged through a temporary file and pushed with the
    ``iput`` command; the staging file is removed automatically when the
    context manager exits.
    """
    with tempfile.NamedTemporaryFile() as fh:
        staging_path = fh.name
        try:
            # Make Python2 compatible.
            text = unicode(text, "utf-8")
        except (NameError, TypeError):
            # NameError: We are running Python3 => text already unicode.
            # TypeError: text is already of type unicode.
            pass
        fh.write(text.encode("utf-8"))
        fh.flush()
        # -f force-overwrites any existing object at the destination.
        uploader = CommandWrapper([
            "iput",
            "-f",
            staging_path,
            irods_path
        ])
        uploader()
    # Leaving the 'with' block deletes the temporary staging file.
    assert not os.path.isfile(staging_path)
"def",
"_put_text",
"(",
"irods_path",
",",
"text",
")",
":",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"as",
"fh",
":",
"fpath",
"=",
"fh",
".",
"name",
"try",
":",
"# Make Python2 compatible.",
"text",
"=",
"unicode",
"(",
"text",
",",
"\"utf-8\"",
")",
"except",
"(",
"NameError",
",",
"TypeError",
")",
":",
"# NameError: We are running Python3 => text already unicode.",
"# TypeError: text is already of type unicode.",
"pass",
"fh",
".",
"write",
"(",
"text",
".",
"encode",
"(",
"\"utf-8\"",
")",
")",
"fh",
".",
"flush",
"(",
")",
"cmd",
"=",
"CommandWrapper",
"(",
"[",
"\"iput\"",
",",
"\"-f\"",
",",
"fpath",
",",
"irods_path",
"]",
")",
"cmd",
"(",
")",
"assert",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"fpath",
")"
] | 27.347826 | 17.173913 |
def setSectionLength(self, length):
    """
    Defines how many characters each section may contain and applies
    that limit to every editor widget.

    :param      length | <int>
    """
    self._sectionLength = length
    # Propagate the new limit to all existing section editors.
    for field in self.editors():
        field.setMaxLength(length)
"def",
"setSectionLength",
"(",
"self",
",",
"length",
")",
":",
"self",
".",
"_sectionLength",
"=",
"length",
"for",
"editor",
"in",
"self",
".",
"editors",
"(",
")",
":",
"editor",
".",
"setMaxLength",
"(",
"length",
")"
] | 31.777778 | 8.222222 |
def replace_rsc_img_chars(document):
    """Replace RSC entity images with their unicode text equivalents.

    Scans the document for ``<img>`` elements whose ``src`` points at an
    RSC entity gif, decodes the one or two 4-digit hex codepoints embedded
    in the file name, and splices the resulting characters into the
    surrounding text before removing the image element.

    :param document: lxml element tree, mutated in place.
    :returns: the same (mutated) document.
    """
    # Raw string literal: the original used '\.' inside a plain string,
    # which is an invalid escape sequence (DeprecationWarning and, in
    # newer Python, SyntaxWarning). The matched pattern is unchanged.
    image_re = re.compile(r'http://www.rsc.org/images/entities/(?:h[23]+_)?(?:[ib]+_)?char_([0-9a-f]{4})(?:_([0-9a-f]{4}))?\.gif')
    for img in document.xpath('.//img[starts-with(@src, "http://www.rsc.org/images/entities/")]'):
        m = image_re.match(img.get('src'))
        if m:
            u1, u2 = m.group(1), m.group(2)
            if not u2 and u1 in RSC_IMG_CHARS:
                # Single-codepoint entity with a curated replacement.
                rep = RSC_IMG_CHARS[u1]
            else:
                # Decode the hex codepoint(s) via a unicode-escape round trip.
                rep = ('\\u%s' % u1).encode('ascii').decode('unicode-escape')
                if u2:
                    rep += ('\\u%s' % u2).encode('ascii').decode('unicode-escape')
            if img.tail is not None:
                rep += img.tail  # Make sure we don't remove any tail text
            parent = img.getparent()
            if parent is not None:
                # Attach the replacement text to the previous sibling's tail,
                # or to the parent's text if the img is the first child.
                previous = img.getprevious()
                if previous is not None:
                    previous.tail = (previous.tail or '') + rep
                else:
                    parent.text = (parent.text or '') + rep
                parent.remove(img)
    return document
"def",
"replace_rsc_img_chars",
"(",
"document",
")",
":",
"image_re",
"=",
"re",
".",
"compile",
"(",
"'http://www.rsc.org/images/entities/(?:h[23]+_)?(?:[ib]+_)?char_([0-9a-f]{4})(?:_([0-9a-f]{4}))?\\.gif'",
")",
"for",
"img",
"in",
"document",
".",
"xpath",
"(",
"'.//img[starts-with(@src, \"http://www.rsc.org/images/entities/\")]'",
")",
":",
"m",
"=",
"image_re",
".",
"match",
"(",
"img",
".",
"get",
"(",
"'src'",
")",
")",
"if",
"m",
":",
"u1",
",",
"u2",
"=",
"m",
".",
"group",
"(",
"1",
")",
",",
"m",
".",
"group",
"(",
"2",
")",
"if",
"not",
"u2",
"and",
"u1",
"in",
"RSC_IMG_CHARS",
":",
"rep",
"=",
"RSC_IMG_CHARS",
"[",
"u1",
"]",
"else",
":",
"rep",
"=",
"(",
"'\\\\u%s'",
"%",
"u1",
")",
".",
"encode",
"(",
"'ascii'",
")",
".",
"decode",
"(",
"'unicode-escape'",
")",
"if",
"u2",
":",
"rep",
"+=",
"(",
"'\\\\u%s'",
"%",
"u2",
")",
".",
"encode",
"(",
"'ascii'",
")",
".",
"decode",
"(",
"'unicode-escape'",
")",
"if",
"img",
".",
"tail",
"is",
"not",
"None",
":",
"rep",
"+=",
"img",
".",
"tail",
"# Make sure we don't remove any tail text",
"parent",
"=",
"img",
".",
"getparent",
"(",
")",
"if",
"parent",
"is",
"not",
"None",
":",
"previous",
"=",
"img",
".",
"getprevious",
"(",
")",
"if",
"previous",
"is",
"not",
"None",
":",
"previous",
".",
"tail",
"=",
"(",
"previous",
".",
"tail",
"or",
"''",
")",
"+",
"rep",
"else",
":",
"parent",
".",
"text",
"=",
"(",
"parent",
".",
"text",
"or",
"''",
")",
"+",
"rep",
"parent",
".",
"remove",
"(",
"img",
")",
"return",
"document"
] | 48.5 | 18.75 |
def is_valid_nhs_number(n: int) -> bool:
    """
    Check whether an integer is a structurally valid NHS number.

    Args:
        n: candidate NHS number

    Returns:
        ``True`` when ``n`` is a 10-digit integer whose final digit matches
        the NHS checksum of its first nine digits, else ``False``.

    Checksum details are at
    http://www.datadictionary.nhs.uk/version2/data_dictionary/data_field_notes/n/nhs_number_de.asp
    """  # noqa
    if not isinstance(n, int):
        log.debug("is_valid_nhs_number: parameter was not of integer type")
        return False
    digits = str(n)
    # An NHS number has exactly ten digits.
    if len(digits) != 10:
        log.debug("is_valid_nhs_number: not 10 digits")
        return False
    body = [int(c) for c in digits[:9]]
    expected = nhs_check_digit(body)
    # A computed check digit of 10 means the number can never be valid.
    if expected == 10:
        log.debug("is_valid_nhs_number: calculated check digit invalid")
        return False
    if expected != int(digits[9]):
        log.debug("is_valid_nhs_number: check digit mismatch")
        return False
    # Hooray!
    return True
"def",
"is_valid_nhs_number",
"(",
"n",
":",
"int",
")",
"->",
"bool",
":",
"# noqa",
"if",
"not",
"isinstance",
"(",
"n",
",",
"int",
")",
":",
"log",
".",
"debug",
"(",
"\"is_valid_nhs_number: parameter was not of integer type\"",
")",
"return",
"False",
"s",
"=",
"str",
"(",
"n",
")",
"# Not 10 digits long?",
"if",
"len",
"(",
"s",
")",
"!=",
"10",
":",
"log",
".",
"debug",
"(",
"\"is_valid_nhs_number: not 10 digits\"",
")",
"return",
"False",
"main_digits",
"=",
"[",
"int",
"(",
"s",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"9",
")",
"]",
"actual_check_digit",
"=",
"int",
"(",
"s",
"[",
"9",
"]",
")",
"# tenth digit",
"expected_check_digit",
"=",
"nhs_check_digit",
"(",
"main_digits",
")",
"if",
"expected_check_digit",
"==",
"10",
":",
"log",
".",
"debug",
"(",
"\"is_valid_nhs_number: calculated check digit invalid\"",
")",
"return",
"False",
"if",
"expected_check_digit",
"!=",
"actual_check_digit",
":",
"log",
".",
"debug",
"(",
"\"is_valid_nhs_number: check digit mismatch\"",
")",
"return",
"False",
"# Hooray!",
"return",
"True"
] | 28.970588 | 21.382353 |
def result(self, timeout=None):
    """
    Set all asynchronous results.

    Collects every ``concurrent.futures.Future`` held by the
    sub-solutions (dictionary values, workflow node ``results`` entries,
    and workflow edge ``value`` entries), waits for them, and replaces
    each future in place with its resolved value.

    :param timeout:
        The number of seconds to wait for the result if the futures aren't
        done. If None, then there is no limit on the wait time.
    :type timeout: float

    :return:
        Update Solution.
    :rtype: Solution

    :raises Exception:
        Re-raises the first exception collected from a failed future.
    """
    # it: (future, container, key) triples to resolve;
    # exceptions: failures collected during resolution;
    # future_lists: AsyncLists already registered (avoid duplicates).
    it, exceptions, future_lists = [], [], []
    from concurrent.futures import Future, wait as wait_fut
    def update(fut, data, key):
        # Register `fut` so its resolved value can be written back into
        # container `data` under `key`.
        if isinstance(fut, Future):
            it.append((fut, data, key))
        elif isinstance(fut, AsyncList) and fut not in future_lists:
            # AsyncList: register each contained future once, keyed by
            # its index in the list (reversed order).
            future_lists.append(fut)
            it.extend([(j, fut, i)
                       for i, j in enumerate(fut)
                       if isinstance(j, Future)][::-1])
    for s in self.sub_sol.values():
        # Futures stored directly as solution values.
        for k, v in list(s.items()):
            update(v, s, k)
        # Futures stored on workflow nodes and edges.
        for d in s.workflow.nodes.values():
            if 'results' in d:
                update(d['results'], d, 'results')
        for d in s.workflow.edges.values():
            if 'value' in d:
                update(d['value'], d, 'value')
    # Wait once for the unique set of futures, then resolve each in place.
    wait_fut({v[0] for v in it}, timeout)
    for f, d, k in it:
        try:
            d[k] = await_result(f, 0)
        except SkipNode as e:
            # Skipped node: record the underlying exception, drop the entry.
            exceptions.append((f, d, k, e.ex))
            del d[k]
        except (Exception, ExecutorShutdown, DispatcherAbort) as ex:
            exceptions.append((f, d, k, ex))
            del d[k]
    if exceptions:
        # Surface only the first collected exception.
        raise exceptions[0][-1]
    return self
"def",
"result",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"it",
",",
"exceptions",
",",
"future_lists",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"from",
"concurrent",
".",
"futures",
"import",
"Future",
",",
"wait",
"as",
"wait_fut",
"def",
"update",
"(",
"fut",
",",
"data",
",",
"key",
")",
":",
"if",
"isinstance",
"(",
"fut",
",",
"Future",
")",
":",
"it",
".",
"append",
"(",
"(",
"fut",
",",
"data",
",",
"key",
")",
")",
"elif",
"isinstance",
"(",
"fut",
",",
"AsyncList",
")",
"and",
"fut",
"not",
"in",
"future_lists",
":",
"future_lists",
".",
"append",
"(",
"fut",
")",
"it",
".",
"extend",
"(",
"[",
"(",
"j",
",",
"fut",
",",
"i",
")",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"fut",
")",
"if",
"isinstance",
"(",
"j",
",",
"Future",
")",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
"for",
"s",
"in",
"self",
".",
"sub_sol",
".",
"values",
"(",
")",
":",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"s",
".",
"items",
"(",
")",
")",
":",
"update",
"(",
"v",
",",
"s",
",",
"k",
")",
"for",
"d",
"in",
"s",
".",
"workflow",
".",
"nodes",
".",
"values",
"(",
")",
":",
"if",
"'results'",
"in",
"d",
":",
"update",
"(",
"d",
"[",
"'results'",
"]",
",",
"d",
",",
"'results'",
")",
"for",
"d",
"in",
"s",
".",
"workflow",
".",
"edges",
".",
"values",
"(",
")",
":",
"if",
"'value'",
"in",
"d",
":",
"update",
"(",
"d",
"[",
"'value'",
"]",
",",
"d",
",",
"'value'",
")",
"wait_fut",
"(",
"{",
"v",
"[",
"0",
"]",
"for",
"v",
"in",
"it",
"}",
",",
"timeout",
")",
"for",
"f",
",",
"d",
",",
"k",
"in",
"it",
":",
"try",
":",
"d",
"[",
"k",
"]",
"=",
"await_result",
"(",
"f",
",",
"0",
")",
"except",
"SkipNode",
"as",
"e",
":",
"exceptions",
".",
"append",
"(",
"(",
"f",
",",
"d",
",",
"k",
",",
"e",
".",
"ex",
")",
")",
"del",
"d",
"[",
"k",
"]",
"except",
"(",
"Exception",
",",
"ExecutorShutdown",
",",
"DispatcherAbort",
")",
"as",
"ex",
":",
"exceptions",
".",
"append",
"(",
"(",
"f",
",",
"d",
",",
"k",
",",
"ex",
")",
")",
"del",
"d",
"[",
"k",
"]",
"if",
"exceptions",
":",
"raise",
"exceptions",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"return",
"self"
] | 32.192308 | 16.615385 |
def _get_skippers(configure, file_name=None):
"""
Returns the skippers of configuration.
:param configure: The configuration of HaTeMiLe.
:type configure: hatemile.util.configure.Configure
:param file_name: The file path of skippers configuration.
:type file_name: str
:return: The skippers of configuration.
:rtype: list(dict(str, str))
"""
skippers = []
if file_name is None:
file_name = os.path.join(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))
)), 'skippers.xml')
xmldoc = minidom.parse(file_name)
skippers_xml = xmldoc.getElementsByTagName(
'skippers'
)[0].getElementsByTagName('skipper')
for skipper_xml in skippers_xml:
skippers.append({
'selector': skipper_xml.attributes['selector'].value,
'description': configure.get_parameter(
skipper_xml.attributes['description'].value
),
'shortcut': skipper_xml.attributes['shortcut'].value
})
return skippers | [
"def",
"_get_skippers",
"(",
"configure",
",",
"file_name",
"=",
"None",
")",
":",
"skippers",
"=",
"[",
"]",
"if",
"file_name",
"is",
"None",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
")",
")",
",",
"'skippers.xml'",
")",
"xmldoc",
"=",
"minidom",
".",
"parse",
"(",
"file_name",
")",
"skippers_xml",
"=",
"xmldoc",
".",
"getElementsByTagName",
"(",
"'skippers'",
")",
"[",
"0",
"]",
".",
"getElementsByTagName",
"(",
"'skipper'",
")",
"for",
"skipper_xml",
"in",
"skippers_xml",
":",
"skippers",
".",
"append",
"(",
"{",
"'selector'",
":",
"skipper_xml",
".",
"attributes",
"[",
"'selector'",
"]",
".",
"value",
",",
"'description'",
":",
"configure",
".",
"get_parameter",
"(",
"skipper_xml",
".",
"attributes",
"[",
"'description'",
"]",
".",
"value",
")",
",",
"'shortcut'",
":",
"skipper_xml",
".",
"attributes",
"[",
"'shortcut'",
"]",
".",
"value",
"}",
")",
"return",
"skippers"
] | 38.333333 | 15.533333 |
def ajax_kindcat_arr(self, kind_sig):
    '''
    Write the sub-categories for the given kind as JSON.

    Serializes a ``{uid: name}`` mapping of every category matching
    ``kind_sig`` directly to the response (``self``).
    '''
    id_to_name = {cat.uid: cat.name
                  for cat in MCategory.query_kind_cat(kind_sig)}
    json.dump(id_to_name, self)
"def",
"ajax_kindcat_arr",
"(",
"self",
",",
"kind_sig",
")",
":",
"out_arr",
"=",
"{",
"}",
"for",
"catinfo",
"in",
"MCategory",
".",
"query_kind_cat",
"(",
"kind_sig",
")",
":",
"out_arr",
"[",
"catinfo",
".",
"uid",
"]",
"=",
"catinfo",
".",
"name",
"json",
".",
"dump",
"(",
"out_arr",
",",
"self",
")"
] | 28.5 | 16.5 |
def make_strain_from_inj_object(self, inj, delta_t, detector_name,
                                distance_scale=1):
    """Make a h(t) strain time-series from an injection object as read from
    an hdf file.

    Parameters
    -----------
    inj : injection object
        The injection object to turn into a strain h(t).
    delta_t : float
        Sample rate to make injection at.
    detector_name : string
        Name of the detector used for projecting injections.
    distance_scale: float, optional
        Factor to scale the distance of an injection with. The default (=1)
        is no scaling.

    Returns
    --------
    signal : float
        h(t) corresponding to the injection.
    """
    detector = Detector(detector_name)
    # compute the waveform time series
    # The approximant name in the injection selects the generator;
    # self.extra_args supplies additional generator keyword arguments.
    hp, hc = ringdown_td_approximants[inj['approximant']](
        inj, delta_t=delta_t, **self.extra_args)
    # Shift both polarizations so the waveform is anchored at the
    # injection's coalescence time 'tc'.
    hp._epoch += inj['tc']
    hc._epoch += inj['tc']
    # Strain amplitude scales inversely with distance.
    if distance_scale != 1:
        hp /= distance_scale
        hc /= distance_scale
    # compute the detector response and add it to the strain
    signal = detector.project_wave(hp, hc,
                         inj['ra'], inj['dec'], inj['polarization'])
    return signal
"def",
"make_strain_from_inj_object",
"(",
"self",
",",
"inj",
",",
"delta_t",
",",
"detector_name",
",",
"distance_scale",
"=",
"1",
")",
":",
"detector",
"=",
"Detector",
"(",
"detector_name",
")",
"# compute the waveform time series",
"hp",
",",
"hc",
"=",
"ringdown_td_approximants",
"[",
"inj",
"[",
"'approximant'",
"]",
"]",
"(",
"inj",
",",
"delta_t",
"=",
"delta_t",
",",
"*",
"*",
"self",
".",
"extra_args",
")",
"hp",
".",
"_epoch",
"+=",
"inj",
"[",
"'tc'",
"]",
"hc",
".",
"_epoch",
"+=",
"inj",
"[",
"'tc'",
"]",
"if",
"distance_scale",
"!=",
"1",
":",
"hp",
"/=",
"distance_scale",
"hc",
"/=",
"distance_scale",
"# compute the detector response and add it to the strain",
"signal",
"=",
"detector",
".",
"project_wave",
"(",
"hp",
",",
"hc",
",",
"inj",
"[",
"'ra'",
"]",
",",
"inj",
"[",
"'dec'",
"]",
",",
"inj",
"[",
"'polarization'",
"]",
")",
"return",
"signal"
] | 33 | 19.05 |
def write(self, value):
"""Set the state of the GPIO to `value`.
Args:
value (bool): ``True`` for high state, ``False`` for low state.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `value` type is not bool.
"""
if not isinstance(value, bool):
raise TypeError("Invalid value type, should be bool.")
# Write value
try:
if value:
os.write(self._fd, b"1\n")
else:
os.write(self._fd, b"0\n")
except OSError as e:
raise GPIOError(e.errno, "Writing GPIO: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror) | [
"def",
"write",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"raise",
"TypeError",
"(",
"\"Invalid value type, should be bool.\"",
")",
"# Write value",
"try",
":",
"if",
"value",
":",
"os",
".",
"write",
"(",
"self",
".",
"_fd",
",",
"b\"1\\n\"",
")",
"else",
":",
"os",
".",
"write",
"(",
"self",
".",
"_fd",
",",
"b\"0\\n\"",
")",
"except",
"OSError",
"as",
"e",
":",
"raise",
"GPIOError",
"(",
"e",
".",
"errno",
",",
"\"Writing GPIO: \"",
"+",
"e",
".",
"strerror",
")",
"# Rewind",
"try",
":",
"os",
".",
"lseek",
"(",
"self",
".",
"_fd",
",",
"0",
",",
"os",
".",
"SEEK_SET",
")",
"except",
"OSError",
"as",
"e",
":",
"raise",
"GPIOError",
"(",
"e",
".",
"errno",
",",
"\"Rewinding GPIO: \"",
"+",
"e",
".",
"strerror",
")"
] | 29.071429 | 20.892857 |
def _rdsignal(fp, file_size, header_size, n_sig, bit_width, is_signed, cut_end):
    """
    Read the signal

    Parameters
    ----------
    fp : file object
        Open binary file; it is seeked to the start of the signal data.
    file_size : int
        Total size of the file in bytes.
    header_size : int
        Number of header bytes preceding the signal samples.
    n_sig : int
        Number of signal channels; every sample frame must contain one
        value per channel.
    bit_width : int
        Bits per sample (assumed to be a multiple of 8).
    is_signed : bool
        Whether samples are signed integers.
    cut_end : bool, optional
        If True, enables reading the end of files which appear to terminate
        with the incorrect number of samples (ie. sample not present for all channels),
        by checking and skipping the reading the end of such files.
        Checking this option makes reading slower.

    Returns
    -------
    signal : numpy.ndarray
        Samples reshaped to (n_samples, n_sig).
    markers, triggers : numpy.ndarray
        Integer arrays populated by ``_get_sample`` while reading
        (semantics defined there).
    """
    # Cannot initially figure out signal length because there
    # are escape sequences.
    fp.seek(header_size)
    signal_size = file_size - header_size
    byte_width = int(bit_width / 8)
    # numpy dtype
    dtype = str(byte_width)
    if is_signed:
        dtype = 'i' + dtype
    else:
        dtype = 'u' + dtype
    # big endian
    dtype = '>' + dtype
    # The maximum possible samples given the file size
    # All channels must be present
    max_samples = int(signal_size / byte_width)
    max_samples = max_samples - max_samples % n_sig
    # Output information
    signal = np.empty(max_samples, dtype=dtype)
    markers = []
    triggers = []
    # Number of (total) samples read
    sample_num = 0
    # Read one sample for all channels at a time
    if cut_end:
        # Stop before a final, possibly incomplete, frame of channels.
        stop_byte = file_size - n_sig * byte_width + 1
        while fp.tell() < stop_byte:
            chunk = fp.read(2)
            sample_num = _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num)
    else:
        # Read until EOF; assumes the file ends on a complete frame.
        while True:
            chunk = fp.read(2)
            if not chunk:
                break
            sample_num = _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num)
    # No more bytes to read. Reshape output arguments.
    signal = signal[:sample_num]
    signal = signal.reshape((-1, n_sig))
    markers = np.array(markers, dtype='int')
    triggers = np.array(triggers, dtype='int')
    return signal, markers, triggers
"def",
"_rdsignal",
"(",
"fp",
",",
"file_size",
",",
"header_size",
",",
"n_sig",
",",
"bit_width",
",",
"is_signed",
",",
"cut_end",
")",
":",
"# Cannot initially figure out signal length because there",
"# are escape sequences.",
"fp",
".",
"seek",
"(",
"header_size",
")",
"signal_size",
"=",
"file_size",
"-",
"header_size",
"byte_width",
"=",
"int",
"(",
"bit_width",
"/",
"8",
")",
"# numpy dtype",
"dtype",
"=",
"str",
"(",
"byte_width",
")",
"if",
"is_signed",
":",
"dtype",
"=",
"'i'",
"+",
"dtype",
"else",
":",
"dtype",
"=",
"'u'",
"+",
"dtype",
"# big endian",
"dtype",
"=",
"'>'",
"+",
"dtype",
"# The maximum possible samples given the file size",
"# All channels must be present",
"max_samples",
"=",
"int",
"(",
"signal_size",
"/",
"byte_width",
")",
"max_samples",
"=",
"max_samples",
"-",
"max_samples",
"%",
"n_sig",
"# Output information",
"signal",
"=",
"np",
".",
"empty",
"(",
"max_samples",
",",
"dtype",
"=",
"dtype",
")",
"markers",
"=",
"[",
"]",
"triggers",
"=",
"[",
"]",
"# Number of (total) samples read",
"sample_num",
"=",
"0",
"# Read one sample for all channels at a time",
"if",
"cut_end",
":",
"stop_byte",
"=",
"file_size",
"-",
"n_sig",
"*",
"byte_width",
"+",
"1",
"while",
"fp",
".",
"tell",
"(",
")",
"<",
"stop_byte",
":",
"chunk",
"=",
"fp",
".",
"read",
"(",
"2",
")",
"sample_num",
"=",
"_get_sample",
"(",
"fp",
",",
"chunk",
",",
"n_sig",
",",
"dtype",
",",
"signal",
",",
"markers",
",",
"triggers",
",",
"sample_num",
")",
"else",
":",
"while",
"True",
":",
"chunk",
"=",
"fp",
".",
"read",
"(",
"2",
")",
"if",
"not",
"chunk",
":",
"break",
"sample_num",
"=",
"_get_sample",
"(",
"fp",
",",
"chunk",
",",
"n_sig",
",",
"dtype",
",",
"signal",
",",
"markers",
",",
"triggers",
",",
"sample_num",
")",
"# No more bytes to read. Reshape output arguments.",
"signal",
"=",
"signal",
"[",
":",
"sample_num",
"]",
"signal",
"=",
"signal",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"n_sig",
")",
")",
"markers",
"=",
"np",
".",
"array",
"(",
"markers",
",",
"dtype",
"=",
"'int'",
")",
"triggers",
"=",
"np",
".",
"array",
"(",
"triggers",
",",
"dtype",
"=",
"'int'",
")",
"return",
"signal",
",",
"markers",
",",
"triggers"
] | 34.218182 | 18.618182 |
def number(self):
    """Consume a numeric literal from the stream and return its token.

    Reads consecutive digits, and optionally a decimal point followed by
    more digits, producing an INT_NUMBER or FLOAT_NUMBER token.
    """
    digits = ''
    while self.char is not None and self.char.isdigit():
        digits += self.char
        self.advance()
    if self.char != '.':
        return Token(Nature.INT_NUMBER, digits)
    # Fractional part: consume the dot and any digits that follow.
    digits += self.char
    self.advance()
    while self.char is not None and self.char.isdigit():
        digits += self.char
        self.advance()
    return Token(Nature.FLOAT_NUMBER, digits)
"def",
"number",
"(",
"self",
")",
":",
"number",
"=",
"''",
"while",
"self",
".",
"char",
"is",
"not",
"None",
"and",
"self",
".",
"char",
".",
"isdigit",
"(",
")",
":",
"number",
"+=",
"self",
".",
"char",
"self",
".",
"advance",
"(",
")",
"if",
"self",
".",
"char",
"==",
"'.'",
":",
"number",
"+=",
"self",
".",
"char",
"self",
".",
"advance",
"(",
")",
"while",
"self",
".",
"char",
"is",
"not",
"None",
"and",
"self",
".",
"char",
".",
"isdigit",
"(",
")",
":",
"number",
"+=",
"self",
".",
"char",
"self",
".",
"advance",
"(",
")",
"token",
"=",
"Token",
"(",
"Nature",
".",
"FLOAT_NUMBER",
",",
"number",
")",
"else",
":",
"token",
"=",
"Token",
"(",
"Nature",
".",
"INT_NUMBER",
",",
"number",
")",
"return",
"token"
] | 26.666667 | 20.666667 |
def elmo_loss2ppl(losses: List[np.ndarray]) -> float:
    """ Convert a collection of model losses to a perplexity score.

    Args:
        losses: list of numpy arrays of model losses

    Returns:
        perplexity : exp of the mean loss, as a plain float
    """
    return float(np.exp(np.mean(losses)))
"def",
"elmo_loss2ppl",
"(",
"losses",
":",
"List",
"[",
"np",
".",
"ndarray",
"]",
")",
"->",
"float",
":",
"avg_loss",
"=",
"np",
".",
"mean",
"(",
"losses",
")",
"return",
"float",
"(",
"np",
".",
"exp",
"(",
"avg_loss",
")",
")"
] | 23.636364 | 17.636364 |
def read_msr(address):
    """
    Read the contents of the specified MSR (Machine Specific Register).

    @type  address: int
    @param address: MSR to read.

    @rtype:  int
    @return: Value of the specified MSR.

    @raise WindowsError:
        Raises an exception on error.

    @raise NotImplementedError:
        Current architecture is not C{i386} or C{amd64}.

    @warning:
        It could potentially brick your machine.
        It works on my machine, but your mileage may vary.
    """
    # MSR access only exists on x86 / x86-64.
    supported = (win32.ARCH_I386, win32.ARCH_AMD64)
    if win32.arch not in supported:
        raise NotImplementedError(
            "MSR reading is only supported on i386 or amd64 processors.")
    # The same structure carries the request in and the result out.
    request = win32.SYSDBG_MSR()
    request.Address = address
    request.Data = 0
    win32.NtSystemDebugControl(win32.SysDbgReadMsr,
                               InputBuffer=request,
                               OutputBuffer=request)
    return request.Data
"def",
"read_msr",
"(",
"address",
")",
":",
"if",
"win32",
".",
"arch",
"not",
"in",
"(",
"win32",
".",
"ARCH_I386",
",",
"win32",
".",
"ARCH_AMD64",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"MSR reading is only supported on i386 or amd64 processors.\"",
")",
"msr",
"=",
"win32",
".",
"SYSDBG_MSR",
"(",
")",
"msr",
".",
"Address",
"=",
"address",
"msr",
".",
"Data",
"=",
"0",
"win32",
".",
"NtSystemDebugControl",
"(",
"win32",
".",
"SysDbgReadMsr",
",",
"InputBuffer",
"=",
"msr",
",",
"OutputBuffer",
"=",
"msr",
")",
"return",
"msr",
".",
"Data"
] | 33.3 | 18.033333 |
def start(self):
    """
    Start the instance by issuing a start request for this instance id,
    then refresh local state from the response.
    """
    results = self.connection.start_instances([self.id])
    if results:
        self._update(results[0])
"def",
"start",
"(",
"self",
")",
":",
"rs",
"=",
"self",
".",
"connection",
".",
"start_instances",
"(",
"[",
"self",
".",
"id",
"]",
")",
"if",
"len",
"(",
"rs",
")",
">",
"0",
":",
"self",
".",
"_update",
"(",
"rs",
"[",
"0",
"]",
")"
] | 24.857143 | 11.142857 |
def incr(self, key, delta=1):
    """
    Add delta to value in the cache. If the key does not exist, raise a
    ValueError exception.
    """
    current = self.get(key)
    if current is None:
        raise ValueError("Key '%s' not found" % key)
    # Read-modify-write: store and return the incremented value.
    updated = current + delta
    self.set(key, updated)
    return updated
"def",
"incr",
"(",
"self",
",",
"key",
",",
"delta",
"=",
"1",
")",
":",
"value",
"=",
"self",
".",
"get",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Key '%s' not found\"",
"%",
"key",
")",
"new_value",
"=",
"value",
"+",
"delta",
"self",
".",
"set",
"(",
"key",
",",
"new_value",
")",
"return",
"new_value"
] | 32.181818 | 11.818182 |
def stream(self, report, callback=None):
    """Queue data for streaming

    Args:
        report (IOTileReport): A report object to stream to a client
        callback (callable): An optional callback that will be called with
            a bool value of True when this report actually gets streamed.
            If the client disconnects and the report is dropped instead,
            callback will be called with False
    """
    conn_id = self._find_connection(self.conn_string)
    # Broadcast reports are always notified; ordinary reports only go out
    # when a client connection exists for this connection string.
    if isinstance(report, BroadcastReport):
        self.adapter.notify_event_nowait(self.conn_string, 'broadcast', report)
    elif conn_id is not None:
        self.adapter.notify_event_nowait(self.conn_string, 'report', report)
    if callback is not None:
        # Delivered if broadcast or a connection exists; False means dropped.
        callback(isinstance(report, BroadcastReport) or (conn_id is not None))
"def",
"stream",
"(",
"self",
",",
"report",
",",
"callback",
"=",
"None",
")",
":",
"conn_id",
"=",
"self",
".",
"_find_connection",
"(",
"self",
".",
"conn_string",
")",
"if",
"isinstance",
"(",
"report",
",",
"BroadcastReport",
")",
":",
"self",
".",
"adapter",
".",
"notify_event_nowait",
"(",
"self",
".",
"conn_string",
",",
"'broadcast'",
",",
"report",
")",
"elif",
"conn_id",
"is",
"not",
"None",
":",
"self",
".",
"adapter",
".",
"notify_event_nowait",
"(",
"self",
".",
"conn_string",
",",
"'report'",
",",
"report",
")",
"if",
"callback",
"is",
"not",
"None",
":",
"callback",
"(",
"isinstance",
"(",
"report",
",",
"BroadcastReport",
")",
"or",
"(",
"conn_id",
"is",
"not",
"None",
")",
")"
] | 43.3 | 25.2 |
def ComplementMembership(*args, **kwargs):
    """Change (x not in y) to not(x in y)."""
    # Build the Membership node, then wrap it in Complement so the whole
    # expression evaluates as not(x in y). kwargs are forwarded to both
    # nodes -- presumably source-location metadata; confirm against the
    # project's ast module.
    return ast.Complement(
        ast.Membership(*args, **kwargs), **kwargs)
"def",
"ComplementMembership",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"ast",
".",
"Complement",
"(",
"ast",
".",
"Membership",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"*",
"*",
"kwargs",
")"
] | 40.75 | 6.5 |
def label_for(self, name):
    '''Get a human readable label for a method given its name.

    Prefers the first line of the method's docstring; falls back to a
    humanized version of the name with the prefix stripped.
    '''
    method = getattr(self, name)
    doc = (method.__doc__ or '').strip()
    if doc:
        return doc.splitlines()[0]
    return humanize(name.replace(self._prefix, ''))
"def",
"label_for",
"(",
"self",
",",
"name",
")",
":",
"method",
"=",
"getattr",
"(",
"self",
",",
"name",
")",
"if",
"method",
".",
"__doc__",
"and",
"method",
".",
"__doc__",
".",
"strip",
"(",
")",
":",
"return",
"method",
".",
"__doc__",
".",
"strip",
"(",
")",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
"return",
"humanize",
"(",
"name",
".",
"replace",
"(",
"self",
".",
"_prefix",
",",
"''",
")",
")"
] | 49.166667 | 15.166667 |
def adapters(self, adapters):
    """
    Sets the number of Ethernet adapters for this QEMU VM.

    :param adapters: number of adapters
    """
    # Rebuild the adapter list from scratch with the requested count.
    self._ethernet_adapters.clear()
    for _ in range(adapters):
        self._ethernet_adapters.append(EthernetAdapter())
    log.info('QEMU VM "{name}" [{id}]: number of Ethernet adapters changed to {adapters}'.format(name=self._name,
                                                                                                 id=self._id,
                                                                                                 adapters=adapters))
"def",
"adapters",
"(",
"self",
",",
"adapters",
")",
":",
"self",
".",
"_ethernet_adapters",
".",
"clear",
"(",
")",
"for",
"adapter_number",
"in",
"range",
"(",
"0",
",",
"adapters",
")",
":",
"self",
".",
"_ethernet_adapters",
".",
"append",
"(",
"EthernetAdapter",
"(",
")",
")",
"log",
".",
"info",
"(",
"'QEMU VM \"{name}\" [{id}]: number of Ethernet adapters changed to {adapters}'",
".",
"format",
"(",
"name",
"=",
"self",
".",
"_name",
",",
"id",
"=",
"self",
".",
"_id",
",",
"adapters",
"=",
"adapters",
")",
")"
] | 46.785714 | 29.785714 |
def student_t_approx(optimize=True, plot=True):
"""
Example of regressing with a student t likelihood using Laplace
"""
real_std = 0.1
#Start a function, any function
X = np.linspace(0.0, np.pi*2, 100)[:, None]
Y = np.sin(X) + np.random.randn(*X.shape)*real_std
Y = Y/Y.max()
Yc = Y.copy()
X_full = np.linspace(0.0, np.pi*2, 500)[:, None]
Y_full = np.sin(X_full)
Y_full = Y_full/Y_full.max()
#Slightly noisy data
Yc[75:80] += 1
#Very noisy data
#Yc[10] += 100
#Yc[25] += 10
#Yc[23] += 10
#Yc[26] += 1000
#Yc[24] += 10
#Yc = Yc/Yc.max()
#Add student t random noise to datapoints
deg_free = 1
print("Real noise: ", real_std)
initial_var_guess = 0.5
edited_real_sd = initial_var_guess
# Kernel object
kernel1 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
kernel2 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
kernel3 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
kernel4 = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
#Gaussian GP model on clean data
m1 = GPy.models.GPRegression(X, Y.copy(), kernel=kernel1)
# optimize
m1['.*white'].constrain_fixed(1e-5)
m1.randomize()
#Gaussian GP model on corrupt data
m2 = GPy.models.GPRegression(X, Yc.copy(), kernel=kernel2)
m2['.*white'].constrain_fixed(1e-5)
m2.randomize()
#Student t GP model on clean data
t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m3 = GPy.core.GP(X, Y.copy(), kernel3, likelihood=t_distribution, inference_method=laplace_inf)
m3['.*t_scale2'].constrain_bounded(1e-6, 10.)
m3['.*white'].constrain_fixed(1e-5)
m3.randomize()
#Student t GP model on corrupt data
t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m4 = GPy.core.GP(X, Yc.copy(), kernel4, likelihood=t_distribution, inference_method=laplace_inf)
m4['.*t_scale2'].constrain_bounded(1e-6, 10.)
m4['.*white'].constrain_fixed(1e-5)
m4.randomize()
print(m4)
debug=True
if debug:
m4.optimize(messages=1)
from matplotlib import pyplot as pb
pb.plot(m4.X, m4.inference_method.f_hat)
pb.plot(m4.X, m4.Y, 'rx')
m4.plot()
print(m4)
return m4
if optimize:
optimizer='scg'
print("Clean Gaussian")
m1.optimize(optimizer, messages=1)
print("Corrupt Gaussian")
m2.optimize(optimizer, messages=1)
print("Clean student t")
m3.optimize(optimizer, messages=1)
print("Corrupt student t")
m4.optimize(optimizer, messages=1)
if plot:
plt.figure(1)
plt.suptitle('Gaussian likelihood')
ax = plt.subplot(211)
m1.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Gaussian clean')
ax = plt.subplot(212)
m2.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Gaussian corrupt')
plt.figure(2)
plt.suptitle('Student-t likelihood')
ax = plt.subplot(211)
m3.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Student-t rasm clean')
ax = plt.subplot(212)
m4.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Student-t rasm corrupt')
return m1, m2, m3, m4 | [
"def",
"student_t_approx",
"(",
"optimize",
"=",
"True",
",",
"plot",
"=",
"True",
")",
":",
"real_std",
"=",
"0.1",
"#Start a function, any function",
"X",
"=",
"np",
".",
"linspace",
"(",
"0.0",
",",
"np",
".",
"pi",
"*",
"2",
",",
"100",
")",
"[",
":",
",",
"None",
"]",
"Y",
"=",
"np",
".",
"sin",
"(",
"X",
")",
"+",
"np",
".",
"random",
".",
"randn",
"(",
"*",
"X",
".",
"shape",
")",
"*",
"real_std",
"Y",
"=",
"Y",
"/",
"Y",
".",
"max",
"(",
")",
"Yc",
"=",
"Y",
".",
"copy",
"(",
")",
"X_full",
"=",
"np",
".",
"linspace",
"(",
"0.0",
",",
"np",
".",
"pi",
"*",
"2",
",",
"500",
")",
"[",
":",
",",
"None",
"]",
"Y_full",
"=",
"np",
".",
"sin",
"(",
"X_full",
")",
"Y_full",
"=",
"Y_full",
"/",
"Y_full",
".",
"max",
"(",
")",
"#Slightly noisy data",
"Yc",
"[",
"75",
":",
"80",
"]",
"+=",
"1",
"#Very noisy data",
"#Yc[10] += 100",
"#Yc[25] += 10",
"#Yc[23] += 10",
"#Yc[26] += 1000",
"#Yc[24] += 10",
"#Yc = Yc/Yc.max()",
"#Add student t random noise to datapoints",
"deg_free",
"=",
"1",
"print",
"(",
"\"Real noise: \"",
",",
"real_std",
")",
"initial_var_guess",
"=",
"0.5",
"edited_real_sd",
"=",
"initial_var_guess",
"# Kernel object",
"kernel1",
"=",
"GPy",
".",
"kern",
".",
"RBF",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"+",
"GPy",
".",
"kern",
".",
"White",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"kernel2",
"=",
"GPy",
".",
"kern",
".",
"RBF",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"+",
"GPy",
".",
"kern",
".",
"White",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"kernel3",
"=",
"GPy",
".",
"kern",
".",
"RBF",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"+",
"GPy",
".",
"kern",
".",
"White",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"kernel4",
"=",
"GPy",
".",
"kern",
".",
"RBF",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"+",
"GPy",
".",
"kern",
".",
"White",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"#Gaussian GP model on clean data",
"m1",
"=",
"GPy",
".",
"models",
".",
"GPRegression",
"(",
"X",
",",
"Y",
".",
"copy",
"(",
")",
",",
"kernel",
"=",
"kernel1",
")",
"# optimize",
"m1",
"[",
"'.*white'",
"]",
".",
"constrain_fixed",
"(",
"1e-5",
")",
"m1",
".",
"randomize",
"(",
")",
"#Gaussian GP model on corrupt data",
"m2",
"=",
"GPy",
".",
"models",
".",
"GPRegression",
"(",
"X",
",",
"Yc",
".",
"copy",
"(",
")",
",",
"kernel",
"=",
"kernel2",
")",
"m2",
"[",
"'.*white'",
"]",
".",
"constrain_fixed",
"(",
"1e-5",
")",
"m2",
".",
"randomize",
"(",
")",
"#Student t GP model on clean data",
"t_distribution",
"=",
"GPy",
".",
"likelihoods",
".",
"StudentT",
"(",
"deg_free",
"=",
"deg_free",
",",
"sigma2",
"=",
"edited_real_sd",
")",
"laplace_inf",
"=",
"GPy",
".",
"inference",
".",
"latent_function_inference",
".",
"Laplace",
"(",
")",
"m3",
"=",
"GPy",
".",
"core",
".",
"GP",
"(",
"X",
",",
"Y",
".",
"copy",
"(",
")",
",",
"kernel3",
",",
"likelihood",
"=",
"t_distribution",
",",
"inference_method",
"=",
"laplace_inf",
")",
"m3",
"[",
"'.*t_scale2'",
"]",
".",
"constrain_bounded",
"(",
"1e-6",
",",
"10.",
")",
"m3",
"[",
"'.*white'",
"]",
".",
"constrain_fixed",
"(",
"1e-5",
")",
"m3",
".",
"randomize",
"(",
")",
"#Student t GP model on corrupt data",
"t_distribution",
"=",
"GPy",
".",
"likelihoods",
".",
"StudentT",
"(",
"deg_free",
"=",
"deg_free",
",",
"sigma2",
"=",
"edited_real_sd",
")",
"laplace_inf",
"=",
"GPy",
".",
"inference",
".",
"latent_function_inference",
".",
"Laplace",
"(",
")",
"m4",
"=",
"GPy",
".",
"core",
".",
"GP",
"(",
"X",
",",
"Yc",
".",
"copy",
"(",
")",
",",
"kernel4",
",",
"likelihood",
"=",
"t_distribution",
",",
"inference_method",
"=",
"laplace_inf",
")",
"m4",
"[",
"'.*t_scale2'",
"]",
".",
"constrain_bounded",
"(",
"1e-6",
",",
"10.",
")",
"m4",
"[",
"'.*white'",
"]",
".",
"constrain_fixed",
"(",
"1e-5",
")",
"m4",
".",
"randomize",
"(",
")",
"print",
"(",
"m4",
")",
"debug",
"=",
"True",
"if",
"debug",
":",
"m4",
".",
"optimize",
"(",
"messages",
"=",
"1",
")",
"from",
"matplotlib",
"import",
"pyplot",
"as",
"pb",
"pb",
".",
"plot",
"(",
"m4",
".",
"X",
",",
"m4",
".",
"inference_method",
".",
"f_hat",
")",
"pb",
".",
"plot",
"(",
"m4",
".",
"X",
",",
"m4",
".",
"Y",
",",
"'rx'",
")",
"m4",
".",
"plot",
"(",
")",
"print",
"(",
"m4",
")",
"return",
"m4",
"if",
"optimize",
":",
"optimizer",
"=",
"'scg'",
"print",
"(",
"\"Clean Gaussian\"",
")",
"m1",
".",
"optimize",
"(",
"optimizer",
",",
"messages",
"=",
"1",
")",
"print",
"(",
"\"Corrupt Gaussian\"",
")",
"m2",
".",
"optimize",
"(",
"optimizer",
",",
"messages",
"=",
"1",
")",
"print",
"(",
"\"Clean student t\"",
")",
"m3",
".",
"optimize",
"(",
"optimizer",
",",
"messages",
"=",
"1",
")",
"print",
"(",
"\"Corrupt student t\"",
")",
"m4",
".",
"optimize",
"(",
"optimizer",
",",
"messages",
"=",
"1",
")",
"if",
"plot",
":",
"plt",
".",
"figure",
"(",
"1",
")",
"plt",
".",
"suptitle",
"(",
"'Gaussian likelihood'",
")",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"211",
")",
"m1",
".",
"plot",
"(",
"ax",
"=",
"ax",
")",
"plt",
".",
"plot",
"(",
"X_full",
",",
"Y_full",
")",
"plt",
".",
"ylim",
"(",
"-",
"1.5",
",",
"1.5",
")",
"plt",
".",
"title",
"(",
"'Gaussian clean'",
")",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"212",
")",
"m2",
".",
"plot",
"(",
"ax",
"=",
"ax",
")",
"plt",
".",
"plot",
"(",
"X_full",
",",
"Y_full",
")",
"plt",
".",
"ylim",
"(",
"-",
"1.5",
",",
"1.5",
")",
"plt",
".",
"title",
"(",
"'Gaussian corrupt'",
")",
"plt",
".",
"figure",
"(",
"2",
")",
"plt",
".",
"suptitle",
"(",
"'Student-t likelihood'",
")",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"211",
")",
"m3",
".",
"plot",
"(",
"ax",
"=",
"ax",
")",
"plt",
".",
"plot",
"(",
"X_full",
",",
"Y_full",
")",
"plt",
".",
"ylim",
"(",
"-",
"1.5",
",",
"1.5",
")",
"plt",
".",
"title",
"(",
"'Student-t rasm clean'",
")",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"212",
")",
"m4",
".",
"plot",
"(",
"ax",
"=",
"ax",
")",
"plt",
".",
"plot",
"(",
"X_full",
",",
"Y_full",
")",
"plt",
".",
"ylim",
"(",
"-",
"1.5",
",",
"1.5",
")",
"plt",
".",
"title",
"(",
"'Student-t rasm corrupt'",
")",
"return",
"m1",
",",
"m2",
",",
"m3",
",",
"m4"
] | 30.267241 | 18.439655 |
def _get_directory_path(context):
"""Get the storage path fro the output."""
path = os.path.join(settings.BASE_PATH, 'store')
path = context.params.get('path', path)
path = os.path.join(path, context.crawler.name)
path = os.path.abspath(os.path.expandvars(path))
try:
os.makedirs(path)
except Exception:
pass
return path | [
"def",
"_get_directory_path",
"(",
"context",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"BASE_PATH",
",",
"'store'",
")",
"path",
"=",
"context",
".",
"params",
".",
"get",
"(",
"'path'",
",",
"path",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"context",
".",
"crawler",
".",
"name",
")",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"path",
")",
")",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"except",
"Exception",
":",
"pass",
"return",
"path"
] | 32.545455 | 14.909091 |
def Geometric(p, tag=None):
"""
A Geometric random variate
Parameters
----------
p : scalar
The probability of success
"""
assert (
0 < p < 1
), 'Geometric probability "p" must be between zero and one, non-inclusive'
return uv(ss.geom(p), tag=tag) | [
"def",
"Geometric",
"(",
"p",
",",
"tag",
"=",
"None",
")",
":",
"assert",
"(",
"0",
"<",
"p",
"<",
"1",
")",
",",
"'Geometric probability \"p\" must be between zero and one, non-inclusive'",
"return",
"uv",
"(",
"ss",
".",
"geom",
"(",
"p",
")",
",",
"tag",
"=",
"tag",
")"
] | 22.461538 | 18.307692 |
def initialize():
"""Initializes the WINDLL resource and populated the CSBI class variable."""
_WindowsCSBI._define_csbi()
_WindowsCSBI.HANDLE_STDERR = _WindowsCSBI.HANDLE_STDERR or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-12)
_WindowsCSBI.HANDLE_STDOUT = _WindowsCSBI.HANDLE_STDOUT or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-11)
if _WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes:
return
_WindowsCSBI.WINDLL.kernel32.GetStdHandle.argtypes = [ctypes.wintypes.DWORD]
_WindowsCSBI.WINDLL.kernel32.GetStdHandle.restype = ctypes.wintypes.HANDLE
_WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.restype = ctypes.wintypes.BOOL
_WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes = [
ctypes.wintypes.HANDLE, ctypes.POINTER(_WindowsCSBI.CSBI)
] | [
"def",
"initialize",
"(",
")",
":",
"_WindowsCSBI",
".",
"_define_csbi",
"(",
")",
"_WindowsCSBI",
".",
"HANDLE_STDERR",
"=",
"_WindowsCSBI",
".",
"HANDLE_STDERR",
"or",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetStdHandle",
"(",
"-",
"12",
")",
"_WindowsCSBI",
".",
"HANDLE_STDOUT",
"=",
"_WindowsCSBI",
".",
"HANDLE_STDOUT",
"or",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetStdHandle",
"(",
"-",
"11",
")",
"if",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetConsoleScreenBufferInfo",
".",
"argtypes",
":",
"return",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetStdHandle",
".",
"argtypes",
"=",
"[",
"ctypes",
".",
"wintypes",
".",
"DWORD",
"]",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetStdHandle",
".",
"restype",
"=",
"ctypes",
".",
"wintypes",
".",
"HANDLE",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetConsoleScreenBufferInfo",
".",
"restype",
"=",
"ctypes",
".",
"wintypes",
".",
"BOOL",
"_WindowsCSBI",
".",
"WINDLL",
".",
"kernel32",
".",
"GetConsoleScreenBufferInfo",
".",
"argtypes",
"=",
"[",
"ctypes",
".",
"wintypes",
".",
"HANDLE",
",",
"ctypes",
".",
"POINTER",
"(",
"_WindowsCSBI",
".",
"CSBI",
")",
"]"
] | 62.142857 | 36.285714 |
def model_counts_map(self, name=None, exclude=None, use_mask=False):
"""Return the model expectation map for a single source, a set
of sources, or all sources in the ROI. The map will be
computed using the current model parameters.
Parameters
----------
name : str
Parameter that defines the sources for which the model map
will be calculated. If name=None a model map will be
generated for all sources in the model. If name='diffuse'
a map for all diffuse sources will be generated.
exclude : list
Source name or list of source names that will be excluded
from the model map.
use_mask : bool
Parameter that specifies in the model counts map should include
mask pixels (i.e., ones whose weights are <= 0)
Returns
-------
map : `~fermipy.skymap.Map`
A map object containing the counts and WCS projection.
"""
if self.projtype == "WCS":
v = pyLike.FloatVector(self.npix ** 2 * self.enumbins)
elif self.projtype == "HPX":
v = pyLike.FloatVector(np.max(self.geom.npix) * self.enumbins)
else:
raise Exception("Unknown projection type %s", self.projtype)
exclude = utils.arg_to_list(exclude)
names = utils.arg_to_list(name)
excluded_names = []
for i, t in enumerate(exclude):
srcs = self.roi.get_sources_by_name(t)
excluded_names += [s.name for s in srcs]
if not hasattr(self.like.logLike, 'loadSourceMaps'):
# Update fixed model
self.like.logLike.buildFixedModelWts()
# Populate source map hash
self.like.logLike.buildFixedModelWts(True)
elif (name is None or name == 'all') and not exclude:
self.like.logLike.loadSourceMaps()
src_names = []
if (name is None) or (name == 'all'):
src_names = [src.name for src in self.roi.sources]
elif name == 'diffuse':
src_names = [src.name for src in self.roi.sources if
src.diffuse]
else:
srcs = [self.roi.get_source_by_name(t) for t in names]
src_names = [src.name for src in srcs]
# Remove sources in exclude list
src_names = [str(t) for t in src_names if t not in excluded_names]
# EAC we need the try blocks b/c older versions of the ST don't have some of these functions
if len(src_names) == len(self.roi.sources):
try:
self.like.logLike.computeModelMap(v, use_mask)
except (TypeError, NotImplementedError):
self.like.logLike.computeModelMap(v)
elif not hasattr(self.like.logLike, 'setSourceMapImage'):
for s in src_names:
model = self.like.logLike.sourceMap(str(s))
try:
self.like.logLike.updateModelMap(v, model, use_mask)
except (TypeError, NotImplementedError):
self.like.logLike.updateModelMap(v, model)
else:
try:
if hasattr(self.like.logLike, 'has_weights'):
self.like.logLike.computeModelMap(src_names, v, use_mask)
else:
self.like.logLike.computeModelMap(src_names, v)
except:
vsum = np.zeros(v.size())
for s in src_names:
vtmp = pyLike.FloatVector(v.size())
if hasattr(self.like.logLike, 'has_weights'):
self.like.logLike.computeModelMap(
str(s), vtmp, use_mask)
else:
self.like.logLike.computeModelMap(str(s), vtmp)
vsum += vtmp
v = pyLike.FloatVector(vsum)
if self.projtype == "WCS":
z = np.array(v).reshape(self.enumbins, self.npix, self.npix)
return WcsNDMap(copy.deepcopy(self._geom), z)
elif self.projtype == "HPX":
z = np.array(v).reshape(self.enumbins, np.max(self._geom.npix))
return HpxNDMap(copy.deepcopy(self._geom), z)
else:
raise Exception(
"Did not recognize projection type %s", self.projtype) | [
"def",
"model_counts_map",
"(",
"self",
",",
"name",
"=",
"None",
",",
"exclude",
"=",
"None",
",",
"use_mask",
"=",
"False",
")",
":",
"if",
"self",
".",
"projtype",
"==",
"\"WCS\"",
":",
"v",
"=",
"pyLike",
".",
"FloatVector",
"(",
"self",
".",
"npix",
"**",
"2",
"*",
"self",
".",
"enumbins",
")",
"elif",
"self",
".",
"projtype",
"==",
"\"HPX\"",
":",
"v",
"=",
"pyLike",
".",
"FloatVector",
"(",
"np",
".",
"max",
"(",
"self",
".",
"geom",
".",
"npix",
")",
"*",
"self",
".",
"enumbins",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Unknown projection type %s\"",
",",
"self",
".",
"projtype",
")",
"exclude",
"=",
"utils",
".",
"arg_to_list",
"(",
"exclude",
")",
"names",
"=",
"utils",
".",
"arg_to_list",
"(",
"name",
")",
"excluded_names",
"=",
"[",
"]",
"for",
"i",
",",
"t",
"in",
"enumerate",
"(",
"exclude",
")",
":",
"srcs",
"=",
"self",
".",
"roi",
".",
"get_sources_by_name",
"(",
"t",
")",
"excluded_names",
"+=",
"[",
"s",
".",
"name",
"for",
"s",
"in",
"srcs",
"]",
"if",
"not",
"hasattr",
"(",
"self",
".",
"like",
".",
"logLike",
",",
"'loadSourceMaps'",
")",
":",
"# Update fixed model",
"self",
".",
"like",
".",
"logLike",
".",
"buildFixedModelWts",
"(",
")",
"# Populate source map hash",
"self",
".",
"like",
".",
"logLike",
".",
"buildFixedModelWts",
"(",
"True",
")",
"elif",
"(",
"name",
"is",
"None",
"or",
"name",
"==",
"'all'",
")",
"and",
"not",
"exclude",
":",
"self",
".",
"like",
".",
"logLike",
".",
"loadSourceMaps",
"(",
")",
"src_names",
"=",
"[",
"]",
"if",
"(",
"name",
"is",
"None",
")",
"or",
"(",
"name",
"==",
"'all'",
")",
":",
"src_names",
"=",
"[",
"src",
".",
"name",
"for",
"src",
"in",
"self",
".",
"roi",
".",
"sources",
"]",
"elif",
"name",
"==",
"'diffuse'",
":",
"src_names",
"=",
"[",
"src",
".",
"name",
"for",
"src",
"in",
"self",
".",
"roi",
".",
"sources",
"if",
"src",
".",
"diffuse",
"]",
"else",
":",
"srcs",
"=",
"[",
"self",
".",
"roi",
".",
"get_source_by_name",
"(",
"t",
")",
"for",
"t",
"in",
"names",
"]",
"src_names",
"=",
"[",
"src",
".",
"name",
"for",
"src",
"in",
"srcs",
"]",
"# Remove sources in exclude list",
"src_names",
"=",
"[",
"str",
"(",
"t",
")",
"for",
"t",
"in",
"src_names",
"if",
"t",
"not",
"in",
"excluded_names",
"]",
"# EAC we need the try blocks b/c older versions of the ST don't have some of these functions",
"if",
"len",
"(",
"src_names",
")",
"==",
"len",
"(",
"self",
".",
"roi",
".",
"sources",
")",
":",
"try",
":",
"self",
".",
"like",
".",
"logLike",
".",
"computeModelMap",
"(",
"v",
",",
"use_mask",
")",
"except",
"(",
"TypeError",
",",
"NotImplementedError",
")",
":",
"self",
".",
"like",
".",
"logLike",
".",
"computeModelMap",
"(",
"v",
")",
"elif",
"not",
"hasattr",
"(",
"self",
".",
"like",
".",
"logLike",
",",
"'setSourceMapImage'",
")",
":",
"for",
"s",
"in",
"src_names",
":",
"model",
"=",
"self",
".",
"like",
".",
"logLike",
".",
"sourceMap",
"(",
"str",
"(",
"s",
")",
")",
"try",
":",
"self",
".",
"like",
".",
"logLike",
".",
"updateModelMap",
"(",
"v",
",",
"model",
",",
"use_mask",
")",
"except",
"(",
"TypeError",
",",
"NotImplementedError",
")",
":",
"self",
".",
"like",
".",
"logLike",
".",
"updateModelMap",
"(",
"v",
",",
"model",
")",
"else",
":",
"try",
":",
"if",
"hasattr",
"(",
"self",
".",
"like",
".",
"logLike",
",",
"'has_weights'",
")",
":",
"self",
".",
"like",
".",
"logLike",
".",
"computeModelMap",
"(",
"src_names",
",",
"v",
",",
"use_mask",
")",
"else",
":",
"self",
".",
"like",
".",
"logLike",
".",
"computeModelMap",
"(",
"src_names",
",",
"v",
")",
"except",
":",
"vsum",
"=",
"np",
".",
"zeros",
"(",
"v",
".",
"size",
"(",
")",
")",
"for",
"s",
"in",
"src_names",
":",
"vtmp",
"=",
"pyLike",
".",
"FloatVector",
"(",
"v",
".",
"size",
"(",
")",
")",
"if",
"hasattr",
"(",
"self",
".",
"like",
".",
"logLike",
",",
"'has_weights'",
")",
":",
"self",
".",
"like",
".",
"logLike",
".",
"computeModelMap",
"(",
"str",
"(",
"s",
")",
",",
"vtmp",
",",
"use_mask",
")",
"else",
":",
"self",
".",
"like",
".",
"logLike",
".",
"computeModelMap",
"(",
"str",
"(",
"s",
")",
",",
"vtmp",
")",
"vsum",
"+=",
"vtmp",
"v",
"=",
"pyLike",
".",
"FloatVector",
"(",
"vsum",
")",
"if",
"self",
".",
"projtype",
"==",
"\"WCS\"",
":",
"z",
"=",
"np",
".",
"array",
"(",
"v",
")",
".",
"reshape",
"(",
"self",
".",
"enumbins",
",",
"self",
".",
"npix",
",",
"self",
".",
"npix",
")",
"return",
"WcsNDMap",
"(",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_geom",
")",
",",
"z",
")",
"elif",
"self",
".",
"projtype",
"==",
"\"HPX\"",
":",
"z",
"=",
"np",
".",
"array",
"(",
"v",
")",
".",
"reshape",
"(",
"self",
".",
"enumbins",
",",
"np",
".",
"max",
"(",
"self",
".",
"_geom",
".",
"npix",
")",
")",
"return",
"HpxNDMap",
"(",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_geom",
")",
",",
"z",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Did not recognize projection type %s\"",
",",
"self",
".",
"projtype",
")"
] | 39.869159 | 21.280374 |
def _getphoto_originalsize(self,pid):
"""Asks flickr for photo original size
returns tuple with width,height
"""
logger.debug('%s - Getting original size from flickr'%(pid))
width=None
height=None
resp=self.flickr.photos_getSizes(photo_id=pid)
if resp.attrib['stat']!='ok':
logger.error("%s - flickr: photos_getSizes failed with status: %s",\
resp.attrib['stat']);
return (None,None)
for size in resp.find('sizes').findall('size'):
if size.attrib['label']=="Original":
width=int(size.attrib['width'])
height=int(size.attrib['height'])
logger.debug('Found pid %s original size of %s,%s'\
%(pid,width,height))
return (width,height) | [
"def",
"_getphoto_originalsize",
"(",
"self",
",",
"pid",
")",
":",
"logger",
".",
"debug",
"(",
"'%s - Getting original size from flickr'",
"%",
"(",
"pid",
")",
")",
"width",
"=",
"None",
"height",
"=",
"None",
"resp",
"=",
"self",
".",
"flickr",
".",
"photos_getSizes",
"(",
"photo_id",
"=",
"pid",
")",
"if",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
"!=",
"'ok'",
":",
"logger",
".",
"error",
"(",
"\"%s - flickr: photos_getSizes failed with status: %s\"",
",",
"resp",
".",
"attrib",
"[",
"'stat'",
"]",
")",
"return",
"(",
"None",
",",
"None",
")",
"for",
"size",
"in",
"resp",
".",
"find",
"(",
"'sizes'",
")",
".",
"findall",
"(",
"'size'",
")",
":",
"if",
"size",
".",
"attrib",
"[",
"'label'",
"]",
"==",
"\"Original\"",
":",
"width",
"=",
"int",
"(",
"size",
".",
"attrib",
"[",
"'width'",
"]",
")",
"height",
"=",
"int",
"(",
"size",
".",
"attrib",
"[",
"'height'",
"]",
")",
"logger",
".",
"debug",
"(",
"'Found pid %s original size of %s,%s'",
"%",
"(",
"pid",
",",
"width",
",",
"height",
")",
")",
"return",
"(",
"width",
",",
"height",
")"
] | 34.958333 | 16.5 |
def timecds2datetime(tcds):
"""Method for converting time_cds-variables to datetime-objectsself.
Works both with a dictionary and a numpy record_array.
"""
days = int(tcds['Days'])
milliseconds = int(tcds['Milliseconds'])
try:
microseconds = int(tcds['Microseconds'])
except (KeyError, ValueError):
microseconds = 0
try:
microseconds += int(tcds['Nanoseconds']) / 1000.
except (KeyError, ValueError):
pass
reference = datetime(1958, 1, 1)
delta = timedelta(days=days, milliseconds=milliseconds,
microseconds=microseconds)
return reference + delta | [
"def",
"timecds2datetime",
"(",
"tcds",
")",
":",
"days",
"=",
"int",
"(",
"tcds",
"[",
"'Days'",
"]",
")",
"milliseconds",
"=",
"int",
"(",
"tcds",
"[",
"'Milliseconds'",
"]",
")",
"try",
":",
"microseconds",
"=",
"int",
"(",
"tcds",
"[",
"'Microseconds'",
"]",
")",
"except",
"(",
"KeyError",
",",
"ValueError",
")",
":",
"microseconds",
"=",
"0",
"try",
":",
"microseconds",
"+=",
"int",
"(",
"tcds",
"[",
"'Nanoseconds'",
"]",
")",
"/",
"1000.",
"except",
"(",
"KeyError",
",",
"ValueError",
")",
":",
"pass",
"reference",
"=",
"datetime",
"(",
"1958",
",",
"1",
",",
"1",
")",
"delta",
"=",
"timedelta",
"(",
"days",
"=",
"days",
",",
"milliseconds",
"=",
"milliseconds",
",",
"microseconds",
"=",
"microseconds",
")",
"return",
"reference",
"+",
"delta"
] | 30.047619 | 16.857143 |
def safe_pdist(arr, *args, **kwargs):
"""
Kwargs:
metric = ut.absdiff
SeeAlso:
scipy.spatial.distance.pdist
TODO: move to vtool
"""
if arr is None or len(arr) < 2:
return None
else:
import vtool as vt
arr_ = vt.atleast_nd(arr, 2)
return spdist.pdist(arr_, *args, **kwargs) | [
"def",
"safe_pdist",
"(",
"arr",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"arr",
"is",
"None",
"or",
"len",
"(",
"arr",
")",
"<",
"2",
":",
"return",
"None",
"else",
":",
"import",
"vtool",
"as",
"vt",
"arr_",
"=",
"vt",
".",
"atleast_nd",
"(",
"arr",
",",
"2",
")",
"return",
"spdist",
".",
"pdist",
"(",
"arr_",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 20.9375 | 16.1875 |
def to_html(self):
"""Render a Heading MessageElement as html
:returns: The html representation of the Heading MessageElement.
:rtype: str
"""
if self.text is None:
return
level = self.level
if level > 6:
level = 6
return '<h%s%s><a id="%s"></a>%s%s</h%s>' % (
level,
self.html_attributes(),
self.element_id,
self.html_icon(),
self.text.to_html(),
level) | [
"def",
"to_html",
"(",
"self",
")",
":",
"if",
"self",
".",
"text",
"is",
"None",
":",
"return",
"level",
"=",
"self",
".",
"level",
"if",
"level",
">",
"6",
":",
"level",
"=",
"6",
"return",
"'<h%s%s><a id=\"%s\"></a>%s%s</h%s>'",
"%",
"(",
"level",
",",
"self",
".",
"html_attributes",
"(",
")",
",",
"self",
".",
"element_id",
",",
"self",
".",
"html_icon",
"(",
")",
",",
"self",
".",
"text",
".",
"to_html",
"(",
")",
",",
"level",
")"
] | 26.210526 | 17.526316 |
def head(self, url):
'''head request, typically used for status code retrieval, etc.
'''
bot.debug('HEAD %s' %url)
return self._call(url, func=requests.head) | [
"def",
"head",
"(",
"self",
",",
"url",
")",
":",
"bot",
".",
"debug",
"(",
"'HEAD %s'",
"%",
"url",
")",
"return",
"self",
".",
"_call",
"(",
"url",
",",
"func",
"=",
"requests",
".",
"head",
")"
] | 33.8 | 19.4 |
def append_known_secrets(self): # type: () -> None
"""
Read key-value pair files with secrets. For example, .conf and .ini files.
:return:
"""
for file_name in self.files:
if "~" in file_name:
file_name = os.path.expanduser(file_name)
if not os.path.isfile(file_name):
print(
"Don't have "
+ Back.BLACK
+ Fore.YELLOW
+ file_name
+ ", won't use."
)
continue
with open(os.path.expanduser(file_name), "r") as file:
for line in file:
if line and "=" in line:
possible = line.split("=")[1].strip(" \"'\n")
if len(possible) > 4 and possible not in self.false_positives:
self.secrets.append(possible) | [
"def",
"append_known_secrets",
"(",
"self",
")",
":",
"# type: () -> None",
"for",
"file_name",
"in",
"self",
".",
"files",
":",
"if",
"\"~\"",
"in",
"file_name",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"file_name",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_name",
")",
":",
"print",
"(",
"\"Don't have \"",
"+",
"Back",
".",
"BLACK",
"+",
"Fore",
".",
"YELLOW",
"+",
"file_name",
"+",
"\", won't use.\"",
")",
"continue",
"with",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"file_name",
")",
",",
"\"r\"",
")",
"as",
"file",
":",
"for",
"line",
"in",
"file",
":",
"if",
"line",
"and",
"\"=\"",
"in",
"line",
":",
"possible",
"=",
"line",
".",
"split",
"(",
"\"=\"",
")",
"[",
"1",
"]",
".",
"strip",
"(",
"\" \\\"'\\n\"",
")",
"if",
"len",
"(",
"possible",
")",
">",
"4",
"and",
"possible",
"not",
"in",
"self",
".",
"false_positives",
":",
"self",
".",
"secrets",
".",
"append",
"(",
"possible",
")"
] | 40.173913 | 14.434783 |
def _generate_result(self, res_type, channel, result):
"""Generate the result object"""
schema = self.api.ws_result_schema()
schema.context['channel'] = channel
schema.context['response_type'] = res_type
self.callback(schema.load(result), self.context) | [
"def",
"_generate_result",
"(",
"self",
",",
"res_type",
",",
"channel",
",",
"result",
")",
":",
"schema",
"=",
"self",
".",
"api",
".",
"ws_result_schema",
"(",
")",
"schema",
".",
"context",
"[",
"'channel'",
"]",
"=",
"channel",
"schema",
".",
"context",
"[",
"'response_type'",
"]",
"=",
"res_type",
"self",
".",
"callback",
"(",
"schema",
".",
"load",
"(",
"result",
")",
",",
"self",
".",
"context",
")"
] | 47.833333 | 7.833333 |
def pairwise_dxy(pos, gac, start=None, stop=None, is_accessible=None):
"""Convenience function to calculate a pairwise distance matrix using
nucleotide divergence (a.k.a. Dxy) as the distance metric.
Parameters
----------
pos : array_like, int, shape (n_variants,)
Variant positions.
gac : array_like, int, shape (n_variants, n_samples, n_alleles)
Per-genotype allele counts.
start : int, optional
Start position of region to use.
stop : int, optional
Stop position of region to use.
is_accessible : array_like, bool, shape (len(contig),), optional
Boolean array indicating accessibility status for all positions in the
chromosome/contig.
Returns
-------
dist : ndarray
Distance matrix in condensed form.
See Also
--------
allel.model.ndarray.GenotypeArray.to_allele_counts
"""
if not isinstance(pos, SortedIndex):
pos = SortedIndex(pos, copy=False)
gac = asarray_ndim(gac, 3)
# compute this once here, to avoid repeated evaluation within the loop
gan = np.sum(gac, axis=2)
m = gac.shape[1]
dist = list()
for i, j in itertools.combinations(range(m), 2):
ac1 = gac[:, i, ...]
an1 = gan[:, i]
ac2 = gac[:, j, ...]
an2 = gan[:, j]
d = sequence_divergence(pos, ac1, ac2, an1=an1, an2=an2,
start=start, stop=stop,
is_accessible=is_accessible)
dist.append(d)
return np.array(dist) | [
"def",
"pairwise_dxy",
"(",
"pos",
",",
"gac",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"is_accessible",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"pos",
",",
"SortedIndex",
")",
":",
"pos",
"=",
"SortedIndex",
"(",
"pos",
",",
"copy",
"=",
"False",
")",
"gac",
"=",
"asarray_ndim",
"(",
"gac",
",",
"3",
")",
"# compute this once here, to avoid repeated evaluation within the loop",
"gan",
"=",
"np",
".",
"sum",
"(",
"gac",
",",
"axis",
"=",
"2",
")",
"m",
"=",
"gac",
".",
"shape",
"[",
"1",
"]",
"dist",
"=",
"list",
"(",
")",
"for",
"i",
",",
"j",
"in",
"itertools",
".",
"combinations",
"(",
"range",
"(",
"m",
")",
",",
"2",
")",
":",
"ac1",
"=",
"gac",
"[",
":",
",",
"i",
",",
"...",
"]",
"an1",
"=",
"gan",
"[",
":",
",",
"i",
"]",
"ac2",
"=",
"gac",
"[",
":",
",",
"j",
",",
"...",
"]",
"an2",
"=",
"gan",
"[",
":",
",",
"j",
"]",
"d",
"=",
"sequence_divergence",
"(",
"pos",
",",
"ac1",
",",
"ac2",
",",
"an1",
"=",
"an1",
",",
"an2",
"=",
"an2",
",",
"start",
"=",
"start",
",",
"stop",
"=",
"stop",
",",
"is_accessible",
"=",
"is_accessible",
")",
"dist",
".",
"append",
"(",
"d",
")",
"return",
"np",
".",
"array",
"(",
"dist",
")"
] | 32.73913 | 19.173913 |
def _kill_worker_threads(self):
""" Kill any currently executing worker threads.
See :meth:`ServiceContainer.spawn_worker`
"""
num_workers = len(self._worker_threads)
if num_workers:
_log.warning('killing %s active workers(s)', num_workers)
for worker_ctx, gt in list(self._worker_threads.items()):
_log.warning('killing active worker for %s', worker_ctx)
gt.kill() | [
"def",
"_kill_worker_threads",
"(",
"self",
")",
":",
"num_workers",
"=",
"len",
"(",
"self",
".",
"_worker_threads",
")",
"if",
"num_workers",
":",
"_log",
".",
"warning",
"(",
"'killing %s active workers(s)'",
",",
"num_workers",
")",
"for",
"worker_ctx",
",",
"gt",
"in",
"list",
"(",
"self",
".",
"_worker_threads",
".",
"items",
"(",
")",
")",
":",
"_log",
".",
"warning",
"(",
"'killing active worker for %s'",
",",
"worker_ctx",
")",
"gt",
".",
"kill",
"(",
")"
] | 37.666667 | 18.916667 |
def sqlalch_datetime(dt):
"""Convert a SQLAlchemy datetime string to a datetime object."""
if isinstance(dt, str):
return datetime.strptime(dt, "%Y-%m-%d %H:%M:%S.%f").replace(tzinfo=UTC)
if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:
return dt.astimezone(UTC)
return dt.replace(tzinfo=UTC) | [
"def",
"sqlalch_datetime",
"(",
"dt",
")",
":",
"if",
"isinstance",
"(",
"dt",
",",
"str",
")",
":",
"return",
"datetime",
".",
"strptime",
"(",
"dt",
",",
"\"%Y-%m-%d %H:%M:%S.%f\"",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"UTC",
")",
"if",
"dt",
".",
"tzinfo",
"is",
"not",
"None",
"and",
"dt",
".",
"tzinfo",
".",
"utcoffset",
"(",
"dt",
")",
"is",
"not",
"None",
":",
"return",
"dt",
".",
"astimezone",
"(",
"UTC",
")",
"return",
"dt",
".",
"replace",
"(",
"tzinfo",
"=",
"UTC",
")"
] | 47.857143 | 15.857143 |
def partition_horizontal_twice(thelist, numbers):
"""
numbers is split on a comma to n and n2.
Break a list into peices each peice alternating between n and n2 items long
``partition_horizontal_twice(range(14), "3,4")`` gives::
[[0, 1, 2],
[3, 4, 5, 6],
[7, 8, 9],
[10, 11, 12, 13]]
Clear as mud?
"""
n, n2 = numbers.split(',')
try:
n = int(n)
n2 = int(n2)
thelist = list(thelist)
except (ValueError, TypeError):
return [thelist]
newlists = []
while thelist:
newlists.append(thelist[:n])
thelist = thelist[n:]
newlists.append(thelist[:n2])
thelist = thelist[n2:]
return newlists | [
"def",
"partition_horizontal_twice",
"(",
"thelist",
",",
"numbers",
")",
":",
"n",
",",
"n2",
"=",
"numbers",
".",
"split",
"(",
"','",
")",
"try",
":",
"n",
"=",
"int",
"(",
"n",
")",
"n2",
"=",
"int",
"(",
"n2",
")",
"thelist",
"=",
"list",
"(",
"thelist",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"return",
"[",
"thelist",
"]",
"newlists",
"=",
"[",
"]",
"while",
"thelist",
":",
"newlists",
".",
"append",
"(",
"thelist",
"[",
":",
"n",
"]",
")",
"thelist",
"=",
"thelist",
"[",
"n",
":",
"]",
"newlists",
".",
"append",
"(",
"thelist",
"[",
":",
"n2",
"]",
")",
"thelist",
"=",
"thelist",
"[",
"n2",
":",
"]",
"return",
"newlists"
] | 26.37037 | 16.592593 |
def _check_timeindex(self):
"""
Check function to check if all feed-in and load time series contain
values for the specified time index.
"""
try:
self.timeseries.generation_fluctuating
self.timeseries.generation_dispatchable
self.timeseries.load
self.timeseries.generation_reactive_power
self.timeseries.load_reactive_power
except:
message = 'Time index of feed-in and load time series does ' \
'not match.'
logging.error(message)
raise KeyError(message) | [
"def",
"_check_timeindex",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"timeseries",
".",
"generation_fluctuating",
"self",
".",
"timeseries",
".",
"generation_dispatchable",
"self",
".",
"timeseries",
".",
"load",
"self",
".",
"timeseries",
".",
"generation_reactive_power",
"self",
".",
"timeseries",
".",
"load_reactive_power",
"except",
":",
"message",
"=",
"'Time index of feed-in and load time series does '",
"'not match.'",
"logging",
".",
"error",
"(",
"message",
")",
"raise",
"KeyError",
"(",
"message",
")"
] | 35.588235 | 14.411765 |
def init():
"""Initialize the pipeline in maya so everything works
Init environment and load plugins.
This also creates the initial Jukebox Menu entry.
:returns: None
:rtype: None
:raises: None
"""
main.init_environment()
pluginpath = os.pathsep.join((os.environ.get('JUKEBOX_PLUGIN_PATH', ''), BUILTIN_PLUGIN_PATH))
os.environ['JUKEBOX_PLUGIN_PATH'] = pluginpath
try:
maya.standalone.initialize()
jukeboxmaya.STANDALONE_INITIALIZED = True
except RuntimeError as e:
jukeboxmaya.STANDALONE_INITIALIZED = False
if str(e) == "maya.standalone may only be used from an external Python interpreter":
mm = MenuManager.get()
mainmenu = mm.create_menu("Jukebox", tearOff=True)
mm.create_menu("Help", parent=mainmenu, command=show_help)
# load plugins
pmanager = MayaPluginManager.get()
pmanager.load_plugins()
load_mayaplugins() | [
"def",
"init",
"(",
")",
":",
"main",
".",
"init_environment",
"(",
")",
"pluginpath",
"=",
"os",
".",
"pathsep",
".",
"join",
"(",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'JUKEBOX_PLUGIN_PATH'",
",",
"''",
")",
",",
"BUILTIN_PLUGIN_PATH",
")",
")",
"os",
".",
"environ",
"[",
"'JUKEBOX_PLUGIN_PATH'",
"]",
"=",
"pluginpath",
"try",
":",
"maya",
".",
"standalone",
".",
"initialize",
"(",
")",
"jukeboxmaya",
".",
"STANDALONE_INITIALIZED",
"=",
"True",
"except",
"RuntimeError",
"as",
"e",
":",
"jukeboxmaya",
".",
"STANDALONE_INITIALIZED",
"=",
"False",
"if",
"str",
"(",
"e",
")",
"==",
"\"maya.standalone may only be used from an external Python interpreter\"",
":",
"mm",
"=",
"MenuManager",
".",
"get",
"(",
")",
"mainmenu",
"=",
"mm",
".",
"create_menu",
"(",
"\"Jukebox\"",
",",
"tearOff",
"=",
"True",
")",
"mm",
".",
"create_menu",
"(",
"\"Help\"",
",",
"parent",
"=",
"mainmenu",
",",
"command",
"=",
"show_help",
")",
"# load plugins",
"pmanager",
"=",
"MayaPluginManager",
".",
"get",
"(",
")",
"pmanager",
".",
"load_plugins",
"(",
")",
"load_mayaplugins",
"(",
")"
] | 35.692308 | 19.423077 |
def cubes():
""" Get a listing of all publicly available cubes. """
cubes = []
for cube in get_manager().list_cubes():
cubes.append({
'name': cube
})
return jsonify({
'status': 'ok',
'data': cubes
}) | [
"def",
"cubes",
"(",
")",
":",
"cubes",
"=",
"[",
"]",
"for",
"cube",
"in",
"get_manager",
"(",
")",
".",
"list_cubes",
"(",
")",
":",
"cubes",
".",
"append",
"(",
"{",
"'name'",
":",
"cube",
"}",
")",
"return",
"jsonify",
"(",
"{",
"'status'",
":",
"'ok'",
",",
"'data'",
":",
"cubes",
"}",
")"
] | 23 | 19.181818 |
def parse_string(s):
"""Parse a string with units and try to make a bitmath object out of
it.
String inputs may include whitespace characters between the value and
the unit.
"""
# Strings only please
if not isinstance(s, (str, unicode)):
raise ValueError("parse_string only accepts string inputs but a %s was given" %
type(s))
# get the index of the first alphabetic character
try:
index = list([i.isalpha() for i in s]).index(True)
except ValueError:
# If there's no alphabetic characters we won't be able to .index(True)
raise ValueError("No unit detected, can not parse string '%s' into a bitmath object" % s)
# split the string into the value and the unit
val, unit = s[:index], s[index:]
# see if the unit exists as a type in our namespace
if unit == "b":
unit_class = Bit
elif unit == "B":
unit_class = Byte
else:
if not (hasattr(sys.modules[__name__], unit) and isinstance(getattr(sys.modules[__name__], unit), type)):
raise ValueError("The unit %s is not a valid bitmath unit" % unit)
unit_class = globals()[unit]
try:
val = float(val)
except ValueError:
raise
try:
return unit_class(val)
except: # pragma: no cover
raise ValueError("Can't parse string %s into a bitmath object" % s) | [
"def",
"parse_string",
"(",
"s",
")",
":",
"# Strings only please",
"if",
"not",
"isinstance",
"(",
"s",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"parse_string only accepts string inputs but a %s was given\"",
"%",
"type",
"(",
"s",
")",
")",
"# get the index of the first alphabetic character",
"try",
":",
"index",
"=",
"list",
"(",
"[",
"i",
".",
"isalpha",
"(",
")",
"for",
"i",
"in",
"s",
"]",
")",
".",
"index",
"(",
"True",
")",
"except",
"ValueError",
":",
"# If there's no alphabetic characters we won't be able to .index(True)",
"raise",
"ValueError",
"(",
"\"No unit detected, can not parse string '%s' into a bitmath object\"",
"%",
"s",
")",
"# split the string into the value and the unit",
"val",
",",
"unit",
"=",
"s",
"[",
":",
"index",
"]",
",",
"s",
"[",
"index",
":",
"]",
"# see if the unit exists as a type in our namespace",
"if",
"unit",
"==",
"\"b\"",
":",
"unit_class",
"=",
"Bit",
"elif",
"unit",
"==",
"\"B\"",
":",
"unit_class",
"=",
"Byte",
"else",
":",
"if",
"not",
"(",
"hasattr",
"(",
"sys",
".",
"modules",
"[",
"__name__",
"]",
",",
"unit",
")",
"and",
"isinstance",
"(",
"getattr",
"(",
"sys",
".",
"modules",
"[",
"__name__",
"]",
",",
"unit",
")",
",",
"type",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"The unit %s is not a valid bitmath unit\"",
"%",
"unit",
")",
"unit_class",
"=",
"globals",
"(",
")",
"[",
"unit",
"]",
"try",
":",
"val",
"=",
"float",
"(",
"val",
")",
"except",
"ValueError",
":",
"raise",
"try",
":",
"return",
"unit_class",
"(",
"val",
")",
"except",
":",
"# pragma: no cover",
"raise",
"ValueError",
"(",
"\"Can't parse string %s into a bitmath object\"",
"%",
"s",
")"
] | 33.146341 | 25.073171 |
def report(self, simulation, state):
"""Generate a report.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
state : State
The current state of the simulation
"""
if not self._initialized:
self._initial_clock_time = datetime.now()
self._initial_simulation_time = state.getTime()
self._initial_steps = simulation.currentStep
self._initialized = True
steps = simulation.currentStep
time = datetime.now() - self._initial_clock_time
days = time.total_seconds()/86400.0
ns = (state.getTime()-self._initial_simulation_time).value_in_unit(u.nanosecond)
margin = ' ' * self.margin
ns_day = ns/days
delta = ((self.total_steps-steps)*time.total_seconds())/steps
# remove microseconds to have cleaner output
remaining = timedelta(seconds=int(delta))
percentage = 100.0*steps/self.total_steps
if ns_day:
template = '{}{}/{} steps ({:.1f}%) - {} left @ {:.1f} ns/day \r'
else:
template = '{}{}/{} steps ({:.1f}%) \r'
report = template.format(margin, steps, self.total_steps, percentage, remaining, ns_day)
self._out.write(report)
if hasattr(self._out, 'flush'):
self._out.flush() | [
"def",
"report",
"(",
"self",
",",
"simulation",
",",
"state",
")",
":",
"if",
"not",
"self",
".",
"_initialized",
":",
"self",
".",
"_initial_clock_time",
"=",
"datetime",
".",
"now",
"(",
")",
"self",
".",
"_initial_simulation_time",
"=",
"state",
".",
"getTime",
"(",
")",
"self",
".",
"_initial_steps",
"=",
"simulation",
".",
"currentStep",
"self",
".",
"_initialized",
"=",
"True",
"steps",
"=",
"simulation",
".",
"currentStep",
"time",
"=",
"datetime",
".",
"now",
"(",
")",
"-",
"self",
".",
"_initial_clock_time",
"days",
"=",
"time",
".",
"total_seconds",
"(",
")",
"/",
"86400.0",
"ns",
"=",
"(",
"state",
".",
"getTime",
"(",
")",
"-",
"self",
".",
"_initial_simulation_time",
")",
".",
"value_in_unit",
"(",
"u",
".",
"nanosecond",
")",
"margin",
"=",
"' '",
"*",
"self",
".",
"margin",
"ns_day",
"=",
"ns",
"/",
"days",
"delta",
"=",
"(",
"(",
"self",
".",
"total_steps",
"-",
"steps",
")",
"*",
"time",
".",
"total_seconds",
"(",
")",
")",
"/",
"steps",
"# remove microseconds to have cleaner output",
"remaining",
"=",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"delta",
")",
")",
"percentage",
"=",
"100.0",
"*",
"steps",
"/",
"self",
".",
"total_steps",
"if",
"ns_day",
":",
"template",
"=",
"'{}{}/{} steps ({:.1f}%) - {} left @ {:.1f} ns/day \\r'",
"else",
":",
"template",
"=",
"'{}{}/{} steps ({:.1f}%) \\r'",
"report",
"=",
"template",
".",
"format",
"(",
"margin",
",",
"steps",
",",
"self",
".",
"total_steps",
",",
"percentage",
",",
"remaining",
",",
"ns_day",
")",
"self",
".",
"_out",
".",
"write",
"(",
"report",
")",
"if",
"hasattr",
"(",
"self",
".",
"_out",
",",
"'flush'",
")",
":",
"self",
".",
"_out",
".",
"flush",
"(",
")"
] | 40.542857 | 18.885714 |
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: OrderedDict of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not tf.gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = tf.gfile.GFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
# sending running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: distorted_image_data})
bottleneck_values = np.squeeze(bottleneck_values)
bottlenecks.append(bottleneck_values)
ground_truths.append(label_index)
return bottlenecks, ground_truths | [
"def",
"get_random_distorted_bottlenecks",
"(",
"sess",
",",
"image_lists",
",",
"how_many",
",",
"category",
",",
"image_dir",
",",
"input_jpeg_tensor",
",",
"distorted_image",
",",
"resized_input_tensor",
",",
"bottleneck_tensor",
")",
":",
"class_count",
"=",
"len",
"(",
"image_lists",
".",
"keys",
"(",
")",
")",
"bottlenecks",
"=",
"[",
"]",
"ground_truths",
"=",
"[",
"]",
"for",
"unused_i",
"in",
"range",
"(",
"how_many",
")",
":",
"label_index",
"=",
"random",
".",
"randrange",
"(",
"class_count",
")",
"label_name",
"=",
"list",
"(",
"image_lists",
".",
"keys",
"(",
")",
")",
"[",
"label_index",
"]",
"image_index",
"=",
"random",
".",
"randrange",
"(",
"MAX_NUM_IMAGES_PER_CLASS",
"+",
"1",
")",
"image_path",
"=",
"get_image_path",
"(",
"image_lists",
",",
"label_name",
",",
"image_index",
",",
"image_dir",
",",
"category",
")",
"if",
"not",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"image_path",
")",
":",
"tf",
".",
"logging",
".",
"fatal",
"(",
"'File does not exist %s'",
",",
"image_path",
")",
"jpeg_data",
"=",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"image_path",
",",
"'rb'",
")",
".",
"read",
"(",
")",
"# Note that we materialize the distorted_image_data as a numpy array before",
"# sending running inference on the image. This involves 2 memory copies and",
"# might be optimized in other implementations.",
"distorted_image_data",
"=",
"sess",
".",
"run",
"(",
"distorted_image",
",",
"{",
"input_jpeg_tensor",
":",
"jpeg_data",
"}",
")",
"bottleneck_values",
"=",
"sess",
".",
"run",
"(",
"bottleneck_tensor",
",",
"{",
"resized_input_tensor",
":",
"distorted_image_data",
"}",
")",
"bottleneck_values",
"=",
"np",
".",
"squeeze",
"(",
"bottleneck_values",
")",
"bottlenecks",
".",
"append",
"(",
"bottleneck_values",
")",
"ground_truths",
".",
"append",
"(",
"label_index",
")",
"return",
"bottlenecks",
",",
"ground_truths"
] | 48.88 | 21.26 |
def get_rows_to_keep(gctoo, rid=None, row_bool=None, ridx=None, exclude_rid=None):
""" Figure out based on the possible row inputs which rows to keep.
Args:
gctoo (GCToo object):
rid (list of strings):
row_bool (boolean array):
ridx (list of integers):
exclude_rid (list of strings):
Returns:
rows_to_keep (list of strings): row ids to be kept
"""
# Use rid if provided
if rid is not None:
assert type(rid) == list, "rid must be a list. rid: {}".format(rid)
rows_to_keep = [gctoo_row for gctoo_row in gctoo.data_df.index if gctoo_row in rid]
# Tell user if some rids not found
num_missing_rids = len(rid) - len(rows_to_keep)
if num_missing_rids != 0:
logger.info("{} rids were not found in the GCT.".format(num_missing_rids))
# Use row_bool if provided
elif row_bool is not None:
assert len(row_bool) == gctoo.data_df.shape[0], (
"row_bool must have length equal to gctoo.data_df.shape[0]. " +
"len(row_bool): {}, gctoo.data_df.shape[0]: {}".format(
len(row_bool), gctoo.data_df.shape[0]))
rows_to_keep = gctoo.data_df.index[row_bool].values
# Use ridx if provided
elif ridx is not None:
assert type(ridx[0]) is int, (
"ridx must be a list of integers. ridx[0]: {}, " +
"type(ridx[0]): {}").format(ridx[0], type(ridx[0]))
assert max(ridx) <= gctoo.data_df.shape[0], (
"ridx contains an integer larger than the number of rows in " +
"the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}").format(
max(ridx), gctoo.data_df.shape[0])
rows_to_keep = gctoo.data_df.index[ridx].values
# If rid, row_bool, and ridx are all None, return all rows
else:
rows_to_keep = gctoo.data_df.index.values
# Use exclude_rid if provided
if exclude_rid is not None:
# Keep only those rows that are not in exclude_rid
rows_to_keep = [row_to_keep for row_to_keep in rows_to_keep if row_to_keep not in exclude_rid]
return rows_to_keep | [
"def",
"get_rows_to_keep",
"(",
"gctoo",
",",
"rid",
"=",
"None",
",",
"row_bool",
"=",
"None",
",",
"ridx",
"=",
"None",
",",
"exclude_rid",
"=",
"None",
")",
":",
"# Use rid if provided",
"if",
"rid",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"rid",
")",
"==",
"list",
",",
"\"rid must be a list. rid: {}\"",
".",
"format",
"(",
"rid",
")",
"rows_to_keep",
"=",
"[",
"gctoo_row",
"for",
"gctoo_row",
"in",
"gctoo",
".",
"data_df",
".",
"index",
"if",
"gctoo_row",
"in",
"rid",
"]",
"# Tell user if some rids not found",
"num_missing_rids",
"=",
"len",
"(",
"rid",
")",
"-",
"len",
"(",
"rows_to_keep",
")",
"if",
"num_missing_rids",
"!=",
"0",
":",
"logger",
".",
"info",
"(",
"\"{} rids were not found in the GCT.\"",
".",
"format",
"(",
"num_missing_rids",
")",
")",
"# Use row_bool if provided",
"elif",
"row_bool",
"is",
"not",
"None",
":",
"assert",
"len",
"(",
"row_bool",
")",
"==",
"gctoo",
".",
"data_df",
".",
"shape",
"[",
"0",
"]",
",",
"(",
"\"row_bool must have length equal to gctoo.data_df.shape[0]. \"",
"+",
"\"len(row_bool): {}, gctoo.data_df.shape[0]: {}\"",
".",
"format",
"(",
"len",
"(",
"row_bool",
")",
",",
"gctoo",
".",
"data_df",
".",
"shape",
"[",
"0",
"]",
")",
")",
"rows_to_keep",
"=",
"gctoo",
".",
"data_df",
".",
"index",
"[",
"row_bool",
"]",
".",
"values",
"# Use ridx if provided",
"elif",
"ridx",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"ridx",
"[",
"0",
"]",
")",
"is",
"int",
",",
"(",
"\"ridx must be a list of integers. ridx[0]: {}, \"",
"+",
"\"type(ridx[0]): {}\"",
")",
".",
"format",
"(",
"ridx",
"[",
"0",
"]",
",",
"type",
"(",
"ridx",
"[",
"0",
"]",
")",
")",
"assert",
"max",
"(",
"ridx",
")",
"<=",
"gctoo",
".",
"data_df",
".",
"shape",
"[",
"0",
"]",
",",
"(",
"\"ridx contains an integer larger than the number of rows in \"",
"+",
"\"the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}\"",
")",
".",
"format",
"(",
"max",
"(",
"ridx",
")",
",",
"gctoo",
".",
"data_df",
".",
"shape",
"[",
"0",
"]",
")",
"rows_to_keep",
"=",
"gctoo",
".",
"data_df",
".",
"index",
"[",
"ridx",
"]",
".",
"values",
"# If rid, row_bool, and ridx are all None, return all rows",
"else",
":",
"rows_to_keep",
"=",
"gctoo",
".",
"data_df",
".",
"index",
".",
"values",
"# Use exclude_rid if provided",
"if",
"exclude_rid",
"is",
"not",
"None",
":",
"# Keep only those rows that are not in exclude_rid",
"rows_to_keep",
"=",
"[",
"row_to_keep",
"for",
"row_to_keep",
"in",
"rows_to_keep",
"if",
"row_to_keep",
"not",
"in",
"exclude_rid",
"]",
"return",
"rows_to_keep"
] | 35.491525 | 24.338983 |
def segment_radii(neurites, neurite_type=NeuriteType.all):
'''arithmetic mean of the radii of the points in segments in a collection of neurites'''
def _seg_radii(sec):
'''vectorized mean radii'''
pts = sec.points[:, COLS.R]
return np.divide(np.add(pts[:-1], pts[1:]), 2.0)
return map_segments(_seg_radii, neurites, neurite_type) | [
"def",
"segment_radii",
"(",
"neurites",
",",
"neurite_type",
"=",
"NeuriteType",
".",
"all",
")",
":",
"def",
"_seg_radii",
"(",
"sec",
")",
":",
"'''vectorized mean radii'''",
"pts",
"=",
"sec",
".",
"points",
"[",
":",
",",
"COLS",
".",
"R",
"]",
"return",
"np",
".",
"divide",
"(",
"np",
".",
"add",
"(",
"pts",
"[",
":",
"-",
"1",
"]",
",",
"pts",
"[",
"1",
":",
"]",
")",
",",
"2.0",
")",
"return",
"map_segments",
"(",
"_seg_radii",
",",
"neurites",
",",
"neurite_type",
")"
] | 44.875 | 21.375 |
def kde_partition_data(data, estimate_tails=True):
"""Convenience method for building a partition and weights using a gaussian Kernel Density Estimate and default bandwidth.
Args:
data (list-like): The data from which to construct the estimate
estimate_tails (bool): Whether to estimate the tails of the distribution to keep the partition object finite
Returns:
A new partition_object::
{
"partition": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
"""
kde = stats.kde.gaussian_kde(data)
evaluation_bins = np.linspace(start=np.min(data) - (kde.covariance_factor() / 2),
stop=np.max(data) +
(kde.covariance_factor() / 2),
num=np.floor(((np.max(data) - np.min(data)) / kde.covariance_factor()) + 1).astype(int))
cdf_vals = [kde.integrate_box_1d(-np.inf, x) for x in evaluation_bins]
evaluation_weights = np.diff(cdf_vals)
if estimate_tails:
bins = np.concatenate(([np.min(data) - (1.5 * kde.covariance_factor())],
evaluation_bins,
[np.max(data) + (1.5 * kde.covariance_factor())]))
else:
bins = np.concatenate(([-np.inf], evaluation_bins, [np.inf]))
weights = np.concatenate(
([cdf_vals[0]], evaluation_weights, [1 - cdf_vals[-1]]))
return {
"bins": bins,
"weights": weights
} | [
"def",
"kde_partition_data",
"(",
"data",
",",
"estimate_tails",
"=",
"True",
")",
":",
"kde",
"=",
"stats",
".",
"kde",
".",
"gaussian_kde",
"(",
"data",
")",
"evaluation_bins",
"=",
"np",
".",
"linspace",
"(",
"start",
"=",
"np",
".",
"min",
"(",
"data",
")",
"-",
"(",
"kde",
".",
"covariance_factor",
"(",
")",
"/",
"2",
")",
",",
"stop",
"=",
"np",
".",
"max",
"(",
"data",
")",
"+",
"(",
"kde",
".",
"covariance_factor",
"(",
")",
"/",
"2",
")",
",",
"num",
"=",
"np",
".",
"floor",
"(",
"(",
"(",
"np",
".",
"max",
"(",
"data",
")",
"-",
"np",
".",
"min",
"(",
"data",
")",
")",
"/",
"kde",
".",
"covariance_factor",
"(",
")",
")",
"+",
"1",
")",
".",
"astype",
"(",
"int",
")",
")",
"cdf_vals",
"=",
"[",
"kde",
".",
"integrate_box_1d",
"(",
"-",
"np",
".",
"inf",
",",
"x",
")",
"for",
"x",
"in",
"evaluation_bins",
"]",
"evaluation_weights",
"=",
"np",
".",
"diff",
"(",
"cdf_vals",
")",
"if",
"estimate_tails",
":",
"bins",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"np",
".",
"min",
"(",
"data",
")",
"-",
"(",
"1.5",
"*",
"kde",
".",
"covariance_factor",
"(",
")",
")",
"]",
",",
"evaluation_bins",
",",
"[",
"np",
".",
"max",
"(",
"data",
")",
"+",
"(",
"1.5",
"*",
"kde",
".",
"covariance_factor",
"(",
")",
")",
"]",
")",
")",
"else",
":",
"bins",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"-",
"np",
".",
"inf",
"]",
",",
"evaluation_bins",
",",
"[",
"np",
".",
"inf",
"]",
")",
")",
"weights",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"cdf_vals",
"[",
"0",
"]",
"]",
",",
"evaluation_weights",
",",
"[",
"1",
"-",
"cdf_vals",
"[",
"-",
"1",
"]",
"]",
")",
")",
"return",
"{",
"\"bins\"",
":",
"bins",
",",
"\"weights\"",
":",
"weights",
"}"
] | 41.756757 | 28.810811 |
async def fetch_batch(self, request):
"""Fetches a specific batch from the validator, specified by id.
Request:
path:
- batch_id: The 128-character id of the batch to be fetched
Response:
data: A JSON object with the data from the fully expanded Batch
link: The link to this exact query
"""
error_traps = [error_handlers.BatchNotFoundTrap]
batch_id = request.match_info.get('batch_id', '')
self._validate_id(batch_id)
response = await self._query_validator(
Message.CLIENT_BATCH_GET_REQUEST,
client_batch_pb2.ClientBatchGetResponse,
client_batch_pb2.ClientBatchGetRequest(batch_id=batch_id),
error_traps)
return self._wrap_response(
request,
data=self._expand_batch(response['batch']),
metadata=self._get_metadata(request, response)) | [
"async",
"def",
"fetch_batch",
"(",
"self",
",",
"request",
")",
":",
"error_traps",
"=",
"[",
"error_handlers",
".",
"BatchNotFoundTrap",
"]",
"batch_id",
"=",
"request",
".",
"match_info",
".",
"get",
"(",
"'batch_id'",
",",
"''",
")",
"self",
".",
"_validate_id",
"(",
"batch_id",
")",
"response",
"=",
"await",
"self",
".",
"_query_validator",
"(",
"Message",
".",
"CLIENT_BATCH_GET_REQUEST",
",",
"client_batch_pb2",
".",
"ClientBatchGetResponse",
",",
"client_batch_pb2",
".",
"ClientBatchGetRequest",
"(",
"batch_id",
"=",
"batch_id",
")",
",",
"error_traps",
")",
"return",
"self",
".",
"_wrap_response",
"(",
"request",
",",
"data",
"=",
"self",
".",
"_expand_batch",
"(",
"response",
"[",
"'batch'",
"]",
")",
",",
"metadata",
"=",
"self",
".",
"_get_metadata",
"(",
"request",
",",
"response",
")",
")"
] | 35.423077 | 19.846154 |
def convert_inline_formula_elements(self):
"""
<inline-formula> elements must be converted to be conforming
These elements may contain <inline-graphic> elements, textual content,
or both.
"""
for inline in self.main.getroot().findall('.//inline-formula'):
#inline-formula elements will be modified in situ
remove_all_attributes(inline)
inline.tag = 'span'
inline.attrib['class'] = 'inline-formula'
inline_graphic = inline.find('inline-graphic')
if inline_graphic is None:
# Do nothing more if there is no graphic
continue
#Need to conver the inline-graphic element to an img element
inline_graphic.tag = 'img'
#Get a copy of the attributes, then remove them
inline_graphic_attributes = copy(inline_graphic.attrib)
remove_all_attributes(inline_graphic)
#Create a file reference for the image
xlink_href = ns_format(inline_graphic, 'xlink:href')
graphic_xlink_href = inline_graphic_attributes[xlink_href]
file_name = graphic_xlink_href.split('.')[-1] + '.png'
img_dir = 'images-' + self.doi_suffix()
img_path = '/'.join([img_dir, file_name])
#Set the source to the image path
inline_graphic.attrib['src'] = img_path
inline_graphic.attrib['class'] = 'inline-formula'
inline_graphic.attrib['alt'] = 'An Inline Formula' | [
"def",
"convert_inline_formula_elements",
"(",
"self",
")",
":",
"for",
"inline",
"in",
"self",
".",
"main",
".",
"getroot",
"(",
")",
".",
"findall",
"(",
"'.//inline-formula'",
")",
":",
"#inline-formula elements will be modified in situ",
"remove_all_attributes",
"(",
"inline",
")",
"inline",
".",
"tag",
"=",
"'span'",
"inline",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'inline-formula'",
"inline_graphic",
"=",
"inline",
".",
"find",
"(",
"'inline-graphic'",
")",
"if",
"inline_graphic",
"is",
"None",
":",
"# Do nothing more if there is no graphic",
"continue",
"#Need to conver the inline-graphic element to an img element",
"inline_graphic",
".",
"tag",
"=",
"'img'",
"#Get a copy of the attributes, then remove them",
"inline_graphic_attributes",
"=",
"copy",
"(",
"inline_graphic",
".",
"attrib",
")",
"remove_all_attributes",
"(",
"inline_graphic",
")",
"#Create a file reference for the image",
"xlink_href",
"=",
"ns_format",
"(",
"inline_graphic",
",",
"'xlink:href'",
")",
"graphic_xlink_href",
"=",
"inline_graphic_attributes",
"[",
"xlink_href",
"]",
"file_name",
"=",
"graphic_xlink_href",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"+",
"'.png'",
"img_dir",
"=",
"'images-'",
"+",
"self",
".",
"doi_suffix",
"(",
")",
"img_path",
"=",
"'/'",
".",
"join",
"(",
"[",
"img_dir",
",",
"file_name",
"]",
")",
"#Set the source to the image path",
"inline_graphic",
".",
"attrib",
"[",
"'src'",
"]",
"=",
"img_path",
"inline_graphic",
".",
"attrib",
"[",
"'class'",
"]",
"=",
"'inline-formula'",
"inline_graphic",
".",
"attrib",
"[",
"'alt'",
"]",
"=",
"'An Inline Formula'"
] | 48.935484 | 16.806452 |
def lonlat2xyz(lon, lat):
"""Convert lon lat to cartesian."""
lat = xu.deg2rad(lat)
lon = xu.deg2rad(lon)
x = xu.cos(lat) * xu.cos(lon)
y = xu.cos(lat) * xu.sin(lon)
z = xu.sin(lat)
return x, y, z | [
"def",
"lonlat2xyz",
"(",
"lon",
",",
"lat",
")",
":",
"lat",
"=",
"xu",
".",
"deg2rad",
"(",
"lat",
")",
"lon",
"=",
"xu",
".",
"deg2rad",
"(",
"lon",
")",
"x",
"=",
"xu",
".",
"cos",
"(",
"lat",
")",
"*",
"xu",
".",
"cos",
"(",
"lon",
")",
"y",
"=",
"xu",
".",
"cos",
"(",
"lat",
")",
"*",
"xu",
".",
"sin",
"(",
"lon",
")",
"z",
"=",
"xu",
".",
"sin",
"(",
"lat",
")",
"return",
"x",
",",
"y",
",",
"z"
] | 27.125 | 12.75 |
def get_raw_record(self, instance, update_fields=None):
"""
Gets the raw record.
If `update_fields` is set, the raw record will be build with only
the objectID and the given fields. Also, `_geoloc` and `_tags` will
not be included.
"""
tmp = {'objectID': self.objectID(instance)}
if update_fields:
if isinstance(update_fields, str):
update_fields = (update_fields,)
for elt in update_fields:
key = self.__translate_fields.get(elt, None)
if key:
tmp[key] = self.__named_fields[key](instance)
else:
for key, value in self.__named_fields.items():
tmp[key] = value(instance)
if self.geo_field:
loc = self.geo_field(instance)
if isinstance(loc, tuple):
tmp['_geoloc'] = {'lat': loc[0], 'lng': loc[1]}
elif isinstance(loc, dict):
self._validate_geolocation(loc)
tmp['_geoloc'] = loc
elif isinstance(loc, list):
[self._validate_geolocation(geo) for geo in loc]
tmp['_geoloc'] = loc
if self.tags:
if callable(self.tags):
tmp['_tags'] = self.tags(instance)
if not isinstance(tmp['_tags'], list):
tmp['_tags'] = list(tmp['_tags'])
logger.debug('BUILD %s FROM %s', tmp['objectID'], self.model)
return tmp | [
"def",
"get_raw_record",
"(",
"self",
",",
"instance",
",",
"update_fields",
"=",
"None",
")",
":",
"tmp",
"=",
"{",
"'objectID'",
":",
"self",
".",
"objectID",
"(",
"instance",
")",
"}",
"if",
"update_fields",
":",
"if",
"isinstance",
"(",
"update_fields",
",",
"str",
")",
":",
"update_fields",
"=",
"(",
"update_fields",
",",
")",
"for",
"elt",
"in",
"update_fields",
":",
"key",
"=",
"self",
".",
"__translate_fields",
".",
"get",
"(",
"elt",
",",
"None",
")",
"if",
"key",
":",
"tmp",
"[",
"key",
"]",
"=",
"self",
".",
"__named_fields",
"[",
"key",
"]",
"(",
"instance",
")",
"else",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"__named_fields",
".",
"items",
"(",
")",
":",
"tmp",
"[",
"key",
"]",
"=",
"value",
"(",
"instance",
")",
"if",
"self",
".",
"geo_field",
":",
"loc",
"=",
"self",
".",
"geo_field",
"(",
"instance",
")",
"if",
"isinstance",
"(",
"loc",
",",
"tuple",
")",
":",
"tmp",
"[",
"'_geoloc'",
"]",
"=",
"{",
"'lat'",
":",
"loc",
"[",
"0",
"]",
",",
"'lng'",
":",
"loc",
"[",
"1",
"]",
"}",
"elif",
"isinstance",
"(",
"loc",
",",
"dict",
")",
":",
"self",
".",
"_validate_geolocation",
"(",
"loc",
")",
"tmp",
"[",
"'_geoloc'",
"]",
"=",
"loc",
"elif",
"isinstance",
"(",
"loc",
",",
"list",
")",
":",
"[",
"self",
".",
"_validate_geolocation",
"(",
"geo",
")",
"for",
"geo",
"in",
"loc",
"]",
"tmp",
"[",
"'_geoloc'",
"]",
"=",
"loc",
"if",
"self",
".",
"tags",
":",
"if",
"callable",
"(",
"self",
".",
"tags",
")",
":",
"tmp",
"[",
"'_tags'",
"]",
"=",
"self",
".",
"tags",
"(",
"instance",
")",
"if",
"not",
"isinstance",
"(",
"tmp",
"[",
"'_tags'",
"]",
",",
"list",
")",
":",
"tmp",
"[",
"'_tags'",
"]",
"=",
"list",
"(",
"tmp",
"[",
"'_tags'",
"]",
")",
"logger",
".",
"debug",
"(",
"'BUILD %s FROM %s'",
",",
"tmp",
"[",
"'objectID'",
"]",
",",
"self",
".",
"model",
")",
"return",
"tmp"
] | 36.357143 | 17.642857 |
def repo(
    state, host, name, baseurl,
    present=True, description=None, enabled=True, gpgcheck=True, gpgkey=None,
):
    '''
    Add/remove/update yum repositories.

    + name: filename for the repo (in ``/etc/yum/repos.d/``)
    + baseurl: the baseurl of the repo
    + present: whether the ``.repo`` file should be present
    + description: optional verbose description
    + enabled: whether to set ``enabled=1``
    + gpgcheck: whether set ``gpgcheck=1``
    + gpgkey: the URL to the gpg key for this repo
    '''
    # Fall back to the repo name when no description is supplied
    if not description:
        description = name

    filename = '/etc/yum.repos.d/{0}.repo'.format(name)

    # Repo not wanted: just make sure any existing file is removed
    if not present:
        yield files.file(state, host, filename, present=False)
        return

    # Assemble the .repo file contents line by line
    lines = [
        '[{0}]'.format(name),
        'name={0}'.format(description),
        'baseurl={0}'.format(baseurl),
        'enabled={0}'.format(1 if enabled else 0),
        'gpgcheck={0}'.format(1 if gpgcheck else 0),
    ]
    if gpgkey:
        lines.append('gpgkey={0}'.format(gpgkey))
    lines.append('')

    # Upload the rendered file to the remote host
    yield files.put(state, host, StringIO('\n'.join(lines)), filename)
"def",
"repo",
"(",
"state",
",",
"host",
",",
"name",
",",
"baseurl",
",",
"present",
"=",
"True",
",",
"description",
"=",
"None",
",",
"enabled",
"=",
"True",
",",
"gpgcheck",
"=",
"True",
",",
"gpgkey",
"=",
"None",
",",
")",
":",
"# Description defaults to name",
"description",
"=",
"description",
"or",
"name",
"filename",
"=",
"'/etc/yum.repos.d/{0}.repo'",
".",
"format",
"(",
"name",
")",
"# If we don't want the repo, just remove any existing file",
"if",
"not",
"present",
":",
"yield",
"files",
".",
"file",
"(",
"state",
",",
"host",
",",
"filename",
",",
"present",
"=",
"False",
")",
"return",
"# Build the repo file from string",
"repo_lines",
"=",
"[",
"'[{0}]'",
".",
"format",
"(",
"name",
")",
",",
"'name={0}'",
".",
"format",
"(",
"description",
")",
",",
"'baseurl={0}'",
".",
"format",
"(",
"baseurl",
")",
",",
"'enabled={0}'",
".",
"format",
"(",
"1",
"if",
"enabled",
"else",
"0",
")",
",",
"'gpgcheck={0}'",
".",
"format",
"(",
"1",
"if",
"gpgcheck",
"else",
"0",
")",
",",
"]",
"if",
"gpgkey",
":",
"repo_lines",
".",
"append",
"(",
"'gpgkey={0}'",
".",
"format",
"(",
"gpgkey",
")",
")",
"repo_lines",
".",
"append",
"(",
"''",
")",
"repo",
"=",
"'\\n'",
".",
"join",
"(",
"repo_lines",
")",
"repo",
"=",
"StringIO",
"(",
"repo",
")",
"# Ensure this is the file on the server",
"yield",
"files",
".",
"put",
"(",
"state",
",",
"host",
",",
"repo",
",",
"filename",
")"
] | 29.302326 | 20.046512 |
def _validate_inputs(self, inputdict):
    """Validate and extract the required input links.

    Pops the expected links out of ``inputdict`` and type-checks them.
    Raises InputValidationError for missing or mistyped inputs and
    ValidationError if unrecognized inputs remain afterwards.
    """
    def pop_link(link, missing_msg):
        # Fetch a required link, failing with a descriptive error if absent.
        try:
            return inputdict.pop(self.get_linkname(link))
        except KeyError:
            raise InputValidationError(missing_msg)

    # Check inputdict
    parameters = pop_link(
        'parameters', "No parameters specified for this calculation")
    if not isinstance(parameters, ParameterData):
        raise InputValidationError("parameters not of type ParameterData")

    # Check code
    code = pop_link('code', "No code specified for this calculation")

    # Check input files
    structure = pop_link(
        'structure', "No input structure specified for calculation")
    if not isinstance(structure, SinglefileData):
        raise InputValidationError(
            "structure not of type SinglefileData")

    surface_sample = pop_link(
        'surface_sample', "No surface sample specified for calculation")
    if not isinstance(surface_sample, SinglefileData):
        raise InputValidationError(
            "surface_sample not of type SinglefileData")

    # Check that nothing is left unparsed
    if inputdict:
        raise ValidationError("Unrecognized inputs: {}".format(inputdict))

    return parameters, code, structure, surface_sample
"def",
"_validate_inputs",
"(",
"self",
",",
"inputdict",
")",
":",
"# Check inputdict",
"try",
":",
"parameters",
"=",
"inputdict",
".",
"pop",
"(",
"self",
".",
"get_linkname",
"(",
"'parameters'",
")",
")",
"except",
"KeyError",
":",
"raise",
"InputValidationError",
"(",
"\"No parameters specified for this \"",
"\"calculation\"",
")",
"if",
"not",
"isinstance",
"(",
"parameters",
",",
"ParameterData",
")",
":",
"raise",
"InputValidationError",
"(",
"\"parameters not of type \"",
"\"ParameterData\"",
")",
"# Check code",
"try",
":",
"code",
"=",
"inputdict",
".",
"pop",
"(",
"self",
".",
"get_linkname",
"(",
"'code'",
")",
")",
"except",
"KeyError",
":",
"raise",
"InputValidationError",
"(",
"\"No code specified for this \"",
"\"calculation\"",
")",
"# Check input files",
"try",
":",
"structure",
"=",
"inputdict",
".",
"pop",
"(",
"self",
".",
"get_linkname",
"(",
"'structure'",
")",
")",
"if",
"not",
"isinstance",
"(",
"structure",
",",
"SinglefileData",
")",
":",
"raise",
"InputValidationError",
"(",
"\"structure not of type SinglefileData\"",
")",
"except",
"KeyError",
":",
"raise",
"InputValidationError",
"(",
"\"No input structure specified for calculation\"",
")",
"try",
":",
"surface_sample",
"=",
"inputdict",
".",
"pop",
"(",
"self",
".",
"get_linkname",
"(",
"'surface_sample'",
")",
")",
"if",
"not",
"isinstance",
"(",
"surface_sample",
",",
"SinglefileData",
")",
":",
"raise",
"InputValidationError",
"(",
"\"surface_sample not of type SinglefileData\"",
")",
"except",
"KeyError",
":",
"raise",
"InputValidationError",
"(",
"\"No surface sample specified for calculation\"",
")",
"# Check that nothing is left unparsed",
"if",
"inputdict",
":",
"raise",
"ValidationError",
"(",
"\"Unrecognized inputs: {}\"",
".",
"format",
"(",
"inputdict",
")",
")",
"return",
"parameters",
",",
"code",
",",
"structure",
",",
"surface_sample"
] | 40.209302 | 19.976744 |
def regions(self):
    """gets the regions value"""
    # Issue a GET against the /regions endpoint, honoring proxy settings.
    return self._get(
        url="%s/regions" % self.root,
        param_dict={"f": "json"},
        proxy_url=self._proxy_url,
        proxy_port=self._proxy_port,
    )
"def",
"regions",
"(",
"self",
")",
":",
"url",
"=",
"\"%s/regions\"",
"%",
"self",
".",
"root",
"params",
"=",
"{",
"\"f\"",
":",
"\"json\"",
"}",
"return",
"self",
".",
"_get",
"(",
"url",
"=",
"url",
",",
"param_dict",
"=",
"params",
",",
"proxy_url",
"=",
"self",
".",
"_proxy_url",
",",
"proxy_port",
"=",
"self",
".",
"_proxy_port",
")"
] | 38.875 | 9.625 |
def ask_password(*question: Token) -> str:
    """Display *question* as a prompt and read a password from the user."""
    # Render the prompt, then read the reply without echoing it.
    info(*get_ask_tokens(question))
    return read_password()
"def",
"ask_password",
"(",
"*",
"question",
":",
"Token",
")",
"->",
"str",
":",
"tokens",
"=",
"get_ask_tokens",
"(",
"question",
")",
"info",
"(",
"*",
"tokens",
")",
"answer",
"=",
"read_password",
"(",
")",
"return",
"answer"
] | 26.857143 | 9 |
def _to_representation(self, instance):
    """Uncached `to_representation`.

    Serializes ``instance`` (via the fast path when enabled), merges in
    link objects and debug metadata, then tags the result with the
    serializer context.
    """
    if self.enable_optimization:
        data = self._faster_to_representation(instance)
    else:
        data = super(
            WithDynamicSerializerMixin, self
        ).to_representation(instance)

    if settings.ENABLE_LINKS:
        # TODO: Make this function configurable to support other
        # formats like JSON API link objects.
        data = merge_link_object(self, data, instance)

    if self.debug:
        data['_meta'] = {
            'id': instance.pk,
            'type': self.get_plural_name(),
        }

    # tag the representation with the serializer and instance
    return tag_dict(
        data,
        serializer=self,
        instance=instance,
        embed=self.embed,
    )
"def",
"_to_representation",
"(",
"self",
",",
"instance",
")",
":",
"if",
"self",
".",
"enable_optimization",
":",
"representation",
"=",
"self",
".",
"_faster_to_representation",
"(",
"instance",
")",
"else",
":",
"representation",
"=",
"super",
"(",
"WithDynamicSerializerMixin",
",",
"self",
")",
".",
"to_representation",
"(",
"instance",
")",
"if",
"settings",
".",
"ENABLE_LINKS",
":",
"# TODO: Make this function configurable to support other",
"# formats like JSON API link objects.",
"representation",
"=",
"merge_link_object",
"(",
"self",
",",
"representation",
",",
"instance",
")",
"if",
"self",
".",
"debug",
":",
"representation",
"[",
"'_meta'",
"]",
"=",
"{",
"'id'",
":",
"instance",
".",
"pk",
",",
"'type'",
":",
"self",
".",
"get_plural_name",
"(",
")",
"}",
"# tag the representation with the serializer and instance",
"return",
"tag_dict",
"(",
"representation",
",",
"serializer",
"=",
"self",
",",
"instance",
"=",
"instance",
",",
"embed",
"=",
"self",
".",
"embed",
")"
] | 31.16129 | 16.677419 |
def draw(self,
         category,
         num_top_words_to_annotate=4,
         words_to_annotate=None,
         scores=None,
         transform=percentile_alphabetical):
    '''Outdated. MPLD3 drawing.

    Parameters
    ----------
    category : str
        Category whose term frequencies are plotted against the rest.
    num_top_words_to_annotate : int
        Number of top-ranked terms per axis to label on the plot.
    words_to_annotate : list of str, optional
        Additional terms to label regardless of rank.
    scores : array-like, optional
        Precomputed term scores, forwarded to the ranking dataframe.
    transform : callable
        Frequency-to-coordinate transform.

    Returns
    -------
    pd.DataFrame, html of figure
    '''
    # Avoid a mutable default argument; behavior is unchanged for callers.
    if words_to_annotate is None:
        words_to_annotate = []
    try:
        import matplotlib.pyplot as plt
    except ImportError:  # narrowed from bare except: only import failures expected
        raise Exception("matplotlib and mpld3 need to be installed to use this function.")
    try:
        from mpld3 import plugins, fig_to_html
    except ImportError:
        raise Exception("mpld3 need to be installed to use this function.")
    all_categories, other_categories = self._get_category_names(category)
    df = self._term_rank_score_and_frequency_df(all_categories, category, other_categories, scores)
    if self.x_coords is None:
        # NOTE(review): this fills df['x']/df['y'] but the scatter below reads
        # self.x_coords/self.y_coords — confirm the transform call sets those
        # attributes as a side effect, otherwise this path plots nothing.
        df['x'], df['y'] = self._get_coordinates_from_transform_and_jitter_frequencies \
            (category, df, other_categories, transform)
    # Annotate top-ranked terms on either axis, plus any explicitly requested.
    df_to_annotate = df[(df['not category score rank'] <= num_top_words_to_annotate)
                        | (df['category score rank'] <= num_top_words_to_annotate)
                        | df['term'].isin(words_to_annotate)]
    words = list(df['term'])
    font = {'family': 'sans-serif',
            'color': 'black',
            'weight': 'normal',
            'size': 'large'}
    fig, ax = plt.subplots()
    plt.figure(figsize=(10, 10))
    plt.gcf().subplots_adjust(bottom=0.2)
    plt.gcf().subplots_adjust(right=0.2)
    points = ax.scatter(self.x_coords,
                        self.y_coords,
                        c=-df['color_scores'],
                        cmap='seismic',
                        s=10,
                        edgecolors='none',
                        alpha=0.9)
    tooltip = plugins.PointHTMLTooltip(points,
                                       ['<span id=a>%s</span>' % w for w in words],
                                       css='#a {background-color: white;}')
    plugins.connect(fig, tooltip)
    ax.set_ylim([-.2, 1.2])
    ax.set_xlim([-.2, 1.2])
    ax.xaxis.set_ticks([0., 0.5, 1.])
    ax.yaxis.set_ticks([0., 0.5, 1.])
    ax.set_ylabel(category.title() + ' Frequency Percentile', fontdict=font, labelpad=20)
    ax.set_xlabel('Not ' + category.title() + ' Frequency Percentile', fontdict=font, labelpad=20)
    for i, row in df_to_annotate.iterrows():
        # alignment_criteria = row['category score rank'] < row['not category score rank']
        alignment_criteria = i % 2 == 0
        horizontalalignment = 'right' if alignment_criteria else 'left'
        verticalalignment = 'bottom' if alignment_criteria else 'top'
        term = row['term']
        # Fixed: annotate at the plotted y coordinate; the original read the
        # undefined attribute self.y_data (every other use is self.y_coords).
        ax.annotate(term,
                    (self.x_coords[i], self.y_coords[i]),
                    size=15,
                    horizontalalignment=horizontalalignment,
                    verticalalignment=verticalalignment,
                    )
    plt.show()
    return df, fig_to_html(fig)
"def",
"draw",
"(",
"self",
",",
"category",
",",
"num_top_words_to_annotate",
"=",
"4",
",",
"words_to_annotate",
"=",
"[",
"]",
",",
"scores",
"=",
"None",
",",
"transform",
"=",
"percentile_alphabetical",
")",
":",
"try",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"except",
":",
"raise",
"Exception",
"(",
"\"matplotlib and mpld3 need to be installed to use this function.\"",
")",
"try",
":",
"from",
"mpld3",
"import",
"plugins",
",",
"fig_to_html",
"except",
":",
"raise",
"Exception",
"(",
"\"mpld3 need to be installed to use this function.\"",
")",
"all_categories",
",",
"other_categories",
"=",
"self",
".",
"_get_category_names",
"(",
"category",
")",
"df",
"=",
"self",
".",
"_term_rank_score_and_frequency_df",
"(",
"all_categories",
",",
"category",
",",
"other_categories",
",",
"scores",
")",
"if",
"self",
".",
"x_coords",
"is",
"None",
":",
"df",
"[",
"'x'",
"]",
",",
"df",
"[",
"'y'",
"]",
"=",
"self",
".",
"_get_coordinates_from_transform_and_jitter_frequencies",
"(",
"category",
",",
"df",
",",
"other_categories",
",",
"transform",
")",
"df_to_annotate",
"=",
"df",
"[",
"(",
"df",
"[",
"'not category score rank'",
"]",
"<=",
"num_top_words_to_annotate",
")",
"|",
"(",
"df",
"[",
"'category score rank'",
"]",
"<=",
"num_top_words_to_annotate",
")",
"|",
"df",
"[",
"'term'",
"]",
".",
"isin",
"(",
"words_to_annotate",
")",
"]",
"words",
"=",
"list",
"(",
"df",
"[",
"'term'",
"]",
")",
"font",
"=",
"{",
"'family'",
":",
"'sans-serif'",
",",
"'color'",
":",
"'black'",
",",
"'weight'",
":",
"'normal'",
",",
"'size'",
":",
"'large'",
"}",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
")",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"10",
",",
"10",
")",
")",
"plt",
".",
"gcf",
"(",
")",
".",
"subplots_adjust",
"(",
"bottom",
"=",
"0.2",
")",
"plt",
".",
"gcf",
"(",
")",
".",
"subplots_adjust",
"(",
"right",
"=",
"0.2",
")",
"points",
"=",
"ax",
".",
"scatter",
"(",
"self",
".",
"x_coords",
",",
"self",
".",
"y_coords",
",",
"c",
"=",
"-",
"df",
"[",
"'color_scores'",
"]",
",",
"cmap",
"=",
"'seismic'",
",",
"s",
"=",
"10",
",",
"edgecolors",
"=",
"'none'",
",",
"alpha",
"=",
"0.9",
")",
"tooltip",
"=",
"plugins",
".",
"PointHTMLTooltip",
"(",
"points",
",",
"[",
"'<span id=a>%s</span>'",
"%",
"w",
"for",
"w",
"in",
"words",
"]",
",",
"css",
"=",
"'#a {background-color: white;}'",
")",
"plugins",
".",
"connect",
"(",
"fig",
",",
"tooltip",
")",
"ax",
".",
"set_ylim",
"(",
"[",
"-",
".2",
",",
"1.2",
"]",
")",
"ax",
".",
"set_xlim",
"(",
"[",
"-",
".2",
",",
"1.2",
"]",
")",
"ax",
".",
"xaxis",
".",
"set_ticks",
"(",
"[",
"0.",
",",
"0.5",
",",
"1.",
"]",
")",
"ax",
".",
"yaxis",
".",
"set_ticks",
"(",
"[",
"0.",
",",
"0.5",
",",
"1.",
"]",
")",
"ax",
".",
"set_ylabel",
"(",
"category",
".",
"title",
"(",
")",
"+",
"' Frequency Percentile'",
",",
"fontdict",
"=",
"font",
",",
"labelpad",
"=",
"20",
")",
"ax",
".",
"set_xlabel",
"(",
"'Not '",
"+",
"category",
".",
"title",
"(",
")",
"+",
"' Frequency Percentile'",
",",
"fontdict",
"=",
"font",
",",
"labelpad",
"=",
"20",
")",
"for",
"i",
",",
"row",
"in",
"df_to_annotate",
".",
"iterrows",
"(",
")",
":",
"# alignment_criteria = row['category score rank'] < row['not category score rank']",
"alignment_criteria",
"=",
"i",
"%",
"2",
"==",
"0",
"horizontalalignment",
"=",
"'right'",
"if",
"alignment_criteria",
"else",
"'left'",
"verticalalignment",
"=",
"'bottom'",
"if",
"alignment_criteria",
"else",
"'top'",
"term",
"=",
"row",
"[",
"'term'",
"]",
"ax",
".",
"annotate",
"(",
"term",
",",
"(",
"self",
".",
"x_coords",
"[",
"i",
"]",
",",
"self",
".",
"y_data",
"[",
"i",
"]",
")",
",",
"size",
"=",
"15",
",",
"horizontalalignment",
"=",
"horizontalalignment",
",",
"verticalalignment",
"=",
"verticalalignment",
",",
")",
"# texts.append(",
"# ax.text(row['dem freq scaled'], row['rep freq scaled'], row['word'])",
"# )",
"# adjust_text(texts, arrowprops=dict(arrowstyle=\"->\", color='r', lw=0.5))",
"plt",
".",
"show",
"(",
")",
"return",
"df",
",",
"fig_to_html",
"(",
"fig",
")"
] | 41.095238 | 20.928571 |
def actor2ImageData(actor, spacing=(1, 1, 1)):
    """
    Convert a mesh it into volume representation as ``vtkImageData``
    where the foreground (exterior) voxels are 1 and the background
    (interior) voxels are 0.

    Internally the ``vtkPolyDataToImageStencil`` class is used.

    :param actor: mesh-like object exposing ``polydata()``
    :param spacing: voxel spacing along x, y, z

    .. hint:: |mesh2volume| |mesh2volume.py|_
    """
    # https://vtk.org/Wiki/VTK/Examples/Cxx/PolyData/PolyDataToImageData
    pd = actor.polydata()
    whiteImage = vtk.vtkImageData()
    bounds = pd.GetBounds()
    whiteImage.SetSpacing(spacing)
    # compute dimensions from the mesh bounding box and requested spacing
    dim = [0, 0, 0]
    for i in [0, 1, 2]:
        dim[i] = int(np.ceil((bounds[i * 2 + 1] - bounds[i * 2]) / spacing[i]))
    whiteImage.SetDimensions(dim)
    whiteImage.SetExtent(0, dim[0] - 1, 0, dim[1] - 1, 0, dim[2] - 1)
    # origin sits half a voxel inside the lower bounds
    origin = [0, 0, 0]
    origin[0] = bounds[0] + spacing[0] / 2
    origin[1] = bounds[2] + spacing[1] / 2
    origin[2] = bounds[4] + spacing[2] / 2
    whiteImage.SetOrigin(origin)
    whiteImage.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
    # fill the image with foreground voxels:
    # (hoisted the scalar-array lookup out of the loop — the original called
    # GetPointData().GetScalars() once per voxel)
    inval = 255
    scalars = whiteImage.GetPointData().GetScalars()
    for i in range(whiteImage.GetNumberOfPoints()):
        scalars.SetTuple1(i, inval)
    # polygonal data --> image stencil:
    pol2stenc = vtk.vtkPolyDataToImageStencil()
    pol2stenc.SetInputData(pd)
    pol2stenc.SetOutputOrigin(origin)
    pol2stenc.SetOutputSpacing(spacing)
    pol2stenc.SetOutputWholeExtent(whiteImage.GetExtent())
    pol2stenc.Update()
    # cut the corresponding white image and set the background:
    outval = 0
    imgstenc = vtk.vtkImageStencil()
    imgstenc.SetInputData(whiteImage)
    imgstenc.SetStencilConnection(pol2stenc.GetOutputPort())
    imgstenc.ReverseStencilOff()
    imgstenc.SetBackgroundValue(outval)
    imgstenc.Update()
    return imgstenc.GetOutput()
"def",
"actor2ImageData",
"(",
"actor",
",",
"spacing",
"=",
"(",
"1",
",",
"1",
",",
"1",
")",
")",
":",
"# https://vtk.org/Wiki/VTK/Examples/Cxx/PolyData/PolyDataToImageData",
"pd",
"=",
"actor",
".",
"polydata",
"(",
")",
"whiteImage",
"=",
"vtk",
".",
"vtkImageData",
"(",
")",
"bounds",
"=",
"pd",
".",
"GetBounds",
"(",
")",
"whiteImage",
".",
"SetSpacing",
"(",
"spacing",
")",
"# compute dimensions",
"dim",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
"for",
"i",
"in",
"[",
"0",
",",
"1",
",",
"2",
"]",
":",
"dim",
"[",
"i",
"]",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"(",
"bounds",
"[",
"i",
"*",
"2",
"+",
"1",
"]",
"-",
"bounds",
"[",
"i",
"*",
"2",
"]",
")",
"/",
"spacing",
"[",
"i",
"]",
")",
")",
"whiteImage",
".",
"SetDimensions",
"(",
"dim",
")",
"whiteImage",
".",
"SetExtent",
"(",
"0",
",",
"dim",
"[",
"0",
"]",
"-",
"1",
",",
"0",
",",
"dim",
"[",
"1",
"]",
"-",
"1",
",",
"0",
",",
"dim",
"[",
"2",
"]",
"-",
"1",
")",
"origin",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
"origin",
"[",
"0",
"]",
"=",
"bounds",
"[",
"0",
"]",
"+",
"spacing",
"[",
"0",
"]",
"/",
"2",
"origin",
"[",
"1",
"]",
"=",
"bounds",
"[",
"2",
"]",
"+",
"spacing",
"[",
"1",
"]",
"/",
"2",
"origin",
"[",
"2",
"]",
"=",
"bounds",
"[",
"4",
"]",
"+",
"spacing",
"[",
"2",
"]",
"/",
"2",
"whiteImage",
".",
"SetOrigin",
"(",
"origin",
")",
"whiteImage",
".",
"AllocateScalars",
"(",
"vtk",
".",
"VTK_UNSIGNED_CHAR",
",",
"1",
")",
"# fill the image with foreground voxels:",
"inval",
"=",
"255",
"count",
"=",
"whiteImage",
".",
"GetNumberOfPoints",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"count",
")",
":",
"whiteImage",
".",
"GetPointData",
"(",
")",
".",
"GetScalars",
"(",
")",
".",
"SetTuple1",
"(",
"i",
",",
"inval",
")",
"# polygonal data --> image stencil:",
"pol2stenc",
"=",
"vtk",
".",
"vtkPolyDataToImageStencil",
"(",
")",
"pol2stenc",
".",
"SetInputData",
"(",
"pd",
")",
"pol2stenc",
".",
"SetOutputOrigin",
"(",
"origin",
")",
"pol2stenc",
".",
"SetOutputSpacing",
"(",
"spacing",
")",
"pol2stenc",
".",
"SetOutputWholeExtent",
"(",
"whiteImage",
".",
"GetExtent",
"(",
")",
")",
"pol2stenc",
".",
"Update",
"(",
")",
"# cut the corresponding white image and set the background:",
"outval",
"=",
"0",
"imgstenc",
"=",
"vtk",
".",
"vtkImageStencil",
"(",
")",
"imgstenc",
".",
"SetInputData",
"(",
"whiteImage",
")",
"imgstenc",
".",
"SetStencilConnection",
"(",
"pol2stenc",
".",
"GetOutputPort",
"(",
")",
")",
"imgstenc",
".",
"ReverseStencilOff",
"(",
")",
"imgstenc",
".",
"SetBackgroundValue",
"(",
"outval",
")",
"imgstenc",
".",
"Update",
"(",
")",
"return",
"imgstenc",
".",
"GetOutput",
"(",
")"
] | 33.425926 | 16.87037 |
def gzip_open_text(path, encoding=None):
    """Opens a plain-text file that may be gzip'ed.

    Parameters
    ----------
    path : str
        The file.
    encoding : str, optional
        The encoding to use. Defaults to the system default encoding.

    Returns
    -------
    file-like
        A file-like object yielding decoded text.

    Notes
    -----
    Generally, reading gzip'ed files with gzip.open is very slow, and it is
    preferable to pipe the file into the python script using ``gunzip -c``.
    The script then reads the file from stdin.
    """
    if encoding is None:
        encoding = sys.getdefaultencoding()
    assert os.path.isfile(path)
    # Probe the first byte to detect gzip compression. Use a context manager
    # so the probe handle is always closed (the original leaked it).
    is_compressed = False
    try:
        with gzip.open(path, mode='rb') as probe:
            probe.read(1)
    except IOError:
        pass
    else:
        is_compressed = True
    if is_compressed:
        if six.PY2:
            import codecs
            zf = gzip.open(path, 'rb')
            reader = codecs.getreader(encoding)
            fh = reader(zf)
        else:
            fh = gzip.open(path, mode='rt', encoding=encoding)
    else:
        # the following works in Python 2.7, thanks to future
        fh = open(path, mode='r', encoding=encoding)
    return fh
"def",
"gzip_open_text",
"(",
"path",
",",
"encoding",
"=",
"None",
")",
":",
"if",
"encoding",
"is",
"None",
":",
"encoding",
"=",
"sys",
".",
"getdefaultencoding",
"(",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
"is_compressed",
"=",
"False",
"try",
":",
"gzip",
".",
"open",
"(",
"path",
",",
"mode",
"=",
"'rb'",
")",
".",
"read",
"(",
"1",
")",
"except",
"IOError",
":",
"pass",
"else",
":",
"is_compressed",
"=",
"True",
"if",
"is_compressed",
":",
"if",
"six",
".",
"PY2",
":",
"import",
"codecs",
"zf",
"=",
"gzip",
".",
"open",
"(",
"path",
",",
"'rb'",
")",
"reader",
"=",
"codecs",
".",
"getreader",
"(",
"encoding",
")",
"fh",
"=",
"reader",
"(",
"zf",
")",
"else",
":",
"fh",
"=",
"gzip",
".",
"open",
"(",
"path",
",",
"mode",
"=",
"'rt'",
",",
"encoding",
"=",
"encoding",
")",
"else",
":",
"# the following works in Python 2.7, thanks to future",
"fh",
"=",
"open",
"(",
"path",
",",
"mode",
"=",
"'r'",
",",
"encoding",
"=",
"encoding",
")",
"return",
"fh"
] | 22.795918 | 22.591837 |
def LateBind(self, target=None):
    """Bind the field descriptor to the owner once the target is defined.

    Args:
      target: The type this descriptor should be bound to; it was not yet
        available when the descriptor was first constructed.
    """
    self.type = target
    # Resolving the type lets the primitive encoder be (re)computed.
    self._GetPrimitiveEncoder()
    # Now re-add the descriptor to the owner protobuf.
    self.late_bound = False
    self.owner.AddDescriptor(self)
self.owner.AddDescriptor(self) | [
"def",
"LateBind",
"(",
"self",
",",
"target",
"=",
"None",
")",
":",
"self",
".",
"type",
"=",
"target",
"self",
".",
"_GetPrimitiveEncoder",
"(",
")",
"# Now re-add the descriptor to the owner protobuf.",
"self",
".",
"late_bound",
"=",
"False",
"self",
".",
"owner",
".",
"AddDescriptor",
"(",
"self",
")"
] | 34.5 | 13.5 |
def send_until(self,
               send,
               regexps,
               not_there=False,
               cadence=2,
               retries=100,
               echo=None,
               note=None,
               debug_command=None,
               pause_point_on_fail=True,
               nonewline=False,
               loglevel=logging.INFO):
    """Send string on a regular cadence until a string is either seen, or the timeout is triggered.

    @param send:                See send()
    @param regexps:             List of regexps to wait for.
    @param not_there:           If True, wait until this a regexp is not seen in the output. If False
                                wait until a regexp is seen in the output (default)
    @param cadence:             Seconds to sleep between attempts.
    @param retries:             Maximum number of attempts before giving up.
    @param echo:                See send()
    @param note:                See send()
    @param debug_command:       Command to send if the output was not there.
    @param pause_point_on_fail: Drop to an interactive pause point when all retries are exhausted.
    @param nonewline:           See send()
    @param loglevel:            Log level for progress messages.

    @return: True when the condition was met (or after a pause point on failure), False otherwise.
    """
    shutit = self.shutit
    shutit.handle_note(note, command=send + ' \nuntil one of these seen:\n' + str(regexps))
    shutit.log('Sending: "' + send + '" until one of these regexps seen: ' + str(regexps), level=loglevel)
    # Normalise regexps into a list of patterns.
    if isinstance(regexps, str):
        regexps = [regexps]
    if not isinstance(regexps, list):
        shutit.fail('regexps should be list') # pragma: no cover
    while retries > 0:
        retries -= 1
        echo = shutit.get_echo_override(echo)
        output = self.send_and_get_output(send,
                                          retry=1,
                                          strip=True,
                                          echo=echo,
                                          loglevel=loglevel,
                                          nonewline=nonewline,
                                          fail_on_empty_before=False)
        shutit.log('Failed to match regexps -> ' + str(regexps) + ' <- retries left:' + str(retries), level=loglevel)
        if not not_there:
            # Success as soon as ANY of the regexps matches the output.
            for regexp in regexps:
                if not shutit_util.check_regexp(regexp):
                    shutit.fail('Illegal regexp found in send_until call: ' + regexp) # pragma: no cover
                if shutit.match_string(output, regexp):
                    return True
        else:
            # Only return if _not_ seen in the output
            missing = False
            for regexp in regexps:
                if not shutit_util.check_regexp(regexp):
                    shutit.fail('Illegal regexp found in send_until call: ' + regexp) # pragma: no cover
                if not shutit.match_string(output, regexp):
                    missing = True
                    break
            if missing:
                shutit.handle_note_after(note=note)
                return True
        if debug_command is not None:
            # Fire the diagnostic command between attempts to aid debugging.
            self.send(ShutItSendSpec(self,
                                     send=debug_command,
                                     check_exit=False,
                                     echo=echo,
                                     nonewline=nonewline,
                                     loglevel=loglevel,
                                     ignore_background=True))
        time.sleep(cadence)
    # All retries exhausted without matching.
    shutit.handle_note_after(note=note)
    if pause_point_on_fail:
        shutit.pause_point('send_until failed sending: ' + send + '\r\nand expecting: ' + str(regexps))
        # NOTE(review): returns True after the pause point even though the
        # condition never matched — presumably the user fixed the issue
        # interactively; confirm this is intentional.
        return True
    return False
return False | [
"def",
"send_until",
"(",
"self",
",",
"send",
",",
"regexps",
",",
"not_there",
"=",
"False",
",",
"cadence",
"=",
"2",
",",
"retries",
"=",
"100",
",",
"echo",
"=",
"None",
",",
"note",
"=",
"None",
",",
"debug_command",
"=",
"None",
",",
"pause_point_on_fail",
"=",
"True",
",",
"nonewline",
"=",
"False",
",",
"loglevel",
"=",
"logging",
".",
"INFO",
")",
":",
"shutit",
"=",
"self",
".",
"shutit",
"shutit",
".",
"handle_note",
"(",
"note",
",",
"command",
"=",
"send",
"+",
"' \\nuntil one of these seen:\\n'",
"+",
"str",
"(",
"regexps",
")",
")",
"shutit",
".",
"log",
"(",
"'Sending: \"'",
"+",
"send",
"+",
"'\" until one of these regexps seen: '",
"+",
"str",
"(",
"regexps",
")",
",",
"level",
"=",
"loglevel",
")",
"if",
"isinstance",
"(",
"regexps",
",",
"str",
")",
":",
"regexps",
"=",
"[",
"regexps",
"]",
"if",
"not",
"isinstance",
"(",
"regexps",
",",
"list",
")",
":",
"shutit",
".",
"fail",
"(",
"'regexps should be list'",
")",
"# pragma: no cover",
"while",
"retries",
">",
"0",
":",
"retries",
"-=",
"1",
"echo",
"=",
"shutit",
".",
"get_echo_override",
"(",
"echo",
")",
"output",
"=",
"self",
".",
"send_and_get_output",
"(",
"send",
",",
"retry",
"=",
"1",
",",
"strip",
"=",
"True",
",",
"echo",
"=",
"echo",
",",
"loglevel",
"=",
"loglevel",
",",
"nonewline",
"=",
"nonewline",
",",
"fail_on_empty_before",
"=",
"False",
")",
"shutit",
".",
"log",
"(",
"'Failed to match regexps -> '",
"+",
"str",
"(",
"regexps",
")",
"+",
"' <- retries left:'",
"+",
"str",
"(",
"retries",
")",
",",
"level",
"=",
"loglevel",
")",
"if",
"not",
"not_there",
":",
"for",
"regexp",
"in",
"regexps",
":",
"if",
"not",
"shutit_util",
".",
"check_regexp",
"(",
"regexp",
")",
":",
"shutit",
".",
"fail",
"(",
"'Illegal regexp found in send_until call: '",
"+",
"regexp",
")",
"# pragma: no cover",
"if",
"shutit",
".",
"match_string",
"(",
"output",
",",
"regexp",
")",
":",
"return",
"True",
"else",
":",
"# Only return if _not_ seen in the output",
"missing",
"=",
"False",
"for",
"regexp",
"in",
"regexps",
":",
"if",
"not",
"shutit_util",
".",
"check_regexp",
"(",
"regexp",
")",
":",
"shutit",
".",
"fail",
"(",
"'Illegal regexp found in send_until call: '",
"+",
"regexp",
")",
"# pragma: no cover",
"if",
"not",
"shutit",
".",
"match_string",
"(",
"output",
",",
"regexp",
")",
":",
"missing",
"=",
"True",
"break",
"if",
"missing",
":",
"shutit",
".",
"handle_note_after",
"(",
"note",
"=",
"note",
")",
"return",
"True",
"if",
"debug_command",
"is",
"not",
"None",
":",
"self",
".",
"send",
"(",
"ShutItSendSpec",
"(",
"self",
",",
"send",
"=",
"debug_command",
",",
"check_exit",
"=",
"False",
",",
"echo",
"=",
"echo",
",",
"nonewline",
"=",
"nonewline",
",",
"loglevel",
"=",
"loglevel",
",",
"ignore_background",
"=",
"True",
")",
")",
"time",
".",
"sleep",
"(",
"cadence",
")",
"shutit",
".",
"handle_note_after",
"(",
"note",
"=",
"note",
")",
"if",
"pause_point_on_fail",
":",
"shutit",
".",
"pause_point",
"(",
"'send_until failed sending: '",
"+",
"send",
"+",
"'\\r\\nand expecting: '",
"+",
"str",
"(",
"regexps",
")",
")",
"return",
"True",
"return",
"False"
] | 41.472222 | 17.611111 |
def plotGenCost(generators):
    """ Plots the costs of the given generators.

    Each generator contributes one curve: piecewise-linear models are drawn
    from their (power, cost) breakpoints, polynomial models are evaluated
    over [p_min, p_max] in 5 MW steps.

    Raises ValueError for an unrecognised cost model.
    """
    figure()
    plots = []
    for generator in generators:
        if generator.pcost_model == PW_LINEAR:
            x = [x for x, _ in generator.p_cost]
            y = [y for _, y in generator.p_cost]
        elif generator.pcost_model == POLYNOMIAL:
            x = scipy.arange(generator.p_min, generator.p_max, 5)
            y = scipy.polyval(scipy.array(generator.p_cost), x)
        else:
            # The original used a bare `raise` with no active exception,
            # which fails with RuntimeError; raise a descriptive error.
            raise ValueError(
                "unsupported cost model: %s" % generator.pcost_model)
        plots.append(plot(x, y))
    xlabel("P (MW)")
    ylabel("Cost ($)")
    legend(plots, [g.name for g in generators])
    show()
"def",
"plotGenCost",
"(",
"generators",
")",
":",
"figure",
"(",
")",
"plots",
"=",
"[",
"]",
"for",
"generator",
"in",
"generators",
":",
"if",
"generator",
".",
"pcost_model",
"==",
"PW_LINEAR",
":",
"x",
"=",
"[",
"x",
"for",
"x",
",",
"_",
"in",
"generator",
".",
"p_cost",
"]",
"y",
"=",
"[",
"y",
"for",
"_",
",",
"y",
"in",
"generator",
".",
"p_cost",
"]",
"elif",
"generator",
".",
"pcost_model",
"==",
"POLYNOMIAL",
":",
"x",
"=",
"scipy",
".",
"arange",
"(",
"generator",
".",
"p_min",
",",
"generator",
".",
"p_max",
",",
"5",
")",
"y",
"=",
"scipy",
".",
"polyval",
"(",
"scipy",
".",
"array",
"(",
"generator",
".",
"p_cost",
")",
",",
"x",
")",
"else",
":",
"raise",
"plots",
".",
"append",
"(",
"plot",
"(",
"x",
",",
"y",
")",
")",
"xlabel",
"(",
"\"P (MW)\"",
")",
"ylabel",
"(",
"\"Cost ($)\"",
")",
"legend",
"(",
"plots",
",",
"[",
"g",
".",
"name",
"for",
"g",
"in",
"generators",
"]",
")",
"show",
"(",
")"
] | 33.105263 | 14.631579 |
def populate_timestamps(self, update_header=False):
    """Populate the time axis (MJD).

    If ``update_header`` is True, only the (possibly offset) start time is
    returned; otherwise an array of timestamps for the selected
    integrations is returned.
    """
    # Determine the requested integration window [ii_start, ii_stop).
    ii_start = self.t_start if self.t_start else 0
    ii_stop = self.t_stop if self.t_stop else self.n_ints_in_file

    # Start time (MJD) and sample period (seconds) from the header.
    t0 = self.header[b'tstart']
    t_delt = self.header[b'tsamp']

    # Convert the sample period from seconds to days and offset by t0.
    if update_header:
        return ii_start * t_delt / 24. / 60. / 60. + t0
    return np.arange(ii_start, ii_stop) * t_delt / 24. / 60. / 60. + t0
"def",
"populate_timestamps",
"(",
"self",
",",
"update_header",
"=",
"False",
")",
":",
"#Check to see how many integrations requested",
"ii_start",
",",
"ii_stop",
"=",
"0",
",",
"self",
".",
"n_ints_in_file",
"if",
"self",
".",
"t_start",
":",
"ii_start",
"=",
"self",
".",
"t_start",
"if",
"self",
".",
"t_stop",
":",
"ii_stop",
"=",
"self",
".",
"t_stop",
"## Setup time axis",
"t0",
"=",
"self",
".",
"header",
"[",
"b'tstart'",
"]",
"t_delt",
"=",
"self",
".",
"header",
"[",
"b'tsamp'",
"]",
"if",
"update_header",
":",
"timestamps",
"=",
"ii_start",
"*",
"t_delt",
"/",
"24.",
"/",
"60.",
"/",
"60.",
"+",
"t0",
"else",
":",
"timestamps",
"=",
"np",
".",
"arange",
"(",
"ii_start",
",",
"ii_stop",
")",
"*",
"t_delt",
"/",
"24.",
"/",
"60.",
"/",
"60.",
"+",
"t0",
"return",
"timestamps"
] | 30.318182 | 17.727273 |
def _set_preferences(self, node):
    '''
    Set preferences.

    Attaches a <preferences> subtree (package manager, version and image
    type attributes) to the given XML node.

    :return: the created <preferences> element
    '''
    pref = etree.SubElement(node, 'preferences')

    pacman = etree.SubElement(pref, 'packagemanager')
    pacman.text = self._get_package_manager()

    p_version = etree.SubElement(pref, 'version')
    p_version.text = '0.0.1'

    p_type = etree.SubElement(pref, 'type')
    p_type.set('image', 'vmx')

    # Take the filesystem type from the first /dev disk, defaulting to ext3.
    for disk_id, disk_data in self._data.system.get('disks', {}).items():
        if disk_id.startswith('/dev'):
            p_type.set('filesystem', disk_data.get('type') or 'ext3')
            break

    # Remaining image-type attributes, set in the original order.
    for attr, value in [
            ('installiso', 'true'),
            ('boot', "vmxboot/suse-leap42.1"),
            ('format', self.format),
            ('bootloader', 'grub2'),
            ('timezone', __salt__['timezone.get_zone']()),
            ('hwclock', __salt__['timezone.get_hwclock']()),
    ]:
        p_type.set(attr, value)

    return pref
"def",
"_set_preferences",
"(",
"self",
",",
"node",
")",
":",
"pref",
"=",
"etree",
".",
"SubElement",
"(",
"node",
",",
"'preferences'",
")",
"pacman",
"=",
"etree",
".",
"SubElement",
"(",
"pref",
",",
"'packagemanager'",
")",
"pacman",
".",
"text",
"=",
"self",
".",
"_get_package_manager",
"(",
")",
"p_version",
"=",
"etree",
".",
"SubElement",
"(",
"pref",
",",
"'version'",
")",
"p_version",
".",
"text",
"=",
"'0.0.1'",
"p_type",
"=",
"etree",
".",
"SubElement",
"(",
"pref",
",",
"'type'",
")",
"p_type",
".",
"set",
"(",
"'image'",
",",
"'vmx'",
")",
"for",
"disk_id",
",",
"disk_data",
"in",
"self",
".",
"_data",
".",
"system",
".",
"get",
"(",
"'disks'",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"if",
"disk_id",
".",
"startswith",
"(",
"'/dev'",
")",
":",
"p_type",
".",
"set",
"(",
"'filesystem'",
",",
"disk_data",
".",
"get",
"(",
"'type'",
")",
"or",
"'ext3'",
")",
"break",
"p_type",
".",
"set",
"(",
"'installiso'",
",",
"'true'",
")",
"p_type",
".",
"set",
"(",
"'boot'",
",",
"\"vmxboot/suse-leap42.1\"",
")",
"p_type",
".",
"set",
"(",
"'format'",
",",
"self",
".",
"format",
")",
"p_type",
".",
"set",
"(",
"'bootloader'",
",",
"'grub2'",
")",
"p_type",
".",
"set",
"(",
"'timezone'",
",",
"__salt__",
"[",
"'timezone.get_zone'",
"]",
"(",
")",
")",
"p_type",
".",
"set",
"(",
"'hwclock'",
",",
"__salt__",
"[",
"'timezone.get_hwclock'",
"]",
"(",
")",
")",
"return",
"pref"
] | 35.259259 | 18.888889 |
def get_resource_ids_by_bins(self, bin_ids):
"""Gets the list of ``Resource Ids`` corresponding to a list of ``Bin`` objects.
arg: bin_ids (osid.id.IdList): list of bin ``Ids``
return: (osid.id.IdList) - list of resource ``Ids``
raise: NullArgument - ``bin_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resource_ids_by_bins
id_list = []
for resource in self.get_resources_by_bins(bin_ids):
id_list.append(resource.get_id())
return IdList(id_list) | [
"def",
"get_resource_ids_by_bins",
"(",
"self",
",",
"bin_ids",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceBinSession.get_resource_ids_by_bins",
"id_list",
"=",
"[",
"]",
"for",
"resource",
"in",
"self",
".",
"get_resources_by_bins",
"(",
"bin_ids",
")",
":",
"id_list",
".",
"append",
"(",
"resource",
".",
"get_id",
"(",
")",
")",
"return",
"IdList",
"(",
"id_list",
")"
] | 44.764706 | 16.705882 |
def sendIq(self, entity):
"""
:type entity: IqProtocolEntity
"""
if entity.getType() == IqProtocolEntity.TYPE_SET and entity.getXmlns() == "w:m":
#media upload!
self._sendIq(entity, self.onRequestUploadSuccess, self.onRequestUploadError) | [
"def",
"sendIq",
"(",
"self",
",",
"entity",
")",
":",
"if",
"entity",
".",
"getType",
"(",
")",
"==",
"IqProtocolEntity",
".",
"TYPE_SET",
"and",
"entity",
".",
"getXmlns",
"(",
")",
"==",
"\"w:m\"",
":",
"#media upload!",
"self",
".",
"_sendIq",
"(",
"entity",
",",
"self",
".",
"onRequestUploadSuccess",
",",
"self",
".",
"onRequestUploadError",
")"
] | 41 | 18.142857 |
def get_args():
"""Get the script arguments."""
description = "wal - Generate colorschemes on the fly"
arg = argparse.ArgumentParser(description=description)
arg.add_argument("-a", metavar="\"alpha\"",
help="Set terminal background transparency. \
*Only works in URxvt*")
arg.add_argument("-b", metavar="background",
help="Custom background color to use.")
arg.add_argument("--backend", metavar="backend",
help="Which color backend to use. \
Use 'wal --backend' to list backends.",
const="list_backends", type=str, nargs="?")
arg.add_argument("--theme", "-f", metavar="/path/to/file or theme_name",
help="Which colorscheme file to use. \
Use 'wal --theme' to list builtin themes.",
const="list_themes", nargs="?")
arg.add_argument("--iterative", action="store_true",
help="When pywal is given a directory as input and this "
"flag is used: Go through the images in order "
"instead of shuffled.")
arg.add_argument("--saturate", metavar="0.0-1.0",
help="Set the color saturation.")
arg.add_argument("--preview", action="store_true",
help="Print the current color palette.")
arg.add_argument("--vte", action="store_true",
help="Fix text-artifacts printed in VTE terminals.")
arg.add_argument("-c", action="store_true",
help="Delete all cached colorschemes.")
arg.add_argument("-i", metavar="\"/path/to/img.jpg\"",
help="Which image or directory to use.")
arg.add_argument("-l", action="store_true",
help="Generate a light colorscheme.")
arg.add_argument("-n", action="store_true",
help="Skip setting the wallpaper.")
arg.add_argument("-o", metavar="\"script_name\"", action="append",
help="External script to run after \"wal\".")
arg.add_argument("-q", action="store_true",
help="Quiet mode, don\'t print anything.")
arg.add_argument("-r", action="store_true",
help="'wal -r' is deprecated: Use \
(cat ~/.cache/wal/sequences &) instead.")
arg.add_argument("-R", action="store_true",
help="Restore previous colorscheme.")
arg.add_argument("-s", action="store_true",
help="Skip changing colors in terminals.")
arg.add_argument("-t", action="store_true",
help="Skip changing colors in tty.")
arg.add_argument("-v", action="store_true",
help="Print \"wal\" version.")
arg.add_argument("-e", action="store_true",
help="Skip reloading gtk/xrdb/i3/sway/polybar")
return arg | [
"def",
"get_args",
"(",
")",
":",
"description",
"=",
"\"wal - Generate colorschemes on the fly\"",
"arg",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"description",
")",
"arg",
".",
"add_argument",
"(",
"\"-a\"",
",",
"metavar",
"=",
"\"\\\"alpha\\\"\"",
",",
"help",
"=",
"\"Set terminal background transparency. \\\n *Only works in URxvt*\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-b\"",
",",
"metavar",
"=",
"\"background\"",
",",
"help",
"=",
"\"Custom background color to use.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"--backend\"",
",",
"metavar",
"=",
"\"backend\"",
",",
"help",
"=",
"\"Which color backend to use. \\\n Use 'wal --backend' to list backends.\"",
",",
"const",
"=",
"\"list_backends\"",
",",
"type",
"=",
"str",
",",
"nargs",
"=",
"\"?\"",
")",
"arg",
".",
"add_argument",
"(",
"\"--theme\"",
",",
"\"-f\"",
",",
"metavar",
"=",
"\"/path/to/file or theme_name\"",
",",
"help",
"=",
"\"Which colorscheme file to use. \\\n Use 'wal --theme' to list builtin themes.\"",
",",
"const",
"=",
"\"list_themes\"",
",",
"nargs",
"=",
"\"?\"",
")",
"arg",
".",
"add_argument",
"(",
"\"--iterative\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"When pywal is given a directory as input and this \"",
"\"flag is used: Go through the images in order \"",
"\"instead of shuffled.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"--saturate\"",
",",
"metavar",
"=",
"\"0.0-1.0\"",
",",
"help",
"=",
"\"Set the color saturation.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"--preview\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Print the current color palette.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"--vte\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Fix text-artifacts printed in VTE terminals.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-c\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Delete all cached colorschemes.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-i\"",
",",
"metavar",
"=",
"\"\\\"/path/to/img.jpg\\\"\"",
",",
"help",
"=",
"\"Which image or directory to use.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-l\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Generate a light colorscheme.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-n\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Skip setting the wallpaper.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-o\"",
",",
"metavar",
"=",
"\"\\\"script_name\\\"\"",
",",
"action",
"=",
"\"append\"",
",",
"help",
"=",
"\"External script to run after \\\"wal\\\".\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-q\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Quiet mode, don\\'t print anything.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-r\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"'wal -r' is deprecated: Use \\\n (cat ~/.cache/wal/sequences &) instead.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-R\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Restore previous colorscheme.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-s\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Skip changing colors in terminals.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-t\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Skip changing colors in tty.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-v\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Print \\\"wal\\\" version.\"",
")",
"arg",
".",
"add_argument",
"(",
"\"-e\"",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Skip reloading gtk/xrdb/i3/sway/polybar\"",
")",
"return",
"arg"
] | 39.594595 | 23.743243 |
def search_database(word):
'''offline search.'''
conn = sqlite3.connect(os.path.join(DEFAULT_PATH, 'word.db'))
curs = conn.cursor()
curs.execute(r'SELECT expl, pr FROM Word WHERE name LIKE "%s%%"' % word)
res = curs.fetchall()
if res:
print(colored(word + ' 在数据库中存在', 'white', 'on_green'))
print()
print(colored('★ ' * res[0][1], 'red'), colored('☆ ' * (5 - res[0][1]), 'yellow'), sep='')
colorful_print(res[0][0])
else:
print(colored(word + ' 不在本地,从有道词典查询', 'white', 'on_red'))
search_online(word)
input_msg = '若存入本地,请输入优先级(1~5) ,否则 Enter 跳过\n>>> '
if sys.version_info[0] == 2:
add_in_db_pr = raw_input(input_msg)
else:
add_in_db_pr = input(input_msg)
if add_in_db_pr and add_in_db_pr.isdigit():
if(int(add_in_db_pr) >= 1 and int(add_in_db_pr) <= 5):
add_word(word, int(add_in_db_pr))
print(colored('单词 {word} 已加入数据库中'.format(word=word), 'white', 'on_red'))
curs.close()
conn.close() | [
"def",
"search_database",
"(",
"word",
")",
":",
"conn",
"=",
"sqlite3",
".",
"connect",
"(",
"os",
".",
"path",
".",
"join",
"(",
"DEFAULT_PATH",
",",
"'word.db'",
")",
")",
"curs",
"=",
"conn",
".",
"cursor",
"(",
")",
"curs",
".",
"execute",
"(",
"r'SELECT expl, pr FROM Word WHERE name LIKE \"%s%%\"'",
"%",
"word",
")",
"res",
"=",
"curs",
".",
"fetchall",
"(",
")",
"if",
"res",
":",
"print",
"(",
"colored",
"(",
"word",
"+",
"' 在数据库中存在', 'white', 'on",
"_",
"reen'))",
"",
"",
"",
"",
"print",
"(",
")",
"print",
"(",
"colored",
"(",
"'★ ' *",
"r",
"s[0",
"]",
"[",
"1",
"]",
",",
" ",
"'",
"ed'),",
" ",
"c",
"lored('",
"☆",
" ' * (",
" ",
" ",
"r",
"s",
"0][",
"1",
"]",
")",
",",
" ",
"'",
"y",
"e",
"low'), s",
"e",
"p",
"'')",
"",
"",
"",
"colorful_print",
"(",
"res",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"else",
":",
"print",
"(",
"colored",
"(",
"word",
"+",
"' 不在本地,从有道词典查询', 'white', 'on_red'))",
"",
"",
"",
"",
"",
"",
"search_online",
"(",
"word",
")",
"input_msg",
"=",
"'若存入本地,请输入优先级(1~5) ,否则 Enter 跳过\\n>>> '",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"2",
":",
"add_in_db_pr",
"=",
"raw_input",
"(",
"input_msg",
")",
"else",
":",
"add_in_db_pr",
"=",
"input",
"(",
"input_msg",
")",
"if",
"add_in_db_pr",
"and",
"add_in_db_pr",
".",
"isdigit",
"(",
")",
":",
"if",
"(",
"int",
"(",
"add_in_db_pr",
")",
">=",
"1",
"and",
"int",
"(",
"add_in_db_pr",
")",
"<=",
"5",
")",
":",
"add_word",
"(",
"word",
",",
"int",
"(",
"add_in_db_pr",
")",
")",
"print",
"(",
"colored",
"(",
"'单词 {word} 已加入数据库中'.format(word=word)",
",",
" 'whit",
"e",
"', '",
"o",
"n_re",
"d",
"'",
")",
"",
"",
"",
"",
"curs",
".",
"close",
"(",
")",
"conn",
".",
"close",
"(",
")"
] | 38.666667 | 22.666667 |
def is_iterable(val):
"""
Check if val is not a list, but is a collections.Iterable type. This is used to determine
when list() should be called on val
>>> l = [1, 2]
>>> is_iterable(l)
False
>>> is_iterable(iter(l))
True
:param val: value to check
:return: True if it is not a list, but is a collections.Iterable
"""
if isinstance(val, list):
return False
return isinstance(val, collections.Iterable) | [
"def",
"is_iterable",
"(",
"val",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"return",
"False",
"return",
"isinstance",
"(",
"val",
",",
"collections",
".",
"Iterable",
")"
] | 26.294118 | 20.294118 |
def Parse(self, persistence, knowledge_base, download_pathtype):
"""Convert persistence collector output to downloadable rdfvalues."""
pathspecs = []
if isinstance(persistence, rdf_client.WindowsServiceInformation):
if persistence.HasField("binary"):
pathspecs.append(persistence.binary.pathspec)
elif persistence.HasField("image_path"):
pathspecs = self._GetFilePaths(persistence.image_path,
download_pathtype, knowledge_base)
if isinstance(
persistence,
rdf_client_fs.StatEntry) and persistence.HasField("registry_type"):
pathspecs = self._GetFilePaths(persistence.registry_data.string,
download_pathtype, knowledge_base)
for pathspec in pathspecs:
yield rdf_standard.PersistenceFile(pathspec=pathspec) | [
"def",
"Parse",
"(",
"self",
",",
"persistence",
",",
"knowledge_base",
",",
"download_pathtype",
")",
":",
"pathspecs",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"persistence",
",",
"rdf_client",
".",
"WindowsServiceInformation",
")",
":",
"if",
"persistence",
".",
"HasField",
"(",
"\"binary\"",
")",
":",
"pathspecs",
".",
"append",
"(",
"persistence",
".",
"binary",
".",
"pathspec",
")",
"elif",
"persistence",
".",
"HasField",
"(",
"\"image_path\"",
")",
":",
"pathspecs",
"=",
"self",
".",
"_GetFilePaths",
"(",
"persistence",
".",
"image_path",
",",
"download_pathtype",
",",
"knowledge_base",
")",
"if",
"isinstance",
"(",
"persistence",
",",
"rdf_client_fs",
".",
"StatEntry",
")",
"and",
"persistence",
".",
"HasField",
"(",
"\"registry_type\"",
")",
":",
"pathspecs",
"=",
"self",
".",
"_GetFilePaths",
"(",
"persistence",
".",
"registry_data",
".",
"string",
",",
"download_pathtype",
",",
"knowledge_base",
")",
"for",
"pathspec",
"in",
"pathspecs",
":",
"yield",
"rdf_standard",
".",
"PersistenceFile",
"(",
"pathspec",
"=",
"pathspec",
")"
] | 44.263158 | 22.947368 |
def stress_positions(stress: str, scansion: str) -> List[int]:
"""
Given a stress value and a scansion line, return the index positions of the stresses.
:param stress:
:param scansion:
:return:
>>> stress_positions("-", " - U U - UU - U U")
[0, 3, 6]
"""
line = scansion.replace(" ", "")
stresses = []
for idx, char in enumerate(line):
if char == stress:
stresses.append(idx)
return stresses | [
"def",
"stress_positions",
"(",
"stress",
":",
"str",
",",
"scansion",
":",
"str",
")",
"->",
"List",
"[",
"int",
"]",
":",
"line",
"=",
"scansion",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
"stresses",
"=",
"[",
"]",
"for",
"idx",
",",
"char",
"in",
"enumerate",
"(",
"line",
")",
":",
"if",
"char",
"==",
"stress",
":",
"stresses",
".",
"append",
"(",
"idx",
")",
"return",
"stresses"
] | 26.705882 | 20 |
def count_features_type(features):
""" Counts three different types of features (float, integer, binary).
:param features: pandas.DataFrame
A dataset in a panda's data frame
:returns a tuple (binary, integer, float)
"""
counter={k.name: v for k, v in features.columns.to_series().groupby(features.dtypes)}
binary=0
if ('int64' in counter):
binary=len(set(features.loc[:, (features<=1).all(axis=0)].columns.values)
& set(features.loc[:, (features>=0).all(axis=0)].columns.values)
& set(counter['int64']))
return (binary,len(counter['int64'])-binary if 'int64' in counter else 0,len(counter['float64']) if 'float64' in counter else 0) | [
"def",
"count_features_type",
"(",
"features",
")",
":",
"counter",
"=",
"{",
"k",
".",
"name",
":",
"v",
"for",
"k",
",",
"v",
"in",
"features",
".",
"columns",
".",
"to_series",
"(",
")",
".",
"groupby",
"(",
"features",
".",
"dtypes",
")",
"}",
"binary",
"=",
"0",
"if",
"(",
"'int64'",
"in",
"counter",
")",
":",
"binary",
"=",
"len",
"(",
"set",
"(",
"features",
".",
"loc",
"[",
":",
",",
"(",
"features",
"<=",
"1",
")",
".",
"all",
"(",
"axis",
"=",
"0",
")",
"]",
".",
"columns",
".",
"values",
")",
"&",
"set",
"(",
"features",
".",
"loc",
"[",
":",
",",
"(",
"features",
">=",
"0",
")",
".",
"all",
"(",
"axis",
"=",
"0",
")",
"]",
".",
"columns",
".",
"values",
")",
"&",
"set",
"(",
"counter",
"[",
"'int64'",
"]",
")",
")",
"return",
"(",
"binary",
",",
"len",
"(",
"counter",
"[",
"'int64'",
"]",
")",
"-",
"binary",
"if",
"'int64'",
"in",
"counter",
"else",
"0",
",",
"len",
"(",
"counter",
"[",
"'float64'",
"]",
")",
"if",
"'float64'",
"in",
"counter",
"else",
"0",
")"
] | 54.615385 | 21.769231 |
def polygon(self):
'''return a polygon for the fence'''
points = []
for fp in self.points[1:]:
points.append((fp.lat, fp.lng))
return points | [
"def",
"polygon",
"(",
"self",
")",
":",
"points",
"=",
"[",
"]",
"for",
"fp",
"in",
"self",
".",
"points",
"[",
"1",
":",
"]",
":",
"points",
".",
"append",
"(",
"(",
"fp",
".",
"lat",
",",
"fp",
".",
"lng",
")",
")",
"return",
"points"
] | 33.833333 | 12.5 |
def create_from_byte(control_flags):
"""Create a ControlFlags class from a control flags byte."""
in_use = bool(control_flags & 1 << 7)
controller = bool(control_flags & 1 << 6)
bit5 = bool(control_flags & 1 << 5)
bit4 = bool(control_flags & 1 << 4)
used_before = bool(control_flags & 1 << 1)
flags = ControlFlags(in_use, controller, used_before,
bit5=bit5, bit4=bit4)
return flags | [
"def",
"create_from_byte",
"(",
"control_flags",
")",
":",
"in_use",
"=",
"bool",
"(",
"control_flags",
"&",
"1",
"<<",
"7",
")",
"controller",
"=",
"bool",
"(",
"control_flags",
"&",
"1",
"<<",
"6",
")",
"bit5",
"=",
"bool",
"(",
"control_flags",
"&",
"1",
"<<",
"5",
")",
"bit4",
"=",
"bool",
"(",
"control_flags",
"&",
"1",
"<<",
"4",
")",
"used_before",
"=",
"bool",
"(",
"control_flags",
"&",
"1",
"<<",
"1",
")",
"flags",
"=",
"ControlFlags",
"(",
"in_use",
",",
"controller",
",",
"used_before",
",",
"bit5",
"=",
"bit5",
",",
"bit4",
"=",
"bit4",
")",
"return",
"flags"
] | 46.5 | 8.5 |
def requests(self, code=None, **kwargs):
"""
Retrieve open requests. You can also enter a specific service code
argument.
>>> Three('api.city.gov').requests()
{'all': {'requests': 'data'}}
>>> Three('api.city.gov').requests('123')
{'123': {'requests': 'data'}}
"""
if code:
kwargs['service_code'] = code
data = self.get('requests', **kwargs)
return data | [
"def",
"requests",
"(",
"self",
",",
"code",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"code",
":",
"kwargs",
"[",
"'service_code'",
"]",
"=",
"code",
"data",
"=",
"self",
".",
"get",
"(",
"'requests'",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 31.5 | 11.928571 |
def query_bankcard(self, out_trade_no):
"""
企业付款查询接口
:param out_trade_no: 商户调用企业付款API时使用的商户订单号
:return: 返回的结果数据
"""
data = {
'mch_id': self.mch_id,
'partner_trade_no': out_trade_no,
}
return self._post('mmpaysptrans/query_bank', data=data) | [
"def",
"query_bankcard",
"(",
"self",
",",
"out_trade_no",
")",
":",
"data",
"=",
"{",
"'mch_id'",
":",
"self",
".",
"mch_id",
",",
"'partner_trade_no'",
":",
"out_trade_no",
",",
"}",
"return",
"self",
".",
"_post",
"(",
"'mmpaysptrans/query_bank'",
",",
"data",
"=",
"data",
")"
] | 26.416667 | 14.916667 |
def point_line_distance(p, l_p, l_v):
'''Calculate the distance between a point and a line defined
by a point and a direction vector.
'''
l_v = normalize(l_v)
u = p - l_p
return np.linalg.norm(u - np.dot(u, l_v) * l_v) | [
"def",
"point_line_distance",
"(",
"p",
",",
"l_p",
",",
"l_v",
")",
":",
"l_v",
"=",
"normalize",
"(",
"l_v",
")",
"u",
"=",
"p",
"-",
"l_p",
"return",
"np",
".",
"linalg",
".",
"norm",
"(",
"u",
"-",
"np",
".",
"dot",
"(",
"u",
",",
"l_v",
")",
"*",
"l_v",
")"
] | 33.714286 | 16.285714 |
def has_verified_email(self):
"""
Has the user verified that the email he has given is legit?
Verified e-mail is required to the gallery. Confirmation happens by
sending an email to the user and the owner of the email user verifying
that he is the same as the Imgur user.
"""
url = (self._imgur._base_url + "/3/account/{0}/"
"verifyemail".format(self.name))
return self._imgur._send_request(url, needs_auth=True) | [
"def",
"has_verified_email",
"(",
"self",
")",
":",
"url",
"=",
"(",
"self",
".",
"_imgur",
".",
"_base_url",
"+",
"\"/3/account/{0}/\"",
"\"verifyemail\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"return",
"self",
".",
"_imgur",
".",
"_send_request",
"(",
"url",
",",
"needs_auth",
"=",
"True",
")"
] | 43.818182 | 18.363636 |
def lineincols (inlist,colsize):
"""
Returns a string composed of elements in inlist, with each element
right-aligned in columns of (fixed) colsize.
Usage: lineincols (inlist,colsize) where colsize is an integer
"""
outstr = ''
for item in inlist:
if type(item) != StringType:
item = str(item)
size = len(item)
if size <= colsize:
for i in range(colsize-size):
outstr = outstr + ' '
outstr = outstr + item
else:
outstr = outstr + item[0:colsize+1]
return outstr | [
"def",
"lineincols",
"(",
"inlist",
",",
"colsize",
")",
":",
"outstr",
"=",
"''",
"for",
"item",
"in",
"inlist",
":",
"if",
"type",
"(",
"item",
")",
"!=",
"StringType",
":",
"item",
"=",
"str",
"(",
"item",
")",
"size",
"=",
"len",
"(",
"item",
")",
"if",
"size",
"<=",
"colsize",
":",
"for",
"i",
"in",
"range",
"(",
"colsize",
"-",
"size",
")",
":",
"outstr",
"=",
"outstr",
"+",
"' '",
"outstr",
"=",
"outstr",
"+",
"item",
"else",
":",
"outstr",
"=",
"outstr",
"+",
"item",
"[",
"0",
":",
"colsize",
"+",
"1",
"]",
"return",
"outstr"
] | 29.473684 | 13.578947 |
def _rm_outlier_by_amp(self, params, model, signal, ii):
"""
Helper function to reject outliers based on mean amplitude
"""
maxamps = np.nanmax(np.abs(model),0)
z_score = (maxamps - np.nanmean(maxamps,0))/np.nanstd(maxamps,0)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
outlier_idx = np.where(np.abs(z_score)>2.0)[0]
nan_idx = np.where(np.isnan(params))[0]
outlier_idx = np.unique(np.hstack([nan_idx, outlier_idx]))
ii[outlier_idx] = 0
model[outlier_idx] = np.nan
signal[outlier_idx] = np.nan
params[outlier_idx] = np.nan
return model, signal, params, ii | [
"def",
"_rm_outlier_by_amp",
"(",
"self",
",",
"params",
",",
"model",
",",
"signal",
",",
"ii",
")",
":",
"maxamps",
"=",
"np",
".",
"nanmax",
"(",
"np",
".",
"abs",
"(",
"model",
")",
",",
"0",
")",
"z_score",
"=",
"(",
"maxamps",
"-",
"np",
".",
"nanmean",
"(",
"maxamps",
",",
"0",
")",
")",
"/",
"np",
".",
"nanstd",
"(",
"maxamps",
",",
"0",
")",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
")",
"outlier_idx",
"=",
"np",
".",
"where",
"(",
"np",
".",
"abs",
"(",
"z_score",
")",
">",
"2.0",
")",
"[",
"0",
"]",
"nan_idx",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isnan",
"(",
"params",
")",
")",
"[",
"0",
"]",
"outlier_idx",
"=",
"np",
".",
"unique",
"(",
"np",
".",
"hstack",
"(",
"[",
"nan_idx",
",",
"outlier_idx",
"]",
")",
")",
"ii",
"[",
"outlier_idx",
"]",
"=",
"0",
"model",
"[",
"outlier_idx",
"]",
"=",
"np",
".",
"nan",
"signal",
"[",
"outlier_idx",
"]",
"=",
"np",
".",
"nan",
"params",
"[",
"outlier_idx",
"]",
"=",
"np",
".",
"nan",
"return",
"model",
",",
"signal",
",",
"params",
",",
"ii"
] | 41.823529 | 11.235294 |
def affinity(self,affinity):
"""Set the affinity for all threads in this group.
If setting affinity fails on any thread, the affinity of all threads
is restored to its previous value.
"""
with self.__lock:
old_affinities = {}
try:
for thread in self.__threads:
old_affinities[thread] = thread.affinity
thread.affinity = affinity
except Exception:
for (thread,old_affinity) in old_affinities.iteritems():
try:
thread.affinity = old_affinity
except Exception:
pass
raise
else:
self.__affinity = affinity | [
"def",
"affinity",
"(",
"self",
",",
"affinity",
")",
":",
"with",
"self",
".",
"__lock",
":",
"old_affinities",
"=",
"{",
"}",
"try",
":",
"for",
"thread",
"in",
"self",
".",
"__threads",
":",
"old_affinities",
"[",
"thread",
"]",
"=",
"thread",
".",
"affinity",
"thread",
".",
"affinity",
"=",
"affinity",
"except",
"Exception",
":",
"for",
"(",
"thread",
",",
"old_affinity",
")",
"in",
"old_affinities",
".",
"iteritems",
"(",
")",
":",
"try",
":",
"thread",
".",
"affinity",
"=",
"old_affinity",
"except",
"Exception",
":",
"pass",
"raise",
"else",
":",
"self",
".",
"__affinity",
"=",
"affinity"
] | 36.285714 | 14.333333 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.