text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def get_linestyle(self, increment=1):
"""
Returns the current marker, then increments the marker by what's specified
"""
i = self.linestyles_index
self.linestyles_index += increment
if self.linestyles_index >= len(self.linestyles):
self.linestyles_index = self.linestyles_index-len(self.linestyles)
if self.linestyles_index >= len(self.linestyles): self.linestyles_index=0 # to be safe
return self.linestyles[i] | [
"def",
"get_linestyle",
"(",
"self",
",",
"increment",
"=",
"1",
")",
":",
"i",
"=",
"self",
".",
"linestyles_index",
"self",
".",
"linestyles_index",
"+=",
"increment",
"if",
"self",
".",
"linestyles_index",
">=",
"len",
"(",
"self",
".",
"linestyles",
")... | 37.076923 | 22.615385 |
def create_attachment(self, upload_stream, scope_identifier, hub_name, plan_id, timeline_id, record_id, type, name, **kwargs):
"""CreateAttachment.
[Preview API]
:param object upload_stream: Stream to upload
:param str scope_identifier: The project GUID to scope the request
:param str hub_name: The name of the server hub: "build" for the Build server or "rm" for the Release Management server
:param str plan_id:
:param str timeline_id:
:param str record_id:
:param str type:
:param str name:
:rtype: :class:`<TaskAttachment> <azure.devops.v5_0.task.models.TaskAttachment>`
"""
route_values = {}
if scope_identifier is not None:
route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str')
if hub_name is not None:
route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str')
if plan_id is not None:
route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str')
if timeline_id is not None:
route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str')
if record_id is not None:
route_values['recordId'] = self._serialize.url('record_id', record_id, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
if name is not None:
route_values['name'] = self._serialize.url('name', name, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='7898f959-9cdf-4096-b29e-7f293031629e',
version='5.0-preview.1',
route_values=route_values,
content=content,
media_type='application/octet-stream')
return self._deserialize('TaskAttachment', response) | [
"def",
"create_attachment",
"(",
"self",
",",
"upload_stream",
",",
"scope_identifier",
",",
"hub_name",
",",
"plan_id",
",",
"timeline_id",
",",
"record_id",
",",
"type",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"route_values",
"=",
"{",
"}",
"if",... | 53.3 | 24.325 |
def hpd_credible_interval(mu_in, post, alpha=0.9, tolerance=1e-3):
'''
Returns the minimum and maximum rate values of the HPD
(Highest Posterior Density) credible interval for a posterior
post defined at the sample values mu_in. Samples need not be
uniformly spaced and posterior need not be normalized.
Will not return a correct credible interval if the posterior
is multimodal and the correct interval is not contiguous;
in this case will over-cover by including the whole range from
minimum to maximum mu.
'''
if alpha == 1:
nonzero_samples = mu_in[post > 0]
mu_low = numpy.min(nonzero_samples)
mu_high = numpy.max(nonzero_samples)
elif 0 < alpha < 1:
# determine the highest PDF for which the region with
# higher density has sufficient coverage
pthresh = hpd_threshold(mu_in, post, alpha, tol=tolerance)
samples_over_threshold = mu_in[post > pthresh]
mu_low = numpy.min(samples_over_threshold)
mu_high = numpy.max(samples_over_threshold)
return mu_low, mu_high | [
"def",
"hpd_credible_interval",
"(",
"mu_in",
",",
"post",
",",
"alpha",
"=",
"0.9",
",",
"tolerance",
"=",
"1e-3",
")",
":",
"if",
"alpha",
"==",
"1",
":",
"nonzero_samples",
"=",
"mu_in",
"[",
"post",
">",
"0",
"]",
"mu_low",
"=",
"numpy",
".",
"mi... | 42.72 | 19.76 |
def SettingsAddConnection(self, connection_settings):
'''Add a connection.
connection_settings is a String String Variant Map Map. See
https://developer.gnome.org/NetworkManager/0.9/spec.html
#type-String_String_Variant_Map_Map
If you omit uuid, this method adds one for you.
'''
if 'uuid' not in connection_settings['connection']:
connection_settings['connection']['uuid'] = str(uuid.uuid4())
NM = dbusmock.get_object(MANAGER_OBJ)
settings_obj = dbusmock.get_object(SETTINGS_OBJ)
main_connections = settings_obj.ListConnections()
# Mimic how NM names connections
count = 0
while True:
connection_obj_path = dbus.ObjectPath(SETTINGS_OBJ + '/' + str(count))
if connection_obj_path not in main_connections:
break
count += 1
connection_path = str(connection_obj_path)
self.AddObject(connection_path,
CSETTINGS_IFACE,
{
'Unsaved': False
},
[
('Delete', '', '', 'self.ConnectionDelete(self)'),
('GetSettings', '', 'a{sa{sv}}', 'ret = self.ConnectionGetSettings(self)'),
('GetSecrets', 's', 'a{sa{sv}}', 'ret = self.ConnectionGetSecrets(self, args[0])'),
('Update', 'a{sa{sv}}', '', 'self.ConnectionUpdate(self, args[0])'),
])
self.object_manager_emit_added(connection_path)
connection_obj = dbusmock.get_object(connection_path)
connection_obj.settings = connection_settings
connection_obj.connection_path = connection_path
connection_obj.ConnectionDelete = ConnectionDelete
connection_obj.ConnectionGetSettings = ConnectionGetSettings
connection_obj.ConnectionGetSecrets = ConnectionGetSecrets
connection_obj.ConnectionUpdate = ConnectionUpdate
main_connections.append(connection_path)
settings_obj.Set(SETTINGS_IFACE, 'Connections', main_connections)
settings_obj.EmitSignal(SETTINGS_IFACE, 'NewConnection', 'o', [connection_path])
auto_connect = False
if 'autoconnect' in connection_settings['connection']:
auto_connect = connection_settings['connection']['autoconnect']
if auto_connect:
dev = None
devices = NM.GetDevices()
# Grab the first device.
if len(devices) > 0:
dev = devices[0]
if dev:
activate_connection(NM, connection_path, dev, connection_path)
return connection_path | [
"def",
"SettingsAddConnection",
"(",
"self",
",",
"connection_settings",
")",
":",
"if",
"'uuid'",
"not",
"in",
"connection_settings",
"[",
"'connection'",
"]",
":",
"connection_settings",
"[",
"'connection'",
"]",
"[",
"'uuid'",
"]",
"=",
"str",
"(",
"uuid",
... | 36.397059 | 23.897059 |
def _notice_broker_or_pool(obj):
"""
Used by :mod:`mitogen.core` and :mod:`mitogen.service` to automatically
register every broker and pool on Python 2.4/2.5.
"""
if isinstance(obj, mitogen.core.Broker):
_brokers[obj] = True
else:
_pools[obj] = True | [
"def",
"_notice_broker_or_pool",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"mitogen",
".",
"core",
".",
"Broker",
")",
":",
"_brokers",
"[",
"obj",
"]",
"=",
"True",
"else",
":",
"_pools",
"[",
"obj",
"]",
"=",
"True"
] | 31.222222 | 13 |
def parse_networks_output(out):
"""
Parses the output of the Docker CLI 'docker network ls' and returns it in the format similar to the Docker API.
:param out: CLI output.
:type out: unicode | str
:return: Parsed result.
:rtype: list[dict]
"""
if not out:
return []
line_iter = islice(out.splitlines(), 1, None) # Skip header
return list(map(_network_info, line_iter)) | [
"def",
"parse_networks_output",
"(",
"out",
")",
":",
"if",
"not",
"out",
":",
"return",
"[",
"]",
"line_iter",
"=",
"islice",
"(",
"out",
".",
"splitlines",
"(",
")",
",",
"1",
",",
"None",
")",
"# Skip header",
"return",
"list",
"(",
"map",
"(",
"_... | 31.230769 | 19.846154 |
def get_calculation_dependencies_for(service):
"""Calculation dependencies of this service and the calculation of each
dependent service (recursively).
"""
def calc_dependencies_gen(service, collector=None):
"""Generator for recursive dependency resolution.
"""
# The UID of the service
service_uid = api.get_uid(service)
# maintain an internal dependency mapping
if collector is None:
collector = {}
# Stop iteration if we processed this service already
if service_uid in collector:
raise StopIteration
# Get the calculation of the service.
# The calculation comes either from an assigned method or the user
# has set a calculation manually (see content/analysisservice.py).
calculation = service.getCalculation()
# Stop iteration if there is no calculation
if not calculation:
raise StopIteration
# The services used in this calculation.
# These are the actual dependencies of the used formula.
dep_services = calculation.getDependentServices()
for dep_service in dep_services:
# get the UID of the dependent service
dep_service_uid = api.get_uid(dep_service)
# remember the dependent service
collector[dep_service_uid] = dep_service
# yield the dependent service
yield dep_service
# check the dependencies of the dependent services
for ddep_service in calc_dependencies_gen(dep_service,
collector=collector):
yield ddep_service
dependencies = {}
for dep_service in calc_dependencies_gen(service):
# Skip the initial (requested) service
if dep_service == service:
continue
uid = api.get_uid(dep_service)
dependencies[uid] = dep_service
return dependencies | [
"def",
"get_calculation_dependencies_for",
"(",
"service",
")",
":",
"def",
"calc_dependencies_gen",
"(",
"service",
",",
"collector",
"=",
"None",
")",
":",
"\"\"\"Generator for recursive dependency resolution.\n \"\"\"",
"# The UID of the service",
"service_uid",
"=",
... | 34.5 | 17.321429 |
def ModifyInstance(self, ModifiedInstance, IncludeQualifiers=None,
PropertyList=None, **extra):
# pylint: disable=invalid-name,line-too-long
"""
Modify the property values of an instance.
This method performs the ModifyInstance operation
(see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
methods performing such operations.
The `PropertyList` parameter determines the set of properties that are
designated to be modified (see its description for details).
The properties provided in the `ModifiedInstance` parameter specify
the new property values for the properties that are designated to be
modified.
Pywbem sends the property values provided in the `ModifiedInstance`
parameter to the WBEM server as provided; it does not add any default
values for properties not provided but designated to be modified, nor
does it reduce the properties by those not designated to be modified.
The properties that are actually modified by the WBEM server as a result
of this operation depend on a number of things:
* The WBEM server will reject modification requests for key properties
and for properties that are not exposed by the creation class of the
target instance.
* The WBEM server may consider some properties as read-only, as a
result of requirements at the CIM modeling level (schema or
management profiles), or as a result of an implementation decision.
Note that the WRITE qualifier on a property is not a safe indicator
as to whether the property can actually be modified. It is an
expression at the level of the CIM schema that may or may not be
considered in DMTF management profiles or in implementations.
Specifically, a qualifier value of True on a property does not
guarantee modifiability of the property, and a value of False does
not prevent modifiability.
* The WBEM server may detect invalid new values or conflicts resulting
from the new property values and may reject modification of a property
for such reasons.
If the WBEM server rejects modification of a property for any reason,
it will cause this operation to fail and will not modify any property
on the target instance. If this operation succeeds, all properties
designated to be modified have their new values (see the description
of the `ModifiedInstance` parameter for details on how the new values
are determined).
Note that properties (including properties not designated to be
modified) may change their values as an indirect result of this
operation. For example, a property that was not designated to be
modified may be derived from another property that was modified, and
may show a changed value due to that.
If the operation succeeds, this method returns.
Otherwise, this method raises an exception.
Parameters:
ModifiedInstance (:class:`~pywbem.CIMInstance`):
A representation of the modified instance, also indicating its
instance path.
The `path` attribute of this object identifies the instance to be
modified. Its `keybindings` attribute is required. If its
`namespace` attribute is `None`, the default namespace of the
connection will be used. Its `host` attribute will be ignored.
The `classname` attribute of the instance path and the `classname`
attribute of the instance must specify the same class name.
The properties defined in this object specify the new property
values (including `None` for NULL). If a property is designated to
be modified but is not specified in this object, the WBEM server
will use the default value of the property declaration if specified
(including `None`), and otherwise may update the property to any
value (including `None`).
Typically, this object has been retrieved by other operations,
such as :meth:`~pywbem.WBEMConnection.GetInstance`.
IncludeQualifiers (:class:`py:bool`):
Indicates that qualifiers are to be modified as specified in the
`ModifiedInstance` parameter, as follows:
* If `False`, qualifiers not modified.
* If `True`, qualifiers are modified if the WBEM server implements
support for this parameter.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default to be used. :term:`DSP0200`
defines that the server-implemented default is `True`.
This parameter has been deprecated in :term:`DSP0200`. Clients
cannot rely on qualifiers to be modified.
PropertyList (:term:`string` or :term:`py:iterable` of :term:`string`):
This parameter defines which properties are designated to be
modified.
This parameter is an iterable specifying the names of the
properties, or a string that specifies a single property name. In
all cases, the property names are matched case insensitively.
The specified properties are designated to be modified. Properties
not specified are not designated to be modified.
An empty iterable indicates that no properties are designated to be
modified.
If `None`, DSP0200 states that the properties with values different
from the current values in the instance are designated to be
modified, but for all practical purposes this is equivalent to
stating that all properties exposed by the instance are designated
to be modified.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`.
""" # noqa: E501
exc = None
method_name = 'ModifyInstance'
if self._operation_recorders:
self.operation_recorder_reset()
self.operation_recorder_stage_pywbem_args(
method=method_name,
ModifiedInstance=ModifiedInstance,
IncludeQualifiers=IncludeQualifiers,
PropertyList=PropertyList,
**extra)
try:
stats = self.statistics.start_timer('ModifyInstance')
# Must pass a named CIMInstance here (i.e path attribute set)
if ModifiedInstance.path is None:
raise ValueError(
'ModifiedInstance parameter must have path attribute set')
if ModifiedInstance.path.classname is None:
raise ValueError(
'ModifiedInstance parameter must have classname set in '
' path')
if ModifiedInstance.classname is None:
raise ValueError(
'ModifiedInstance parameter must have classname set in '
'instance')
namespace = self._iparam_namespace_from_objectname(
ModifiedInstance.path, 'ModifiedInstance.path')
PropertyList = _iparam_propertylist(PropertyList)
# Strip off host and namespace to avoid producing an INSTANCEPATH or
# LOCALINSTANCEPATH element instead of the desired INSTANCENAME
# element.
instance = ModifiedInstance.copy()
instance.path.namespace = None
instance.path.host = None
self._imethodcall(
method_name,
namespace,
ModifiedInstance=instance,
IncludeQualifiers=IncludeQualifiers,
PropertyList=PropertyList,
has_return_value=False,
**extra)
return
except (CIMXMLParseError, XMLParseError) as exce:
exce.request_data = self.last_raw_request
exce.response_data = self.last_raw_reply
exc = exce
raise
except Exception as exce:
exc = exce
raise
finally:
self._last_operation_time = stats.stop_timer(
self.last_request_len, self.last_reply_len,
self.last_server_response_time, exc)
if self._operation_recorders:
self.operation_recorder_stage_result(None, exc) | [
"def",
"ModifyInstance",
"(",
"self",
",",
"ModifiedInstance",
",",
"IncludeQualifiers",
"=",
"None",
",",
"PropertyList",
"=",
"None",
",",
"*",
"*",
"extra",
")",
":",
"# pylint: disable=invalid-name,line-too-long",
"# noqa: E501",
"exc",
"=",
"None",
"method_name... | 45.25 | 26.114583 |
def errorstr(self, space, use_repr=False):
"The exception class and value, as a string."
w_value = self.get_w_value(space)
if space is None:
# this part NOT_RPYTHON
exc_typename = str(self.w_type)
exc_value = str(w_value)
else:
w = space.wrap
if space.is_w(space.type(self.w_type), space.w_text):
exc_typename = space.str_w(self.w_type)
else:
exc_typename = space.str_w(
space.getattr(self.w_type, w('__name__')))
if space.is_w(w_value, space.w_None):
exc_value = ""
else:
try:
if use_repr:
exc_value = space.str_w(space.repr(w_value))
else:
exc_value = space.str_w(space.str(w_value))
except OperationError:
# oups, cannot __str__ the exception object
exc_value = "<oups, exception object itself cannot be str'd>"
if not exc_value:
return exc_typename
else:
return '%s: %s' % (exc_typename, exc_value) | [
"def",
"errorstr",
"(",
"self",
",",
"space",
",",
"use_repr",
"=",
"False",
")",
":",
"w_value",
"=",
"self",
".",
"get_w_value",
"(",
"space",
")",
"if",
"space",
"is",
"None",
":",
"# this part NOT_RPYTHON",
"exc_typename",
"=",
"str",
"(",
"self",
".... | 40.344828 | 15.310345 |
def attribs_to_string(attrib_dict, keys):
"""
A more specific version of the subdict utility aimed at handling
node and edge attribute dictionaries for NetworkX file formats such as
gexf (which does not allow attributes to have a list type) by making
them writable in those formats
"""
for key, value in attrib_dict.iteritems():
if (isinstance(value, list) or isinstance(value, dict) or
isinstance(value, tuple)):
attrib_dict[key] = value
return attrib_dict | [
"def",
"attribs_to_string",
"(",
"attrib_dict",
",",
"keys",
")",
":",
"for",
"key",
",",
"value",
"in",
"attrib_dict",
".",
"iteritems",
"(",
")",
":",
"if",
"(",
"isinstance",
"(",
"value",
",",
"list",
")",
"or",
"isinstance",
"(",
"value",
",",
"di... | 39.230769 | 15.076923 |
def get_submissions_multiple_assignments_by_sis_id(
self, is_section, sis_id, students=None, assignments=None,
**params):
"""
List submissions for multiple assignments by course/section sis id and
optionally student
https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.for_students
"""
if is_section:
return self.get_submissions_multiple_assignments(
is_section, self._sis_id(sis_id, 'section'), students,
assignments, **params)
else:
return self.get_submissions_multiple_assignments(
is_section, self._sis_id(sis_id, 'course'), students,
assignments, **params) | [
"def",
"get_submissions_multiple_assignments_by_sis_id",
"(",
"self",
",",
"is_section",
",",
"sis_id",
",",
"students",
"=",
"None",
",",
"assignments",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"if",
"is_section",
":",
"return",
"self",
".",
"get_submi... | 43.529412 | 21.176471 |
def StoreResults(self, responses):
"""Stores the responses."""
client_id = responses.request.client_id
if responses.success:
logging.info("Client %s has a file %s.", client_id, self.args.filename)
else:
logging.info("Client %s has no file %s.", client_id, self.args.filename)
self.MarkClientDone(client_id) | [
"def",
"StoreResults",
"(",
"self",
",",
"responses",
")",
":",
"client_id",
"=",
"responses",
".",
"request",
".",
"client_id",
"if",
"responses",
".",
"success",
":",
"logging",
".",
"info",
"(",
"\"Client %s has a file %s.\"",
",",
"client_id",
",",
"self",... | 33.1 | 21.6 |
def punct(self, text):
"""Push punctuation onto the token queue."""
cls = self.PUNCTUATION[text]
self.push_token(cls(text, self.lineno, self.offset)) | [
"def",
"punct",
"(",
"self",
",",
"text",
")",
":",
"cls",
"=",
"self",
".",
"PUNCTUATION",
"[",
"text",
"]",
"self",
".",
"push_token",
"(",
"cls",
"(",
"text",
",",
"self",
".",
"lineno",
",",
"self",
".",
"offset",
")",
")"
] | 42.5 | 10.5 |
def _get_dvportgroup_dict(pg_ref):
'''
Returns a dictionary with a distributed virutal portgroup data
pg_ref
Portgroup reference
'''
props = salt.utils.vmware.get_properties_of_managed_object(
pg_ref, ['name', 'config.description', 'config.numPorts',
'config.type', 'config.defaultPortConfig'])
pg_dict = {'name': props['name'],
'description': props.get('config.description'),
'num_ports': props['config.numPorts'],
'type': props['config.type']}
if props['config.defaultPortConfig']:
dpg = props['config.defaultPortConfig']
if dpg.vlan and \
isinstance(dpg.vlan,
vim.VmwareDistributedVirtualSwitchVlanIdSpec):
pg_dict.update({'vlan_id': dpg.vlan.vlanId})
pg_dict.update({'out_shaping':
_get_dvportgroup_out_shaping(
props['name'],
props['config.defaultPortConfig'])})
pg_dict.update({'security_policy':
_get_dvportgroup_security_policy(
props['name'],
props['config.defaultPortConfig'])})
pg_dict.update({'teaming':
_get_dvportgroup_teaming(
props['name'],
props['config.defaultPortConfig'])})
return pg_dict | [
"def",
"_get_dvportgroup_dict",
"(",
"pg_ref",
")",
":",
"props",
"=",
"salt",
".",
"utils",
".",
"vmware",
".",
"get_properties_of_managed_object",
"(",
"pg_ref",
",",
"[",
"'name'",
",",
"'config.description'",
",",
"'config.numPorts'",
",",
"'config.type'",
","... | 40.342857 | 17.028571 |
def mean_squared_error(pred:Tensor, targ:Tensor)->Rank0Tensor:
"Mean squared error between `pred` and `targ`."
pred,targ = flatten_check(pred,targ)
return F.mse_loss(pred, targ) | [
"def",
"mean_squared_error",
"(",
"pred",
":",
"Tensor",
",",
"targ",
":",
"Tensor",
")",
"->",
"Rank0Tensor",
":",
"pred",
",",
"targ",
"=",
"flatten_check",
"(",
"pred",
",",
"targ",
")",
"return",
"F",
".",
"mse_loss",
"(",
"pred",
",",
"targ",
")"
... | 46.5 | 10 |
def variance(self):
"""Returns variance"""
if self.counter.value <= 1:
return 0.0
return self.var.value[1] / (self.counter.value - 1) | [
"def",
"variance",
"(",
"self",
")",
":",
"if",
"self",
".",
"counter",
".",
"value",
"<=",
"1",
":",
"return",
"0.0",
"return",
"self",
".",
"var",
".",
"value",
"[",
"1",
"]",
"/",
"(",
"self",
".",
"counter",
".",
"value",
"-",
"1",
")"
] | 33 | 12.6 |
def _session_key(self, key):
"""
Generates session key string.
:param str key:
e.g. ``"authomatic:facebook:key"``
"""
return '{0}:{1}:{2}'.format(self.settings.prefix, self.name, key) | [
"def",
"_session_key",
"(",
"self",
",",
"key",
")",
":",
"return",
"'{0}:{1}:{2}'",
".",
"format",
"(",
"self",
".",
"settings",
".",
"prefix",
",",
"self",
".",
"name",
",",
"key",
")"
] | 22.9 | 19.1 |
def check_download(obj, *args, **kwargs):
"""Verify a download"""
version = args[0]
workdir = args[1]
signame = args[2]
if version:
local_version = get_local_version(workdir, signame)
if not verify_sigfile(workdir, signame) or version != local_version:
error("[-] \033[91mFailed to verify signature: %s from: %s\033[0m"
% (signame, obj.url))
raise ValueError('Failed to verify signature: %s' % signame) | [
"def",
"check_download",
"(",
"obj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"version",
"=",
"args",
"[",
"0",
"]",
"workdir",
"=",
"args",
"[",
"1",
"]",
"signame",
"=",
"args",
"[",
"2",
"]",
"if",
"version",
":",
"local_version",
... | 42.727273 | 19 |
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Parameter(key)
if key not in Parameter._member_map_:
extend_enum(Parameter, key, default)
return Parameter[key] | [
"def",
"get",
"(",
"key",
",",
"default",
"=",
"-",
"1",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"int",
")",
":",
"return",
"Parameter",
"(",
"key",
")",
"if",
"key",
"not",
"in",
"Parameter",
".",
"_member_map_",
":",
"extend_enum",
"(",
"P... | 37.428571 | 7.714286 |
def set_base_prompt(
self, pri_prompt_terminator=":", alt_prompt_terminator="#", delay_factor=2
):
"""Sets self.base_prompt: used as delimiter for stripping of trailing prompt in output."""
super(AccedianSSH, self).set_base_prompt(
pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
delay_factor=delay_factor,
)
return self.base_prompt | [
"def",
"set_base_prompt",
"(",
"self",
",",
"pri_prompt_terminator",
"=",
"\":\"",
",",
"alt_prompt_terminator",
"=",
"\"#\"",
",",
"delay_factor",
"=",
"2",
")",
":",
"super",
"(",
"AccedianSSH",
",",
"self",
")",
".",
"set_base_prompt",
"(",
"pri_prompt_termin... | 44.5 | 17.9 |
def _indexable_roles_and_users(self):
"""Return a string made for indexing roles having :any:`READ`
permission on this object."""
from abilian.services.indexing import indexable_role
from abilian.services.security import READ, Admin, Anonymous, Creator, Owner
from abilian.services import get_service
result = []
security = get_service("security")
# roles - required to match when user has a global role
assignments = security.get_permissions_assignments(permission=READ, obj=self)
allowed_roles = assignments.get(READ, set())
allowed_roles.add(Admin)
for r in allowed_roles:
result.append(indexable_role(r))
for role, attr in ((Creator, "creator"), (Owner, "owner")):
if role in allowed_roles:
user = getattr(self, attr)
if user:
result.append(indexable_role(user))
# users and groups
principals = set()
for user, role in security.get_role_assignements(self):
if role in allowed_roles:
principals.add(user)
if Anonymous in principals:
# it's a role listed in role assignments - legacy when there wasn't
# permission-role assignments
principals.remove(Anonymous)
for p in principals:
result.append(indexable_role(p))
return " ".join(result) | [
"def",
"_indexable_roles_and_users",
"(",
"self",
")",
":",
"from",
"abilian",
".",
"services",
".",
"indexing",
"import",
"indexable_role",
"from",
"abilian",
".",
"services",
".",
"security",
"import",
"READ",
",",
"Admin",
",",
"Anonymous",
",",
"Creator",
... | 36.25641 | 18.205128 |
def reset(self):
"""Reset emulator. All registers and memory are reset.
"""
self.__mem.reset()
self.__cpu.reset()
self.__tainter.reset()
# Instructions pre and post handlers.
self.__instr_handler_pre = None, None
self.__instr_handler_post = None, None
self.__set_default_handlers() | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"__mem",
".",
"reset",
"(",
")",
"self",
".",
"__cpu",
".",
"reset",
"(",
")",
"self",
".",
"__tainter",
".",
"reset",
"(",
")",
"# Instructions pre and post handlers.",
"self",
".",
"__instr_handler_pre"... | 28.666667 | 13.416667 |
def rc2_cbc_pkcs5_decrypt(key, data, iv):
"""
Decrypts RC2 ciphertext using a 64 bit key
:param key:
The encryption key - a byte string 8 bytes long
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector used for encryption - a byte string
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext
"""
if len(key) < 5 or len(key) > 16:
raise ValueError(pretty_message(
'''
key must be 5 to 16 bytes (40 to 128 bits) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
return _decrypt(Security.kSecAttrKeyTypeRC2, key, data, iv, Security.kSecPaddingPKCS5Key) | [
"def",
"rc2_cbc_pkcs5_decrypt",
"(",
"key",
",",
"data",
",",
"iv",
")",
":",
"if",
"len",
"(",
"key",
")",
"<",
"5",
"or",
"len",
"(",
"key",
")",
">",
"16",
":",
"raise",
"ValueError",
"(",
"pretty_message",
"(",
"'''\n key must be 5 to 16 byt... | 26.641026 | 23.051282 |
def get_resourceprovider_logger(name=None, short_name=" ", log_to_file=True):
"""
Get a logger for ResourceProvider and it's components, such as Allocators.
:param name: Name for logger
:param short_name: Shorthand name for the logger
:param log_to_file: Boolean, True if logger should log to a file as well.
:return: Logger
"""
global LOGGERS
loggername = name
logger = _check_existing_logger(loggername, short_name)
if logger is not None:
return logger
logger_config = LOGGING_CONFIG.get(name, DEFAULT_LOGGING_CONFIG)
logger = _get_basic_logger(loggername, log_to_file, get_base_logfilename(loggername + ".log"))
cbh = logging.StreamHandler()
cbh.formatter = BenchFormatterWithType(COLOR_ON)
if VERBOSE_LEVEL > 0 and not SILENT_ON:
cbh.setLevel(logging.DEBUG)
elif SILENT_ON:
cbh.setLevel(logging.WARN)
else:
cbh.setLevel(getattr(logging, logger_config.get("level")))
logger.addHandler(cbh)
LOGGERS[loggername] = BenchLoggerAdapter(logger, {"source": short_name})
return LOGGERS[loggername] | [
"def",
"get_resourceprovider_logger",
"(",
"name",
"=",
"None",
",",
"short_name",
"=",
"\" \"",
",",
"log_to_file",
"=",
"True",
")",
":",
"global",
"LOGGERS",
"loggername",
"=",
"name",
"logger",
"=",
"_check_existing_logger",
"(",
"loggername",
",",
"short_na... | 34.935484 | 22.677419 |
def cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy):
"""
Matrix-vector product for real general matrix.
"""
status = _libcublas.cublasSgemv_v2(handle,
_CUBLAS_OP[trans], m, n,
ctypes.byref(ctypes.c_float(alpha)), int(A), lda,
int(x), incx,
ctypes.byref(ctypes.c_float(beta)), int(y), incy)
cublasCheckStatus(status) | [
"def",
"cublasSgemv",
"(",
"handle",
",",
"trans",
",",
"m",
",",
"n",
",",
"alpha",
",",
"A",
",",
"lda",
",",
"x",
",",
"incx",
",",
"beta",
",",
"y",
",",
"incy",
")",
":",
"status",
"=",
"_libcublas",
".",
"cublasSgemv_v2",
"(",
"handle",
","... | 42.25 | 22.916667 |
def keyword_search(rows, **kwargs):
"""
Takes a list of dictionaries and finds all the dictionaries where the
keys and values match those found in the keyword arguments.
Keys in the row data have ' ' and '-' replaced with '_', so they can
match the keyword argument parsing. For example, the keyword argument
'fix_up_path' will match a key named 'fix-up path'.
In addition, several suffixes can be added to the key name to do partial
matching of values:
* '__contains' will test whether the data value contains the given
value.
* '__startswith' tests if the data value starts with the given value
* '__lower_value' compares the lower-case version of the data and given
values.
Arguments:
rows (list): A list of dictionaries representing the data to be
searched.
**kwargs (dict): keyword-value pairs corresponding to the fields that
need to be found and their required values in the data rows.
Returns:
(list): The list of rows that match the search keywords. If no
keyword arguments are given, no rows are returned.
Examples:
>>> rows = [
... {'domain': 'oracle', 'type': 'soft', 'item': 'nofile', 'value': 1024},
... {'domain': 'oracle', 'type': 'hard', 'item': 'nofile', 'value': 65536},
... {'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
... {'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
... {'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
...
>>> keyword_search(rows, domain='root')
[{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
>>> keyword_search(rows, item__contains='c')
[{'domain': 'oracle', 'type': 'soft', 'item': 'stack', 'value': 10240},
{'domain': 'oracle', 'type': 'hard', 'item': 'stack', 'value': 3276},
{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
>>> keyword_search(rows, domain__startswith='r')
[{'domain': 'root', 'type': 'soft', 'item': 'nproc', 'value': -1}]
"""
results = []
if not kwargs:
return results
# Allows us to transform the key and do lookups like __contains and
# __startswith
matchers = {
'default': lambda s, v: s == v,
'contains': lambda s, v: v in s,
'startswith': lambda s, v: s.startswith(v),
'lower_value': lambda s, v: s.lower() == v.lower(),
}
def key_match(row, key, value):
# Translate ' ' and '-' of keys in dict to '_' to match keyword arguments.
my_row = {}
for my_key, val in row.items():
my_row[my_key.replace(' ', '_').replace('-', '_')] = val
matcher_fn = matchers['default']
if '__' in key:
key, matcher = key.split('__', 1)
if matcher not in matchers:
# put key back the way we found it, matcher fn unchanged
key = key + '__' + matcher
else:
matcher_fn = matchers[matcher]
return key in my_row and matcher_fn(my_row[key], value)
data = []
for row in rows:
if all(map(lambda kv: key_match(row, kv[0], kv[1]), kwargs.items())):
data.append(row)
return data | [
"def",
"keyword_search",
"(",
"rows",
",",
"*",
"*",
"kwargs",
")",
":",
"results",
"=",
"[",
"]",
"if",
"not",
"kwargs",
":",
"return",
"results",
"# Allows us to transform the key and do lookups like __contains and",
"# __startswith",
"matchers",
"=",
"{",
"'defau... | 41.846154 | 25.358974 |
def update_launch_config(self, scaling_group, server_name=None, image=None,
flavor=None, disk_config=None, metadata=None, personality=None,
networks=None, load_balancers=None, key_name=None, config_drive=False,
user_data=None):
"""
Updates the server launch configuration for an existing scaling group.
One or more of the available attributes can be specified.
NOTE: if you specify metadata, it will *replace* any existing metadata.
If you want to add to it, you either need to pass the complete dict of
metadata, or call the update_launch_metadata() method.
"""
return self._manager.update_launch_config(scaling_group,
server_name=server_name, image=image, flavor=flavor,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data) | [
"def",
"update_launch_config",
"(",
"self",
",",
"scaling_group",
",",
"server_name",
"=",
"None",
",",
"image",
"=",
"None",
",",
"flavor",
"=",
"None",
",",
"disk_config",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
"personality",
"=",
"None",
",",
... | 56.777778 | 25.777778 |
def spit_config(self, conf_file, firstwordonly=False):
"""conf_file a file opened for writing."""
cfg = ConfigParser.RawConfigParser()
for sec in _CONFIG_SECS:
cfg.add_section(sec)
sec = 'channels'
for i in sorted(self.pack.D):
cfg.set(sec, str(i),
self.pack.name(i, firstwordonly=firstwordonly))
sec = 'conditions'
for k in self.sorted_conkeys():
cfg.set(sec, k, self.conditions[k])
cfg.write(conf_file) | [
"def",
"spit_config",
"(",
"self",
",",
"conf_file",
",",
"firstwordonly",
"=",
"False",
")",
":",
"cfg",
"=",
"ConfigParser",
".",
"RawConfigParser",
"(",
")",
"for",
"sec",
"in",
"_CONFIG_SECS",
":",
"cfg",
".",
"add_section",
"(",
"sec",
")",
"sec",
"... | 30.117647 | 16.588235 |
def _init_file(self):
"""Initialise the file header. This will erase any data previously in the file."""
header_length = 2*SECTOR_LENGTH
if self.size > header_length:
self.file.truncate(header_length)
self.file.seek(0)
self.file.write(header_length*b'\x00')
self.size = header_length | [
"def",
"_init_file",
"(",
"self",
")",
":",
"header_length",
"=",
"2",
"*",
"SECTOR_LENGTH",
"if",
"self",
".",
"size",
">",
"header_length",
":",
"self",
".",
"file",
".",
"truncate",
"(",
"header_length",
")",
"self",
".",
"file",
".",
"seek",
"(",
"... | 42 | 7 |
def _assign_as_root(self, id_):
"""Assign an id_ a root object in the hierarchy"""
rfc = self._ras.get_relationship_form_for_create(self._phantom_root_id, id_, [])
rfc.set_display_name('Implicit Root to ' + str(id_) + ' Parent-Child Relationship')
rfc.set_description(self._relationship_type.get_display_name().get_text() + ' relationship for implicit root and child: ' + str(id_))
rfc.set_genus_type(self._relationship_type)
self._ras.create_relationship(rfc) | [
"def",
"_assign_as_root",
"(",
"self",
",",
"id_",
")",
":",
"rfc",
"=",
"self",
".",
"_ras",
".",
"get_relationship_form_for_create",
"(",
"self",
".",
"_phantom_root_id",
",",
"id_",
",",
"[",
"]",
")",
"rfc",
".",
"set_display_name",
"(",
"'Implicit Root ... | 71.714286 | 31.714286 |
def function(self, addr=None, name=None, create=False, syscall=False, plt=None):
"""
Get a function object from the function manager.
Pass either `addr` or `name` with the appropriate values.
:param int addr: Address of the function.
:param str name: Name of the function.
:param bool create: Whether to create the function or not if the function does not exist.
:param bool syscall: True to create the function as a syscall, False otherwise.
:param bool or None plt: True to find the PLT stub, False to find a non-PLT stub, None to disable this
restriction.
:return: The Function instance, or None if the function is not found and create is False.
:rtype: Function or None
"""
if addr is not None:
try:
f = self._function_map.get(addr)
if plt is None or f.is_plt == plt:
return f
except KeyError:
if create:
# the function is not found
f = self._function_map[addr]
if name is not None:
f.name = name
if syscall:
f.is_syscall=True
return f
elif name is not None:
for func in self._function_map.values():
if func.name == name:
if plt is None or func.is_plt == plt:
return func
return None | [
"def",
"function",
"(",
"self",
",",
"addr",
"=",
"None",
",",
"name",
"=",
"None",
",",
"create",
"=",
"False",
",",
"syscall",
"=",
"False",
",",
"plt",
"=",
"None",
")",
":",
"if",
"addr",
"is",
"not",
"None",
":",
"try",
":",
"f",
"=",
"sel... | 42 | 18.333333 |
def walk(self, cli):
""" Walk through children. """
yield self
for i in self.content.walk(cli):
yield i
for f in self.floats:
for i in f.content.walk(cli):
yield i | [
"def",
"walk",
"(",
"self",
",",
"cli",
")",
":",
"yield",
"self",
"for",
"i",
"in",
"self",
".",
"content",
".",
"walk",
"(",
"cli",
")",
":",
"yield",
"i",
"for",
"f",
"in",
"self",
".",
"floats",
":",
"for",
"i",
"in",
"f",
".",
"content",
... | 22.8 | 17.2 |
def parse_coordinate(string_rep):
"""
Parse a single coordinate
"""
# Any CRTF coordinate representation (sexagesimal or degrees)
if 'pix' in string_rep:
return u.Quantity(string_rep[:-3], u.dimensionless_unscaled)
if 'h' in string_rep or 'rad' in string_rep:
return coordinates.Angle(string_rep)
if len(string_rep.split('.')) >= 3:
string_rep = string_rep.replace('.', ':', 2)
return coordinates.Angle(string_rep, u.deg) | [
"def",
"parse_coordinate",
"(",
"string_rep",
")",
":",
"# Any CRTF coordinate representation (sexagesimal or degrees)",
"if",
"'pix'",
"in",
"string_rep",
":",
"return",
"u",
".",
"Quantity",
"(",
"string_rep",
"[",
":",
"-",
"3",
"]",
",",
"u",
".",
"dimensionle... | 36.428571 | 15.285714 |
def _init_hex(self, hexval: str) -> None:
""" Initialize from a hex value string. """
self.hexval = hex2termhex(fix_hex(hexval))
self.code = hex2term(self.hexval)
self.rgb = hex2rgb(self.hexval) | [
"def",
"_init_hex",
"(",
"self",
",",
"hexval",
":",
"str",
")",
"->",
"None",
":",
"self",
".",
"hexval",
"=",
"hex2termhex",
"(",
"fix_hex",
"(",
"hexval",
")",
")",
"self",
".",
"code",
"=",
"hex2term",
"(",
"self",
".",
"hexval",
")",
"self",
"... | 44.4 | 2.6 |
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return ECDSA_Curve(key)
if key not in ECDSA_Curve._member_map_:
extend_enum(ECDSA_Curve, key, default)
return ECDSA_Curve[key] | [
"def",
"get",
"(",
"key",
",",
"default",
"=",
"-",
"1",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"int",
")",
":",
"return",
"ECDSA_Curve",
"(",
"key",
")",
"if",
"key",
"not",
"in",
"ECDSA_Curve",
".",
"_member_map_",
":",
"extend_enum",
"(",
... | 38.571429 | 7.714286 |
def build_service(service_descriptor, did):
"""
Build a service.
:param service_descriptor: Tuples of length 2. The first item must be one of ServiceTypes
and the second item is a dict of parameters and values required by the service
:param did: DID, str
:return: Service
"""
assert isinstance(service_descriptor, tuple) and len(
service_descriptor) == 2, 'Unknown service descriptor format.'
service_type, kwargs = service_descriptor
if service_type == ServiceTypes.METADATA:
return ServiceFactory.build_metadata_service(
did,
kwargs['metadata'],
kwargs['serviceEndpoint']
)
elif service_type == ServiceTypes.AUTHORIZATION:
return ServiceFactory.build_authorization_service(
kwargs['serviceEndpoint']
)
elif service_type == ServiceTypes.ASSET_ACCESS:
return ServiceFactory.build_access_service(
did, kwargs['price'],
kwargs['consumeEndpoint'], kwargs['serviceEndpoint'],
kwargs['timeout'], kwargs['templateId']
)
elif service_type == ServiceTypes.CLOUD_COMPUTE:
return ServiceFactory.build_compute_service(
did, kwargs['price'],
kwargs['consumeEndpoint'], kwargs['serviceEndpoint'], kwargs['timeout']
)
raise ValueError(f'Unknown service type {service_type}') | [
"def",
"build_service",
"(",
"service_descriptor",
",",
"did",
")",
":",
"assert",
"isinstance",
"(",
"service_descriptor",
",",
"tuple",
")",
"and",
"len",
"(",
"service_descriptor",
")",
"==",
"2",
",",
"'Unknown service descriptor format.'",
"service_type",
",",
... | 39.263158 | 20.947368 |
def file_decrypt( blockchain_id, hostname, sender_blockchain_id, sender_key_id, input_path, output_path, passphrase=None, config_path=CONFIG_PATH, wallet_keys=None ):
"""
Decrypt a file from a sender's blockchain ID.
Try our current key, and then the old keys
(but warn if there are revoked keys)
Return {'status': True} on success, and write plaintext to output_path
Return {'error': ...} on failure
"""
config_dir = os.path.dirname(config_path)
decrypted = False
old_key = False
old_key_index = 0
sender_old_key_index = 0
# get the sender key
sender_key_info = file_key_lookup( sender_blockchain_id, None, None, key_id=sender_key_id, config_path=config_path, wallet_keys=wallet_keys )
if 'error' in sender_key_info:
log.error("Failed to look up sender key: %s" % sender_key_info['error'])
return {'error': 'Failed to lookup sender key'}
if 'stale_key_index' in sender_key_info.keys():
old_key = True
sender_old_key_index = sender_key_info['sender_key_index']
# try each of our keys
# current key...
key_info = file_key_lookup( blockchain_id, 0, hostname, config_path=config_path, wallet_keys=wallet_keys )
if 'error' not in key_info:
res = file_decrypt_from_key_info( sender_key_info, blockchain_id, 0, hostname, input_path, output_path, passphrase=passphrase, config_path=config_path, wallet_keys=wallet_keys )
if 'error' in res:
if not res['status']:
# permanent failure
log.error("Failed to decrypt: %s" % res['error'])
return {'error': 'Failed to decrypt'}
else:
decrypted = True
else:
# did not look up key
log.error("Failed to lookup key: %s" % key_info['error'])
if not decrypted:
# try old keys
for i in xrange(1, MAX_EXPIRED_KEYS):
res = file_decrypt_from_key_info( sender_key_info, blockchain_id, i, hostname, input_path, output_path, passphrase=passphrase, config_path=config_path, wallet_keys=wallet_keys )
if 'error' in res:
# key is not online, but don't try again
log.error("Failed to decrypt: %s" % res['error'])
return {'error': 'Failed to decrypt'}
else:
decrypted = True
old_key = True
old_key_index = i
break
if decrypted:
log.debug("Decrypted with %s.%s" % (blockchain_id, hostname))
ret = {'status': True}
if old_key:
ret['warning'] = "Used stale key"
ret['stale_key_index'] = old_key_index
ret['stale_sender_key_index'] = sender_old_key_index
return ret
else:
return {'error': 'No keys could decrypt'} | [
"def",
"file_decrypt",
"(",
"blockchain_id",
",",
"hostname",
",",
"sender_blockchain_id",
",",
"sender_key_id",
",",
"input_path",
",",
"output_path",
",",
"passphrase",
"=",
"None",
",",
"config_path",
"=",
"CONFIG_PATH",
",",
"wallet_keys",
"=",
"None",
")",
... | 39.913043 | 26.434783 |
def get_current_nodes(self, clusters):
"""
Returns two dictionaries, the current nodes and the enabled nodes.
The current_nodes dictionary is keyed off of the cluster name and
values are a list of nodes known to HAProxy.
The enabled_nodes dictionary is also keyed off of the cluster name
and values are list of *enabled* nodes, i.e. the same values as
current_nodes but limited to servers currently taking traffic.
"""
current_nodes = self.control.get_active_nodes()
enabled_nodes = collections.defaultdict(list)
for cluster in clusters:
if not cluster.nodes:
continue
if cluster.name not in current_nodes:
logger.debug(
"New cluster '%s' added, restart required.",
cluster.name
)
self.restart_required = True
for node in cluster.nodes:
if node.name not in [
current_node["svname"]
for current_node in current_nodes.get(cluster.name, [])
]:
logger.debug(
"New node added to cluster '%s', restart required.",
cluster.name
)
self.restart_required = True
enabled_nodes[cluster.name].append(node.name)
return current_nodes, enabled_nodes | [
"def",
"get_current_nodes",
"(",
"self",
",",
"clusters",
")",
":",
"current_nodes",
"=",
"self",
".",
"control",
".",
"get_active_nodes",
"(",
")",
"enabled_nodes",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"cluster",
"in",
"clusters",
... | 36.974359 | 19.589744 |
def check_X(X, n_feats=None, min_samples=1, edge_knots=None, dtypes=None,
features=None, verbose=True):
"""
tool to ensure that X:
- is 2 dimensional
- contains float-compatible data-types
- has at least min_samples
- has n_feats
- has categorical features in the right range
- is finite
Parameters
----------
X : array-like
n_feats : int. default: None
represents number of features that X should have.
not enforced if n_feats is None.
min_samples : int, default: 1
edge_knots : list of arrays, default: None
dtypes : list of strings, default: None
features : list of ints,
which features are considered by the model
verbose : bool, default: True
whether to print warnings
Returns
-------
X : array with ndims == 2 containing validated X-data
"""
# check all features are there
if bool(features):
features = flatten(features)
max_feat = max(flatten(features))
if n_feats is None:
n_feats = max_feat
n_feats = max(n_feats, max_feat)
# basic diagnostics
X = check_array(X, force_2d=True, n_feats=n_feats, min_samples=min_samples,
name='X data', verbose=verbose)
# check our categorical data has no new categories
if (edge_knots is not None) and (dtypes is not None) and (features is not None):
# get a flattened list of tuples
edge_knots = flatten(edge_knots)[::-1]
dtypes = flatten(dtypes)
assert len(edge_knots) % 2 == 0 # sanity check
# form pairs
n = len(edge_knots) // 2
edge_knots = [(edge_knots.pop(), edge_knots.pop()) for _ in range(n)]
# check each categorical term
for i, ek in enumerate(edge_knots):
dt = dtypes[i]
feature = features[i]
x = X[:, feature]
if dt == 'categorical':
min_ = ek[0]
max_ = ek[-1]
if (np.unique(x) < min_).any() or \
(np.unique(x) > max_).any():
min_ += .5
max_ -= 0.5
raise ValueError('X data is out of domain for categorical '\
'feature {}. Expected data on [{}, {}], '\
'but found data on [{}, {}]'\
.format(i, min_, max_, x.min(), x.max()))
return X | [
"def",
"check_X",
"(",
"X",
",",
"n_feats",
"=",
"None",
",",
"min_samples",
"=",
"1",
",",
"edge_knots",
"=",
"None",
",",
"dtypes",
"=",
"None",
",",
"features",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"# check all features are there",
"if"... | 32.662162 | 18.337838 |
def _hasReturnValue(self, node):
"""
Determine whether the given method or function has a return statement.
@param node: the node currently checks
"""
returnFound = False
for subnode in node.body:
if type(subnode) == node_classes.Return and subnode.value:
returnFound = True
break
return returnFound | [
"def",
"_hasReturnValue",
"(",
"self",
",",
"node",
")",
":",
"returnFound",
"=",
"False",
"for",
"subnode",
"in",
"node",
".",
"body",
":",
"if",
"type",
"(",
"subnode",
")",
"==",
"node_classes",
".",
"Return",
"and",
"subnode",
".",
"value",
":",
"r... | 32.416667 | 15.083333 |
def fast_group_adder(wires_to_add, reducer=wallace_reducer, final_adder=kogge_stone):
"""
A generalization of the carry save adder, this is designed to add many numbers
together in a both area and time efficient manner. Uses a tree reducer
to achieve this performance
:param [WireVector] wires_to_add: an array of wirevectors to add
:param reducer: the tree reducer to use
:param final_adder: The two value adder to use at the end
:return: a wirevector with the result of the addition
The length of the result is:
max(len(w) for w in wires_to_add) + ceil(len(wires_to_add))
"""
import math
longest_wire_len = max(len(w) for w in wires_to_add)
result_bitwidth = longest_wire_len + int(math.ceil(math.log(len(wires_to_add), 2)))
bits = [[] for i in range(longest_wire_len)]
for wire in wires_to_add:
for bit_loc, bit in enumerate(wire):
bits[bit_loc].append(bit)
return reducer(bits, result_bitwidth, final_adder) | [
"def",
"fast_group_adder",
"(",
"wires_to_add",
",",
"reducer",
"=",
"wallace_reducer",
",",
"final_adder",
"=",
"kogge_stone",
")",
":",
"import",
"math",
"longest_wire_len",
"=",
"max",
"(",
"len",
"(",
"w",
")",
"for",
"w",
"in",
"wires_to_add",
")",
"res... | 39.36 | 22.32 |
def _getAuth(self):
"""
Main step in authorizing with Reader.
Sends request to Google ClientAuthMethod URL which returns an Auth token.
Returns Auth token or raises IOError on error.
"""
parameters = {
'service' : 'reader',
'Email' : self.username,
'Passwd' : self.password,
'accountType' : 'GOOGLE'}
req = requests.post(ClientAuthMethod.CLIENT_URL, data=parameters)
if req.status_code != 200:
raise IOError("Error getting the Auth token, have you entered a"
"correct username and password?")
data = req.text
#Strip newline and non token text.
token_dict = dict(x.split('=') for x in data.split('\n') if x)
return token_dict["Auth"] | [
"def",
"_getAuth",
"(",
"self",
")",
":",
"parameters",
"=",
"{",
"'service'",
":",
"'reader'",
",",
"'Email'",
":",
"self",
".",
"username",
",",
"'Passwd'",
":",
"self",
".",
"password",
",",
"'accountType'",
":",
"'GOOGLE'",
"}",
"req",
"=",
"requests... | 40.25 | 14.65 |
def use_session(self, session_id):
"""
Use the specified lightning session.
Specify a lightning session by id number. Check the number of an existing
session in the attribute lightning.session.id.
"""
self.session = Session(lgn=self, id=session_id)
return self.session | [
"def",
"use_session",
"(",
"self",
",",
"session_id",
")",
":",
"self",
".",
"session",
"=",
"Session",
"(",
"lgn",
"=",
"self",
",",
"id",
"=",
"session_id",
")",
"return",
"self",
".",
"session"
] | 35.222222 | 14.777778 |
def from_json(j):
"""
load an nparray object from a json-formatted string
@parameter str j: json-formatted string
"""
if isinstance(j, dict):
return from_dict(j)
if not (isinstance(j, str) or isinstance(j, unicode)):
raise TypeError("argument must be of type str")
return from_dict(json.loads(j)) | [
"def",
"from_json",
"(",
"j",
")",
":",
"if",
"isinstance",
"(",
"j",
",",
"dict",
")",
":",
"return",
"from_dict",
"(",
"j",
")",
"if",
"not",
"(",
"isinstance",
"(",
"j",
",",
"str",
")",
"or",
"isinstance",
"(",
"j",
",",
"unicode",
")",
")",
... | 25.461538 | 17.307692 |
def _parse_type(self, element, types):
"""Parse a 'complexType' element.
@param element: The top-level complexType element
@param types: A map of the elements of all available complexType's.
@return: The schema for the complexType.
"""
name = element.attrib["name"]
type = element.attrib["type"]
if not type.startswith("tns:"):
raise RuntimeError("Unexpected element type %s" % type)
type = type[4:]
[children] = types[type][0]
types[type][1] = True
self._remove_namespace_from_tag(children)
if children.tag not in ("sequence", "choice"):
raise RuntimeError("Unexpected children type %s" % children.tag)
if children[0].attrib["name"] == "item":
schema = SequenceSchema(name)
else:
schema = NodeSchema(name)
for child in children:
self._remove_namespace_from_tag(child)
if child.tag == "element":
name, type, min_occurs, max_occurs = self._parse_child(child)
if type in self.leaf_types:
if max_occurs != 1:
raise RuntimeError("Unexpected max value for leaf")
if not isinstance(schema, NodeSchema):
raise RuntimeError("Attempt to add leaf to a non-node")
schema.add(LeafSchema(name), min_occurs=min_occurs)
else:
if name == "item": # sequence
if not isinstance(schema, SequenceSchema):
raise RuntimeError("Attempt to set child for "
"non-sequence")
schema.set(self._parse_type(child, types),
min_occurs=min_occurs,
max_occurs=max_occurs)
else:
if max_occurs != 1:
raise RuntimeError("Unexpected max for node")
if not isinstance(schema, NodeSchema):
raise RuntimeError("Unexpected schema type")
schema.add(self._parse_type(child, types),
min_occurs=min_occurs)
elif child.tag == "choice":
pass
else:
raise RuntimeError("Unexpected child type")
return schema | [
"def",
"_parse_type",
"(",
"self",
",",
"element",
",",
"types",
")",
":",
"name",
"=",
"element",
".",
"attrib",
"[",
"\"name\"",
"]",
"type",
"=",
"element",
".",
"attrib",
"[",
"\"type\"",
"]",
"if",
"not",
"type",
".",
"startswith",
"(",
"\"tns:\""... | 44.036364 | 18.436364 |
def emit(self):
"""We are finished processing one element. Emit it"""
self.count += 1
# event_name = 'on_{0}'.format(self.context.subcategory.lower())
event_name = self.context.subcategory
if hasattr(self.handler, event_name):
getattr(self.handler, event_name)(self.context)
elif hasattr(self.handler, 'default'):
self.handler.default(self.context) | [
"def",
"emit",
"(",
"self",
")",
":",
"self",
".",
"count",
"+=",
"1",
"# event_name = 'on_{0}'.format(self.context.subcategory.lower())",
"event_name",
"=",
"self",
".",
"context",
".",
"subcategory",
"if",
"hasattr",
"(",
"self",
".",
"handler",
",",
"event_name... | 37.454545 | 17.727273 |
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args) | [
"def",
"with_context",
"(",
"self",
",",
"required_by",
")",
":",
"if",
"not",
"required_by",
":",
"return",
"self",
"args",
"=",
"self",
".",
"args",
"+",
"(",
"required_by",
",",
")",
"return",
"ContextualVersionConflict",
"(",
"*",
"args",
")"
] | 33.444444 | 8.777778 |
def get_value(self, field, quick):
# type: (Field, bool) -> Any
""" Ask user the question represented by this instance.
Args:
field (Field):
The field we're asking the user to provide the value for.
quick (bool):
Enable quick mode. In quick mode, the form will reduce the
number of question asked by using defaults wherever possible.
This can greatly reduce the number of interactions required on
the user part, but will obviously limit the user choices. This
should probably be enabled only by a specific user action
(like passing a ``--quick`` flag etc.).
Returns:
The user response converted to a python type using the
:py:attr:`cliform.core.Field.type` converter.
"""
if callable(field.default):
default = field.default(self)
else:
default = field.default
if quick and default is not None:
return default
shell.cprint('<90>{}', field.help)
while True:
try:
answer = click.prompt(field.pretty_prompt, default=default)
return field.type(answer)
except ValueError:
shell.cprint("<31>Unsupported value") | [
"def",
"get_value",
"(",
"self",
",",
"field",
",",
"quick",
")",
":",
"# type: (Field, bool) -> Any",
"if",
"callable",
"(",
"field",
".",
"default",
")",
":",
"default",
"=",
"field",
".",
"default",
"(",
"self",
")",
"else",
":",
"default",
"=",
"fiel... | 37.771429 | 20.571429 |
def validate_type(prop, value, expected):
""" Default validation for all types """
# Validate on expected type(s), but ignore None: defaults handled elsewhere
if value is not None and not isinstance(value, expected):
_validation_error(prop, type(value).__name__, None, expected) | [
"def",
"validate_type",
"(",
"prop",
",",
"value",
",",
"expected",
")",
":",
"# Validate on expected type(s), but ignore None: defaults handled elsewhere",
"if",
"value",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"value",
",",
"expected",
")",
":",
"_va... | 49 | 21.666667 |
def list_projects(self, entity=None):
"""Lists projects in W&B scoped by entity.
Args:
entity (str, optional): The entity to scope this project to.
Returns:
[{"id","name","description"}]
"""
query = gql('''
query Models($entity: String!) {
models(first: 10, entityName: $entity) {
edges {
node {
id
name
description
}
}
}
}
''')
return self._flatten_edges(self.gql(query, variable_values={
'entity': entity or self.settings('entity')})['models']) | [
"def",
"list_projects",
"(",
"self",
",",
"entity",
"=",
"None",
")",
":",
"query",
"=",
"gql",
"(",
"'''\n query Models($entity: String!) {\n models(first: 10, entityName: $entity) {\n edges {\n node {\n id\n ... | 29.375 | 18.583333 |
def dic(self):
r""" Returns the corrected Deviance Information Criterion (DIC) for all chains loaded into ChainConsumer.
If a chain does not have a posterior, this method will return `None` for that chain. **Note that
the DIC metric is only valid on posterior surfaces which closely resemble multivariate normals!**
Formally, we follow Liddle (2007) and first define *Bayesian complexity* as
.. math::
p_D = \bar{D}(\theta) - D(\bar{\theta}),
where :math:`D(\theta) = -2\ln(P(\theta)) + C` is the deviance, where :math:`P` is the posterior
and :math:`C` a constant. From here the DIC is defined as
.. math::
DIC \equiv D(\bar{\theta}) + 2p_D = \bar{D}(\theta) + p_D.
Returns
-------
list[float]
A list of all the DIC values - one per chain, in the order in which the chains were added.
References
----------
[1] Andrew R. Liddle, "Information criteria for astrophysical model selection", MNRAS (2007)
"""
dics = []
dics_bool = []
for i, chain in enumerate(self.parent.chains):
p = chain.posterior
if p is None:
dics_bool.append(False)
self._logger.warn("You need to set the posterior for chain %s to get the DIC" % chain.name)
else:
dics_bool.append(True)
num_params = chain.chain.shape[1]
means = np.array([np.average(chain.chain[:, ii], weights=chain.weights) for ii in range(num_params)])
d = -2 * p
d_of_mean = griddata(chain.chain, d, means, method='nearest')[0]
mean_d = np.average(d, weights=chain.weights)
p_d = mean_d - d_of_mean
dic = mean_d + p_d
dics.append(dic)
if len(dics) > 0:
dics -= np.min(dics)
dics_fin = []
i = 0
for b in dics_bool:
if not b:
dics_fin.append(None)
else:
dics_fin.append(dics[i])
i += 1
return dics_fin | [
"def",
"dic",
"(",
"self",
")",
":",
"dics",
"=",
"[",
"]",
"dics_bool",
"=",
"[",
"]",
"for",
"i",
",",
"chain",
"in",
"enumerate",
"(",
"self",
".",
"parent",
".",
"chains",
")",
":",
"p",
"=",
"chain",
".",
"posterior",
"if",
"p",
"is",
"Non... | 39.792453 | 25.679245 |
def reparent(self, other, name):
"""
Remove :meth:`get_toplevel` from any current parent and add it to
*other[name]*.
"""
# http://developer.gnome.org/gtk-faq/stable/x635.html
# warns against reparent()
old = self.toplevel.get_parent()
if old:
old.remove(self.toplevel)
new = other[name]
new.add(self.toplevel) | [
"def",
"reparent",
"(",
"self",
",",
"other",
",",
"name",
")",
":",
"# http://developer.gnome.org/gtk-faq/stable/x635.html",
"# warns against reparent()",
"old",
"=",
"self",
".",
"toplevel",
".",
"get_parent",
"(",
")",
"if",
"old",
":",
"old",
".",
"remove",
... | 32.583333 | 11.583333 |
def polar(x, y, deg=0): # radian if deg=0; degree if deg=1
""" Convert from rectangular (x,y) to polar (r,w)
r = sqrt(x^2 + y^2)
w = arctan(y/x) = [-\pi,\pi] = [-180,180]
"""
if deg:
return hypot(x, y), 180.0 * atan2(y, x) / pi
else:
return hypot(x, y), atan2(y, x) | [
"def",
"polar",
"(",
"x",
",",
"y",
",",
"deg",
"=",
"0",
")",
":",
"# radian if deg=0; degree if deg=1",
"if",
"deg",
":",
"return",
"hypot",
"(",
"x",
",",
"y",
")",
",",
"180.0",
"*",
"atan2",
"(",
"y",
",",
"x",
")",
"/",
"pi",
"else",
":",
... | 28.363636 | 18.181818 |
def strip_db_antsignal(self, idx):
"""strip(1 byte) radiotap.db_antsignal
:return: int
idx
:return: int
"""
db_antsignal, = struct.unpack_from('<B', self._rtap, idx)
return idx + 1, db_antsignal | [
"def",
"strip_db_antsignal",
"(",
"self",
",",
"idx",
")",
":",
"db_antsignal",
",",
"=",
"struct",
".",
"unpack_from",
"(",
"'<B'",
",",
"self",
".",
"_rtap",
",",
"idx",
")",
"return",
"idx",
"+",
"1",
",",
"db_antsignal"
] | 30.875 | 12.5 |
def tempo_account_get_all_account_by_customer_id(self, customer_id):
"""
Get un-archived Accounts by customer. The Caller must have the Browse Account permission for the Account.
:param customer_id: the Customer id.
:return:
"""
url = 'rest/tempo-accounts/1/account/customer/{customerId}/'.format(customerId=customer_id)
return self.get(url) | [
"def",
"tempo_account_get_all_account_by_customer_id",
"(",
"self",
",",
"customer_id",
")",
":",
"url",
"=",
"'rest/tempo-accounts/1/account/customer/{customerId}/'",
".",
"format",
"(",
"customerId",
"=",
"customer_id",
")",
"return",
"self",
".",
"get",
"(",
"url",
... | 48.75 | 25 |
def _add_timedeltalike_scalar(self, other):
"""
Parameters
----------
other : timedelta, Tick, np.timedelta64
Returns
-------
result : ndarray[int64]
"""
assert isinstance(self.freq, Tick) # checked by calling function
assert isinstance(other, (timedelta, np.timedelta64, Tick))
if notna(other):
# special handling for np.timedelta64("NaT"), avoid calling
# _check_timedeltalike_freq_compat as that would raise TypeError
other = self._check_timedeltalike_freq_compat(other)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
ordinals = super()._add_timedeltalike_scalar(other)
return ordinals | [
"def",
"_add_timedeltalike_scalar",
"(",
"self",
",",
"other",
")",
":",
"assert",
"isinstance",
"(",
"self",
".",
"freq",
",",
"Tick",
")",
"# checked by calling function",
"assert",
"isinstance",
"(",
"other",
",",
"(",
"timedelta",
",",
"np",
".",
"timedelt... | 38.304348 | 23.086957 |
def crude_tokenizer(line):
"""This is a very crude tokenizer from pynlpl"""
tokens = []
buffer = ''
for c in line.strip():
if c == ' ' or c in string.punctuation:
if buffer:
tokens.append(buffer)
buffer = ''
else:
buffer += c
if buffer: tokens.append(buffer)
return tokens | [
"def",
"crude_tokenizer",
"(",
"line",
")",
":",
"tokens",
"=",
"[",
"]",
"buffer",
"=",
"''",
"for",
"c",
"in",
"line",
".",
"strip",
"(",
")",
":",
"if",
"c",
"==",
"' '",
"or",
"c",
"in",
"string",
".",
"punctuation",
":",
"if",
"buffer",
":",... | 27.384615 | 14.615385 |
def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
y : Ignored
not used, present here for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
The weights for each observation in X. If None, all observations
are assigned equal weight (default: None)
"""
if self.normalize:
X = normalize(X)
random_state = check_random_state(self.random_state)
# TODO: add check that all data is unit-normalized
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = spherical_k_means(
X,
n_clusters=self.n_clusters,
sample_weight=sample_weight,
init=self.init,
n_init=self.n_init,
max_iter=self.max_iter,
verbose=self.verbose,
tol=self.tol,
random_state=random_state,
copy_x=self.copy_x,
n_jobs=self.n_jobs,
return_n_iter=True,
)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"sample_weight",
"=",
"None",
")",
":",
"if",
"self",
".",
"normalize",
":",
"X",
"=",
"normalize",
"(",
"X",
")",
"random_state",
"=",
"check_random_state",
"(",
"self",
".",
"random_s... | 30.236842 | 20.947368 |
def get_fragment_language() -> ParserElement:
"""Build a protein fragment parser."""
_fragment_value_inner = fragment_range | missing_fragment(FRAGMENT_MISSING)
_fragment_value = _fragment_value_inner | And([Suppress('"'), _fragment_value_inner, Suppress('"')])
parser_element = fragment_tag + nest(_fragment_value + Optional(WCW + quote(FRAGMENT_DESCRIPTION)))
return parser_element | [
"def",
"get_fragment_language",
"(",
")",
"->",
"ParserElement",
":",
"_fragment_value_inner",
"=",
"fragment_range",
"|",
"missing_fragment",
"(",
"FRAGMENT_MISSING",
")",
"_fragment_value",
"=",
"_fragment_value_inner",
"|",
"And",
"(",
"[",
"Suppress",
"(",
"'\"'",... | 66.333333 | 31 |
def __fetch_question(self, question):
    """Fetch the HTML page(s) composing an Askbot question body.

    Requests successive pages of the question until the parser reports
    that the last page has been reached.

    :param question: item with the question itself
    :returns: a list of HTML page/s for the question
    """
    pages = []
    page_number = 1
    while True:
        try:
            html_page = self.client.get_html_question(question['id'], page_number)
            pages.append(html_page)
            total_pages = self.ab_parser.parse_number_of_html_pages(html_page)
            if page_number == total_pages:
                break
            page_number += 1
        except requests.exceptions.TooManyRedirects as e:
            # Best effort: log and return whatever pages were collected.
            logger.warning("%s, data not retrieved for question %s", e, question['id'])
            break
    return pages
"def",
"__fetch_question",
"(",
"self",
",",
"question",
")",
":",
"html_question_items",
"=",
"[",
"]",
"npages",
"=",
"1",
"next_request",
"=",
"True",
"while",
"next_request",
":",
"try",
":",
"html_question",
"=",
"self",
".",
"client",
".",
"get_html_qu... | 32.966667 | 21.866667 |
def get_relations(self, database, schema):
    """Case-insensitively yield all relations matching the given schema.

    :param str database: The case-insensitive database name to list from.
    :param str schema: The case-insensitive schema name to list from.
    :return List[BaseRelation]: The list of relations with the given
        schema
    """
    # Lower-case both names once up front; the previous code re-applied
    # _lower(schema) and recomputed _lower(database) for every cached
    # relation inside the comprehension.
    schema = _lower(schema)
    database = _lower(database)
    with self.lock:
        results = [
            r.inner for r in self.relations.values()
            if r.schema == schema and r.database == database
        ]
    if None in results:
        dbt.exceptions.raise_cache_inconsistent(
            'in get_relations, a None relation was found in the cache!'
        )
    return results
return results | [
"def",
"get_relations",
"(",
"self",
",",
"database",
",",
"schema",
")",
":",
"schema",
"=",
"_lower",
"(",
"schema",
")",
"with",
"self",
".",
"lock",
":",
"results",
"=",
"[",
"r",
".",
"inner",
"for",
"r",
"in",
"self",
".",
"relations",
".",
"... | 36.4 | 19.05 |
def setup(app):
    """Register the extension with Sphinx.

    Args:
        app: The Sphinx application.
    """
    # ``items()`` works on both Python 2 and 3; the previous
    # ``iteritems()`` call is Python-2-only and raises AttributeError
    # under Python 3 Sphinx.
    for name, (default, rebuild, _) in ref.CONFIG_VALUES.items():
        app.add_config_value(name, default, rebuild)
    app.add_directive('javaimport', ref.JavarefImportDirective)
    app.add_role('javaref', ref.JavarefRole(app))
    app.connect('builder-inited', initialize_env)
    app.connect('env-purge-doc', ref.purge_imports)
    app.connect('env-merge-info', ref.merge_imports)
    app.connect('build-finished', ref.cleanup)
"def",
"setup",
"(",
"app",
")",
":",
"for",
"name",
",",
"(",
"default",
",",
"rebuild",
",",
"_",
")",
"in",
"ref",
".",
"CONFIG_VALUES",
".",
"iteritems",
"(",
")",
":",
"app",
".",
"add_config_value",
"(",
"name",
",",
"default",
",",
"rebuild",
... | 31.764706 | 19.470588 |
def set_drop_target(obj, root, designer, inspector):
    """Recursively create and attach drop targets for obj and its children."""
    if obj._meta.container:
        obj.drop_target = ToolBoxDropTarget(obj, root, designer=designer,
                                            inspector=inspector)
    for child in obj:
        set_drop_target(child, root, designer, inspector)
"def",
"set_drop_target",
"(",
"obj",
",",
"root",
",",
"designer",
",",
"inspector",
")",
":",
"if",
"obj",
".",
"_meta",
".",
"container",
":",
"dt",
"=",
"ToolBoxDropTarget",
"(",
"obj",
",",
"root",
",",
"designer",
"=",
"designer",
",",
"inspector",... | 46.875 | 17.875 |
def load(self, **kwargs):
    """Override load to retrieve object based on exists above.

    Raises NonExtantVirtualPolicy when the named policy is absent;
    otherwise dispatches to the TMOS-version-specific loader.
    """
    tmos_version = self._meta_data['bigip']._meta_data['tmos_version']
    exists = self._check_existence_by_collection(
        self._meta_data['container'], kwargs['name'])
    if not exists:
        raise NonExtantVirtualPolicy(
            'The Policy named, {}, does not exist on the device.'.format(
                kwargs['name']))
    if LooseVersion(tmos_version) == LooseVersion('11.5.4'):
        return self._load_11_5_4(**kwargs)
    return self._load(**kwargs)
"def",
"load",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"tmos_v",
"=",
"self",
".",
"_meta_data",
"[",
"'bigip'",
"]",
".",
"_meta_data",
"[",
"'tmos_version'",
"]",
"if",
"self",
".",
"_check_existence_by_collection",
"(",
"self",
".",
"_meta_data"... | 48.833333 | 14.916667 |
def choose_font(self, font=None):
    """Choose a font for the label through a dialog.

    When no font is given, a QFontDialog is shown (seeded with the
    current font if one exists); cancelling the dialog aborts without
    touching the formatoption.
    """
    parent_widget = self.parent()
    if font is None:
        if self.current_font:
            font, accepted = QFontDialog.getFont(
                self.current_font, parent_widget,
                'Select %s font' % self.fmto_name,
                QFontDialog.DontUseNativeDialog)
        else:
            font, accepted = QFontDialog.getFont(parent_widget)
        if not accepted:
            return
    self.current_font = font
    props = self.load_properties()
    props.update(self.qfont_to_artist_props(font))
    parent_widget.set_obj(props)
    self.refresh()
"def",
"choose_font",
"(",
"self",
",",
"font",
"=",
"None",
")",
":",
"fmt_widget",
"=",
"self",
".",
"parent",
"(",
")",
"if",
"font",
"is",
"None",
":",
"if",
"self",
".",
"current_font",
":",
"font",
",",
"ok",
"=",
"QFontDialog",
".",
"getFont",... | 38.777778 | 11.444444 |
def _insert_common_sphinx_configs(c, *, project_name):
"""Add common core Sphinx configurations to the state.
"""
c['project'] = project_name
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
c['source_suffix'] = '.rst'
# The encoding of source files.
c['source_encoding'] = 'utf-8-sig'
# The master toctree document.
c['master_doc'] = 'index'
# Configure figure numbering
c['numfig'] = True
c['numfig_format'] = {'figure': 'Figure %s',
'table': 'Table %s',
'code-block': 'Listing %s'}
# The reST default role (used for this markup: `text`)
c['default_role'] = 'obj'
# This is added to the end of RST files - a good place to put substitutions
# to be used globally.
c['rst_epilog'] = """
.. _Astropy: http://astropy.org
"""
# A list of warning types to suppress arbitrary warning messages. We mean
# to override directives in
# astropy_helpers.sphinx.ext.autodoc_enhancements, thus need to ignore
# those warning. This can be removed once the patch gets released in
# upstream Sphinx (https://github.com/sphinx-doc/sphinx/pull/1843).
# Suppress the warnings requires Sphinx v1.4.2
c['suppress_warnings'] = ['app.add_directive', ]
return c | [
"def",
"_insert_common_sphinx_configs",
"(",
"c",
",",
"*",
",",
"project_name",
")",
":",
"c",
"[",
"'project'",
"]",
"=",
"project_name",
"# The suffix(es) of source filenames.",
"# You can specify multiple suffix as a list of string:",
"c",
"[",
"'source_suffix'",
"]",
... | 33.615385 | 18.769231 |
def path_from_keywords(keywords,into='path'):
    '''
    Turn keyword pairs into a filesystem path or a single filename.

    :param keywords: either a set of property names, or a mapping of
        property name -> value; entries are emitted in sorted order.
    :param into: if ``into=='path'`` the keywords form a directory
        hierarchy (joined with the OS path separator); otherwise they are
        joined by underscores into one filename.
    :return: the assembled path or filename string.
    '''
    subdirs = []
    def prepare_string(s):
        # Sanitize one component: stringify, then replace characters that
        # are awkward in shells (brackets, braces, comma, '*', quotes and
        # the OS path separator) with underscores.
        s = str(s)
        s = re.sub('[][{},*"'+f"'{os.sep}]",'_',s)#replace characters that make bash life difficult by underscore
        if into=='file':
            s = s.replace('_', ' ')#Remove underscore because they will be used as separator
            if ' ' in s:
                # CamelCase multi-word components so the final filename
                # contains neither spaces nor underscores.
                s = s.title()
                s = s.replace(' ','')
        return s
    if isinstance(keywords,set):
        # Bare property names only -- no values to encode.
        keywords_list = sorted(keywords)
        for property in keywords_list:
            subdirs.append(prepare_string(property))
    else:
        keywords_list = sorted(keywords.items())
        for property,value in keywords_list: # @reservedassignment
            if Bool.valid(value):
                # Booleans encode as the bare property name, prefixed with
                # 'not_' (path mode) or 'not' (file mode) when False.
                subdirs.append(('' if value else ('not_' if into=='path' else 'not'))+prepare_string(property))
            #elif String.valid(value):
            #    subdirs.append(prepare_string(value))
            elif (Float|Integer).valid(value):
                # Numeric values are appended directly to the property name.
                subdirs.append('{}{}'.format(prepare_string(property),prepare_string(value)))
            else:
                # Other values are separated from the name by '_' in path mode.
                subdirs.append('{}{}{}'.format(prepare_string(property),'_' if into == 'path' else '',prepare_string(value)))
    if into == 'path':
        out = os.path.join(*subdirs)
    else:
        out = '_'.join(subdirs)
    return out
"def",
"path_from_keywords",
"(",
"keywords",
",",
"into",
"=",
"'path'",
")",
":",
"subdirs",
"=",
"[",
"]",
"def",
"prepare_string",
"(",
"s",
")",
":",
"s",
"=",
"str",
"(",
"s",
")",
"s",
"=",
"re",
".",
"sub",
"(",
"'[][{},*\"'",
"+",
"f\"'{os... | 42.444444 | 24.944444 |
async def handle_frame(self, frame):
    """Handle incoming API frame, return True if this was the expected frame."""
    if not isinstance(frame, FramePasswordEnterConfirmation):
        return False
    status = frame.status
    if status == PasswordEnterConfirmationStatus.SUCCESSFUL:
        self.success = True
    elif status == PasswordEnterConfirmationStatus.FAILED:
        # Only the first two characters of the password are logged.
        PYVLXLOG.warning('Failed to authenticate with password "%s****"', self.password[:2])
        self.success = False
    return True
"async",
"def",
"handle_frame",
"(",
"self",
",",
"frame",
")",
":",
"if",
"not",
"isinstance",
"(",
"frame",
",",
"FramePasswordEnterConfirmation",
")",
":",
"return",
"False",
"if",
"frame",
".",
"status",
"==",
"PasswordEnterConfirmationStatus",
".",
"FAILED"... | 52.3 | 19.5 |
def get_thumbnail(file_, name):
    """Resolve a named alias from THUMBNAIL_OPTIONS_DICT and delegate.

    Looks up the alias, pops its geometry, and forwards the remaining
    options to the original get_thumbnail implementation.
    """
    # Copy so popping 'geometry' does not mutate the shared settings dict.
    alias_options = copy(settings.OPTIONS_DICT[name])
    geometry = alias_options.pop('geometry')
    return original_get_thumbnail(file_, geometry, **alias_options)
"def",
"get_thumbnail",
"(",
"file_",
",",
"name",
")",
":",
"options",
"=",
"settings",
".",
"OPTIONS_DICT",
"[",
"name",
"]",
"opt",
"=",
"copy",
"(",
"options",
")",
"geometry",
"=",
"opt",
".",
"pop",
"(",
"'geometry'",
")",
"return",
"original_get_t... | 30.888889 | 14.222222 |
def _float(text):
"""Fonction to convert the 'decimal point assumed' format of TLE to actual
float
>>> _float('0000+0')
0.0
>>> _float('+0000+0')
0.0
>>> _float('34473-3')
0.00034473
>>> _float('-60129-4')
-6.0129e-05
>>> _float('+45871-4')
4.5871e-05
"""
text = text.strip()
if text[0] in ('-', '+'):
text = "%s.%s" % (text[0], text[1:])
else:
text = "+.%s" % text
if "+" in text[1:] or "-" in text[1:]:
value, exp_sign, expo = text.rpartition('+') if '+' in text[1:] else text.rpartition('-')
v = float('{value}e{exp_sign}{expo}'.format(value=value, exp_sign=exp_sign, expo=expo))
else:
v = float(text)
return v | [
"def",
"_float",
"(",
"text",
")",
":",
"text",
"=",
"text",
".",
"strip",
"(",
")",
"if",
"text",
"[",
"0",
"]",
"in",
"(",
"'-'",
",",
"'+'",
")",
":",
"text",
"=",
"\"%s.%s\"",
"%",
"(",
"text",
"[",
"0",
"]",
",",
"text",
"[",
"1",
":",... | 22.741935 | 25.032258 |
def _generate_dockerfile(base_image, layers):
    """
    Generate the Dockerfile contents
    A generated Dockerfile will look like the following:
    ```
    FROM lambci/lambda:python3.6
    ADD --chown=sbx_user1051:495 layer1 /opt
    ADD --chown=sbx_user1051:495 layer2 /opt
    ```
    Parameters
    ----------
    base_image str
        Base Image to use for the new image
    layers list(samcli.commands.local.lib.provider.Layer)
        List of Layers to be use to mount in the image
    Returns
    -------
    str
        String representing the Dockerfile contents for the image
    """
    lines = ["FROM {}\n".format(base_image)]
    for layer in layers:
        lines.append("ADD --chown=sbx_user1051:495 {} {}\n".format(
            layer.name, LambdaImage._LAYERS_DIR))
    return "".join(lines)
"def",
"_generate_dockerfile",
"(",
"base_image",
",",
"layers",
")",
":",
"dockerfile_content",
"=",
"\"FROM {}\\n\"",
".",
"format",
"(",
"base_image",
")",
"for",
"layer",
"in",
"layers",
":",
"dockerfile_content",
"=",
"dockerfile_content",
"+",
"\"ADD --chown=s... | 30.483871 | 22.16129 |
def getComic(number, silent=True):
    """ Produces a :class:`Comic` object with index equal to the provided argument.
    Prints an error in the event of a failure (i.e. the number is less than zero
    or greater than the latest comic number) and returns an empty Comic object.
    Arguments:
    an integer or string that represents a number, "number", that is the index of the comic in question.
    silent: boolean, defaults to True. If set to False, an error will be printed
    to standard output should the provided integer argument not be valid.
    Returns the resulting Comic object for the provided index if successful,
    or a Comic object with -1 as the index if not."""
    numComics = getLatestComicNum()
    if isinstance(number, str):
        if number.isdigit():
            number = int(number)
        else:
            # Previously a non-numeric string fell through to the range
            # check and raised TypeError on Python 3 (str > int); treat
            # it as an invalid request instead.
            if not silent:
                print("Error: You have requested an invalid comic.")
            return Comic(-1)
    if number > numComics or number <= 0:
        if not silent:
            print("Error: You have requested an invalid comic.")
        return Comic(-1)
    return Comic(number)
"def",
"getComic",
"(",
"number",
",",
"silent",
"=",
"True",
")",
":",
"numComics",
"=",
"getLatestComicNum",
"(",
")",
"if",
"type",
"(",
"number",
")",
"is",
"str",
"and",
"number",
".",
"isdigit",
"(",
")",
":",
"number",
"=",
"int",
"(",
"number... | 41.272727 | 24.954545 |
def max_lemma_count(ambiguous_word: str) -> "wn.Synset":
    """
    Returns the sense with the highest lemma_name count.
    The max_lemma_count() can be treated as a rough gauge for the
    Most Frequent Sense (MFS), if no other sense annotated corpus is available.
    NOTE: The lemma counts are from the Brown Corpus
    :param ambiguous_word: String, a single word.
    :return: The estimated most common Synset.
    """
    lemma_totals = {
        synset: sum(lemma.count() for lemma in synset.lemmas())
        for synset in wn.synsets(ambiguous_word, pos=None)
    }
    return max(lemma_totals, key=lemma_totals.get)
"def",
"max_lemma_count",
"(",
"ambiguous_word",
":",
"str",
")",
"->",
"\"wn.Synset\"",
":",
"sense2lemmacounts",
"=",
"{",
"}",
"for",
"i",
"in",
"wn",
".",
"synsets",
"(",
"ambiguous_word",
",",
"pos",
"=",
"None",
")",
":",
"sense2lemmacounts",
"[",
"i... | 44.142857 | 16.571429 |
def add(self, snapshot, distributions, component='main', storage=""):
    """ Add mirror or repo to publish """
    for distribution in distributions:
        publish = self.publish(distribution, storage=storage)
        publish.add(snapshot, component)
"def",
"add",
"(",
"self",
",",
"snapshot",
",",
"distributions",
",",
"component",
"=",
"'main'",
",",
"storage",
"=",
"\"\"",
")",
":",
"for",
"dist",
"in",
"distributions",
":",
"self",
".",
"publish",
"(",
"dist",
",",
"storage",
"=",
"storage",
")... | 55 | 16.75 |
def to_string(self):
    '''
    API: to_string(self)
    Description:
        This method is based on pydot Graph class with the same name.
        Returns a string representation of the graph in dot language.
        It will return the graph and all its subelements in string form.
    Return:
        String that represents graph in dot language.
    '''
    graph = list()
    # Edges emitted as part of a cluster are recorded here so the final
    # edge pass does not emit them a second time.
    processed_edges = {}
    graph.append('%s %s {\n' %(self.graph_type, self.name))
    # Emit graph-level attributes (only those recognized in dot).
    for a in self.attr:
        if a not in GRAPH_ATTRIBUTES:
            continue
        val = self.attr[a]
        if val is not None:
            graph.append( '%s=%s' % (a, quote_if_necessary(val)) )
        else:
            # Attribute without a value is emitted as a bare name.
            graph.append(a)
        graph.append( ';\n' )
    # clusters
    for c in self.cluster:
        graph.append('subgraph cluster_%s {\n' %c)
        for a in self.cluster[c]['attrs']:
            if a=='label':
                # Labels may need quoting; other attrs are emitted verbatim.
                graph.append(a+'='+quote_if_necessary(self.cluster[c]['attrs'][a])+';\n')
                continue
            graph.append(a+'='+self.cluster[c]['attrs'][a]+';\n')
        if len(self.cluster[c]['node_attrs'])!=0:
            graph.append('node [')
        for a in self.cluster[c]['node_attrs']:
            graph.append(a+'='+self.cluster[c]['node_attrs'][a])
            graph.append(',')
        if len(self.cluster[c]['node_attrs'])!=0:
            # Drop the trailing comma appended by the loop above.
            graph.pop()
            graph.append('];\n')
        # process cluster nodes
        for n in self.cluster[c]['node_list']:
            data = self.get_node(n).to_string()
            graph.append(data + ';\n')
        # process cluster edges (both directions between member nodes)
        for n in self.cluster[c]['node_list']:
            for m in self.cluster[c]['node_list']:
                if self.check_edge(n,m):
                    data = self.edge_to_string((n,m))
                    graph.append(data + ';\n')
                    processed_edges[(n,m)]=None
        graph.append('}\n')
    # process remaining (non-cluster) nodes
    for n in self.neighbors:
        for c in self.cluster:
            if n in self.cluster[c]['node_list']:
                break
        else:
            # for/else: only reached when the node is in no cluster.
            data = self.get_node(n).to_string()
            graph.append(data + ';\n')
    # process edges not already emitted inside a cluster
    for e in self.edge_attr:
        if e in processed_edges:
            continue
        data = self.edge_to_string(e)
        graph.append(data + ';\n')
    graph.append( '}\n' )
    return ''.join(graph)
return ''.join(graph) | [
"def",
"to_string",
"(",
"self",
")",
":",
"graph",
"=",
"list",
"(",
")",
"processed_edges",
"=",
"{",
"}",
"graph",
".",
"append",
"(",
"'%s %s {\\n'",
"%",
"(",
"self",
".",
"graph_type",
",",
"self",
".",
"name",
")",
")",
"for",
"a",
"in",
"se... | 39.681818 | 13.984848 |
def load(self, data):
    """
    Load an image that was previously saved using
    :py:meth:`~docker.models.images.Image.save` (or ``docker save``).
    Similar to ``docker load``.
    Args:
        data (binary): Image data to be loaded.
    Returns:
        (list of :py:class:`Image`): The images.
    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    loaded_ids = []
    for chunk in self.client.api.load_image(data):
        if 'stream' in chunk:
            # The daemon reports loaded images on the stream channel.
            match = re.search(
                r'(^Loaded image ID: |^Loaded image: )(.+)$',
                chunk['stream']
            )
            if match:
                loaded_ids.append(match.group(2))
        if 'error' in chunk:
            raise ImageLoadError(chunk['error'])
    return [self.get(image_id) for image_id in loaded_ids]
"def",
"load",
"(",
"self",
",",
"data",
")",
":",
"resp",
"=",
"self",
".",
"client",
".",
"api",
".",
"load_image",
"(",
"data",
")",
"images",
"=",
"[",
"]",
"for",
"chunk",
"in",
"resp",
":",
"if",
"'stream'",
"in",
"chunk",
":",
"match",
"="... | 31 | 16.032258 |
def view_sbo(self):
    """View slackbuild.org
    """
    # Build the repository-browser URL for this package.
    sbo_url = self.sbo_url.replace("/slackbuilds/", "/repository/")
    # When colors are disabled, hotkeys are marked with parentheses
    # instead; fix_sp compensates the extra '(' in the menu alignment.
    br1, br2, fix_sp = "", "", " "
    if self.meta.use_colors in ["off", "OFF"]:
        br1 = "("
        br2 = ")"
        fix_sp = ""
    print("") # new line at start
    # Header banner.
    self.msg.template(78)
    print("| {0}{1}SlackBuilds Repository{2}".format(" " * 28, self.grey,
                                                     self.endc))
    self.msg.template(78)
    print("| {0} > {1} > {2}{3}{4}".format(slack_ver(),
                                           sbo_url.split("/")[-3].title(),
                                           self.cyan, self.name, self.endc))
    self.msg.template(78)
    # Package details section.
    print("| {0}Package url{1}: {2}".format(self.green, self.endc, sbo_url))
    self.msg.template(78)
    print("| {0}Description: {1}{2}".format(self.green,
                                            self.endc, self.sbo_desc))
    print("| {0}SlackBuild: {1}{2}".format(self.green, self.endc,
                                           self.sbo_dwn.split("/")[-1]))
    print("| {0}Sources: {1}{2}".format(
        self.green, self.endc,
        (", ".join([src.split("/")[-1] for src in self.source_dwn]))))
    print("| {0}Requirements: {1}{2}".format(self.yellow,
                                             self.endc,
                                             ", ".join(self.sbo_req)))
    self.msg.template(78)
    # Interactive menu: the highlighted/bracketed letter is the hotkey.
    print("| {0}R{1}{2}EADME               View the README file".format(
        self.red, self.endc, br2))
    print("| {0}S{1}{2}lackBuild           View the .SlackBuild "
          "file".format(self.red, self.endc, br2))
    print("| In{0}{1}f{2}{3}o{4}              View the .info "
          "file".format(br1, self.red, self.endc, br2, fix_sp))
    if "doinst.sh" in self.sbo_files.split():
        print("| D{0}{1}o{2}{3}inst.sh{4}        View the doinst.sh "
              "file".format(br1, self.red, self.endc, br2, fix_sp))
    print("| {0}D{1}{2}ownload             Download this package".format(
        self.red, self.endc, br2))
    print("| {0}B{1}{2}uild                Download and build".format(
        self.red, self.endc, br2))
    print("| {0}I{1}{2}nstall              Download/Build/Install".format(
        self.red, self.endc, br2))
    print("| {0}C{1}{2}lear                Clear screen".format(self.red,
                                                                self.endc,
                                                                br2))
    print("| {0}Q{1}{2}uit                 Quit".format(self.red,
                                                        self.endc, br2))
    self.msg.template(78)
"def",
"view_sbo",
"(",
"self",
")",
":",
"sbo_url",
"=",
"self",
".",
"sbo_url",
".",
"replace",
"(",
"\"/slackbuilds/\"",
",",
"\"/repository/\"",
")",
"br1",
",",
"br2",
",",
"fix_sp",
"=",
"\"\"",
",",
"\"\"",
",",
"\" \"",
"if",
"self",
".",
"meta... | 53.716981 | 21.962264 |
def yaml_to_dict(yaml_str=None, str_or_buffer=None, ordered=False):
    """
    Load YAML from a string, file, or buffer (an object with a .read method).
    Parameters are mutually exclusive.
    Parameters
    ----------
    yaml_str : str, optional
        A string of YAML.
    str_or_buffer : str or file like, optional
        File name or buffer from which to load YAML.
    ordered: bool, optional, default False
        If True, an OrderedDict is returned.
    Returns
    -------
    dict
        Conversion from YAML.
    """
    if not yaml_str and not str_or_buffer:
        raise ValueError('One of yaml_str or str_or_buffer is required.')
    # determine which load method to use
    if ordered:
        loader = __ordered_load
    else:
        loader = yaml.load
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects and is unsafe on untrusted input -- confirm
    # inputs are trusted, or switch callers to yaml.safe_load.
    # If both arguments are given, yaml_str silently takes precedence.
    if yaml_str:
        d = loader(yaml_str)
    elif isinstance(str_or_buffer, str):
        # A plain string here is treated as a file path to open.
        with open(str_or_buffer) as f:
            d = loader(f)
    else:
        # Anything else is assumed to be a readable buffer.
        d = loader(str_or_buffer)
    return d
"def",
"yaml_to_dict",
"(",
"yaml_str",
"=",
"None",
",",
"str_or_buffer",
"=",
"None",
",",
"ordered",
"=",
"False",
")",
":",
"if",
"not",
"yaml_str",
"and",
"not",
"str_or_buffer",
":",
"raise",
"ValueError",
"(",
"'One of yaml_str or str_or_buffer is required.... | 25.184211 | 19.552632 |
def handle(self):
    "The actual service to which the user has connected."
    if self.TELNET_ISSUE:
        self.writeline(self.TELNET_ISSUE)
    if not self.authentication_ok():
        return
    if self.DOECHO:
        self.writeline(self.WELCOME)
    self.session_start()
    # Read-eval loop: one command per line until RUNSHELL is cleared.
    while self.RUNSHELL:
        raw_input = self.readline(prompt=self.PROMPT).strip()
        self.input = self.input_reader(self, raw_input)
        self.raw_input = self.input.raw
        if self.input.cmd:
            cmd = self.input.cmd.upper()
            params = self.input.params
            # ``dict.has_key`` was removed in Python 3; the ``in``
            # membership test works on both Python 2 and 3.
            if cmd in self.COMMANDS:
                try:
                    self.COMMANDS[cmd](params)
                except:  # noqa: E722 -- deliberately broad: a failing command must not kill the shell
                    log.exception('Error calling %s.' % cmd)
                    (t, p, tb) = sys.exc_info()
                    # handleException decides whether the session ends.
                    if self.handleException(t, p, tb):
                        break
            else:
                self.writeerror("Unknown command '%s'" % cmd)
    log.debug("Exiting handler")
"def",
"handle",
"(",
"self",
")",
":",
"if",
"self",
".",
"TELNET_ISSUE",
":",
"self",
".",
"writeline",
"(",
"self",
".",
"TELNET_ISSUE",
")",
"if",
"not",
"self",
".",
"authentication_ok",
"(",
")",
":",
"return",
"if",
"self",
".",
"DOECHO",
":",
... | 38.821429 | 13.535714 |
def _getPayload(self, record):
    """Build the loggly payload: the parent handler's payload plus tags."""
    data = super(LogglyHandler, self)._getPayload(record)
    data['tags'] = self._implodeTags()
    return data
"def",
"_getPayload",
"(",
"self",
",",
"record",
")",
":",
"payload",
"=",
"super",
"(",
"LogglyHandler",
",",
"self",
")",
".",
"_getPayload",
"(",
"record",
")",
"payload",
"[",
"'tags'",
"]",
"=",
"self",
".",
"_implodeTags",
"(",
")",
"return",
"p... | 28.5 | 12.75 |
def check_status_code(response, codes=None):
    """
    Checks response.status_code is in codes.
    :param requests.request response: Requests response
    :param list codes: List of accepted codes or callable
    :raises: StatusCodeError if code invalid
    """
    codes = codes or [200]
    # The docstring advertises callable support, but the previous ``in``
    # test could not honour it: a callable receives the status code and
    # returns truthy when the code is acceptable.
    if callable(codes):
        if not codes(response.status_code):
            raise StatusCodeError(response.status_code)
    elif response.status_code not in codes:
        raise StatusCodeError(response.status_code)
"def",
"check_status_code",
"(",
"response",
",",
"codes",
"=",
"None",
")",
":",
"codes",
"=",
"codes",
"or",
"[",
"200",
"]",
"if",
"response",
".",
"status_code",
"not",
"in",
"codes",
":",
"raise",
"StatusCodeError",
"(",
"response",
".",
"status_code"... | 34.181818 | 10 |
def from_json(cls, data):
    """Create an analysis period from a dictionary.

    Args:
        data: dict with optional keys st_month, st_day, st_hour,
            end_month, end_day, end_hour, timestep, is_leap_year.
            Missing keys are filled in as None (the constructor
            applies its own defaults). Note that some months are
            shorter than 31 days and hours run 0-23.
    """
    keys = ('st_month', 'st_day', 'st_hour', 'end_month',
            'end_day', 'end_hour', 'timestep', 'is_leap_year')
    # setdefault both fills missing keys with None (matching the
    # original in-place mutation of ``data``) and yields the value.
    values = [data.setdefault(key, None) for key in keys]
    return cls(*values)
"def",
"from_json",
"(",
"cls",
",",
"data",
")",
":",
"keys",
"=",
"(",
"'st_month'",
",",
"'st_day'",
",",
"'st_hour'",
",",
"'end_month'",
",",
"'end_day'",
",",
"'end_hour'",
",",
"'timestep'",
",",
"'is_leap_year'",
")",
"for",
"key",
"in",
"keys",
... | 48.92 | 24.76 |
def tbframes(tb):
    """Unwind a traceback's tb_next chain into a list of frame objects."""
    frames = [tb.tb_frame]
    while tb.tb_next:
        tb = tb.tb_next
        frames.append(tb.tb_frame)
    return frames
"def",
"tbframes",
"(",
"tb",
")",
":",
"frames",
"=",
"[",
"tb",
".",
"tb_frame",
"]",
"while",
"tb",
".",
"tb_next",
":",
"tb",
"=",
"tb",
".",
"tb_next",
"frames",
".",
"append",
"(",
"tb",
".",
"tb_frame",
")",
"return",
"frames"
] | 32.4 | 18.8 |
def set_default_property_values(self, dev_class, class_prop, dev_prop):
    """
    set_default_property_values(self, dev_class, class_prop, dev_prop) -> None

    Register the default values for every class and device property on
    the given device class wizard.

    Parameters :
        - dev_class : (DeviceClass) device class object
        - class_prop : (dict<str,>) class properties
        - dev_prop : (dict<str,>) device properties
    Return : None
    """
    for prop_name in class_prop:
        prop_type = self.get_property_type(prop_name, class_prop)
        value_str = self.values2string(
            self.get_property_values(prop_name, class_prop), prop_type)
        description = self.get_property_description(prop_name, class_prop)
        dev_class.add_wiz_class_prop(prop_name, description, value_str)
    for prop_name in dev_prop:
        prop_type = self.get_property_type(prop_name, dev_prop)
        value_str = self.values2string(
            self.get_property_values(prop_name, dev_prop), prop_type)
        description = self.get_property_description(prop_name, dev_prop)
        dev_class.add_wiz_dev_prop(prop_name, description, value_str)
"def",
"set_default_property_values",
"(",
"self",
",",
"dev_class",
",",
"class_prop",
",",
"dev_prop",
")",
":",
"for",
"name",
"in",
"class_prop",
":",
"type",
"=",
"self",
".",
"get_property_type",
"(",
"name",
",",
"class_prop",
")",
"val",
"=",
"self",... | 42 | 20.153846 |
def db_snapshot_append(cls, cur, block_id, consensus_hash, ops_hash, timestamp):
    """
    Append hash info for the last block processed, and the time at which it was done.
    Meant to be executed as part of a transaction.

    Return True on success
    Raise an exception on invalid block number
    Abort on db error
    """
    sql = 'INSERT INTO snapshots (block_id,consensus_hash,ops_hash,timestamp) VALUES (?,?,?,?);'
    cls.db_query_execute(cur, sql, (block_id, consensus_hash, ops_hash, timestamp))
    return True
"def",
"db_snapshot_append",
"(",
"cls",
",",
"cur",
",",
"block_id",
",",
"consensus_hash",
",",
"ops_hash",
",",
"timestamp",
")",
":",
"query",
"=",
"'INSERT INTO snapshots (block_id,consensus_hash,ops_hash,timestamp) VALUES (?,?,?,?);'",
"args",
"=",
"(",
"block_id",
... | 39.466667 | 23.333333 |
def _methodcall(self, methodname, objectname, Params=None, **params):
    """
    Perform an extrinsic CIM-XML method call.

    Parameters:
        methodname (string): CIM method name.
        objectname (string or CIMInstanceName or CIMClassName):
            Target object. Strings are interpreted as class names.
        Params: CIM method input parameters, for details see InvokeMethod().
        **params: CIM method input parameters, for details see InvokeMethod().

    Returns:
        tuple(returnvalue, output_params): The method return value converted
        to a Python object (or None), and a NocaseDict of output parameters.

    Raises:
        TypeError: invalid objectname or parameter value type.
        CIMXMLParseError / XMLParseError: malformed server response.
        CIMError: the server returned a CIM error status.
    """
    # Normalize the target object into a local CIM object path.
    if isinstance(objectname, (CIMInstanceName, CIMClassName)):
        localobject = objectname.copy()
        if localobject.namespace is None:
            localobject.namespace = self.default_namespace
        localobject.host = None
    elif isinstance(objectname, six.string_types):
        # a string is always interpreted as a class name
        localobject = CIMClassName(objectname,
                                   namespace=self.default_namespace)
    else:
        raise TypeError(
            _format("The 'ObjectName' argument of the WBEMConnection "
                    "operation has invalid type {0} (must be a string, "
                    "a CIMClassName, or a CIMInstanceName)",
                    type(objectname)))
    # Create HTTP extension headers for CIM-XML.
    # Note: The two-step encoding required by DSP0200 will be performed in
    # wbem_request().
    cimxml_headers = [
        ('CIMOperation', 'MethodCall'),
        ('CIMMethod', methodname),
        ('CIMObject', get_cimobject_header(localobject)),
    ]
    # Add a special HTTP header for SFCB's special password expiration
    # update mechanism. For details, see the documentation of the
    # pywbem config variable AUTO_GENERATE_SFCB_UEP_HEADER.
    if AUTO_GENERATE_SFCB_UEP_HEADER and \
            methodname == 'UpdateExpiredPassword' and \
            objectname.classname == 'SFCB_Account':
        cimxml_headers.append(('Pragma', 'UpdateExpiredPassword'))
    # Create parameter list
    def infer_type(obj, param_name):
        """
        Infer the CIM data type name of a parameter value.
        """
        if isinstance(obj, CIMType):  # pylint: disable=no-else-return
            return obj.cimtype
        elif isinstance(obj, bool):
            return 'boolean'
        elif isinstance(obj, six.string_types):
            return 'string'
        elif isinstance(obj, (datetime, timedelta)):
            return 'datetime'
        elif isinstance(obj, (CIMClassName, CIMInstanceName)):
            return 'reference'
        elif isinstance(obj, (CIMClass, CIMInstance)):
            return 'string'
        elif isinstance(obj, list):
            # The element type stands in for the array type; empty -> None.
            return infer_type(obj[0], param_name) if obj else None
        elif obj is None:
            return None
        if isinstance(obj, int):
            hint = " (use a CIM integer type such as pywbem.Uint32)"
        else:
            hint = ""
        raise TypeError(
            _format("Method parameter {0!A} has type {1} which cannot "
                    "be used to infer a valid CIM data type{2}",
                    param_name, type(obj), hint))
    def paramvalue(obj):
        """
        Return a cim_xml node to be used as the value for a parameter.
        """
        if isinstance(obj, (datetime, timedelta)):
            obj = CIMDateTime(obj)
        if isinstance(obj, (CIMType, bool, six.string_types)):
            # This includes CIMDateTime (subclass of CIMType)
            return cim_xml.VALUE(atomic_to_cim_xml(obj))
        if isinstance(obj, (CIMClassName, CIMInstanceName)):
            return cim_xml.VALUE_REFERENCE(obj.tocimxml())
        if isinstance(obj, CIMInstance):
            return cim_xml.VALUE(obj.tocimxml(ignore_path=True).toxml())
        if isinstance(obj, CIMClass):
            # CIMClass.tocimxml() always ignores path
            return cim_xml.VALUE(obj.tocimxml().toxml())
        if isinstance(obj, list):
            if obj and isinstance(obj[0], (CIMClassName, CIMInstanceName)):
                return cim_xml.VALUE_REFARRAY([paramvalue(x) for x in obj])
            return cim_xml.VALUE_ARRAY([paramvalue(x) for x in obj])
        # The type has been checked in infer_type(), so we can assert
        assert obj is None
    def infer_embedded_object(obj):
        """
        Infer the embedded_object value of a parameter value.
        """
        if isinstance(obj, list) and obj:
            return infer_embedded_object(obj[0])
        if isinstance(obj, CIMClass):
            return 'object'
        if isinstance(obj, CIMInstance):
            return 'instance'
        return None
    ptuples = []  # tuple (name, value, type, embedded_object)
    if Params is not None:
        for p in Params:
            if isinstance(p, CIMParameter):
                ptuple = (p.name, p.value, p.type, p.embedded_object)
            else:  # p is a tuple of name, value
                ptuple = (p[0], p[1], infer_type(p[1], p[0]),
                          infer_embedded_object(p[1]))
            ptuples.append(ptuple)
    for n, v in params.items():
        ptuple = (n, v, infer_type(v, n), infer_embedded_object(v))
        ptuples.append(ptuple)
    plist = [cim_xml.PARAMVALUE(n, paramvalue(v), t, embedded_object=eo)
             for n, v, t, eo in ptuples]
    # Build XML request
    req_xml = cim_xml.CIM(
        cim_xml.MESSAGE(
            cim_xml.SIMPLEREQ(
                cim_xml.METHODCALL(
                    methodname,
                    localobject.tocimxml(),
                    plist)),
            '1001', '1.0'),
        '2.0', '2.0')
    request_data = req_xml.toxml()
    # Set attributes recording the request.
    # Also, reset attributes recording the reply in case we fail.
    self._last_raw_request = request_data
    self._last_request_len = len(request_data)
    self._last_raw_reply = None
    self._last_reply_len = 0
    self._last_server_response_time = None
    if self.debug:
        self._last_request = req_xml.toprettyxml(indent=' ')
        self._last_reply = None
    # Send request and receive response
    reply_data, self._last_server_response_time = wbem_request(
        self.url, request_data, self.creds, cimxml_headers,
        x509=self.x509,
        verify_callback=self.verify_callback,
        ca_certs=self.ca_certs,
        no_verification=self.no_verification,
        timeout=self.timeout,
        debug=self.debug,
        recorders=self._operation_recorders,
        conn_id=self.conn_id)
    # Set attributes recording the response, part 1.
    # Only those that can be done without parsing (which can fail).
    self._last_raw_reply = reply_data
    self._last_reply_len = len(reply_data)
    # Parse the XML into a tuple tree (may raise CIMXMLParseError or
    # XMLParseError):
    tt_ = xml_to_tupletree_sax(reply_data, "CIM-XML response")
    tp = TupleParser(self.conn_id)
    tup_tree = tp.parse_cim(tt_)
    # Set attributes recording the response, part 2.
    if self.debug:
        self._last_reply = _to_pretty_xml(reply_data)
    # Check the tuple tree: walk down the expected element nesting
    # CIM -> MESSAGE -> SIMPLERSP -> METHODRESPONSE, validating each level.
    if tup_tree[0] != 'CIM':
        raise CIMXMLParseError(
            _format("Expecting CIM element, got {0}", tup_tree[0]),
            conn_id=self.conn_id)
    tup_tree = tup_tree[2]
    if tup_tree[0] != 'MESSAGE':
        raise CIMXMLParseError(
            _format("Expecting MESSAGE element, got {0}", tup_tree[0]),
            conn_id=self.conn_id)
    tup_tree = tup_tree[2]
    if tup_tree[0] != 'SIMPLERSP':
        raise CIMXMLParseError(
            _format("Expecting SIMPLERSP element, got {0}", tup_tree[0]),
            conn_id=self.conn_id)
    tup_tree = tup_tree[2]
    if tup_tree[0] != 'METHODRESPONSE':
        raise CIMXMLParseError(
            _format("Expecting METHODRESPONSE element, got {0}",
                    tup_tree[0]),
            conn_id=self.conn_id)
    if tup_tree[1]['NAME'] != methodname:
        raise CIMXMLParseError(
            _format("Expecting attribute NAME={0!A}, got {1!A}",
                    methodname, tup_tree[1]['NAME']),
            conn_id=self.conn_id)
    tup_tree = tup_tree[2]
    # At this point we have an optional RETURNVALUE and zero or
    # more PARAMVALUE elements representing output parameters.
    if tup_tree and tup_tree[0][0] == 'ERROR':
        # Operation failed
        err = tup_tree[0]
        code = int(err[1]['CODE'])
        err_insts = err[2] or None  # List of CIMInstance objects
        if 'DESCRIPTION' in err[1]:
            desc = err[1]['DESCRIPTION']
        else:
            desc = _format("Error code {0}", err[1]['CODE'])
        raise CIMError(
            code, desc, instances=err_insts, conn_id=self.conn_id)
    # # Original code return tup_tree
    # Convert optional RETURNVALUE into a Python object
    returnvalue = None
    if tup_tree and tup_tree[0][0] == 'RETURNVALUE':
        returnvalue = cimvalue(tup_tree[0][2], tup_tree[0][1]['PARAMTYPE'])
        tup_tree = tup_tree[1:]
    # Convert zero or more PARAMVALUE elements into dictionary
    output_params = NocaseDict()
    for p in tup_tree:
        if p[1] == 'reference':
            # Reference values are kept as-is (already CIM object paths).
            output_params[p[0]] = p[2]
        else:
            output_params[p[0]] = cimvalue(p[2], p[1])
    return (returnvalue, output_params)
"def",
"_methodcall",
"(",
"self",
",",
"methodname",
",",
"objectname",
",",
"Params",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"if",
"isinstance",
"(",
"objectname",
",",
"(",
"CIMInstanceName",
",",
"CIMClassName",
")",
")",
":",
"localobject",
... | 38.662745 | 18.521569 |
def convolve(image, pixel_filter, channels=3, name=None):
"""Perform a 2D pixel convolution on the given image.
Arguments:
image: A 3D `float32` `Tensor` of shape `[height, width, channels]`,
where `channels` is the third argument to this function and the
first two dimensions are arbitrary.
pixel_filter: A 2D `Tensor`, representing pixel weightings for the
kernel. This will be used to create a 4D kernel---the extra two
dimensions are for channels (see `tf.nn.conv2d` documentation),
and the kernel will be constructed so that the channels are
independent: each channel only observes the data from neighboring
pixels of the same channel.
channels: An integer representing the number of channels in the
image (e.g., 3 for RGB).
Returns:
A 3D `float32` `Tensor` of the same shape as the input.
"""
with tf.name_scope(name, 'convolve'):
tf.compat.v1.assert_type(image, tf.float32)
channel_filter = tf.eye(channels)
filter_ = (tf.expand_dims(tf.expand_dims(pixel_filter, -1), -1) *
tf.expand_dims(tf.expand_dims(channel_filter, 0), 0))
result_batch = tf.nn.conv2d(tf.stack([image]), # batch
filter=filter_,
strides=[1, 1, 1, 1],
padding='SAME')
return result_batch[0] | [
"def",
"convolve",
"(",
"image",
",",
"pixel_filter",
",",
"channels",
"=",
"3",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
",",
"'convolve'",
")",
":",
"tf",
".",
"compat",
".",
"v1",
".",
"assert_type",
"(",
... | 46.413793 | 19.034483 |
def filter_graph(g, cutoff=7.0, min_kihs=2):
""" Get subgraph formed from edges that have max_kh_distance < cutoff.
Parameters
----------
g : MultiDiGraph representing KIHs
g is the output from graph_from_protein
cutoff : float
Socket cutoff in Angstroms.
Default is 7.0.
min_kihs : int
Minimum number of KIHs shared between all pairs of connected nodes in the graph.
Returns
-------
networkx.MultiDigraph
subgraph formed from edges that have max_kh_distance < cutoff.
"""
edge_list = [e for e in g.edges(keys=True, data=True) if e[3]['kih'].max_kh_distance <= cutoff]
if min_kihs > 0:
c = Counter([(e[0], e[1]) for e in edge_list])
# list of nodes that share > min_kihs edges with at least one other node.
node_list = set(list(itertools.chain.from_iterable([k for k, v in c.items() if v > min_kihs])))
edge_list = [e for e in edge_list if (e[0] in node_list) and (e[1] in node_list)]
return networkx.MultiDiGraph(edge_list) | [
"def",
"filter_graph",
"(",
"g",
",",
"cutoff",
"=",
"7.0",
",",
"min_kihs",
"=",
"2",
")",
":",
"edge_list",
"=",
"[",
"e",
"for",
"e",
"in",
"g",
".",
"edges",
"(",
"keys",
"=",
"True",
",",
"data",
"=",
"True",
")",
"if",
"e",
"[",
"3",
"]... | 44.56 | 24.28 |
def look(self):
"""Look at the next token."""
old_token = next(self)
result = self.current
self.push(result)
self.current = old_token
return result | [
"def",
"look",
"(",
"self",
")",
":",
"old_token",
"=",
"next",
"(",
"self",
")",
"result",
"=",
"self",
".",
"current",
"self",
".",
"push",
"(",
"result",
")",
"self",
".",
"current",
"=",
"old_token",
"return",
"result"
] | 27 | 12.571429 |
def cashdraw(self, pin):
""" Send pulse to kick the cash drawer """
if pin == 2:
self._raw(CD_KICK_2)
elif pin == 5:
self._raw(CD_KICK_5)
else:
raise CashDrawerError() | [
"def",
"cashdraw",
"(",
"self",
",",
"pin",
")",
":",
"if",
"pin",
"==",
"2",
":",
"self",
".",
"_raw",
"(",
"CD_KICK_2",
")",
"elif",
"pin",
"==",
"5",
":",
"self",
".",
"_raw",
"(",
"CD_KICK_5",
")",
"else",
":",
"raise",
"CashDrawerError",
"(",
... | 28.5 | 12.75 |
def to_dict(self, remove_nones=False):
"""
Creates a dictionary representation of the enclave.
:param remove_nones: Whether ``None`` values should be filtered out of the dictionary. Defaults to ``False``.
:return: A dictionary representation of the enclave.
"""
if remove_nones:
return super().to_dict(remove_nones=True)
return {
'id': self.id,
'name': self.name,
'type': self.type
} | [
"def",
"to_dict",
"(",
"self",
",",
"remove_nones",
"=",
"False",
")",
":",
"if",
"remove_nones",
":",
"return",
"super",
"(",
")",
".",
"to_dict",
"(",
"remove_nones",
"=",
"True",
")",
"return",
"{",
"'id'",
":",
"self",
".",
"id",
",",
"'name'",
"... | 30.25 | 22.375 |
def covariance(x,
y=None,
sample_axis=0,
event_axis=-1,
keepdims=False,
name=None):
"""Sample covariance between observations indexed by `event_axis`.
Given `N` samples of scalar random variables `X` and `Y`, covariance may be
estimated as
```none
Cov[X, Y] := N^{-1} sum_{n=1}^N (X_n - Xbar) Conj{(Y_n - Ybar)}
Xbar := N^{-1} sum_{n=1}^N X_n
Ybar := N^{-1} sum_{n=1}^N Y_n
```
For vector-variate random variables `X = (X1, ..., Xd)`, `Y = (Y1, ..., Yd)`,
one is often interested in the covariance matrix, `C_{ij} := Cov[Xi, Yj]`.
```python
x = tf.random_normal(shape=(100, 2, 3))
y = tf.random_normal(shape=(100, 2, 3))
# cov[i, j] is the sample covariance between x[:, i, j] and y[:, i, j].
cov = tfp.stats.covariance(x, y, sample_axis=0, event_axis=None)
# cov_matrix[i, m, n] is the sample covariance of x[:, i, m] and y[:, i, n]
cov_matrix = tfp.stats.covariance(x, y, sample_axis=0, event_axis=-1)
```
Notice we divide by `N` (the numpy default), which does not create `NaN`
when `N = 1`, but is slightly biased.
Args:
x: A numeric `Tensor` holding samples.
y: Optional `Tensor` with same `dtype` and `shape` as `x`.
Default value: `None` (`y` is effectively set to `x`).
sample_axis: Scalar or vector `Tensor` designating axis holding samples, or
`None` (meaning all axis hold samples).
Default value: `0` (leftmost dimension).
event_axis: Scalar or vector `Tensor`, or `None` (scalar events).
Axis indexing random events, whose covariance we are interested in.
If a vector, entries must form a contiguous block of dims. `sample_axis`
and `event_axis` should not intersect.
Default value: `-1` (rightmost axis holds events).
keepdims: Boolean. Whether to keep the sample axis as singletons.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., `'covariance'`).
Returns:
cov: A `Tensor` of same `dtype` as the `x`, and rank equal to
`rank(x) - len(sample_axis) + 2 * len(event_axis)`.
Raises:
AssertionError: If `x` and `y` are found to have different shape.
ValueError: If `sample_axis` and `event_axis` are found to overlap.
ValueError: If `event_axis` is found to not be contiguous.
"""
with tf.compat.v1.name_scope(
name, 'covariance', values=[x, y, event_axis, sample_axis]):
x = tf.convert_to_tensor(value=x, name='x')
# Covariance *only* uses the centered versions of x (and y).
x -= tf.reduce_mean(input_tensor=x, axis=sample_axis, keepdims=True)
if y is None:
y = x
else:
y = tf.convert_to_tensor(value=y, name='y', dtype=x.dtype)
# If x and y have different shape, sample_axis and event_axis will likely
# be wrong for one of them!
x.shape.assert_is_compatible_with(y.shape)
y -= tf.reduce_mean(input_tensor=y, axis=sample_axis, keepdims=True)
if event_axis is None:
return tf.reduce_mean(
input_tensor=x * tf.math.conj(y), axis=sample_axis, keepdims=keepdims)
if sample_axis is None:
raise ValueError(
'sample_axis was None, which means all axis hold events, and this '
'overlaps with event_axis ({})'.format(event_axis))
event_axis = _make_positive_axis(event_axis, tf.rank(x))
sample_axis = _make_positive_axis(sample_axis, tf.rank(x))
# If we get lucky and axis is statically defined, we can do some checks.
if _is_list_like(event_axis) and _is_list_like(sample_axis):
if set(event_axis).intersection(sample_axis):
raise ValueError(
'sample_axis ({}) and event_axis ({}) overlapped'.format(
sample_axis, event_axis))
if (np.diff(sorted(event_axis)) > 1).any():
raise ValueError(
'event_axis must be contiguous. Found: {}'.format(event_axis))
batch_axis = list(
sorted(
set(range(x.shape.ndims)).difference(sample_axis + event_axis)))
else:
batch_axis, _ = tf.compat.v1.setdiff1d(
tf.range(0, tf.rank(x)), tf.concat((sample_axis, event_axis), 0))
event_axis = tf.convert_to_tensor(
value=event_axis, name='event_axis', dtype=tf.int32)
sample_axis = tf.convert_to_tensor(
value=sample_axis, name='sample_axis', dtype=tf.int32)
batch_axis = tf.convert_to_tensor(
value=batch_axis, name='batch_axis', dtype=tf.int32)
# Permute x/y until shape = B + E + S
perm_for_xy = tf.concat((batch_axis, event_axis, sample_axis), 0)
x_permed = tf.transpose(a=x, perm=perm_for_xy)
y_permed = tf.transpose(a=y, perm=perm_for_xy)
batch_ndims = tf.size(input=batch_axis)
batch_shape = tf.shape(input=x_permed)[:batch_ndims]
event_ndims = tf.size(input=event_axis)
event_shape = tf.shape(input=x_permed)[batch_ndims:batch_ndims +
event_ndims]
sample_shape = tf.shape(input=x_permed)[batch_ndims + event_ndims:]
sample_ndims = tf.size(input=sample_shape)
n_samples = tf.reduce_prod(input_tensor=sample_shape)
n_events = tf.reduce_prod(input_tensor=event_shape)
# Flatten sample_axis into one long dim.
x_permed_flat = tf.reshape(
x_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0))
y_permed_flat = tf.reshape(
y_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0))
# Do the same for event_axis.
x_permed_flat = tf.reshape(
x_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0))
y_permed_flat = tf.reshape(
y_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0))
# After matmul, cov.shape = batch_shape + [n_events, n_events]
cov = tf.matmul(
x_permed_flat, y_permed_flat, adjoint_b=True) / tf.cast(
n_samples, x.dtype)
# Insert some singletons to make
# cov.shape = batch_shape + event_shape**2 + [1,...,1]
# This is just like x_permed.shape, except the sample_axis is all 1's, and
# the [n_events] became event_shape**2.
cov = tf.reshape(
cov,
tf.concat(
(
batch_shape,
# event_shape**2 used here because it is the same length as
# event_shape, and has the same number of elements as one
# batch of covariance.
event_shape**2,
tf.ones([sample_ndims], tf.int32)),
0))
# Permuting by the argsort inverts the permutation, making
# cov.shape have ones in the position where there were samples, and
# [n_events * n_events] in the event position.
cov = tf.transpose(a=cov, perm=tf.math.invert_permutation(perm_for_xy))
# Now expand event_shape**2 into event_shape + event_shape.
# We here use (for the first time) the fact that we require event_axis to be
# contiguous.
e_start = event_axis[0]
e_len = 1 + event_axis[-1] - event_axis[0]
cov = tf.reshape(
cov,
tf.concat((tf.shape(input=cov)[:e_start], event_shape, event_shape,
tf.shape(input=cov)[e_start + e_len:]), 0))
# tf.squeeze requires python ints for axis, not Tensor. This is enough to
# require our axis args to be constants.
if not keepdims:
squeeze_axis = tf.where(sample_axis < e_start, sample_axis,
sample_axis + e_len)
cov = _squeeze(cov, axis=squeeze_axis)
return cov | [
"def",
"covariance",
"(",
"x",
",",
"y",
"=",
"None",
",",
"sample_axis",
"=",
"0",
",",
"event_axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(... | 40.972067 | 22.944134 |
def write(self, data):
"""Sends some data to the client."""
# I don't want to add a separate 'Client disconnected' logic for sending.
# Therefore I just ignore any writes after the first error - the server
# won't send that much data anyway. Afterwards the read will detect the
# broken connection and we quit.
if self._ignore_write_operations:
return
assert self.is_connected()
try:
self._connection.send(data.encode('ascii'))
except socket.error:
self.close()
self._ignore_write_operations = True | [
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"# I don't want to add a separate 'Client disconnected' logic for sending.",
"# Therefore I just ignore any writes after the first error - the server",
"# won't send that much data anyway. Afterwards the read will detect the",
"# broken conne... | 43.214286 | 17.5 |
def population_counts(
self,
population_size,
weighted=True,
include_missing=False,
include_transforms_for_dims=None,
prune=False,
):
"""Return counts scaled in proportion to overall population.
The return value is a numpy.ndarray object. Count values are scaled
proportionally to approximate their value if the entire population
had been sampled. This calculation is based on the estimated size of
the population provided as *population size*. The remaining arguments
have the same meaning as they do for the `.proportions()` method.
Example::
>>> cube = CrunchCube(fixt_cat_x_cat)
>>> cube.as_array()
np.array([
[5, 2],
[5, 3],
])
>>> cube.population_counts(9000)
np.array([
[3000, 1200],
[3000, 1800],
])
"""
population_counts = [
slice_.population_counts(
population_size,
weighted=weighted,
include_missing=include_missing,
include_transforms_for_dims=include_transforms_for_dims,
prune=prune,
)
for slice_ in self.slices
]
if len(population_counts) > 1:
return np.array(population_counts)
return population_counts[0] | [
"def",
"population_counts",
"(",
"self",
",",
"population_size",
",",
"weighted",
"=",
"True",
",",
"include_missing",
"=",
"False",
",",
"include_transforms_for_dims",
"=",
"None",
",",
"prune",
"=",
"False",
",",
")",
":",
"population_counts",
"=",
"[",
"sli... | 31.931818 | 18.727273 |
def _cleanJsbAllClassesSection(self, config):
"""
Fixes two issues with the sencha created JSB:
- All extjs urls are prefixed by ``../static`` instead of
``/static`` (no idea why).
- We assume static files are served at ``/static``, but collectstatic may
not build files in ``static/``. Therefore, we replace ``/static`` with
the relative path to ``settings.STATIC_ROOT``.
"""
allclasses = config['builds'][0]
for fileinfo in allclasses['files']:
path = fileinfo['path']
if path.startswith('..'):
path = path[2:]
path = path.replace('/static', self.static_root)
fileinfo['path'] = path | [
"def",
"_cleanJsbAllClassesSection",
"(",
"self",
",",
"config",
")",
":",
"allclasses",
"=",
"config",
"[",
"'builds'",
"]",
"[",
"0",
"]",
"for",
"fileinfo",
"in",
"allclasses",
"[",
"'files'",
"]",
":",
"path",
"=",
"fileinfo",
"[",
"'path'",
"]",
"if... | 43.529412 | 14.235294 |
def parse(name, content, releases, get_head_fn):
"""
Parses the given content for a valid changelog
:param name: str, package name
:param content: str, content
:param releases: list, releases
:param get_head_fn: function
:return: dict, changelog
"""
changelog = {}
releases = frozenset(releases)
head = False
for line in content.splitlines():
new_head = get_head_fn(name=name, line=line, releases=releases)
if new_head:
head = new_head
changelog[head] = ""
continue
if not head:
continue
line = line.replace("@", "")
line = line.replace("#", "")
changelog[head] += line + "\n"
return changelog | [
"def",
"parse",
"(",
"name",
",",
"content",
",",
"releases",
",",
"get_head_fn",
")",
":",
"changelog",
"=",
"{",
"}",
"releases",
"=",
"frozenset",
"(",
"releases",
")",
"head",
"=",
"False",
"for",
"line",
"in",
"content",
".",
"splitlines",
"(",
")... | 29.916667 | 11.416667 |
def hsla_to_rgba(h, s, l, a):
""" 0 <= H < 360, 0 <= s,l,a < 1
"""
h = h % 360
s = max(0, min(1, s))
l = max(0, min(1, l))
a = max(0, min(1, a))
c = (1 - abs(2*l - 1)) * s
x = c * (1 - abs(h/60%2 - 1))
m = l - c/2
if h<60:
r, g, b = c, x, 0
elif h<120:
r, g, b = x, c, 0
elif h<180:
r, g, b = 0, c, x
elif h<240:
r, g, b = 0, x, c
elif h<300:
r, g, b = x, 0, c
else:
r, g, b = c, 0, x
return (int((r+m)*255), int((g+m)*255), int((b+m)*255), int(a*255)) | [
"def",
"hsla_to_rgba",
"(",
"h",
",",
"s",
",",
"l",
",",
"a",
")",
":",
"h",
"=",
"h",
"%",
"360",
"s",
"=",
"max",
"(",
"0",
",",
"min",
"(",
"1",
",",
"s",
")",
")",
"l",
"=",
"max",
"(",
"0",
",",
"min",
"(",
"1",
",",
"l",
")",
... | 20.074074 | 20.851852 |
def get_instance(self, payload):
"""
Build an instance of DomainInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.domain.DomainInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.DomainInstance
"""
return DomainInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | [
"def",
"get_instance",
"(",
"self",
",",
"payload",
")",
":",
"return",
"DomainInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"account_sid",
"=",
"self",
".",
"_solution",
"[",
"'account_sid'",
"]",
",",
")"
] | 39.7 | 23.1 |
async def connection_exists(ssid: str) -> Optional[str]:
""" If there is already a connection for this ssid, return the name of
the connection; if there is not, return None.
"""
nmcli_conns = await connections()
for wifi in [c['name']
for c in nmcli_conns if c['type'] == 'wireless']:
res, _ = await _call(['-t', '-f', '802-11-wireless.ssid',
'-m', 'tabular',
'connection', 'show', wifi])
if res == ssid:
return wifi
return None | [
"async",
"def",
"connection_exists",
"(",
"ssid",
":",
"str",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"nmcli_conns",
"=",
"await",
"connections",
"(",
")",
"for",
"wifi",
"in",
"[",
"c",
"[",
"'name'",
"]",
"for",
"c",
"in",
"nmcli_conns",
"if",
... | 41.923077 | 13.538462 |
def cumulative_statistics(self):
"""
Access the cumulative_statistics
:returns: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsList
:rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_cumulative_statistics.WorkersCumulativeStatisticsList
"""
if self._cumulative_statistics is None:
self._cumulative_statistics = WorkersCumulativeStatisticsList(
self._version,
workspace_sid=self._solution['workspace_sid'],
)
return self._cumulative_statistics | [
"def",
"cumulative_statistics",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cumulative_statistics",
"is",
"None",
":",
"self",
".",
"_cumulative_statistics",
"=",
"WorkersCumulativeStatisticsList",
"(",
"self",
".",
"_version",
",",
"workspace_sid",
"=",
"self",
"... | 46.461538 | 24 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.