| repo | path | url | code | docstring | language | partition |
|---|---|---|---|---|---|---|
aetros/aetros-cli | aetros/utils/image.py | https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/utils/image.py#L274-L339 | def get_layer_vis_square(data,
allow_heatmap=True,
normalize=True,
min_img_dim=100,
max_width=1200,
channel_order='RGB',
colormap='jet',
):
"""
Returns a vis_square for the given layer data
Arguments:
data -- a np.ndarray
Keyword arguments:
allow_heatmap -- if True, convert single channel images to heatmaps
normalize -- whether to normalize the data when visualizing
max_width -- maximum width for the vis_square
"""
if channel_order not in ['RGB', 'BGR']:
raise ValueError('Unsupported channel_order %s' % channel_order)
if data.ndim == 1:
# interpret as 1x1 grayscale images
# (N, 1, 1)
data = data[:, np.newaxis, np.newaxis]
elif data.ndim == 2:
# interpret as 1x1 grayscale images
# (N, 1, 1)
data = data.reshape((data.shape[0] * data.shape[1], 1, 1))
elif data.ndim == 3:
if data.shape[0] == 3:
# interpret as a color image
# (1, H, W, 3)
if channel_order == 'BGR':
data = data[[2, 1, 0], ...] # BGR to RGB (see issue #59)
data = data.transpose(1, 2, 0)
data = data[np.newaxis, ...]
else:
# interpret as grayscale images
# (N, H, W)
pass
elif data.ndim == 4:
if data.shape[0] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(1, 2, 3, 0)
if channel_order == 'BGR':
data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)
elif data.shape[1] == 3:
# interpret as HxW color images
# (N, H, W, 3)
data = data.transpose(0, 2, 3, 1)
if channel_order == 'BGR':
data = data[:, :, :, [2, 1, 0]] # BGR to RGB (see issue #59)
else:
# interpret as HxW grayscale images
# (N, H, W)
data = data.reshape((data.shape[0] * data.shape[1], data.shape[2], data.shape[3]))
else:
raise RuntimeError('unrecognized data shape: %s' % (data.shape,))
return get_layer_vis_square_raw(data,
allow_heatmap,
normalize,
min_img_dim,
max_width,
colormap,
                                    ) | Returns a vis_square for the given layer data
Arguments:
data -- a np.ndarray
Keyword arguments:
allow_heatmap -- if True, convert single channel images to heatmaps
normalize -- whether to normalize the data when visualizing
max_width -- maximum width for the vis_square | python | train |
saulpw/visidata | visidata/vdtui.py | https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/vdtui.py#L1537-L1543 | def unselect(self, rows, status=True, progress=True):
"Unselect given rows. Don't show progress if progress=False; don't show status if status=False."
before = len(self._selectedRows)
for r in (Progress(rows, 'unselecting') if progress else rows):
self.unselectRow(r)
if status:
        vd().status('unselected %s/%s %s' % (before-len(self._selectedRows), before, self.rowtype)) | Unselect given rows. Don't show progress if progress=False; don't show status if status=False. | python | train |
Nic30/hwt | hwt/hdl/transTmpl.py | https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/hdl/transTmpl.py#L162-L171 | def _loadFromUnion(self, dtype: HdlType, bitAddr: int) -> int:
"""
Parse HUnion type to this transaction template instance
    :return: address of its end
"""
for field in dtype.fields.values():
ch = TransTmpl(field.dtype, 0, parent=self, origin=field)
self.children.append(ch)
        return bitAddr + dtype.bit_length() | Parse HUnion type to this transaction template instance
:return: address of its end | python | test |
saltstack/salt | salt/modules/iptables.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/iptables.py#L956-L974 | def flush(table='filter', chain='', family='ipv4'):
'''
Flush the chain in the specified table, flush all chains in the specified
    table if no chain is specified.
CLI Example:
.. code-block:: bash
salt '*' iptables.flush filter INPUT
IPv6:
salt '*' iptables.flush filter INPUT family=ipv6
'''
wait = '--wait' if _has_option('--wait', family) else ''
cmd = '{0} {1} -t {2} -F {3}'.format(_iptables_cmd(family), wait, table, chain)
out = __salt__['cmd.run'](cmd)
    return out | Flush the chain in the specified table, flush all chains in the specified
table if no chain is specified.
CLI Example:
.. code-block:: bash
salt '*' iptables.flush filter INPUT
IPv6:
salt '*' iptables.flush filter INPUT family=ipv6 | python | train |
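A self-contained sketch of how the flush command string above is assembled; `build_flush_cmd` and the iptables path are hypothetical stand-ins, not part of the Salt module.

```python
def build_flush_cmd(iptables_cmd, has_wait, table='filter', chain=''):
    # mirrors the string assembly inside flush() above
    wait = '--wait' if has_wait else ''
    return '{0} {1} -t {2} -F {3}'.format(iptables_cmd, wait, table, chain)

print(build_flush_cmd('/sbin/iptables', True, 'filter', 'INPUT'))
# /sbin/iptables --wait -t filter -F INPUT
```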
tango-controls/pytango | tango/device_class.py | https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/device_class.py#L119-L164 | def get_device_properties(self, dev, class_prop, dev_prop):
"""
get_device_properties(self, dev, class_prop, dev_prop) -> None
Returns the device properties
Parameters :
- dev : (DeviceImpl) the device object
- class_prop : (dict<str, obj>) the class properties
- dev_prop : [in,out] (dict<str, None>) the device property names
Return : None"""
# initialize default properties
if dev_prop == {} or not Util._UseDb:
return
# Call database to get properties
props = self.db.get_device_property(dev.get_name(), list(dev_prop.keys()))
# if value defined in database, store it
for name in dev_prop:
prop_value = props[name]
if len(prop_value):
data_type = self.get_property_type(name, dev_prop)
values = self.stringArray2values(prop_value, data_type)
if not self.is_empty_seq(values):
self.set_property_values(name, dev_prop, values)
else:
# Try to get it from class property
values = self.get_property_values(name, class_prop)
if not self.is_empty_seq(values):
if not self.is_seq(values):
values = [values]
data_type = self.get_property_type(name, class_prop)
values = self.stringArray2values(values, data_type)
if not self.is_empty_seq(values):
self.set_property_values(name, dev_prop, values)
else:
# Try to get it from class property
values = self.get_property_values(name, class_prop)
if not self.is_empty_seq(values):
if not self.is_seq(values):
values = [values]
data_type = self.get_property_type(name, class_prop)
values = self.stringArray2values(values, data_type)
if not self.is_empty_seq(values):
                        self.set_property_values(name, dev_prop, values) | get_device_properties(self, dev, class_prop, dev_prop) -> None
Returns the device properties
Parameters :
- dev : (DeviceImpl) the device object
- class_prop : (dict<str, obj>) the class properties
- dev_prop : [in,out] (dict<str, None>) the device property names
Return : None | python | train |
serkanyersen/underscore.py | src/underscore.py | https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L1202-L1210 | def isFile(self):
""" Check if the given object is a file
"""
try:
filetype = file
except NameError:
filetype = io.IOBase
        return self._wrap(type(self.obj) is filetype) | Check if the given object is a file | python | train |
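A standalone sketch of the Python 2/3 compatibility probe used in `isFile`. Note that on Python 3, `open()` returns subclasses such as `io.TextIOWrapper`, so the exact `type(obj) is filetype` test above is stricter than an `isinstance` check.

```python
import io
import os

try:
    filetype = file          # Python 2 builtin
except NameError:
    filetype = io.IOBase     # Python 3 fallback

with open(os.devnull, 'r') as fh:
    print(isinstance(fh, filetype))   # True
    print(type(fh) is filetype)       # False on Python 3: fh is io.TextIOWrapper
```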
mkaz/termgraph | termgraph/termgraph.py | https://github.com/mkaz/termgraph/blob/c40b86454d380d685785b98834364b111734c163/termgraph/termgraph.py#L166-L173 | def find_max_label_length(labels):
"""Return the maximum length for the labels."""
length = 0
for i in range(len(labels)):
if len(labels[i]) > length:
length = len(labels[i])
    return length | Return the maximum length for the labels. | python | train |
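A quick usage sketch; the function is repeated here so the snippet runs standalone, and the built-in equivalent is shown for comparison.

```python
def find_max_label_length(labels):
    # copy of the function above, repeated so this snippet is self-contained
    length = 0
    for i in range(len(labels)):
        if len(labels[i]) > length:
            length = len(labels[i])
    return length

labels = ['2007', '2008', 'a much longer label']
print(find_max_label_length(labels))      # 19
print(max(map(len, labels), default=0))   # equivalent built-in one-liner
```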
brocade/pynos | pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels.py#L192-L207 | def overlay_gateway_attach_vlan_vid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
attach = ET.SubElement(overlay_gateway, "attach")
vlan = ET.SubElement(attach, "vlan")
mac_key = ET.SubElement(vlan, "mac")
mac_key.text = kwargs.pop('mac')
vid = ET.SubElement(vlan, "vid")
vid.text = kwargs.pop('vid')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | python | train |
spacetelescope/stsci.tools | lib/stsci/tools/wcsutil.py | https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/wcsutil.py#L452-L454 | def compute_pscale(self,cd11,cd21):
""" Compute the pixel scale based on active WCS values. """
        return N.sqrt(N.power(cd11,2)+N.power(cd21,2)) * 3600. | Compute the pixel scale based on active WCS values. | python | train |
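A worked example; `N` is numpy in the original module, and the CD-matrix value is illustrative.

```python
import numpy as np

def compute_pscale(cd11, cd21):
    # module-level copy of the method above, with N replaced by numpy
    return np.sqrt(np.power(cd11, 2) + np.power(cd21, 2)) * 3600.

# a CD-matrix column of ~1.39e-5 deg/pixel corresponds to ~0.05 arcsec/pixel
print(compute_pscale(1.39e-5, 0.0))   # ~0.05
```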
wummel/dosage | dosagelib/loader.py | https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/loader.py#L73-L91 | def get_module_plugins(module, classobj):
"""Return all subclasses of a class in the module.
If the module defines __all__, only those entries will be searched,
otherwise all objects not starting with '_' will be searched.
"""
try:
names = module.__all__
except AttributeError:
names = [x for x in vars(module) if not x.startswith('_')]
for name in names:
try:
obj = getattr(module, name)
except AttributeError:
continue
try:
if issubclass(obj, classobj):
yield obj
except TypeError:
            continue | Return all subclasses of a class in the module.
If the module defines __all__, only those entries will be searched,
otherwise all objects not starting with '_' will be searched. | python | train |
StackStorm/pybind | pybind/slxos/v17s_1_02/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/__init__.py#L14148-L14171 | def _set_cfm_state(self, v, load=False):
"""
Setter method for cfm_state, mapped from YANG variable /cfm_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_cfm_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cfm_state() directly.
YANG Description: CFM Operational Information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=cfm_state.cfm_state, is_container='container', presence=False, yang_name="cfm-state", rest_name="cfm-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'dot1ag-cfm', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cfm_state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=cfm_state.cfm_state, is_container='container', presence=False, yang_name="cfm-state", rest_name="cfm-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'dot1ag-cfm', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='container', is_config=True)""",
})
self.__cfm_state = t
if hasattr(self, '_set'):
      self._set() | Setter method for cfm_state, mapped from YANG variable /cfm_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_cfm_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cfm_state() directly.
YANG Description: CFM Operational Information | python | train |
trailofbits/manticore | manticore/native/cpu/abstractcpu.py | https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/abstractcpu.py#L291-L299 | def values_from(self, base):
"""
A reusable generator for increasing pointer-sized values from an address
(usually the stack).
"""
word_bytes = self._cpu.address_bit_size // 8
while True:
yield base
            base += word_bytes | A reusable generator for increasing pointer-sized values from an address
(usually the stack). | python | valid |
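A standalone variant of the generator above, assuming a 64-bit (8-byte) word size; the starting address is illustrative.

```python
import itertools

def values_from(base, word_bytes=8):
    # pointer-sized values increasing from a base address, as in the method above
    while True:
        yield base
        base += word_bytes

stack_pointer = 0x7fff0000   # illustrative address
print([hex(a) for a in itertools.islice(values_from(stack_pointer), 4)])
# ['0x7fff0000', '0x7fff0008', '0x7fff0010', '0x7fff0018']
```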
Rediker-Software/doac | doac/decorators.py | https://github.com/Rediker-Software/doac/blob/398fdd64452e4ff8662297b0381926addd77505a/doac/decorators.py#L7-L77 | def scope_required(*scopes):
"""
Test for specific scopes that the access token has been authenticated for before
processing the request and eventual response.
The scopes that are passed in determine how the decorator will respond to incoming
requests:
- If no scopes are passed in the arguments, the decorator will test for any available
scopes and determine the response based on that.
- If specific scopes are passed, the access token will be checked to make sure it has
all of the scopes that were requested.
    This decorator will change the response if the access token does not have the scope:
- If an invalid scope is requested (one that does not exist), all requests will be
    denied, as no access tokens will be able to fulfill the scope request.
- If the access token does not have one of the requested scopes, the request will be
denied and the user will be returned one of two responses:
- A 400 response (Bad Request) will be returned if an unauthenticated user tries to
access the resource.
    - A 403 response (Forbidden) will be returned if an authenticated user tries to access
the resource but does not have the correct scope.
"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
from django.http import HttpResponseBadRequest, HttpResponseForbidden
from .exceptions.base import InvalidRequest, InsufficientScope
from .models import Scope
from .utils import request_error_header
try:
if not hasattr(request, "access_token"):
raise CredentialsNotProvided()
access_token = request.access_token
for scope_name in scopes:
try:
scope = access_token.scope.for_short_name(scope_name)
except Scope.DoesNotExist:
raise ScopeNotEnough()
except InvalidRequest as e:
response = HttpResponseBadRequest()
response["WWW-Authenticate"] = request_error_header(e)
return response
except InsufficientScope as e:
response = HttpResponseForbidden()
response["WWW-Authenticate"] = request_error_header(e)
return response
return view_func(request, *args, **kwargs)
return _wrapped_view
if scopes and hasattr(scopes[0], "__call__"):
func = scopes[0]
scopes = scopes[1:]
return decorator(func)
    return decorator | Test for specific scopes that the access token has been authenticated for before
processing the request and eventual response.
The scopes that are passed in determine how the decorator will respond to incoming
requests:
- If no scopes are passed in the arguments, the decorator will test for any available
scopes and determine the response based on that.
- If specific scopes are passed, the access token will be checked to make sure it has
all of the scopes that were requested.
This decorator will change the response if the access token does not have the scope:
- If an invalid scope is requested (one that does not exist), all requests will be
denied, as no access tokens will be able to fulfill the scope request.
- If the access token does not have one of the requested scopes, the request will be
denied and the user will be returned one of two responses:
- A 400 response (Bad Request) will be returned if an unauthenticated user tries to
access the resource.
- A 403 response (Forbidden) will be returned if an authenticated user tries to access
the resource but does not have the correct scope. | python | train |
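A toy re-implementation of the dual-use decorator pattern above, usable both bare (`@scope_required`) and with arguments (`@scope_required('read')`); the framework checks are replaced by a print, so this sketches the control flow only.

```python
from functools import wraps

def scope_required(*scopes):
    def decorator(view_func):
        @wraps(view_func)
        def _wrapped_view(*args, **kwargs):
            print('would check scopes:', scopes or '(any)')
            return view_func(*args, **kwargs)
        return _wrapped_view
    # bare usage: the first "scope" is actually the decorated function itself
    if scopes and callable(scopes[0]):
        func, scopes = scopes[0], scopes[1:]
        return decorator(func)
    return decorator

@scope_required('read')
def view_with_args():
    return 'ok'

@scope_required
def bare_view():
    return 'ok'

print(view_with_args(), bare_view())
```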
gem/oq-engine | openquake/hazardlib/geo/polygon.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/polygon.py#L249-L291 | def get_resampled_coordinates(lons, lats):
"""
Resample polygon line segments and return the coordinates of the new
vertices. This limits distortions when projecting a polygon onto a
spherical surface.
Parameters define longitudes and latitudes of a point collection in the
form of lists or numpy arrays.
:return:
A tuple of two numpy arrays: longitudes and latitudes
of resampled vertices.
"""
num_coords = len(lons)
assert num_coords == len(lats)
lons1 = numpy.array(lons)
lats1 = numpy.array(lats)
lons2 = numpy.concatenate((lons1[1:], lons1[:1]))
lats2 = numpy.concatenate((lats1[1:], lats1[:1]))
distances = geodetic.geodetic_distance(lons1, lats1, lons2, lats2)
resampled_lons = [lons[0]]
resampled_lats = [lats[0]]
for i in range(num_coords):
next_point = (i + 1) % num_coords
lon1, lat1 = lons[i], lats[i]
lon2, lat2 = lons[next_point], lats[next_point]
distance = distances[i]
num_points = int(distance / UPSAMPLING_STEP_KM) + 1
if num_points >= 2:
# We need to increase the resolution of this arc by adding new
# points.
new_lons, new_lats, _ = geodetic.npoints_between(
lon1, lat1, 0, lon2, lat2, 0, num_points)
resampled_lons.extend(new_lons[1:])
resampled_lats.extend(new_lats[1:])
else:
resampled_lons.append(lon2)
resampled_lats.append(lat2)
# NB: we cut off the last point because it repeats the first one
    return numpy.array(resampled_lons[:-1]), numpy.array(resampled_lats[:-1]) | Resample polygon line segments and return the coordinates of the new
vertices. This limits distortions when projecting a polygon onto a
spherical surface.
Parameters define longitudes and latitudes of a point collection in the
form of lists or numpy arrays.
:return:
A tuple of two numpy arrays: longitudes and latitudes
of resampled vertices. | python | train |
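A simplified planar analogue of the per-segment upsampling above, assuming straight-line distances instead of geodesics; `STEP` stands in for `UPSAMPLING_STEP_KM` and the units are arbitrary here.

```python
import numpy as np

STEP = 100.0

def resample_segment(p1, p2):
    # insert enough intermediate points so no gap exceeds STEP, then
    # drop the first point, just as new_lons[1:] does above
    p1, p2 = np.asarray(p1, float), np.asarray(p2, float)
    dist = np.hypot(*(p2 - p1))
    num_points = int(dist / STEP) + 1
    if num_points >= 2:
        pts = np.linspace(p1, p2, num_points)   # includes both endpoints
        return pts[1:]
    return p2[np.newaxis]

print(resample_segment((0.0, 0.0), (350.0, 0.0)))
# rows ~ [116.67, 0], [233.33, 0], [350.0, 0]
```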
pantsbuild/pants | src/python/pants/build_graph/build_file_parser.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/build_file_parser.py#L90-L171 | def parse_build_file(self, build_file):
"""Capture Addressable instances from parsing `build_file`.
Prepare a context for parsing, read a BUILD file from the filesystem, and return the
Addressable instances generated by executing the code.
"""
def _format_context_msg(lineno, offset, error_type, message):
"""Show the line of the BUILD file that has the error along with a few line of context"""
build_contents = build_file.source().decode('utf-8')
context = "While parsing {build_file}:\n".format(build_file=build_file)
curr_lineno = 0
for line in build_contents.split('\n'):
line = line.encode('ascii', 'backslashreplace')
curr_lineno += 1
if curr_lineno == lineno:
highlight = '*'
else:
highlight = ' '
if curr_lineno >= lineno - 3:
context += "{highlight}{curr_lineno:4d}: {line}\n".format(
highlight=highlight, line=line, curr_lineno=curr_lineno)
if lineno == curr_lineno:
if offset:
context += (" {caret:>{width}} {error_type}: {message}\n\n"
.format(caret="^", width=int(offset), error_type=error_type,
message=message))
else:
context += (" {error_type}: {message}\n\n"
.format(error_type=error_type, message=message))
if curr_lineno > lineno + 3:
break
return context
logger.debug("Parsing BUILD file {build_file}."
.format(build_file=build_file))
try:
build_file_code = build_file.code()
except SyntaxError as e:
raise self.ParseError(_format_context_msg(e.lineno, e.offset, e.__class__.__name__, e))
except Exception as e:
raise self.ParseError("{error_type}: {message}\n while parsing BUILD file {build_file}"
.format(error_type=e.__class__.__name__,
message=e, build_file=build_file))
parse_state = self._build_configuration.initialize_parse_state(build_file)
try:
with warnings.catch_warnings(record=True) as warns:
six.exec_(build_file_code, parse_state.parse_globals)
for warn in warns:
logger.warning(_format_context_msg(lineno=warn.lineno,
offset=None,
error_type=warn.category.__name__,
message=warn.message))
except Exception as e:
raise self.ExecuteError("{message}\n while executing BUILD file {build_file}"
.format(message=e, build_file=build_file))
name_map = {}
for addressable in parse_state.objects:
name = addressable.addressed_name
logger.debug('Adding {addressable} to the BuildFileParser address map for {build_file} with {name}'
.format(addressable=addressable,
build_file=build_file,
name=name))
if name in name_map:
raise self.AddressableConflictException(
"File {conflicting_file} defines address '{target_name}' more than once."
.format(conflicting_file=build_file,
target_name=name))
name_map[name] = addressable
logger.debug("{build_file} produced the following Addressables:"
.format(build_file=build_file))
address_map = {}
for name, addressable in name_map.items():
address_map[BuildFileAddress(build_file=build_file, target_name=name)] = addressable
logger.debug(" * {name}: {addressable}"
.format(name=name,
addressable=addressable))
    return address_map | Capture Addressable instances from parsing `build_file`.
Prepare a context for parsing, read a BUILD file from the filesystem, and return the
Addressable instances generated by executing the code. | python | train |
ucbvislab/radiotool | radiotool/composer/track.py | https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/composer/track.py#L200-L210 | def zero_crossing_after(self, n):
"""Find nearest zero crossing in waveform after frame ``n``"""
n_in_samples = int(n * self.samplerate)
search_end = n_in_samples + self.samplerate
if search_end > self.duration:
search_end = self.duration
frame = zero_crossing_first(
self.range_as_mono(n_in_samples, search_end)) + n_in_samples
        return frame / float(self.samplerate) | Find nearest zero crossing in waveform after frame ``n`` | python | train |
tkf/rash | rash/indexer.py | https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/indexer.py#L76-L107 | def index_record(self, json_path):
"""
Import `json_path` and remove it if :attr:`keep_json` is false.
"""
self.logger.debug('Indexing record: %s', json_path)
json_path = os.path.abspath(json_path)
self.check_path(json_path, '`json_path`')
with open(json_path) as fp:
try:
dct = json.load(fp)
except ValueError:
warnings.warn(
'Ignoring invalid JSON file at: {0}'.format(json_path))
return
record_type = self.get_record_type(json_path)
kwds = {}
if record_type == 'command':
importer = self.db.import_dict
kwds.update(check_duplicate=self.check_duplicate)
elif record_type == 'init':
importer = self.db.import_init_dict
elif record_type == 'exit':
importer = self.db.import_exit_dict
else:
raise ValueError("Unknown record type: {0}".format(record_type))
importer(dct, **kwds)
if not self.keep_json:
self.logger.info('Removing JSON record: %s', json_path)
            os.remove(json_path) | Import `json_path` and remove it if :attr:`keep_json` is false. | python | train |
hotdoc/hotdoc | hotdoc/utils/loggable.py | https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/utils/loggable.py#L298-L304 | def get_issues():
"""Get actual issues in the journal."""
issues = []
for entry in Logger.journal:
if entry.level >= WARNING:
issues.append(entry)
    return issues | Get actual issues in the journal. | python | train |
lreis2415/PyGeoC | pygeoc/raster.py | https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/raster.py#L340-L366 | def raster_reclassify(srcfile, v_dict, dstfile, gdaltype=GDT_Float32):
"""Reclassify raster by given classifier dict.
Args:
srcfile: source raster file.
v_dict: classifier dict.
dstfile: destination file path.
gdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default.
"""
src_r = RasterUtilClass.read_raster(srcfile)
src_data = src_r.data
dst_data = numpy.copy(src_data)
if gdaltype == GDT_Float32 and src_r.dataType != GDT_Float32:
gdaltype = src_r.dataType
no_data = src_r.noDataValue
new_no_data = DEFAULT_NODATA
if gdaltype in [GDT_Unknown, GDT_Byte, GDT_UInt16, GDT_UInt32]:
new_no_data = 0
if not MathClass.floatequal(new_no_data, src_r.noDataValue):
if src_r.noDataValue not in v_dict:
v_dict[src_r.noDataValue] = new_no_data
no_data = new_no_data
for (k, v) in iteritems(v_dict):
dst_data[src_data == k] = v
RasterUtilClass.write_gtiff_file(dstfile, src_r.nRows, src_r.nCols, dst_data,
                                         src_r.geotrans, src_r.srs, no_data, gdaltype) | Reclassify raster by given classifier dict.
Args:
srcfile: source raster file.
v_dict: classifier dict.
dstfile: destination file path.
gdaltype (:obj:`pygeoc.raster.GDALDataType`): GDT_Float32 as default. | python | train |
twilio/twilio-python | twilio/rest/api/v2010/account/usage/record/daily.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/usage/record/daily.py#L102-L137 | def page(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of DailyInstance records from the API.
Request is executed immediately
:param DailyInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
"""
params = values.of({
'Category': category,
'StartDate': serialize.iso8601_date(start_date),
'EndDate': serialize.iso8601_date(end_date),
'IncludeSubaccounts': include_subaccounts,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
        return DailyPage(self._version, response, self._solution) | Retrieve a single page of DailyInstance records from the API.
Request is executed immediately
:param DailyInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage | python | train |
spyder-ide/spyder | spyder/plugins/editor/widgets/base.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/base.py#L1065-L1075 | def hide_tooltip_if_necessary(self, key):
"""Hide calltip when necessary"""
try:
calltip_char = self.get_character(self.calltip_position)
before = self.is_cursor_before(self.calltip_position,
char_offset=1)
other = key in (Qt.Key_ParenRight, Qt.Key_Period, Qt.Key_Tab)
if calltip_char not in ('?', '(') or before or other:
QToolTip.hideText()
except (IndexError, TypeError):
            QToolTip.hideText() | Hide calltip when necessary | python | train |
astraw/stdeb | stdeb/util.py | https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L595-L609 | def parse_vals(cfg,section,option):
"""parse comma separated values in debian control file style from .cfg"""
try:
vals = cfg.get(section,option)
except ConfigParser.NoSectionError as err:
if section != 'DEFAULT':
vals = cfg.get('DEFAULT',option)
else:
raise err
vals = vals.split('#')[0]
vals = vals.strip()
vals = vals.split(',')
vals = [v.strip() for v in vals]
vals = [v for v in vals if len(v)]
    return vals | parse comma separated values in debian control file style from .cfg | python | train |
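A standalone sketch of the value-cleaning chain inside `parse_vals`; the helper name and sample string are hypothetical.

```python
def parse_comma_list(raw):
    raw = raw.split('#')[0].strip()             # drop any trailing comment
    vals = [v.strip() for v in raw.split(',')]  # split and trim
    return [v for v in vals if v]               # drop empty entries

print(parse_comma_list('python, python-setuptools , # a comment'))
# ['python', 'python-setuptools']
```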
i3visio/deepify | deepify/zeronet.py | https://github.com/i3visio/deepify/blob/2af04e0bea3eaabe96b0565e10f7eeb29b042a2b/deepify/zeronet.py#L52-L81 | def _grabContentFromUrl(self, url):
"""
Function that abstracts capturing a URL. This method rewrites the one from Wrapper.
:param url: The URL to be processed.
    :return: The response in JSON format.
"""
# Defining an empty object for the response
info = {}
# This part has to be modified...
try:
# Configuring the socket
queryURL = "http://" + self.info["host"] + ":" + self.info["port"] + "/" + url
response = urllib2.urlopen(queryURL)
# Rebuilding data to be processed
data = str(response.headers) + "\n"
data += response.read()
# Processing data as expected
info = self._createDataStructure(data)
# Try to make the errors clear for other users
except Exception, e:
errMsg = "ERROR Exception. Something seems to be wrong with the Zeronet Bundler."
raise Exception( errMsg + " " + str(e))
        return info | Function that abstracts capturing a URL. This method rewrites the one from Wrapper.
:param url: The URL to be processed.
:return: The response in JSON format. | python | train |
stan-dev/pystan | pystan/misc.py | https://github.com/stan-dev/pystan/blob/57bdccea11888157e7aaafba083003080a934805/pystan/misc.py#L715-L740 | def _organize_inits(inits, pars, dims):
"""Obtain a list of initial values for each chain.
The parameter 'lp__' will be removed from the chains.
Parameters
----------
inits : list
list of initial values for each chain.
pars : list of str
dims : list of list of int
from (via cython conversion) vector[vector[uint]] dims
Returns
-------
inits : list of dict
"""
try:
idx_of_lp = pars.index('lp__')
del pars[idx_of_lp]
del dims[idx_of_lp]
except ValueError:
pass
starts = _calc_starts(dims)
    return [_par_vector2dict(init, pars, dims, starts) for init in inits] | Obtain a list of initial values for each chain.
The parameter 'lp__' will be removed from the chains.
Parameters
----------
inits : list
list of initial values for each chain.
pars : list of str
dims : list of list of int
from (via cython conversion) vector[vector[uint]] dims
Returns
-------
inits : list of dict | python | train |
tamasgal/km3pipe | km3pipe/tools.py | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/tools.py#L369-L382 | def get_jpp_revision(via_command='JPrint'):
"""Retrieves the Jpp revision number"""
try:
output = subprocess.check_output([via_command, '-v'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
if e.returncode == 1:
output = e.output
else:
return None
except OSError:
return None
revision = output.decode().split('\n')[0].split()[1].strip()
    return revision | Retrieves the Jpp revision number | python | train |
ask/carrot | carrot/backends/base.py | https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/backends/base.py#L31-L35 | def decode(self):
"""Deserialize the message body, returning the original
python structure sent by the publisher."""
return serialization.decode(self.body, self.content_type,
                                    self.content_encoding) | Deserialize the message body, returning the original
python structure sent by the publisher. | python | train |
BernardFW/bernard | src/bernard/misc/main/_base.py | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/misc/main/_base.py#L58-L78 | def main():
"""
Run the appropriate main function according to the output of the parser.
"""
parser = make_parser()
args = parser.parse_args()
if not hasattr(args, 'action'):
parser.print_help()
exit(1)
if args.action == 'sheet':
from bernard.misc.sheet_sync import main as main_sheet
main_sheet(args)
elif args.action == 'run':
from bernard.cli import main as main_run
main_run()
elif args.action == 'start_project':
from bernard.misc.start_project import main as main_sp
        main_sp(args) | Run the appropriate main function according to the output of the parser. | python | train |
gem/oq-engine | openquake/hazardlib/gsim/zhao_2006.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/zhao_2006.py#L142-L157 | def _compute_focal_depth_term(self, C, hypo_depth):
"""
Compute fourth term in equation 1, p. 901.
"""
# p. 901. "(i.e, depth is capped at 125 km)".
focal_depth = hypo_depth
if focal_depth > 125.0:
focal_depth = 125.0
# p. 902. "We used the value of 15 km for the
# depth coefficient hc ...".
hc = 15.0
# p. 901. "When h is larger than hc, the depth terms takes
# effect ...". The next sentence specifies h>=hc.
        return float(focal_depth >= hc) * C['e'] * (focal_depth - hc) | Compute fourth term in equation 1, p. 901. | python | train |
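A worked numerical example of the depth term; `C_e` is a hypothetical coefficient, since the real values come from the GSIM's coefficient table.

```python
hc = 15.0        # depth coefficient from the docstring above
C_e = 0.0181     # hypothetical stand-in for C['e']

def focal_depth_term(hypo_depth):
    depth = min(hypo_depth, 125.0)                  # depth capped at 125 km
    return float(depth >= hc) * C_e * (depth - hc)

print(focal_depth_term(10.0))   # 0.0: shallower than hc, term inactive
print(focal_depth_term(60.0))   # 0.8145 = 0.0181 * (60 - 15)
```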
readbeyond/aeneas | aeneas/ttswrappers/basettswrapper.py | https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/ttswrappers/basettswrapper.py#L128-L137 | def clear(self):
"""
Clear the cache and remove all the files from disk.
"""
self.log(u"Clearing cache...")
for file_handler, file_info in self.cache.values():
self.log([u" Removing file '%s'", file_info])
gf.delete_file(file_handler, file_info)
self._initialize_cache()
self.log(u"Clearing cache... done") | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"log",
"(",
"u\"Clearing cache...\"",
")",
"for",
"file_handler",
",",
"file_info",
"in",
"self",
".",
"cache",
".",
"values",
"(",
")",
":",
"self",
".",
"log",
"(",
"[",
"u\" Removing file '%s'\"",
"... | Clear the cache and remove all the files from disk. | [
"Clear",
"the",
"cache",
"and",
"remove",
"all",
"the",
"files",
"from",
"disk",
"."
] | python | train |
UpCloudLtd/upcloud-python-api | upcloud_api/server.py | https://github.com/UpCloudLtd/upcloud-python-api/blob/954b0ad7c4b932b2be31a95d88975f6b0eeac8ed/upcloud_api/server.py#L435-L437 | def get_public_ip(self, addr_family=None, *args, **kwargs):
"""Alias for get_ip('public')"""
        return self.get_ip('public', addr_family, *args, **kwargs) | Alias for get_ip('public') | python | train |
websauna/pyramid_notebook | pyramid_notebook/views.py | https://github.com/websauna/pyramid_notebook/blob/8a7ecfa0259810de1a818e4b415a62811a7b077a/pyramid_notebook/views.py#L155-L171 | def notebook_proxy(request, username):
"""Renders a IPython Notebook frame wrapper.
Starts or reattachs ot an existing Notebook session.
"""
security_check(request, username)
manager = get_notebook_manager(request)
notebook_info = manager.get_context(username)
if not notebook_info:
raise HTTPInternalServerError("Apparently IPython Notebook daemon process is not running for {}".format(username))
if 'http_port' not in notebook_info:
raise RuntimeError("Notebook terminated prematurely before managed to tell us its HTTP port")
    return proxy_it(request, notebook_info["http_port"]) | Renders an IPython Notebook frame wrapper.
Starts or reattaches to an existing Notebook session. | python | train |
eventbrite/pysoa | pysoa/common/transport/local.py | https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/common/transport/local.py#L103-L107 | def send_response_message(self, request_id, meta, body):
"""
Add the response to the deque.
"""
        self.response_messages.append((request_id, meta, body)) | Add the response to the deque. | python | train |
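A minimal sketch of the in-memory response path above; the class name `LocalResponseQueue` is hypothetical, not part of pysoa.

```python
from collections import deque

class LocalResponseQueue(object):
    def __init__(self):
        self.response_messages = deque()

    def send_response_message(self, request_id, meta, body):
        # append the response to the deque, as in the transport above
        self.response_messages.append((request_id, meta, body))

q = LocalResponseQueue()
q.send_response_message(1, {}, {'ok': True})
print(q.response_messages.popleft())   # (1, {}, {'ok': True})
```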
apple/turicreate | src/unity/python/turicreate/toolkits/recommender/popularity_recommender.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/popularity_recommender.py#L15-L102 | def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
random_seed=0,
verbose=True):
"""
Create a model that makes recommendations using item popularity. When no
target column is provided, the popularity is determined by the number of
observations involving each item. When a target is provided, popularity
is computed using the item's mean target value. When the target column
contains ratings, for example, the model computes the mean rating for
each item and uses this to rank items for recommendations.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
    column may be specified via the variable `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
verbose : bool, optional
Enables verbose output.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m = turicreate.popularity_recommender.create(sf, target='rating')
See Also
--------
PopularityRecommender
"""
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.popularity()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
nearest_items = _turicreate.SFrame()
opts = {'user_id': user_id,
'item_id': item_id,
'target': target,
'random_seed': 1}
extra_data = {"nearest_items" : _turicreate.SFrame()}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
    return PopularityRecommender(model_proxy) | Create a model that makes recommendations using item popularity. When no
target column is provided, the popularity is determined by the number of
observations involving each item. When a target is provided, popularity
is computed using the item's mean target value. When the target column
contains ratings, for example, the model computes the mean rating for
each item and uses this to rank items for recommendations.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified via the variable `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
verbose : bool, optional
Enables verbose output.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m = turicreate.popularity_recommender.create(sf, target='rating')
See Also
--------
PopularityRecommender | python | train |
kgori/treeCl | treeCl/distance_matrix.py | https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/distance_matrix.py#L126-L134 | def check_pd(matrix):
""" A symmetric matrix (M) is PD if it has a Cholesky decomposition, i.e.
M = R.T dot R, where R is upper triangular with positive diagonal entries
"""
try:
np.linalg.cholesky(matrix)
return True
except np.linalg.LinAlgError:
        return False | A symmetric matrix (M) is PD if it has a Cholesky decomposition, i.e.
M = R.T dot R, where R is upper triangular with positive diagonal entries | python | train |
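A usage sketch; the helper is repeated so the snippet runs standalone. The second matrix has eigenvalues 3 and -1, so its Cholesky factorization fails.

```python
import numpy as np

def check_pd(matrix):
    # copy of the helper above, repeated so this snippet is self-contained
    try:
        np.linalg.cholesky(matrix)
        return True
    except np.linalg.LinAlgError:
        return False

print(check_pd(np.eye(3)))                        # True
print(check_pd(np.array([[1., 2.], [2., 1.]])))   # False
```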
BreakingBytes/simkit | simkit/core/models.py | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L221-L233 | def load(self, modelfile, layer=None):
"""
Load or update a model or layers in a model.
:param modelfile: The name of the json file to load.
:type modelfile: str
:param layer: Optionally load only specified layer.
:type layer: str
"""
# read modelfile, convert JSON and load/update model
self.param_file = modelfile
self._load(layer)
        self._update(layer) | Load or update a model or layers in a model.
:param modelfile: The name of the json file to load.
:type modelfile: str
:param layer: Optionally load only specified layer.
:type layer: str | python | train |
openstack/networking-arista | networking_arista/ml2/arista_sync.py | https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/arista_sync.py#L176-L241 | def synchronize_resources(self):
"""Synchronize worker with CVX
All database queries must occur while the sync lock is held. This
tightly couples reads with writes and ensures that an older read
does not result in the last write. Eg:
Worker 1 reads (P1 created)
    Worker 2 reads (P1 deleted)
Worker 2 writes (Delete P1 from CVX)
Worker 1 writes (Create P1 on CVX)
By ensuring that all reads occur with the sync lock held, we ensure
    that Worker 1 completes its writes before Worker 2 is allowed to read.
A failure to write results in a full resync and purges all reads from
memory.
It is also important that we compute resources to sync in reverse sync
order in order to avoid missing dependencies on creation. Eg:
If we query in sync order
1. Query Instances -> I1 isn't there
2. Query Port table -> Port P1 is there, connected to I1
3. We send P1 to CVX without sending I1 -> Error raised
But if we query P1 first:
1. Query Ports P1 -> P1 is not there
2. Query Instances -> find I1
3. We create I1, not P1 -> harmless, mech driver creates P1
Missing dependencies on deletion will helpfully result in the
dependent resource not being created:
1. Query Ports -> P1 is found
2. Query Instances -> I1 not found
3. Creating P1 fails on CVX
"""
# Grab the sync lock
if not self._rpc.sync_start():
LOG.info("%(pid)s Failed to grab the sync lock",
{'pid': os.getpid()})
greenthread.sleep(1)
return
for resource in self._resources_to_update:
self.update_neutron_resource(resource)
self._resources_to_update = list()
# Sync any necessary resources.
# We delete in reverse order and create in order to ensure that
# dependent resources are deleted before the resources they depend
# on and created after them
for resource_type in reversed(self.sync_order):
resource_type.delete_cvx_resources()
for resource_type in self.sync_order:
resource_type.create_cvx_resources()
# Release the sync lock
self._rpc.sync_end()
# Update local uuid if this was a full sync
if self._synchronizing_uuid:
LOG.info("%(pid)s Full sync for cvx uuid %(uuid)s complete",
{'uuid': self._synchronizing_uuid,
'pid': os.getpid()})
self._cvx_uuid = self._synchronizing_uuid
self._synchronizing_uuid = None | [
"def",
"synchronize_resources",
"(",
"self",
")",
":",
"# Grab the sync lock",
"if",
"not",
"self",
".",
"_rpc",
".",
"sync_start",
"(",
")",
":",
"LOG",
".",
"info",
"(",
"\"%(pid)s Failed to grab the sync lock\"",
",",
"{",
"'pid'",
":",
"os",
".",
"getpid",... | Synchronize worker with CVX
All database queries must occur while the sync lock is held. This
tightly couples reads with writes and ensures that an older read
does not result in the last write. Eg:
Worker 1 reads (P1 created)
Worder 2 reads (P1 deleted)
Worker 2 writes (Delete P1 from CVX)
Worker 1 writes (Create P1 on CVX)
By ensuring that all reads occur with the sync lock held, we ensure
that Worker 1 completes its writes before Worker2 is allowed to read.
A failure to write results in a full resync and purges all reads from
memory.
It is also important that we compute resources to sync in reverse sync
order in order to avoid missing dependencies on creation. Eg:
If we query in sync order
1. Query Instances -> I1 isn't there
2. Query Port table -> Port P1 is there, connected to I1
3. We send P1 to CVX without sending I1 -> Error raised
But if we query P1 first:
1. Query Ports P1 -> P1 is not there
2. Query Instances -> find I1
3. We create I1, not P1 -> harmless, mech driver creates P1
Missing dependencies on deletion will helpfully result in the
dependent resource not being created:
1. Query Ports -> P1 is found
2. Query Instances -> I1 not found
3. Creating P1 fails on CVX | [
"Synchronize",
"worker",
"with",
"CVX"
] | python | train |
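The create-in-order / delete-in-reverse rule in the docstring above is a general dependency-ordering pattern, not Arista-specific. A hedged sketch with stand-in resource types (the class and names are illustrative, not the driver's real objects):

```python
class ResourceType:
    """Illustrative stand-in for one CVX resource type."""
    def __init__(self, name):
        self.name = name
    def create_cvx_resources(self):
        print('create', self.name)
    def delete_cvx_resources(self):
        print('delete', self.name)

# Parents come first in sync order; dependents follow.
sync_order = [ResourceType('tenant'), ResourceType('instance'), ResourceType('port')]

# Delete dependents before the resources they depend on...
for resource_type in reversed(sync_order):
    resource_type.delete_cvx_resources()
# ...and create parents before their dependents.
for resource_type in sync_order:
    resource_type.create_cvx_resources()
```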
har07/PySastrawi | src/Sastrawi/Stemmer/Stemmer.py | https://github.com/har07/PySastrawi/blob/01afc81c579bde14dcb41c33686b26af8afab121/src/Sastrawi/Stemmer/Stemmer.py#L31-L36 | def stem_word(self, word):
"""Stem a word to its common stem form."""
if self.is_plural(word):
return self.stem_plural_word(word)
else:
return self.stem_singular_word(word) | [
"def",
"stem_word",
"(",
"self",
",",
"word",
")",
":",
"if",
"self",
".",
"is_plural",
"(",
"word",
")",
":",
"return",
"self",
".",
"stem_plural_word",
"(",
"word",
")",
"else",
":",
"return",
"self",
".",
"stem_singular_word",
"(",
"word",
")"
] | Stem a word to its common stem form. | [
"Stem",
"a",
"word",
"to",
"its",
"common",
"stem",
"form",
"."
] | python | train |
mwickert/scikit-dsp-comm | sk_dsp_comm/fec_conv.py | https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fec_conv.py#L621-L708 | def depuncture(self,soft_bits,puncture_pattern = ('110','101'),
erase_value = 3.5):
"""
Apply de-puncturing to the soft bits coming from the channel. Erasure bits
are inserted to return the soft bit values back to a form that can be
Viterbi decoded.
:param soft_bits:
:param puncture_pattern:
:param erase_value:
:return:
Examples
--------
This example uses the following puncture matrix:
.. math::
\\begin{align*}
\\mathbf{A} = \\begin{bmatrix}
1 & 1 & 0 \\\\
1 & 0 & 1
\\end{bmatrix}
\\end{align*}
The upper row operates on the outputs for the :math:`G_{1}` polynomial and the lower row operates on the outputs of
the :math:`G_{2}` polynomial.
>>> import numpy as np
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> cc = fec_conv(('101','111'))
>>> x = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0])
>>> state = '00'
>>> y, state = cc.conv_encoder(x, state)
>>> yp = cc.puncture(y, ('110','101'))
>>> cc.depuncture(yp, ('110', '101'), 1)
array([ 0., 0., 0., 1., 1., 1., 1., 0., 0., 1., 1., 0., 1., 1., 0., 1., 1., 0.]
"""
# Check to see that the length of soft_bits is consistent with a rate
# 1/2 code.
L_pp = len(puncture_pattern[0])
L_pp1 = len([g1 for g1 in puncture_pattern[0] if g1 == '1'])
L_pp0 = len([g1 for g1 in puncture_pattern[0] if g1 == '0'])
#L_pp0 = len([g1 for g1 in pp1 if g1 == '0'])
N_softwords = int(np.floor(len(soft_bits)/float(2)))
if 2*N_softwords != len(soft_bits):
warnings.warn('Number of soft bits must be even!')
warnings.warn('Truncating bits to be compatible.')
soft_bits = soft_bits[:2*N_softwords]
# Extract the G1p and G2p encoded bits from the serial stream.
# Assume the stream is of the form [G1p G2p G1p G2p ... ],
# which for QPSK may be of the form [Ip Qp Ip Qp Ip Qp ... ]
x_G1 = soft_bits.reshape(N_softwords,2).take([0],
axis=1).reshape(1,N_softwords).flatten()
x_G2 = soft_bits.reshape(N_softwords,2).take([1],
axis=1).reshape(1,N_softwords).flatten()
# Check to see that the length of x_G1 and x_G2 is consistent with the
# puncture length period of the soft bits
N_punct_periods = int(np.floor(N_softwords/float(L_pp1)))
if L_pp1*N_punct_periods != N_softwords:
warnings.warn('Number of soft bits per puncture period is %d' % L_pp1)
warnings.warn('The number of soft bits is not a multiple')
warnings.warn('Truncating soft bits to be compatible.')
x_G1 = x_G1[:L_pp1*N_punct_periods]
x_G2 = x_G2[:L_pp1*N_punct_periods]
x_G1 = x_G1.reshape(N_punct_periods,L_pp1)
x_G2 = x_G2.reshape(N_punct_periods,L_pp1)
#Depuncture x_G1 and x_G1
g1_pp1 = [k for k,g1 in enumerate(puncture_pattern[0]) if g1 == '1']
g1_pp0 = [k for k,g1 in enumerate(puncture_pattern[0]) if g1 == '0']
g2_pp1 = [k for k,g2 in enumerate(puncture_pattern[1]) if g2 == '1']
g2_pp0 = [k for k,g2 in enumerate(puncture_pattern[1]) if g2 == '0']
x_E = erase_value*np.ones((N_punct_periods,L_pp0))
y_G1 = np.hstack((x_G1,x_E))
y_G2 = np.hstack((x_G2,x_E))
[g1_pp1.append(val) for idx,val in enumerate(g1_pp0)]
g1_comp = list(zip(g1_pp1,list(range(L_pp))))
g1_comp.sort()
G1_col_permute = [g1_comp[idx][1] for idx in range(L_pp)]
[g2_pp1.append(val) for idx,val in enumerate(g2_pp0)]
g2_comp = list(zip(g2_pp1,list(range(L_pp))))
g2_comp.sort()
G2_col_permute = [g2_comp[idx][1] for idx in range(L_pp)]
#permute columns to place erasure bits in the correct position
y = np.hstack((y_G1[:,G1_col_permute].reshape(L_pp*N_punct_periods,1),
y_G2[:,G2_col_permute].reshape(L_pp*N_punct_periods,
1))).reshape(1,2*L_pp*N_punct_periods).flatten()
return y | [
"def",
"depuncture",
"(",
"self",
",",
"soft_bits",
",",
"puncture_pattern",
"=",
"(",
"'110'",
",",
"'101'",
")",
",",
"erase_value",
"=",
"3.5",
")",
":",
"# Check to see that the length of soft_bits is consistent with a rate\r",
"# 1/2 code.\r",
"L_pp",
"=",
"len",... | Apply de-puncturing to the soft bits coming from the channel. Erasure bits
are inserted to return the soft bit values back to a form that can be
Viterbi decoded.
:param soft_bits:
:param puncture_pattern:
:param erase_value:
:return:
Examples
--------
This example uses the following puncture matrix:
.. math::
\\begin{align*}
\\mathbf{A} = \\begin{bmatrix}
1 & 1 & 0 \\\\
1 & 0 & 1
\\end{bmatrix}
\\end{align*}
The upper row operates on the outputs for the :math:`G_{1}` polynomial and the lower row operates on the outputs of
the :math:`G_{2}` polynomial.
>>> import numpy as np
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> cc = fec_conv(('101','111'))
>>> x = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0])
>>> state = '00'
>>> y, state = cc.conv_encoder(x, state)
>>> yp = cc.puncture(y, ('110','101'))
>>> cc.depuncture(yp, ('110', '101'), 1)
array([ 0., 0., 0., 1., 1., 1., 1., 0., 0., 1., 1., 0., 1., 1., 0., 1., 1., 0.]) | [
"Apply",
"de",
"-",
"puncturing",
"to",
"the",
"soft",
"bits",
"coming",
"from",
"the",
"channel",
".",
"Erasure",
"bits",
"are",
"inserted",
"to",
"return",
"the",
"soft",
"bit",
"values",
"back",
"to",
"a",
"form",
"that",
"can",
"be",
"Viterbi",
"deco... | python | valid |
DataONEorg/d1_python | lib_client/src/d1_client/cnclient.py | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/cnclient.py#L836-L849 | def updateGroupResponse(self, group, vendorSpecific=None):
"""CNIdentity.addGroupMembers(session, groupName, members) → boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.addGroupMembers.
Args:
group:
vendorSpecific:
Returns:
"""
mmp_dict = {'group': ('group.xml', group.toxml('utf-8'))}
return self.PUT('groups', fields=mmp_dict, headers=vendorSpecific) | [
"def",
"updateGroupResponse",
"(",
"self",
",",
"group",
",",
"vendorSpecific",
"=",
"None",
")",
":",
"mmp_dict",
"=",
"{",
"'group'",
":",
"(",
"'group.xml'",
",",
"group",
".",
"toxml",
"(",
"'utf-8'",
")",
")",
"}",
"return",
"self",
".",
"PUT",
"(... | CNIdentity.addGroupMembers(session, groupName, members) → boolean
https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNIdentity.addGroupMembers.
Args:
group:
vendorSpecific:
Returns: | [
"CNIdentity",
".",
"addGroupMembers",
"(",
"session",
"groupName",
"members",
")",
"→",
"boolean",
"https",
":",
"//",
"releases",
".",
"dataone",
".",
"org",
"/",
"online",
"/",
"api",
"-",
"documentation",
"-",
"v2",
".",
"0",
".",
"1",
"/",
"apis",
... | python | train |
marcomusy/vtkplotter | vtkplotter/actors.py | https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L1887-L1918 | def decimate(self, fraction=0.5, N=None, boundaries=False, verbose=True):
"""
Downsample the number of vertices in a mesh.
:param float fraction: the desired target of reduction.
:param int N: the desired number of final points (**fraction** is recalculated based on it).
:param bool boundaries: (True), decide whether to leave boundaries untouched or not.
.. note:: Setting ``fraction=0.1`` leaves 10% of the original nr of vertices.
.. hint:: |skeletonize| |skeletonize.py|_
"""
poly = self.polydata(True)
if N: # N = desired number of points
Np = poly.GetNumberOfPoints()
fraction = float(N) / Np
if fraction >= 1:
return self
decimate = vtk.vtkDecimatePro()
decimate.SetInputData(poly)
decimate.SetTargetReduction(1 - fraction)
decimate.PreserveTopologyOff()
if boundaries:
decimate.BoundaryVertexDeletionOff()
else:
decimate.BoundaryVertexDeletionOn()
decimate.Update()
if verbose:
print("Nr. of pts, input:", poly.GetNumberOfPoints(), end="")
print(" output:", decimate.GetOutput().GetNumberOfPoints())
return self.updateMesh(decimate.GetOutput()) | [
"def",
"decimate",
"(",
"self",
",",
"fraction",
"=",
"0.5",
",",
"N",
"=",
"None",
",",
"boundaries",
"=",
"False",
",",
"verbose",
"=",
"True",
")",
":",
"poly",
"=",
"self",
".",
"polydata",
"(",
"True",
")",
"if",
"N",
":",
"# N = desired number ... | Downsample the number of vertices in a mesh.
:param float fraction: the desired target of reduction.
:param int N: the desired number of final points (**fraction** is recalculated based on it).
:param bool boundaries: (True), decide whether to leave boundaries untouched or not.
.. note:: Setting ``fraction=0.1`` leaves 10% of the original nr of vertices.
.. hint:: |skeletonize| |skeletonize.py|_ | [
"Downsample",
"the",
"number",
"of",
"vertices",
"in",
"a",
"mesh",
"."
] | python | train |
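The wrapper above is a thin front end over VTK's `vtkDecimatePro`; the detail worth remembering is that `fraction` is the *kept* share, so the filter's target reduction is `1 - fraction`. A bare-VTK sketch against a generated sphere (assumes the `vtk` package; the numbers are illustrative):

```python
import vtk

# Build some dense input geometry to decimate.
src = vtk.vtkSphereSource()
src.SetThetaResolution(64)
src.SetPhiResolution(64)
src.Update()
poly = src.GetOutput()

fraction = 0.25                              # keep ~25% of the vertices
decimate = vtk.vtkDecimatePro()
decimate.SetInputData(poly)
decimate.SetTargetReduction(1 - fraction)    # reduction is the *removed* share
decimate.BoundaryVertexDeletionOn()
decimate.Update()

print(poly.GetNumberOfPoints(), '->', decimate.GetOutput().GetNumberOfPoints())
```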
tgsmith61591/pmdarima | pmdarima/datasets/wineind.py | https://github.com/tgsmith61591/pmdarima/blob/a133de78ba5bd68da9785b061f519ba28cd514cc/pmdarima/datasets/wineind.py#L19-L112 | def load_wineind(as_series=False):
"""Australian total wine sales by wine makers in bottles <= 1 litre.
This time-series records wine sales by Australian wine makers between
Jan 1980 -- Aug 1994. This dataset is found in the R ``forecast`` package.
Parameters
----------
as_series : bool, optional (default=False)
Whether to return a Pandas series. If True, the index will be set to
the observed years/months. If False, will return a 1d numpy array.
Notes
-----
This is monthly data, so *m* should be set to 12 when using in a seasonal
context.
Examples
--------
>>> from pmdarima.datasets import load_wineind
>>> load_wineind()
array([15136, 16733, 20016, 17708, 18019, 19227, 22893, 23739, 21133,
22591, 26786, 29740, 15028, 17977, 20008, 21354, 19498, 22125,
25817, 28779, 20960, 22254, 27392, 29945, 16933, 17892, 20533,
23569, 22417, 22084, 26580, 27454, 24081, 23451, 28991, 31386,
16896, 20045, 23471, 21747, 25621, 23859, 25500, 30998, 24475,
23145, 29701, 34365, 17556, 22077, 25702, 22214, 26886, 23191,
27831, 35406, 23195, 25110, 30009, 36242, 18450, 21845, 26488,
22394, 28057, 25451, 24872, 33424, 24052, 28449, 33533, 37351,
19969, 21701, 26249, 24493, 24603, 26485, 30723, 34569, 26689,
26157, 32064, 38870, 21337, 19419, 23166, 28286, 24570, 24001,
33151, 24878, 26804, 28967, 33311, 40226, 20504, 23060, 23562,
27562, 23940, 24584, 34303, 25517, 23494, 29095, 32903, 34379,
16991, 21109, 23740, 25552, 21752, 20294, 29009, 25500, 24166,
26960, 31222, 38641, 14672, 17543, 25453, 32683, 22449, 22316,
27595, 25451, 25421, 25288, 32568, 35110, 16052, 22146, 21198,
19543, 22084, 23816, 29961, 26773, 26635, 26972, 30207, 38687,
16974, 21697, 24179, 23757, 25013, 24019, 30345, 24488, 25156,
25650, 30923, 37240, 17466, 19463, 24352, 26805, 25236, 24735,
29356, 31234, 22724, 28496, 32857, 37198, 13652, 22784, 23565,
26323, 23779, 27549, 29660, 23356])
>>> load_wineind(True).head()
Jan 1980 15136
Feb 1980 16733
Mar 1980 20016
Apr 1980 17708
May 1980 18019
dtype: int64
References
----------
.. [1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/wineind # noqa: E501
Returns
-------
rslt : array-like, shape=(n_samples,)
The wineind dataset. There are 176 observations.
"""
rslt = np.array([15136, 16733, 20016, 17708, 18019, 19227, 22893, 23739,
21133, 22591, 26786, 29740, 15028, 17977, 20008, 21354,
19498, 22125, 25817, 28779, 20960, 22254, 27392, 29945,
16933, 17892, 20533, 23569, 22417, 22084, 26580, 27454,
24081, 23451, 28991, 31386, 16896, 20045, 23471, 21747,
25621, 23859, 25500, 30998, 24475, 23145, 29701, 34365,
17556, 22077, 25702, 22214, 26886, 23191, 27831, 35406,
23195, 25110, 30009, 36242, 18450, 21845, 26488, 22394,
28057, 25451, 24872, 33424, 24052, 28449, 33533, 37351,
19969, 21701, 26249, 24493, 24603, 26485, 30723, 34569,
26689, 26157, 32064, 38870, 21337, 19419, 23166, 28286,
24570, 24001, 33151, 24878, 26804, 28967, 33311, 40226,
20504, 23060, 23562, 27562, 23940, 24584, 34303, 25517,
23494, 29095, 32903, 34379, 16991, 21109, 23740, 25552,
21752, 20294, 29009, 25500, 24166, 26960, 31222, 38641,
14672, 17543, 25453, 32683, 22449, 22316, 27595, 25451,
25421, 25288, 32568, 35110, 16052, 22146, 21198, 19543,
22084, 23816, 29961, 26773, 26635, 26972, 30207, 38687,
16974, 21697, 24179, 23757, 25013, 24019, 30345, 24488,
25156, 25650, 30923, 37240, 17466, 19463, 24352, 26805,
25236, 24735, 29356, 31234, 22724, 28496, 32857, 37198,
13652, 22784, 23565, 26323, 23779, 27549, 29660, 23356])
if not as_series:
return rslt
# Otherwise we want a series and have to cleverly create the index
# (we don't want after aug in 1994, so trip Sep, Oct, Nov and Dec)
index = [
"%s %i" % (calendar.month_abbr[i + 1], year)
for year in range(1980, 1995)
for i in range(12)
][:-4]
return pd.Series(rslt, index=index) | [
"def",
"load_wineind",
"(",
"as_series",
"=",
"False",
")",
":",
"rslt",
"=",
"np",
".",
"array",
"(",
"[",
"15136",
",",
"16733",
",",
"20016",
",",
"17708",
",",
"18019",
",",
"19227",
",",
"22893",
",",
"23739",
",",
"21133",
",",
"22591",
",",
... | Australian total wine sales by wine makers in bottles <= 1 litre.
This time-series records wine sales by Australian wine makers between
Jan 1980 -- Aug 1994. This dataset is found in the R ``forecast`` package.
Parameters
----------
as_series : bool, optional (default=False)
Whether to return a Pandas series. If True, the index will be set to
the observed years/months. If False, will return a 1d numpy array.
Notes
-----
This is monthly data, so *m* should be set to 12 when using in a seasonal
context.
Examples
--------
>>> from pmdarima.datasets import load_wineind
>>> load_wineind()
array([15136, 16733, 20016, 17708, 18019, 19227, 22893, 23739, 21133,
22591, 26786, 29740, 15028, 17977, 20008, 21354, 19498, 22125,
25817, 28779, 20960, 22254, 27392, 29945, 16933, 17892, 20533,
23569, 22417, 22084, 26580, 27454, 24081, 23451, 28991, 31386,
16896, 20045, 23471, 21747, 25621, 23859, 25500, 30998, 24475,
23145, 29701, 34365, 17556, 22077, 25702, 22214, 26886, 23191,
27831, 35406, 23195, 25110, 30009, 36242, 18450, 21845, 26488,
22394, 28057, 25451, 24872, 33424, 24052, 28449, 33533, 37351,
19969, 21701, 26249, 24493, 24603, 26485, 30723, 34569, 26689,
26157, 32064, 38870, 21337, 19419, 23166, 28286, 24570, 24001,
33151, 24878, 26804, 28967, 33311, 40226, 20504, 23060, 23562,
27562, 23940, 24584, 34303, 25517, 23494, 29095, 32903, 34379,
16991, 21109, 23740, 25552, 21752, 20294, 29009, 25500, 24166,
26960, 31222, 38641, 14672, 17543, 25453, 32683, 22449, 22316,
27595, 25451, 25421, 25288, 32568, 35110, 16052, 22146, 21198,
19543, 22084, 23816, 29961, 26773, 26635, 26972, 30207, 38687,
16974, 21697, 24179, 23757, 25013, 24019, 30345, 24488, 25156,
25650, 30923, 37240, 17466, 19463, 24352, 26805, 25236, 24735,
29356, 31234, 22724, 28496, 32857, 37198, 13652, 22784, 23565,
26323, 23779, 27549, 29660, 23356])
>>> load_wineind(True).head()
Jan 1980 15136
Feb 1980 16733
Mar 1980 20016
Apr 1980 17708
May 1980 18019
dtype: int64
References
----------
.. [1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/wineind # noqa: E501
Returns
-------
rslt : array-like, shape=(n_samples,)
The wineind dataset. There are 176 observations. | [
"Australian",
"total",
"wine",
"sales",
"by",
"wine",
"makers",
"in",
"bottles",
"<",
"=",
"1",
"litre",
"."
] | python | train |
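Since `wineind` is monthly, the docstring's note about `m` matters downstream: one seasonal cycle is 12 observations. A typical use with pmdarima's own `auto_arima` (kept minimal; fitting can take a little while):

```python
import pmdarima as pm
from pmdarima.datasets import load_wineind

y = load_wineind()
# Monthly data -> one seasonal cycle is 12 observations.
model = pm.auto_arima(y, seasonal=True, m=12, suppress_warnings=True)
print(model.order, model.seasonal_order)
```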
geomet/geomet | geomet/wkt.py | https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/wkt.py#L334-L363 | def _load_point(tokens, string):
"""
:param tokens:
A generator of string tokens for the input WKT, beginning just after the
geometry type. The geometry type is consumed before we get to here. For
example, if :func:`loads` is called with the input 'POINT(0.0 1.0)',
``tokens`` would generate the following values:
.. code-block:: python
['(', '0.0', '1.0', ')']
:param str string:
The original WKT string.
:returns:
A GeoJSON `dict` Point representation of the WKT ``string``.
"""
if not next(tokens) == '(':
raise ValueError(INVALID_WKT_FMT % string)
coords = []
try:
for t in tokens:
if t == ')':
break
else:
coords.append(float(t))
except tokenize.TokenError:
raise ValueError(INVALID_WKT_FMT % string)
return dict(type='Point', coordinates=coords) | [
"def",
"_load_point",
"(",
"tokens",
",",
"string",
")",
":",
"if",
"not",
"next",
"(",
"tokens",
")",
"==",
"'('",
":",
"raise",
"ValueError",
"(",
"INVALID_WKT_FMT",
"%",
"string",
")",
"coords",
"=",
"[",
"]",
"try",
":",
"for",
"t",
"in",
"tokens... | :param tokens:
A generator of string tokens for the input WKT, begining just after the
geometry type. The geometry type is consumed before we get to here. For
example, if :func:`loads` is called with the input 'POINT(0.0 1.0)',
``tokens`` would generate the following values:
.. code-block:: python
['(', '0.0', '1.0', ')']
:param str string:
The original WKT string.
:returns:
A GeoJSON `dict` Point representation of the WKT ``string``. | [
":",
"param",
"tokens",
":",
"A",
"generator",
"of",
"string",
"tokens",
"for",
"the",
"input",
"WKT",
"begining",
"just",
"after",
"the",
"geometry",
"type",
".",
"The",
"geometry",
"type",
"is",
"consumed",
"before",
"we",
"get",
"to",
"here",
".",
"Fo... | python | train |
berkerpeksag/astor | astor/string_repr.py | https://github.com/berkerpeksag/astor/blob/d9e893eb49d9eb2e30779680f90cd632c30e0ba1/astor/string_repr.py#L48-L55 | def _prep_triple_quotes(s, mysplit=mysplit, replacements=replacements):
""" Split the string up and force-feed some replacements
to make sure it will round-trip OK
"""
s = mysplit(s)
s[1::2] = (replacements[x] for x in s[1::2])
return ''.join(s) | [
"def",
"_prep_triple_quotes",
"(",
"s",
",",
"mysplit",
"=",
"mysplit",
",",
"replacements",
"=",
"replacements",
")",
":",
"s",
"=",
"mysplit",
"(",
"s",
")",
"s",
"[",
"1",
":",
":",
"2",
"]",
"=",
"(",
"replacements",
"[",
"x",
"]",
"for",
"x",
... | Split the string up and force-feed some replacements
to make sure it will round-trip OK | [
"Split",
"the",
"string",
"up",
"and",
"force",
"-",
"feed",
"some",
"replacements",
"to",
"make",
"sure",
"it",
"will",
"round",
"-",
"trip",
"OK"
] | python | train |
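The `s[1::2]` assignment above works because `re.split` with a capturing group interleaves the separators at the odd indices of the result. A minimal demonstration with an illustrative one-entry replacement table (astor's real table covers quotes and backslashes):

```python
import re

mysplit = re.compile(r'(")').split   # the capture keeps each quote in the output
replacements = {'"': '\\"'}

s = mysplit('say "hi" twice')
# -> ['say ', '"', 'hi', '"', ' twice']: separators occupy the odd slots.
s[1::2] = (replacements[x] for x in s[1::2])
print(''.join(s))                    # -> say \"hi\" twice
```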
numenta/htmresearch | htmresearch/frameworks/layers/object_machine_base.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/object_machine_base.py#L155-L204 | def objectConfusion(self):
"""
Compute overlap between each pair of objects. Computes the average number
of feature/location pairs that are identical, as well as the average number
of shared locations and features.
This function will raise an exception if two objects are identical.
Returns the tuple:
(avg common pairs, avg common locations, avg common features)
"""
objects = self.getObjects()
if len(objects) == 0:
return 0.0, 0.0, 0.0
sumCommonLocations = 0
sumCommonFeatures = 0
sumCommonPairs = 0
numObjects = 0
commonPairHistogram = numpy.zeros(len(objects[0]), dtype=numpy.int32)
for o1, s1 in objects.iteritems():
for o2, s2 in objects.iteritems():
if o1 != o2:
# Count number of common locations id's and common feature id's
commonLocations = 0
commonFeatures = 0
for pair1 in s1:
for pair2 in s2:
if pair1[0] == pair2[0]: commonLocations += 1
if pair1[1] == pair2[1]: commonFeatures += 1
# print "Confusion",o1,o2,", common pairs=",len(set(s1)&set(s2)),
# print ", common locations=",commonLocations,"common features=",commonFeatures
if len(set(s1) & set(s2)) == len(s1):
raise RuntimeError("Two objects are identical!")
sumCommonPairs += len(set(s1) & set(s2))
sumCommonLocations += commonLocations
sumCommonFeatures += commonFeatures
commonPairHistogram[len(set(s1) & set(s2))] += 1
numObjects += 1
# print "Common pair histogram=", commonPairHistogram
return (sumCommonPairs / float(numObjects),
sumCommonLocations / float(numObjects),
sumCommonFeatures / float(numObjects)
) | [
"def",
"objectConfusion",
"(",
"self",
")",
":",
"objects",
"=",
"self",
".",
"getObjects",
"(",
")",
"if",
"len",
"(",
"objects",
")",
"==",
"0",
":",
"return",
"0.0",
",",
"0.0",
",",
"0.0",
"sumCommonLocations",
"=",
"0",
"sumCommonFeatures",
"=",
"... | Compute overlap between each pair of objects. Computes the average number
of feature/location pairs that are identical, as well as the average number
of shared locations and features.
This function will raise an exception if two objects are identical.
Returns the tuple:
(avg common pairs, avg common locations, avg common features) | [
"Compute",
"overlap",
"between",
"each",
"pair",
"of",
"objects",
".",
"Computes",
"the",
"average",
"number",
"of",
"feature",
"/",
"location",
"pairs",
"that",
"are",
"identical",
"as",
"well",
"as",
"the",
"average",
"number",
"of",
"shared",
"locations",
... | python | train |
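The pairwise loops above can be compressed with set algebra when only *distinct* shared ids matter (the row's nested loops count with multiplicity, which differs when ids repeat). A standalone sketch over toy (location, feature) objects; the object contents are made up:

```python
objects = {
    'obj1': [(0, 'A'), (1, 'B'), (2, 'C')],
    'obj2': [(0, 'A'), (1, 'D'), (3, 'C')],
}

s1, s2 = set(objects['obj1']), set(objects['obj2'])
common_pairs = len(s1 & s2)                                      # identical (loc, feat) pairs
common_locations = len({p[0] for p in s1} & {p[0] for p in s2})  # shared location ids
common_features = len({p[1] for p in s1} & {p[1] for p in s2})   # shared feature ids
assert (common_pairs, common_locations, common_features) == (1, 2, 2)
assert s1 != s2   # the row raises RuntimeError when two objects are identical
```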
materialsproject/pymatgen | pymatgen/vis/plotters.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/vis/plotters.py#L135-L144 | def save_plot(self, filename, img_format="eps", **kwargs):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
"""
plt = self.get_plot(**kwargs)
plt.savefig(filename, format=img_format) | [
"def",
"save_plot",
"(",
"self",
",",
"filename",
",",
"img_format",
"=",
"\"eps\"",
",",
"*",
"*",
"kwargs",
")",
":",
"plt",
"=",
"self",
".",
"get_plot",
"(",
"*",
"*",
"kwargs",
")",
"plt",
".",
"savefig",
"(",
"filename",
",",
"format",
"=",
"... | Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS. | [
"Save",
"matplotlib",
"plot",
"to",
"a",
"file",
"."
] | python | train |
Qiskit/qiskit-terra | qiskit/tools/qcvv/fitters.py | https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/tools/qcvv/fitters.py#L23-L26 | def exp_fit_fun(x, a, tau, c):
"""Function used to fit the exponential decay."""
# pylint: disable=invalid-name
return a * np.exp(-x / tau) + c | [
"def",
"exp_fit_fun",
"(",
"x",
",",
"a",
",",
"tau",
",",
"c",
")",
":",
"# pylint: disable=invalid-name",
"return",
"a",
"*",
"np",
".",
"exp",
"(",
"-",
"x",
"/",
"tau",
")",
"+",
"c"
] | Function used to fit the exponential decay. | [
"Function",
"used",
"to",
"fit",
"the",
"exponential",
"decay",
"."
] | python | test |
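The decay model above is usually handed to scipy's `curve_fit` to recover `(a, tau, c)` from measured data; a minimal round-trip on synthetic points (not Qiskit results):

```python
import numpy as np
from scipy.optimize import curve_fit

def exp_fit_fun(x, a, tau, c):
    return a * np.exp(-x / tau) + c

x = np.linspace(0, 50, 200)
rng = np.random.default_rng(0)
y = exp_fit_fun(x, 1.0, 10.0, 0.1) + rng.normal(0, 0.01, x.size)

popt, _ = curve_fit(exp_fit_fun, x, y, p0=(1.0, 5.0, 0.0))
print(popt)  # approximately [1.0, 10.0, 0.1]
```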
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_policer.py#L539-L554 | def policy_map_clss_span_session(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
policy_map = ET.SubElement(config, "policy-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
po_name_key = ET.SubElement(policy_map, "po-name")
po_name_key.text = kwargs.pop('po_name')
clss = ET.SubElement(policy_map, "class")
cl_name_key = ET.SubElement(clss, "cl-name")
cl_name_key.text = kwargs.pop('cl_name')
span = ET.SubElement(clss, "span")
session = ET.SubElement(span, "session")
session.text = kwargs.pop('session')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"policy_map_clss_span_session",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"policy_map",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"policy-map\"",
",",
"xmlns",
"=",
"\"urn:... | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
ihgazni2/elist | elist/elist.py | https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L5089-L5113 | def replace_value_seqs(ol,src_value,dst_value,seqs,**kwargs):
'''
from elist.elist import *
ol = [1,'a',3,'a',5,'a',6,'a']
id(ol)
new = replace_value_seqs(ol,'a','AAA',[0,1])
ol
new
id(ol)
id(new)
####
ol = [1,'a',3,'a',5,'a',6,'a']
id(ol)
rslt = replace_value_seqs(ol,'a','AAA',[0,1],mode="original")
ol
rslt
id(ol)
id(rslt)
'''
if('mode' in kwargs):
mode = kwargs["mode"]
else:
mode = "new"
indexes = indexes_seqs(ol,src_value,seqs)
return(replace_indexes(ol,dst_value,indexes,mode=mode)) | [
"def",
"replace_value_seqs",
"(",
"ol",
",",
"src_value",
",",
"dst_value",
",",
"seqs",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"'mode'",
"in",
"kwargs",
")",
":",
"mode",
"=",
"kwargs",
"[",
"\"mode\"",
"]",
"else",
":",
"mode",
"=",
"\"new\"... | from elist.elist import *
ol = [1,'a',3,'a',5,'a',6,'a']
id(ol)
new = replace_value_seqs(ol,'a','AAA',[0,1])
ol
new
id(ol)
id(new)
####
ol = [1,'a',3,'a',5,'a',6,'a']
id(ol)
rslt = replace_value_seqs(ol,'a','AAA',[0,1],mode="original")
ol
rslt
id(ol)
id(rslt) | [
"from",
"elist",
".",
"elist",
"import",
"*",
"ol",
"=",
"[",
"1",
"a",
"3",
"a",
"5",
"a",
"6",
"a",
"]",
"id",
"(",
"ol",
")",
"new",
"=",
"replace_value_seqs",
"(",
"ol",
"a",
"AAA",
"[",
"0",
"1",
"]",
")",
"ol",
"new",
"id",
"(",
"ol",... | python | valid |
androguard/androguard | androguard/core/bytecodes/apk.py | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/apk.py#L1868-L1881 | def get_certificates_der_v3(self):
"""
Return a list of DER coded X.509 certificates from the v3 signature block
"""
if self._v3_signing_data == None:
self.parse_v3_signing_block()
certs = []
for signed_data in [signer.signed_data for signer in self._v3_signing_data]:
for cert in signed_data.certificates:
certs.append(cert)
return certs | [
"def",
"get_certificates_der_v3",
"(",
"self",
")",
":",
"if",
"self",
".",
"_v3_signing_data",
"==",
"None",
":",
"self",
".",
"parse_v3_signing_block",
"(",
")",
"certs",
"=",
"[",
"]",
"for",
"signed_data",
"in",
"[",
"signer",
".",
"signed_data",
"for",
... | Return a list of DER coded X.509 certificates from the v3 signature block | [
"Return",
"a",
"list",
"of",
"DER",
"coded",
"X",
".",
"509",
"certificates",
"from",
"the",
"v3",
"signature",
"block"
] | python | train |
explosion/spaCy | spacy/language.py | https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/language.py#L886-L894 | def restore(self):
"""Restore the pipeline to its state when DisabledPipes was created."""
current, self.nlp.pipeline = self.nlp.pipeline, self.original_pipeline
unexpected = [name for name, pipe in current if not self.nlp.has_pipe(name)]
if unexpected:
# Don't change the pipeline if we're raising an error.
self.nlp.pipeline = current
raise ValueError(Errors.E008.format(names=unexpected))
self[:] = [] | [
"def",
"restore",
"(",
"self",
")",
":",
"current",
",",
"self",
".",
"nlp",
".",
"pipeline",
"=",
"self",
".",
"nlp",
".",
"pipeline",
",",
"self",
".",
"original_pipeline",
"unexpected",
"=",
"[",
"name",
"for",
"name",
",",
"pipe",
"in",
"current",
... | Restore the pipeline to its state when DisabledPipes was created. | [
"Restore",
"the",
"pipeline",
"to",
"its",
"state",
"when",
"DisabledPipes",
"was",
"created",
"."
] | python | train |
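`DisabledPipes` is the object behind spaCy 2.x's `nlp.disable_pipes(...)`; used as a context manager, `restore()` runs on exit, and the error path above refuses to restore if a disabled component was removed in the meantime. A hedged usage sketch (assumes the small English model is installed):

```python
import spacy

nlp = spacy.load('en_core_web_sm')
with nlp.disable_pipes('tagger', 'parser'):
    doc = nlp('Only the remaining components run inside this block.')
# On exit the original pipeline order is reinstated.
print([name for name, _ in nlp.pipeline])
```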
Azure/azure-cli-extensions | src/alias/azext_alias/_validators.py | https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/alias/azext_alias/_validators.py#L127-L144 | def _validate_pos_args_syntax(alias_name, alias_command):
"""
Check if the positional argument syntax is valid in alias name and alias command.
Args:
alias_name: The name of the alias to validate.
alias_command: The command to validate.
"""
pos_args_from_alias = get_placeholders(alias_name)
# Split by '|' to extract positional argument name from Jinja filter (e.g. {{ arg_name | upper }})
# Split by '.' to extract positional argument name from function call (e.g. {{ arg_name.split()[0] }})
pos_args_from_command = [x.split('|')[0].split('.')[0].strip() for x in get_placeholders(alias_command)]
if set(pos_args_from_alias) != set(pos_args_from_command):
arg_diff = set(pos_args_from_alias) ^ set(pos_args_from_command)
raise CLIError(INCONSISTENT_ARG_ERROR.format('' if len(arg_diff) == 1 else 's',
arg_diff,
'is' if len(arg_diff) == 1 else 'are')) | [
"def",
"_validate_pos_args_syntax",
"(",
"alias_name",
",",
"alias_command",
")",
":",
"pos_args_from_alias",
"=",
"get_placeholders",
"(",
"alias_name",
")",
"# Split by '|' to extract positional argument name from Jinja filter (e.g. {{ arg_name | upper }})",
"# Split by '.' to extrac... | Check if the positional argument syntax is valid in alias name and alias command.
Args:
alias_name: The name of the alias to validate.
alias_command: The command to validate. | [
"Check",
"if",
"the",
"positional",
"argument",
"syntax",
"is",
"valid",
"in",
"alias",
"name",
"and",
"alias",
"command",
"."
] | python | train |
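The symmetric-difference check above is easy to reproduce: pull `{{ ... }}` placeholder names from both strings, strip Jinja filters (`|`) and attribute access (`.`), and compare the sets. The extraction regex below is an assumption standing in for `get_placeholders`:

```python
import re

def placeholders(text):
    # '{{ arg | upper }}' and '{{ arg.split()[0] }}' both reduce to 'arg'.
    raw = re.findall(r'{{\s*(.*?)\s*}}', text)
    return {x.split('|')[0].split('.')[0].strip() for x in raw}

alias_name = 'cp {{ src }} {{ dst }}'
alias_command = 'storage copy --source {{ src | lower }} --dest {{ dst }}'

diff = placeholders(alias_name) ^ placeholders(alias_command)
if diff:
    raise ValueError('inconsistent positional argument(s): %s' % diff)
```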
arista-eosplus/pyeapi | pyeapi/client.py | https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/client.py#L583-L609 | def section(self, regex, config='running_config'):
"""Returns a section of the config
Args:
regex (str): A valid regular expression used to select sections
of configuration to return
config (str): The configuration to return. Valid values for config
are "running_config" or "startup_config". The default value
is "running_config"
Returns:
The configuration section as a string object.
"""
if config in ['running_config', 'startup_config']:
config = getattr(self, config)
match = re.search(regex, config, re.M)
if not match:
raise TypeError('config section not found')
block_start, line_end = match.regs[0]
match = re.search(r'^[^\s]', config[line_end:], re.M)
if not match:
raise TypeError('could not find end block')
_, block_end = match.regs[0]
block_end = line_end + block_end
return config[block_start:block_end] | [
"def",
"section",
"(",
"self",
",",
"regex",
",",
"config",
"=",
"'running_config'",
")",
":",
"if",
"config",
"in",
"[",
"'running_config'",
",",
"'startup_config'",
"]",
":",
"config",
"=",
"getattr",
"(",
"self",
",",
"config",
")",
"match",
"=",
"re"... | Returns a section of the config
Args:
regex (str): A valid regular expression used to select sections
of configuration to return
config (str): The configuration to return. Valid values for config
are "running_config" or "startup_config". The default value
is "running_config"
Returns:
The configuration section as a string object. | [
"Returns",
"a",
"section",
"of",
"the",
"config"
] | python | train |
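The two regex passes above first anchor the block header, then find the next line that starts in column 0 to bound the block. A standalone sketch against a toy config string (using `match.start()` so the next section's first character is excluded):

```python
import re

config = (
    'interface Ethernet1\n'
    '   description uplink\n'
    '   no shutdown\n'
    'interface Ethernet2\n'
    '   shutdown\n'
)

header = re.search(r'^interface Ethernet1', config, re.M)
block_start, line_end = header.span()
# End of block = next line beginning with a non-space character.
nxt = re.search(r'^[^\s]', config[line_end:], re.M)
block_end = line_end + (nxt.start() if nxt else len(config[line_end:]))
print(config[block_start:block_end])
```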
StackStorm/pybind | pybind/slxos/v17r_1_01a/show/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/show/__init__.py#L334-L357 | def _set_infra(self, v, load=False):
"""
Setter method for infra, mapped from YANG variable /show/infra (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_infra is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_infra() directly.
YANG Description: Show system info
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=infra.infra, is_container='container', presence=False, yang_name="infra", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'action': u'chassis', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """infra must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=infra.infra, is_container='container', presence=False, yang_name="infra", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'action': u'chassis', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ras-ext', defining_module='brocade-ras-ext', yang_type='container', is_config=True)""",
})
self.__infra = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_infra",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
... | Setter method for infra, mapped from YANG variable /show/infra (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_infra is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_infra() directly.
YANG Description: Show system info | [
"Setter",
"method",
"for",
"infra",
"mapped",
"from",
"YANG",
"variable",
"/",
"show",
"/",
"infra",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"t... | python | train |
titusjan/argos | argos/config/configtreemodel.py | https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/configtreemodel.py#L167-L173 | def setExpanded(self, index, expanded):
""" Expands the model item specified by the index.
Overridden from QTreeView to make it persistent (between inspector changes).
"""
if index.isValid():
item = self.getItem(index)
item.expanded = expanded | [
"def",
"setExpanded",
"(",
"self",
",",
"index",
",",
"expanded",
")",
":",
"if",
"index",
".",
"isValid",
"(",
")",
":",
"item",
"=",
"self",
".",
"getItem",
"(",
"index",
")",
"item",
".",
"expanded",
"=",
"expanded"
] | Expands the model item specified by the index.
Overridden from QTreeView to make it persistent (between inspector changes). | [
"Expands",
"the",
"model",
"item",
"specified",
"by",
"the",
"index",
".",
"Overridden",
"from",
"QTreeView",
"to",
"make",
"it",
"persistent",
"(",
"between",
"inspector",
"changes",
")",
"."
] | python | train |
librosa/librosa | librosa/feature/rhythm.py | https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/rhythm.py#L18-L178 | def tempogram(y=None, sr=22050, onset_envelope=None, hop_length=512,
win_length=384, center=True, window='hann', norm=np.inf):
'''Compute the tempogram: local autocorrelation of the onset strength envelope. [1]_
.. [1] Grosche, Peter, Meinard Müller, and Frank Kurth.
"Cyclic tempogram - A mid-level tempo representation for music signals."
ICASSP, 2010.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
Audio time series.
sr : number > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(n,) or (m, n)] or None
Optional pre-computed onset strength envelope as provided by
`onset.onset_strength`.
If multi-dimensional, tempograms are computed independently for each
band (first dimension).
hop_length : int > 0
number of audio samples between successive onset measurements
win_length : int > 0
length of the onset autocorrelation window (in frames/onset measurements)
The default setting (384) corresponds to `384 * hop_length / sr ~= 8.9s`.
center : bool
If `True`, onset autocorrelation windows are centered.
If `False`, windows are left-aligned.
window : string, function, number, tuple, or np.ndarray [shape=(win_length,)]
A window specification as in `core.stft`.
norm : {np.inf, -np.inf, 0, float > 0, None}
Normalization mode. Set to `None` to disable normalization.
Returns
-------
tempogram : np.ndarray [shape=(win_length, n) or (m, win_length, n)]
Localized autocorrelation of the onset strength envelope.
If given multi-band input (`onset_envelope.shape==(m,n)`) then
`tempogram[i]` is the tempogram of `onset_envelope[i]`.
Raises
------
ParameterError
if neither `y` nor `onset_envelope` are provided
if `win_length < 1`
See Also
--------
librosa.onset.onset_strength
librosa.util.normalize
librosa.core.stft
Examples
--------
>>> # Compute local onset autocorrelation
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> hop_length = 512
>>> oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
>>> tempogram = librosa.feature.tempogram(onset_envelope=oenv, sr=sr,
... hop_length=hop_length)
>>> # Compute global onset autocorrelation
>>> ac_global = librosa.autocorrelate(oenv, max_size=tempogram.shape[0])
>>> ac_global = librosa.util.normalize(ac_global)
>>> # Estimate the global tempo for display purposes
>>> tempo = librosa.beat.tempo(onset_envelope=oenv, sr=sr,
... hop_length=hop_length)[0]
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 8))
>>> plt.subplot(4, 1, 1)
>>> plt.plot(oenv, label='Onset strength')
>>> plt.xticks([])
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
>>> plt.subplot(4, 1, 2)
>>> # We'll truncate the display to a narrower range of tempi
>>> librosa.display.specshow(tempogram, sr=sr, hop_length=hop_length,
>>> x_axis='time', y_axis='tempo')
>>> plt.axhline(tempo, color='w', linestyle='--', alpha=1,
... label='Estimated tempo={:g}'.format(tempo))
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.subplot(4, 1, 3)
>>> x = np.linspace(0, tempogram.shape[0] * float(hop_length) / sr,
... num=tempogram.shape[0])
>>> plt.plot(x, np.mean(tempogram, axis=1), label='Mean local autocorrelation')
>>> plt.plot(x, ac_global, '--', alpha=0.75, label='Global autocorrelation')
>>> plt.xlabel('Lag (seconds)')
>>> plt.axis('tight')
>>> plt.legend(frameon=True)
>>> plt.subplot(4,1,4)
>>> # We can also plot on a BPM axis
>>> freqs = librosa.tempo_frequencies(tempogram.shape[0], hop_length=hop_length, sr=sr)
>>> plt.semilogx(freqs[1:], np.mean(tempogram[1:], axis=1),
... label='Mean local autocorrelation', basex=2)
>>> plt.semilogx(freqs[1:], ac_global[1:], '--', alpha=0.75,
... label='Global autocorrelation', basex=2)
>>> plt.axvline(tempo, color='black', linestyle='--', alpha=.8,
... label='Estimated tempo={:g}'.format(tempo))
>>> plt.legend(frameon=True)
>>> plt.xlabel('BPM')
>>> plt.axis('tight')
>>> plt.grid()
>>> plt.tight_layout()
'''
from ..onset import onset_strength
if win_length < 1:
raise ParameterError('win_length must be a positive integer')
ac_window = get_window(window, win_length, fftbins=True)
if onset_envelope is None:
if y is None:
raise ParameterError('Either y or onset_envelope must be provided')
onset_envelope = onset_strength(y=y, sr=sr, hop_length=hop_length)
else:
# Force row-contiguity to avoid framing errors below
onset_envelope = np.ascontiguousarray(onset_envelope)
if onset_envelope.ndim > 1:
# If we have multi-band input, iterate over rows
return np.asarray([tempogram(onset_envelope=oe_subband,
hop_length=hop_length,
win_length=win_length,
center=center,
window=window,
norm=norm) for oe_subband in onset_envelope])
# Center the autocorrelation windows
n = len(onset_envelope)
if center:
onset_envelope = np.pad(onset_envelope, int(win_length // 2),
mode='linear_ramp', end_values=[0, 0])
# Carve onset envelope into frames
odf_frame = util.frame(onset_envelope,
frame_length=win_length,
hop_length=1)
# Truncate to the length of the original signal
if center:
odf_frame = odf_frame[:, :n]
# Window, autocorrelate, and normalize
return util.normalize(autocorrelate(odf_frame * ac_window[:, np.newaxis],
axis=0),
norm=norm, axis=0) | [
"def",
"tempogram",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"onset_envelope",
"=",
"None",
",",
"hop_length",
"=",
"512",
",",
"win_length",
"=",
"384",
",",
"center",
"=",
"True",
",",
"window",
"=",
"'hann'",
",",
"norm",
"=",
"np",
... | Compute the tempogram: local autocorrelation of the onset strength envelope. [1]_
.. [1] Grosche, Peter, Meinard Müller, and Frank Kurth.
"Cyclic tempogram - A mid-level tempo representation for music signals."
ICASSP, 2010.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
Audio time series.
sr : number > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(n,) or (m, n)] or None
Optional pre-computed onset strength envelope as provided by
`onset.onset_strength`.
If multi-dimensional, tempograms are computed independently for each
band (first dimension).
hop_length : int > 0
number of audio samples between successive onset measurements
win_length : int > 0
length of the onset autocorrelation window (in frames/onset measurements)
The default setting (384) corresponds to `384 * hop_length / sr ~= 8.9s`.
center : bool
If `True`, onset autocorrelation windows are centered.
If `False`, windows are left-aligned.
window : string, function, number, tuple, or np.ndarray [shape=(win_length,)]
A window specification as in `core.stft`.
norm : {np.inf, -np.inf, 0, float > 0, None}
Normalization mode. Set to `None` to disable normalization.
Returns
-------
tempogram : np.ndarray [shape=(win_length, n) or (m, win_length, n)]
Localized autocorrelation of the onset strength envelope.
If given multi-band input (`onset_envelope.shape==(m,n)`) then
`tempogram[i]` is the tempogram of `onset_envelope[i]`.
Raises
------
ParameterError
if neither `y` nor `onset_envelope` are provided
if `win_length < 1`
See Also
--------
librosa.onset.onset_strength
librosa.util.normalize
librosa.core.stft
Examples
--------
>>> # Compute local onset autocorrelation
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> hop_length = 512
>>> oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
>>> tempogram = librosa.feature.tempogram(onset_envelope=oenv, sr=sr,
... hop_length=hop_length)
>>> # Compute global onset autocorrelation
>>> ac_global = librosa.autocorrelate(oenv, max_size=tempogram.shape[0])
>>> ac_global = librosa.util.normalize(ac_global)
>>> # Estimate the global tempo for display purposes
>>> tempo = librosa.beat.tempo(onset_envelope=oenv, sr=sr,
... hop_length=hop_length)[0]
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 8))
>>> plt.subplot(4, 1, 1)
>>> plt.plot(oenv, label='Onset strength')
>>> plt.xticks([])
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
>>> plt.subplot(4, 1, 2)
>>> # We'll truncate the display to a narrower range of tempi
>>> librosa.display.specshow(tempogram, sr=sr, hop_length=hop_length,
>>> x_axis='time', y_axis='tempo')
>>> plt.axhline(tempo, color='w', linestyle='--', alpha=1,
... label='Estimated tempo={:g}'.format(tempo))
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.subplot(4, 1, 3)
>>> x = np.linspace(0, tempogram.shape[0] * float(hop_length) / sr,
... num=tempogram.shape[0])
>>> plt.plot(x, np.mean(tempogram, axis=1), label='Mean local autocorrelation')
>>> plt.plot(x, ac_global, '--', alpha=0.75, label='Global autocorrelation')
>>> plt.xlabel('Lag (seconds)')
>>> plt.axis('tight')
>>> plt.legend(frameon=True)
>>> plt.subplot(4,1,4)
>>> # We can also plot on a BPM axis
>>> freqs = librosa.tempo_frequencies(tempogram.shape[0], hop_length=hop_length, sr=sr)
>>> plt.semilogx(freqs[1:], np.mean(tempogram[1:], axis=1),
... label='Mean local autocorrelation', basex=2)
>>> plt.semilogx(freqs[1:], ac_global[1:], '--', alpha=0.75,
... label='Global autocorrelation', basex=2)
>>> plt.axvline(tempo, color='black', linestyle='--', alpha=.8,
... label='Estimated tempo={:g}'.format(tempo))
>>> plt.legend(frameon=True)
>>> plt.xlabel('BPM')
>>> plt.axis('tight')
>>> plt.grid()
>>> plt.tight_layout() | [
"Compute",
"the",
"tempogram",
":",
"local",
"autocorrelation",
"of",
"the",
"onset",
"strength",
"envelope",
".",
"[",
"1",
"]",
"_"
] | python | test |
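In practice the tempogram is driven from an onset-strength envelope, exactly as in the docstring's example; trimmed to the minimal call chain (uses librosa's bundled example audio, which versions contemporaneous with this docstring shipped):

```python
import librosa

y, sr = librosa.load(librosa.util.example_audio_file())
hop_length = 512
oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
tgram = librosa.feature.tempogram(onset_envelope=oenv, sr=sr, hop_length=hop_length)

# Each column is a normalized local autocorrelation over a ~8.9 s window.
print(tgram.shape)  # (384, n_frames)
```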
iotile/coretools | iotilegateway/iotilegateway/supervisor/client.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilegateway/iotilegateway/supervisor/client.py#L646-L660 | def service_info(self, name):
"""Pull descriptive info of a service by name.
Information returned includes the service's user friendly
name and whether it was preregistered or added dynamically.
Returns:
dict: A dictionary of service information with the following keys
set:
long_name (string): The user friendly name of the service
preregistered (bool): Whether the service was explicitly
called out as a preregistered service.
"""
return self._loop.run_coroutine(self._client.service_info(name)) | [
"def",
"service_info",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"_loop",
".",
"run_coroutine",
"(",
"self",
".",
"_client",
".",
"service_info",
"(",
"name",
")",
")"
] | Pull descriptive info of a service by name.
Information returned includes the service's user friendly
name and whether it was preregistered or added dynamically.
Returns:
dict: A dictionary of service information with the following keys
set:
long_name (string): The user friendly name of the service
preregistered (bool): Whether the service was explicitly
called out as a preregistered service. | [
"Pull",
"descriptive",
"info",
"of",
"a",
"service",
"by",
"name",
"."
] | python | train |
jaegertracing/jaeger-client-python | jaeger_client/throttler.py | https://github.com/jaegertracing/jaeger-client-python/blob/06face094757c645a6d81f0e073c001931a22a05/jaeger_client/throttler.py#L81-L87 | def _set_client_id(self, client_id):
"""
Method for tracer to set client ID of throttler.
"""
with self.lock:
if self.client_id is None:
self.client_id = client_id | [
"def",
"_set_client_id",
"(",
"self",
",",
"client_id",
")",
":",
"with",
"self",
".",
"lock",
":",
"if",
"self",
".",
"client_id",
"is",
"None",
":",
"self",
".",
"client_id",
"=",
"client_id"
] | Method for tracer to set client ID of throttler. | [
"Method",
"for",
"tracer",
"to",
"set",
"client",
"ID",
"of",
"throttler",
"."
] | python | train |
saltstack/salt | salt/runners/asam.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/asam.py#L77-L121 | def _get_asam_configuration(driver_url=''):
'''
Return the configuration read from the master configuration
file or directory
'''
asam_config = __opts__['asam'] if 'asam' in __opts__ else None
if asam_config:
try:
for asam_server, service_config in six.iteritems(asam_config):
username = service_config.get('username', None)
password = service_config.get('password', None)
protocol = service_config.get('protocol', 'https')
port = service_config.get('port', 3451)
if not username or not password:
log.error(
'Username or Password has not been specified in the '
'master configuration for %s', asam_server
)
return False
ret = {
'platform_edit_url': "{0}://{1}:{2}/config/PlatformEdit.html".format(protocol, asam_server, port),
'platform_config_url': "{0}://{1}:{2}/config/PlatformConfig.html".format(protocol, asam_server, port),
'platformset_edit_url': "{0}://{1}:{2}/config/PlatformSetEdit.html".format(protocol, asam_server, port),
'platformset_config_url': "{0}://{1}:{2}/config/PlatformSetConfig.html".format(protocol, asam_server, port),
'username': username,
'password': password
}
if (not driver_url) or (driver_url == asam_server):
return ret
except Exception as exc:
log.error('Exception encountered: %s', exc)
return False
if driver_url:
log.error(
'Configuration for %s has not been specified in the master '
'configuration', driver_url
)
return False
return False | [
"def",
"_get_asam_configuration",
"(",
"driver_url",
"=",
"''",
")",
":",
"asam_config",
"=",
"__opts__",
"[",
"'asam'",
"]",
"if",
"'asam'",
"in",
"__opts__",
"else",
"None",
"if",
"asam_config",
":",
"try",
":",
"for",
"asam_server",
",",
"service_config",
... | Return the configuration read from the master configuration
file or directory | [
"Return",
"the",
"configuration",
"read",
"from",
"the",
"master",
"configuration",
"file",
"or",
"directory"
] | python | train |
signalfx/signalfx-python | signalfx/pyformance/registry.py | https://github.com/signalfx/signalfx-python/blob/650eb9a2b301bcc795e4e3a8c031574ade69849d/signalfx/pyformance/registry.py#L43-L46 | def meter(self, key, **dims):
"""Adds meter with dimensions to the registry"""
return super(MetricsRegistry, self).meter(
self.metadata.register(key, **dims)) | [
"def",
"meter",
"(",
"self",
",",
"key",
",",
"*",
"*",
"dims",
")",
":",
"return",
"super",
"(",
"MetricsRegistry",
",",
"self",
")",
".",
"meter",
"(",
"self",
".",
"metadata",
".",
"register",
"(",
"key",
",",
"*",
"*",
"dims",
")",
")"
] | Adds meter with dimensions to the registry | [
"Adds",
"meter",
"with",
"dimensions",
"to",
"the",
"registry"
] | python | train |
Autodesk/aomi | aomi/helpers.py | https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/helpers.py#L214-L218 | def ensure_dir(path):
"""Ensures a directory exists"""
if not (os.path.exists(path) and
os.path.isdir(path)):
os.mkdir(path) | [
"def",
"ensure_dir",
"(",
"path",
")",
":",
"if",
"not",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
")",
":",
"os",
".",
"mkdir",
"(",
"path",
")"
] | Ensures a directory exists | [
"Ensures",
"a",
"directory",
"exists"
] | python | train |
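Note that `os.mkdir` creates only the leaf directory, so the helper above fails when parents are missing. On Python 3.2+ the same guarantee (parents included) is a single call; a drop-in sketch:

```python
import os

def ensure_dir(path):
    """Create path, including missing parents, if it does not already exist."""
    os.makedirs(path, exist_ok=True)
```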
eng-tools/sfsimodels | sfsimodels/files.py | https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/files.py#L33-L51 | def loads_json(p_str, custom=None, meta=False, verbose=0):
"""
Given a json string it creates a dictionary of sfsi objects
:param p_str: str, JSON string to parse
:param custom: dict, used to load custom objects, {model type: custom object}
:param meta: bool, if true then also return all ecp meta data in separate dict
:param verbose: int, console output
:return: dict
"""
data = json.loads(p_str)
if meta:
md = {}
for item in data:
if item != "models":
md[item] = data[item]
return ecp_dict_to_objects(data, custom, verbose=verbose), md
else:
return ecp_dict_to_objects(data, custom, verbose=verbose) | [
"def",
"loads_json",
"(",
"p_str",
",",
"custom",
"=",
"None",
",",
"meta",
"=",
"False",
",",
"verbose",
"=",
"0",
")",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"p_str",
")",
"if",
"meta",
":",
"md",
"=",
"{",
"}",
"for",
"item",
"in",
"da... | Given a json string it creates a dictionary of sfsi objects
:param p_str: str, JSON string to parse
:param custom: dict, used to load custom objects, {model type: custom object}
:param meta: bool, if true then also return all ecp meta data in separate dict
:param verbose: int, console output
:return: dict | [
"Given",
"a",
"json",
"string",
"it",
"creates",
"a",
"dictionary",
"of",
"sfsi",
"objects"
] | python | train |
reingart/gui2py | gui/controls/listview.py | https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/controls/listview.py#L72-L81 | def FindPyData(self, start, py_data):
"Do a reverse look up for an item containing the requested data"
# first, look at our internal dict:
wx_data = self._wx_data_map[py_data]
# do the real search at the wx control:
if wx.VERSION < (3, 0, 0) or 'classic' in wx.version():
data = self.FindItemData(start, wx_data)
else:
data = self.FindItem(start, wx_data)
return data | [
"def",
"FindPyData",
"(",
"self",
",",
"start",
",",
"py_data",
")",
":",
"# first, look at our internal dict:\r",
"wx_data",
"=",
"self",
".",
"_wx_data_map",
"[",
"py_data",
"]",
"# do the real search at the wx control:\r",
"if",
"wx",
".",
"VERSION",
"<",
"(",
... | Do a reverse look up for an item containing the requested data | [
"Do",
"a",
"reverse",
"look",
"up",
"for",
"an",
"item",
"containing",
"the",
"requested",
"data"
] | python | test |
jbittel/django-mama-cas | mama_cas/cas.py | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/cas.py#L18-L38 | def validate_service_ticket(service, ticket, pgturl=None, renew=False, require_https=False):
"""
Validate a service ticket string. Return a triplet containing a
``ServiceTicket`` and an optional ``ProxyGrantingTicket``, or a
``ValidationError`` if ticket validation failed.
"""
logger.debug("Service validation request received for %s" % ticket)
# Check for proxy tickets passed to /serviceValidate
if ticket and ticket.startswith(ProxyTicket.TICKET_PREFIX):
raise InvalidTicketSpec('Proxy tickets cannot be validated with /serviceValidate')
st = ServiceTicket.objects.validate_ticket(ticket, service, renew=renew, require_https=require_https)
attributes = get_attributes(st.user, st.service)
if pgturl is not None:
logger.debug("Proxy-granting ticket request received for %s" % pgturl)
pgt = ProxyGrantingTicket.objects.create_ticket(service, pgturl, user=st.user, granted_by_st=st)
else:
pgt = None
return st, attributes, pgt | [
"def",
"validate_service_ticket",
"(",
"service",
",",
"ticket",
",",
"pgturl",
"=",
"None",
",",
"renew",
"=",
"False",
",",
"require_https",
"=",
"False",
")",
":",
"logger",
".",
"debug",
"(",
"\"Service validation request received for %s\"",
"%",
"ticket",
"... | Validate a service ticket string. Return a triplet containing a
``ServiceTicket`` and an optional ``ProxyGrantingTicket``, or a
``ValidationError`` if ticket validation failed. | [
"Validate",
"a",
"service",
"ticket",
"string",
".",
"Return",
"a",
"triplet",
"containing",
"a",
"ServiceTicket",
"and",
"an",
"optional",
"ProxyGrantingTicket",
"or",
"a",
"ValidationError",
"if",
"ticket",
"validation",
"failed",
"."
] | python | train |
juju/charm-helpers | charmhelpers/contrib/openstack/utils.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/utils.py#L1737-L1755 | def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
"""Generate a dictionary of snap install information from origin
@param snaps: List of snaps
@param src: String of openstack-origin or source of the form
snap:track/channel
@param mode: String classic, devmode or jailmode
@returns: Dictionary of snaps with channels and modes
"""
if not src.startswith('snap:'):
juju_log("Snap source is not a snap origin", 'WARN')
return {}
_src = src[5:]
channel = '--channel={}'.format(_src)
return {snap: {'channel': channel, 'mode': mode}
for snap in snaps} | [
"def",
"get_snaps_install_info_from_origin",
"(",
"snaps",
",",
"src",
",",
"mode",
"=",
"'classic'",
")",
":",
"if",
"not",
"src",
".",
"startswith",
"(",
"'snap:'",
")",
":",
"juju_log",
"(",
"\"Snap source is not a snap origin\"",
",",
"'WARN'",
")",
"return"... | Generate a dictionary of snap install information from origin
@param snaps: List of snaps
@param src: String of openstack-origin or source of the form
snap:track/channel
@param mode: String classic, devmode or jailmode
@returns: Dictionary of snaps with channels and modes | [
"Generate",
"a",
"dictionary",
"of",
"snap",
"install",
"information",
"from",
"origin"
] | python | train |
MycroftAI/adapt | adapt/engine.py | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L315-L327 | def register_regex_entity(self, regex_str, domain=0):
"""
A regular expression making use of python named group expressions.
Example: (?P<Artist>.*)
Args:
regex_str(str): a string representing a regular expression as defined above
domain(str): a string representing the domain you wish to add the entity to
"""
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_regex_entity(regex_str=regex_str) | [
"def",
"register_regex_entity",
"(",
"self",
",",
"regex_str",
",",
"domain",
"=",
"0",
")",
":",
"if",
"domain",
"not",
"in",
"self",
".",
"domains",
":",
"self",
".",
"register_domain",
"(",
"domain",
"=",
"domain",
")",
"self",
".",
"domains",
"[",
... | A regular expression making use of python named group expressions.
Example: (?P<Artist>.*)
Args:
regex_str(str): a string representing a regular expression as defined above
domain(str): a string representing the domain you wish to add the entity to | [
"A",
"regular",
"expression",
"making",
"use",
"of",
"python",
"named",
"group",
"expressions",
"."
] | python | train |
ponty/pyavrutils | pyavrutils/avrgcc.py | https://github.com/ponty/pyavrutils/blob/7a396a25b3ac076ede07b5cd5cbd416ebb578a28/pyavrutils/avrgcc.py#L140-L188 | def command_list(self, sources, _opt=False):
'''command line as list'''
def abspath(x):
x = Path(x).abspath()
if not x.exists():
raise ValueError('file not found! ' + x)
return x
self.f_cpu = int(self.f_cpu)
self.mcu = str(self.mcu)
# if not self.mcu in self.targets:
# raise ValueError('invalid mcu:' + self.mcu)
if not _opt:
sources = [abspath(x) for x in sources]
includes = [abspath(x) for x in self.includes]
if not self.output:
self.output = tempfile.NamedTemporaryFile(
prefix='pyavrutils_', suffix='.elf', delete=0).name
defines = self.defines + ['F_CPU=' + str(self.f_cpu)]
cmd = [self.cc]
if not self.use_only_extra_options:
if not _opt:
cmd += sources
cmd += ['-D' + x for x in defines]
cmd += ['-I' + x for x in includes]
if not _opt:
cmd += ['-o', self.output]
cmd += ['-mmcu=' + self.mcu]
cmd += ['--std=' + self.std]
if self.relax:
cmd += ['-Wl,--relax']
if self.gc_sections:
cmd += ['-Wl,--gc-sections']
if self.ffunction_sections:
cmd += ['-ffunction-sections']
if self.fdata_sections:
cmd += ['-fdata-sections']
if self.fno_inline_small_functions:
cmd += ['-fno-inline-small-functions']
if self.optimization != 0:
cmd += ['-O' + str(self.optimization)]
cmd += self.options_extra
return cmd | [
"def",
"command_list",
"(",
"self",
",",
"sources",
",",
"_opt",
"=",
"False",
")",
":",
"def",
"abspath",
"(",
"x",
")",
":",
"x",
"=",
"Path",
"(",
"x",
")",
".",
"abspath",
"(",
")",
"if",
"not",
"x",
".",
"exists",
"(",
")",
":",
"raise",
... | command line as list | [
"command",
"line",
"as",
"list"
] | python | train |
RudolfCardinal/pythonlib | cardinal_pythonlib/nhs.py | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/nhs.py#L116-L128 | def generate_random_nhs_number() -> int:
"""
Returns a random valid NHS number, as an ``int``.
"""
check_digit = 10 # NHS numbers with this check digit are all invalid
while check_digit == 10:
digits = [random.randint(1, 9)] # don't start with a zero
digits.extend([random.randint(0, 9) for _ in range(8)])
# ... length now 9
check_digit = nhs_check_digit(digits)
# noinspection PyUnboundLocalVariable
digits.append(check_digit)
return int("".join([str(d) for d in digits])) | [
"def",
"generate_random_nhs_number",
"(",
")",
"->",
"int",
":",
"check_digit",
"=",
"10",
"# NHS numbers with this check digit are all invalid",
"while",
"check_digit",
"==",
"10",
":",
"digits",
"=",
"[",
"random",
".",
"randint",
"(",
"1",
",",
"9",
")",
"]",... | Returns a random valid NHS number, as an ``int``. | [
"Returns",
"a",
"random",
"valid",
"NHS",
"number",
"as",
"an",
"int",
"."
] | python | train |
bwhite/hadoopy | hadoopy/_runner.py | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_runner.py#L66-L70 | def _listeq_to_dict(jobconfs):
"""Convert iterators of 'key=val' into a dictionary with later values taking priority."""
if not isinstance(jobconfs, dict):
jobconfs = dict(x.split('=', 1) for x in jobconfs)
return dict((str(k), str(v)) for k, v in jobconfs.items()) | [
"def",
"_listeq_to_dict",
"(",
"jobconfs",
")",
":",
"if",
"not",
"isinstance",
"(",
"jobconfs",
",",
"dict",
")",
":",
"jobconfs",
"=",
"dict",
"(",
"x",
".",
"split",
"(",
"'='",
",",
"1",
")",
"for",
"x",
"in",
"jobconfs",
")",
"return",
"dict",
... | Convert iterators of 'key=val' into a dictionary with later values taking priority. | [
"Convert",
"iterators",
"of",
"key",
"=",
"val",
"into",
"a",
"dictionary",
"with",
"later",
"values",
"taking",
"priority",
"."
] | python | train |
androguard/androguard | androguard/decompiler/dad/util.py | https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/decompiler/dad/util.py#L202-L213 | def create_png(cls_name, meth_name, graph, dir_name='graphs2'):
"""
Creates a PNG from a given :class:`~androguard.decompiler.dad.graph.Graph`.
:param str cls_name: name of the class
:param str meth_name: name of the method
:param androguard.decompiler.dad.graph.Graph graph:
:param str dir_name: output directory
"""
m_name = ''.join(x for x in meth_name if x.isalnum())
name = ''.join((cls_name.split('/')[-1][:-1], '#', m_name))
graph.draw(name, dir_name) | [
"def",
"create_png",
"(",
"cls_name",
",",
"meth_name",
",",
"graph",
",",
"dir_name",
"=",
"'graphs2'",
")",
":",
"m_name",
"=",
"''",
".",
"join",
"(",
"x",
"for",
"x",
"in",
"meth_name",
"if",
"x",
".",
"isalnum",
"(",
")",
")",
"name",
"=",
"''... | Creates a PNG from a given :class:`~androguard.decompiler.dad.graph.Graph`.
:param str cls_name: name of the class
:param str meth_name: name of the method
:param androguard.decompiler.dad.graph.Graph graph:
:param str dir_name: output directory | [
"Creates",
"a",
"PNG",
"from",
"a",
"given",
":",
"class",
":",
"~androguard",
".",
"decompiler",
".",
"dad",
".",
"graph",
".",
"Graph",
"."
] | python | train |
mlperf/training | image_classification/tensorflow/official/resnet/imagenet_preprocessing.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/image_classification/tensorflow/official/resnet/imagenet_preprocessing.py#L254-L291 | def preprocess_image(image_buffer, output_height, output_width,
num_channels, is_training=False):
"""Preprocesses the given image.
Preprocessing includes decoding, cropping, and resizing for both training
and eval images. Training preprocessing, however, introduces some random
distortion of the image to improve accuracy.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
num_channels: Integer depth of the image buffer for decoding.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
if is_training:
# For training, we want to randomize some of the distortions.
image = _decode_crop_and_flip(image_buffer, num_channels)
mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE,
value=[output_height, output_width])
image = _resize_image(image, output_height, output_width)
else:
# For validation, we want to decode, resize, then just crop the middle.
image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
image = _aspect_preserving_resize(image, _RESIZE_MIN)
mlperf_log.resnet_print(key=mlperf_log.INPUT_RESIZE,
value=[output_height, output_width])
image = _central_crop(image, output_height, output_width)
image.set_shape([output_height, output_width, num_channels])
return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels) | [
"def",
"preprocess_image",
"(",
"image_buffer",
",",
"output_height",
",",
"output_width",
",",
"num_channels",
",",
"is_training",
"=",
"False",
")",
":",
"if",
"is_training",
":",
"# For training, we want to randomize some of the distortions.",
"image",
"=",
"_decode_cr... | Preprocesses the given image.
Preprocessing includes decoding, cropping, and resizing for both training
and eval images. Training preprocessing, however, introduces some random
distortion of the image to improve accuracy.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
num_channels: Integer depth of the image buffer for decoding.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image. | [
"Preprocesses",
"the",
"given",
"image",
"."
] | python | train |
aparo/pyes | performance/utils.py | https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/performance/utils.py#L16-L31 | def generate_dataset(number_items=1000):
"""
Generate a dataset with number_items elements.
"""
data = []
names = get_names()
totalnames = len(names)
#init random seeder
random.seed()
#calculate items
# names = random.sample(names, number_items)
for i in range(number_items):
data.append({"name":names[random.randint(0,totalnames-1)],
"age":random.randint(1,100),
"description":li_words(50, False)})
return data | [
"def",
"generate_dataset",
"(",
"number_items",
"=",
"1000",
")",
":",
"data",
"=",
"[",
"]",
"names",
"=",
"get_names",
"(",
")",
"totalnames",
"=",
"len",
"(",
"names",
")",
"#init random seeder",
"random",
".",
"seed",
"(",
")",
"#calculate items",
"# ... | Generate a dataset with number_items elements. | [
"Generate",
"a",
"dataset",
"with",
"number_items",
"elements",
"."
] | python | train |
load-tools/netort | netort/data_processing.py | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/data_processing.py#L10-L18 | def get_nowait_from_queue(queue):
""" Collect all immediately available items from a queue """
data = []
for _ in range(queue.qsize()):
try:
data.append(queue.get_nowait())
except q.Empty:
break
return data | [
"def",
"get_nowait_from_queue",
"(",
"queue",
")",
":",
"data",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"queue",
".",
"qsize",
"(",
")",
")",
":",
"try",
":",
"data",
".",
"append",
"(",
"queue",
".",
"get_nowait",
"(",
")",
")",
"except",
... | Collect all immediately available items from a queue | [
"Collect",
"all",
"immediately",
"available",
"items",
"from",
"a",
"queue"
] | python | train |
ihgazni2/elist | elist/elist.py | https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L6647-L6686 | def get_next_char_level_in_j_str(curr_lv,curr_seq,j_str,block_op_pairs_dict=get_block_op_pairs("{}[]()")):
''' the first-char is level-1
when current is non-op, next-char-level = curr-level
when current is lop, non-paired-rop-next-char-level = lop-level+1;
when current is lop, paired-rop-next-char-level = lop-level
when current is rop, next-char-level = rop-level - 1
# {"key_4_UF0aJJ6v": "value_1", "key_2_Hd0t": ["value_16", "value_8", "value_8", "value_15", "value_14", "value_19", {......
# 122222222222222222222222222222222222222222222333333333333333333333333333333333333333333333333333333333333333333333334......
# {\n"key_4_UF0aJJ6v": "value_1", \n"key_2_Hd0t": [\n"value_16", \n"value_8", \n"value_8", \n"value_15", \n"value_14", \n"value_19",......
# 1 222222222222222222222222222222 2222222222222222 3333333333333 333333333333 333333333333 3333333333333 3333333333333 3333333333333......
'''
curr_ch = j_str[curr_seq]
next_ch = j_str[curr_seq + 1]
cond = 0
for i in range(1,block_op_pairs_dict.__len__()+1):
if(curr_ch == block_op_pairs_dict[i][0]):
if(next_ch == block_op_pairs_dict[i][1]):
next_lv = curr_lv
else:
next_lv = curr_lv + 1
cond = 1
break
elif(curr_ch == block_op_pairs_dict[i][1]):
if(is_rop(next_ch,block_op_pairs_dict)):
next_lv = curr_lv - 1
else:
next_lv = curr_lv
cond = 1
break
else:
pass
if(cond == 1):
pass
elif(is_rop(next_ch,block_op_pairs_dict)):
next_lv = curr_lv - 1
else:
next_lv = curr_lv
curr_lv = next_lv
curr_seq = curr_seq + 1
return(curr_lv,curr_lv,curr_seq) | [
"def",
"get_next_char_level_in_j_str",
"(",
"curr_lv",
",",
"curr_seq",
",",
"j_str",
",",
"block_op_pairs_dict",
"=",
"get_block_op_pairs",
"(",
"\"{}[]()\"",
")",
")",
":",
"curr_ch",
"=",
"j_str",
"[",
"curr_seq",
"]",
"next_ch",
"=",
"j_str",
"[",
"curr_seq"... | the first-char is level-1
when current is non-op, next-char-level = curr-level
when current is lop, non-paired-rop-next-char-level = lop-level+1;
when current is lop, paired-rop-next-char-level = lop-level
when current is rop, next-char-level = rop-level - 1
# {"key_4_UF0aJJ6v": "value_1", "key_2_Hd0t": ["value_16", "value_8", "value_8", "value_15", "value_14", "value_19", {......
# 122222222222222222222222222222222222222222222333333333333333333333333333333333333333333333333333333333333333333333334......
# {\n"key_4_UF0aJJ6v": "value_1", \n"key_2_Hd0t": [\n"value_16", \n"value_8", \n"value_8", \n"value_15", \n"value_14", \n"value_19",......
# 1 222222222222222222222222222222 2222222222222222 3333333333333 333333333333 333333333333 3333333333333 3333333333333 3333333333333...... | [
"the",
"first",
"-",
"char",
"is",
"level",
"-",
"1",
"when",
"current",
"is",
"non",
"-",
"op",
"next",
"-",
"char",
"-",
"level",
"=",
"curr",
"-",
"level",
"when",
"current",
"is",
"lop",
"non",
"-",
"paired",
"-",
"rop",
"-",
"next",
"-",
"ch... | python | valid |
tanghaibao/goatools | goatools/grouper/grprobj.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/grprobj.py#L108-L114 | def get_section2usrnts(self):
"""Get dict section2usrnts."""
sec_nts = []
for section_name, _ in self.get_sections_2d():
usrgos = self.get_usrgos_g_section(section_name)
sec_nts.append((section_name, [self.go2nt.get(u) for u in usrgos]))
return cx.OrderedDict(sec_nts) | [
"def",
"get_section2usrnts",
"(",
"self",
")",
":",
"sec_nts",
"=",
"[",
"]",
"for",
"section_name",
",",
"_",
"in",
"self",
".",
"get_sections_2d",
"(",
")",
":",
"usrgos",
"=",
"self",
".",
"get_usrgos_g_section",
"(",
"section_name",
")",
"sec_nts",
"."... | Get dict section2usrnts. | [
"Get",
"dict",
"section2usrnts",
"."
] | python | train |
uogbuji/versa | tools/py/writer/md.py | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/writer/md.py#L42-L79 | def write(models, out=None, base=None, propertybase=None, shorteners=None, logger=logging):
'''
models - input Versa models from which output is generated. Must be a sequence
object, not an iterator
'''
assert out is not None #Output stream required
if not isinstance(models, list): models = [models]
shorteners = shorteners or {}
all_propertybase = [propertybase] if propertybase else []
all_propertybase.append(VERSA_BASEIRI)
if any((base, propertybase, shorteners)):
out.write('# @docheader\n\n* @iri:\n')
if base:
out.write(' * @base: {0}'.format(base))
#for k, v in shorteners:
# out.write(' * @base: {0}'.format(base))
out.write('\n\n')
origin_space = set()
#base_out = models[0].base
for m in models:
origin_space.update(all_origins(m))
for o in origin_space:
out.write('# {0}\n\n'.format(o))
for o_, r, t, a in m.match(o):
abbr_r = abbreviate(r, all_propertybase)
value_format(t)
out.write('* {0}: {1}\n'.format(abbr_r, value_format(t)))
for k, v in a.items():
abbr_k = abbreviate(k, all_propertybase)
                out.write(' * {0}: {1}\n'.format(abbr_k, value_format(v)))
out.write('\n')
return | [
"def",
"write",
"(",
"models",
",",
"out",
"=",
"None",
",",
"base",
"=",
"None",
",",
"propertybase",
"=",
"None",
",",
"shorteners",
"=",
"None",
",",
"logger",
"=",
"logging",
")",
":",
"assert",
"out",
"is",
"not",
"None",
"#Output stream required",
... | models - input Versa models from which output is generated. Must be a sequence
object, not an iterator | [
"models",
"-",
"input",
"Versa",
"models",
"from",
"which",
"output",
"is",
"generated",
".",
"Must",
"be",
"a",
"sequence",
"object",
"not",
"an",
"iterator"
] | python | train |
saltstack/salt | salt/modules/system_profiler.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/system_profiler.py#L34-L55 | def _call_system_profiler(datatype):
'''
Call out to system_profiler. Return a dictionary
of the stuff we are interested in.
'''
p = subprocess.Popen(
[PROFILER_BINARY, '-detailLevel', 'full',
'-xml', datatype], stdout=subprocess.PIPE)
(sysprofresults, sysprof_stderr) = p.communicate(input=None)
if six.PY2:
plist = plistlib.readPlistFromString(sysprofresults)
else:
plist = plistlib.readPlistFromBytes(sysprofresults)
try:
apps = plist[0]['_items']
except (IndexError, KeyError):
apps = []
return apps | [
"def",
"_call_system_profiler",
"(",
"datatype",
")",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"PROFILER_BINARY",
",",
"'-detailLevel'",
",",
"'full'",
",",
"'-xml'",
",",
"datatype",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"("... | Call out to system_profiler. Return a dictionary
of the stuff we are interested in. | [
"Call",
"out",
"to",
"system_profiler",
".",
"Return",
"a",
"dictionary",
"of",
"the",
"stuff",
"we",
"are",
"interested",
"in",
"."
] | python | train |
PyCQA/pylint | pylint/checkers/imports.py | https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/imports.py#L791-L813 | def _check_relative_import(
self, modnode, importnode, importedmodnode, importedasname
):
"""check relative import. node is either an Import or From node, modname
the imported module name.
"""
if not self.linter.is_message_enabled("relative-import"):
return None
if importedmodnode.file is None:
return False # built-in module
if modnode is importedmodnode:
return False # module importing itself
if modnode.absolute_import_activated() or getattr(importnode, "level", None):
return False
if importedmodnode.name != importedasname:
# this must be a relative import...
self.add_message(
"relative-import",
args=(importedasname, importedmodnode.name),
node=importnode,
)
return None
return None | [
"def",
"_check_relative_import",
"(",
"self",
",",
"modnode",
",",
"importnode",
",",
"importedmodnode",
",",
"importedasname",
")",
":",
"if",
"not",
"self",
".",
"linter",
".",
"is_message_enabled",
"(",
"\"relative-import\"",
")",
":",
"return",
"None",
"if",... | check relative import. node is either an Import or From node, modname
the imported module name. | [
"check",
"relative",
"import",
".",
"node",
"is",
"either",
"an",
"Import",
"or",
"From",
"node",
"modname",
"the",
"imported",
"module",
"name",
"."
] | python | test |
zomux/deepy | deepy/dataset/basic.py | https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/dataset/basic.py#L30-L40 | def map(self, func):
"""
Process all data with given function.
The scheme of function should be x,y -> x,y.
"""
if self._train_set:
self._train_set = map(func, self._train_set)
if self._valid_set:
self._valid_set = map(func, self._valid_set)
if self._test_set:
self._test_set = map(func, self._test_set) | [
"def",
"map",
"(",
"self",
",",
"func",
")",
":",
"if",
"self",
".",
"_train_set",
":",
"self",
".",
"_train_set",
"=",
"map",
"(",
"func",
",",
"self",
".",
"_train_set",
")",
"if",
"self",
".",
"_valid_set",
":",
"self",
".",
"_valid_set",
"=",
"... | Process all data with given function.
The scheme of function should be x,y -> x,y. | [
"Process",
"all",
"data",
"with",
"given",
"function",
".",
"The",
"scheme",
"of",
"function",
"should",
"be",
"x",
"y",
"-",
">",
"x",
"y",
"."
] | python | test |
edx/edx-celeryutils | celery_utils/persist_on_failure.py | https://github.com/edx/edx-celeryutils/blob/d8745f5f0929ad154fad779a19fbefe7f51e9498/celery_utils/persist_on_failure.py#L22-L34 | def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
If the task fails, persist a record of the task.
"""
if not FailedTask.objects.filter(task_id=task_id, datetime_resolved=None).exists():
FailedTask.objects.create(
task_name=_truncate_to_field(FailedTask, 'task_name', self.name),
task_id=task_id, # Fixed length UUID: No need to truncate
args=args,
kwargs=kwargs,
exc=_truncate_to_field(FailedTask, 'exc', repr(exc)),
)
super(PersistOnFailureTask, self).on_failure(exc, task_id, args, kwargs, einfo) | [
"def",
"on_failure",
"(",
"self",
",",
"exc",
",",
"task_id",
",",
"args",
",",
"kwargs",
",",
"einfo",
")",
":",
"if",
"not",
"FailedTask",
".",
"objects",
".",
"filter",
"(",
"task_id",
"=",
"task_id",
",",
"datetime_resolved",
"=",
"None",
")",
".",... | If the task fails, persist a record of the task. | [
"If",
"the",
"task",
"fails",
"persist",
"a",
"record",
"of",
"the",
"task",
"."
] | python | train |
JarryShaw/PyPCAPKit | src/protocols/internet/internet.py | https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/internet.py#L113-L170 | def _import_next_layer(self, proto, length=None, *, version=4, extension=False):
"""Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Keyword Arguments:
* version -- int, IP version (4 in default)
<keyword> 4 / 6
* extension -- bool, if is extension header (False in default)
<keyword> True / False
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* IPv4 -- internet layer
* IPv6 -- internet layer
* AH -- internet layer
* TCP -- transport layer
* UDP -- transport layer
"""
if length == 0:
from pcapkit.protocols.null import NoPayload as Protocol
elif self._sigterm or proto == 59:
from pcapkit.protocols.raw import Raw as Protocol
elif proto == 51:
from pcapkit.protocols.internet.ah import AH as Protocol
elif proto == 139:
from pcapkit.protocols.internet.hip import HIP as Protocol
elif proto == 0:
from pcapkit.protocols.internet.hopopt import HOPOPT as Protocol
elif proto == 44:
from pcapkit.protocols.internet.ipv6_frag import IPv6_Frag as Protocol
elif proto == 60:
from pcapkit.protocols.internet.ipv6_opts import IPv6_Opts as Protocol
elif proto == 43:
from pcapkit.protocols.internet.ipv6_route import IPv6_Route as Protocol
elif proto == 135:
from pcapkit.protocols.internet.mh import MH as Protocol
elif proto == 4:
from pcapkit.protocols.internet.ipv4 import IPv4 as Protocol
elif proto == 41:
from pcapkit.protocols.internet.ipv6 import IPv6 as Protocol
elif proto == 6:
from pcapkit.protocols.transport.tcp import TCP as Protocol
elif proto == 17:
from pcapkit.protocols.transport.udp import UDP as Protocol
else:
from pcapkit.protocols.raw import Raw as Protocol
next_ = Protocol(self._file, length, version=version, extension=extension,
error=self._onerror, layer=self._exlayer, protocol=self._exproto)
return next_ | [
"def",
"_import_next_layer",
"(",
"self",
",",
"proto",
",",
"length",
"=",
"None",
",",
"*",
",",
"version",
"=",
"4",
",",
"extension",
"=",
"False",
")",
":",
"if",
"length",
"==",
"0",
":",
"from",
"pcapkit",
".",
"protocols",
".",
"null",
"impor... | Import next layer extractor.
Positional arguments:
* proto -- str, next layer protocol name
* length -- int, valid (not padding) length
Keyword Arguments:
* version -- int, IP version (4 in default)
<keyword> 4 / 6
* extension -- bool, if is extension header (False in default)
<keyword> True / False
Returns:
* bool -- flag if extraction of next layer succeeded
* Info -- info of next layer
* ProtoChain -- protocol chain of next layer
* str -- alias of next layer
Protocols:
* IPv4 -- internet layer
* IPv6 -- internet layer
* AH -- internet layer
* TCP -- transport layer
* UDP -- transport layer | [
"Import",
"next",
"layer",
"extractor",
"."
] | python | train |
mitsei/dlkit | dlkit/json_/resource/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/sessions.py#L3075-L3094 | def is_descendant_of_bin(self, id_, bin_id):
"""Tests if an ``Id`` is a descendant of a bin.
arg: id (osid.id.Id): an ``Id``
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``bin_id,`` ``false`` otherwise
raise: NotFound - ``bin_id`` is not found
raise: NullArgument - ``id`` or ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_descendant_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=bin_id)
return self._hierarchy_session.is_descendant(id_=id_, descendant_id=bin_id) | [
"def",
"is_descendant_of_bin",
"(",
"self",
",",
"id_",
",",
"bin_id",
")",
":",
"# Implemented from template for",
"# osid.resource.BinHierarchySession.is_descendant_of_bin",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_cata... | Tests if an ``Id`` is a descendant of a bin.
arg: id (osid.id.Id): an ``Id``
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``bin_id,`` ``false`` otherwise
raise: NotFound - ``bin_id`` is not found
raise: NullArgument - ``id`` or ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``. | [
"Tests",
"if",
"an",
"Id",
"is",
"a",
"descendant",
"of",
"a",
"bin",
"."
] | python | train |
saltstack/salt | salt/returners/pgjsonb.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/pgjsonb.py#L325-L337 | def save_load(jid, load, minions=None):
'''
Save the load to the specified jid id
'''
with _get_serv(commit=True) as cur:
try:
cur.execute(PG_SAVE_LOAD_SQL,
{'jid': jid, 'load': psycopg2.extras.Json(load)})
except psycopg2.IntegrityError:
# https://github.com/saltstack/salt/issues/22171
# Without this try/except we get tons of duplicate entry errors
# which result in job returns not being stored properly
pass | [
"def",
"save_load",
"(",
"jid",
",",
"load",
",",
"minions",
"=",
"None",
")",
":",
"with",
"_get_serv",
"(",
"commit",
"=",
"True",
")",
"as",
"cur",
":",
"try",
":",
"cur",
".",
"execute",
"(",
"PG_SAVE_LOAD_SQL",
",",
"{",
"'jid'",
":",
"jid",
"... | Save the load to the specified jid id | [
"Save",
"the",
"load",
"to",
"the",
"specified",
"jid",
"id"
] | python | train |
tanghaibao/jcvi | jcvi/variation/str.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/str.py#L876-L907 | def mergecsv(args):
"""
%prog mergecsv *.csv
Combine CSV into binary array.
"""
p = OptionParser(mergecsv.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
csvfiles = args
arrays = []
samplekeys = []
for csvfile in csvfiles:
samplekey = op.basename(csvfile).split(".")[0]
a = np.fromfile(csvfile, sep=",", dtype=np.int32)
x1 = a[::2]
x2 = a[1::2]
a = x1 * 1000 + x2
a[a < 0] = -1
arrays.append(a)
samplekeys.append(samplekey)
print(samplekey, a, file=sys.stderr)
print("Merging", file=sys.stderr)
b = np.concatenate(arrays)
b.tofile("data.bin")
fw = open("samples", "w")
print("\n".join(samplekeys), file=fw)
fw.close() | [
"def",
"mergecsv",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"mergecsv",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"<",
"1",
":",
"sys",
".",
"exit",
"(",
... | %prog mergecsv *.csv
Combine CSV into binary array. | [
"%prog",
"mergecsv",
"*",
".",
"csv"
] | python | train |
cltk/cltk | cltk/prosody/old_norse/verse.py | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/old_norse/verse.py#L227-L247 | def find_alliterations(self):
"""
Alliterations is the repetition of a same sound pattern (usually the first sound) of important words.
This usually excludes stop words.
:return:
"""
self.n_alliterations = 0
self.alliterations = []
for j, sound1 in enumerate(self.first_sounds):
word1 = normalize(self.tokenized_text[j])
if j < len(self.first_sounds)-1:
for k, sound2 in enumerate(self.first_sounds[j+1:]):
                word2 = normalize(self.tokenized_text[j+1+k])
                if word1 not in STOPS_LIST and word2 not in STOPS_LIST:
if isinstance(sound1, Consonant) and sound1.ipar == sound2.ipar:
self.alliterations.append((word1, word2))
self.n_alliterations += 1
elif isinstance(sound1, Vowel) and isinstance(sound2, Vowel):
self.alliterations.append((word1, word2))
self.n_alliterations += 1
return self.alliterations, self.n_alliterations | [
"def",
"find_alliterations",
"(",
"self",
")",
":",
"self",
".",
"n_alliterations",
"=",
"0",
"self",
".",
"alliterations",
"=",
"[",
"]",
"for",
"j",
",",
"sound1",
"in",
"enumerate",
"(",
"self",
".",
"first_sounds",
")",
":",
"word1",
"=",
"normalize"... | Alliterations is the repetition of a same sound pattern (usually the first sound) of important words.
This usually excludes stop words.
:return: | [
"Alliterations",
"is",
"the",
"repetition",
"of",
"a",
"same",
"sound",
"pattern",
"(",
"usually",
"the",
"first",
"sound",
")",
"of",
"important",
"words",
".",
"This",
"usually",
"excludes",
"stop",
"words",
".",
":",
"return",
":"
] | python | train |
mwouts/jupytext | jupytext/cell_to_text.py | https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/cell_to_text.py#L136-L151 | def code_to_text(self):
"""Return the text representation of a code cell"""
source = copy(self.source)
comment_magic(source, self.language, self.comment_magics)
options = []
if self.cell_type == 'code' and self.language:
options.append(self.language)
filtered_metadata = {key: self.metadata[key] for key in self.metadata
if key not in ['active', 'language']}
if filtered_metadata:
options.append(metadata_to_md_options(filtered_metadata))
return ['```{}'.format(' '.join(options))] + source + ['```'] | [
"def",
"code_to_text",
"(",
"self",
")",
":",
"source",
"=",
"copy",
"(",
"self",
".",
"source",
")",
"comment_magic",
"(",
"source",
",",
"self",
".",
"language",
",",
"self",
".",
"comment_magics",
")",
"options",
"=",
"[",
"]",
"if",
"self",
".",
... | Return the text representation of a code cell | [
"Return",
"the",
"text",
"representation",
"of",
"a",
"code",
"cell"
] | python | train |
Phyks/libbmc | libbmc/citations/pdf.py | https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/citations/pdf.py#L21-L87 | def cermine(pdf_file, force_api=False, override_local=None):
"""
Run `CERMINE <https://github.com/CeON/CERMINE>`_ to extract metadata from \
the given PDF file, to retrieve citations (and more) from the \
provided PDF file. This function returns the raw output of \
CERMINE call.
.. note::
Try to use a local CERMINE JAR file, and falls back to using the API. \
JAR file is expected to be found in \
``libbmc/external/cermine.jar``. You can override this using \
the ``override_local`` parameter.
.. note::
CERMINE JAR file can be found at \
`<http://maven.icm.edu.pl/artifactory/simple/kdd-releases/pl/edu/icm/cermine/cermine-impl/>`_.
.. note::
        This fallback uses the \
`CERMINE API <http://cermine.ceon.pl/about.html>`_, and \
hence, uploads the PDF file (so uses network). Check out \
the CERMINE API terms.
:param pdf_file: Path to the PDF file to handle.
:param force_api: Force the use of the Cermine API \
(and do not try to use a local JAR file). Defaults to ``False``.
:param override_local: Use this specific JAR file, instead of the one at \
the default location (``libbmc/external/cermine.jar``).
:returns: Raw output from CERMINE API or ``None`` if an error occurred. \
No post-processing is done.
"""
try:
# Check if we want to load the local JAR from a specific path
local = override_local
# Else, try to stat the JAR file at the expected local path
if (local is None) and (not force_api):
if os.path.isfile(os.path.join(SCRIPT_DIR,
"../external/cermine.jar")):
local = os.path.join(SCRIPT_DIR,
"../external/cermine.jar")
# If we want to force the API use, or we could not get a local JAR
if force_api or (local is None):
print("Using API")
with open(pdf_file, "rb") as fh:
# Query the API
request = requests.post(
CERMINE_BASE_URL + "extract.do",
headers={"Content-Type": "application/binary"},
files={"file": fh}
)
return request.text
# Else, use the local JAR file
else:
return subprocess.check_output([
"java",
"-cp", local,
"pl.edu.icm.cermine.PdfNLMContentExtractor",
"-path", pdf_file]).decode("utf-8")
except (RequestException,
subprocess.CalledProcessError,
FileNotFoundError):
# In case of any error, return None
return None | [
"def",
"cermine",
"(",
"pdf_file",
",",
"force_api",
"=",
"False",
",",
"override_local",
"=",
"None",
")",
":",
"try",
":",
"# Check if we want to load the local JAR from a specific path",
"local",
"=",
"override_local",
"# Else, try to stat the JAR file at the expected loca... | Run `CERMINE <https://github.com/CeON/CERMINE>`_ to extract metadata from \
the given PDF file, to retrieve citations (and more) from the \
provided PDF file. This function returns the raw output of \
CERMINE call.
.. note::
Try to use a local CERMINE JAR file, and falls back to using the API. \
JAR file is expected to be found in \
``libbmc/external/cermine.jar``. You can override this using \
the ``override_local`` parameter.
.. note::
CERMINE JAR file can be found at \
`<http://maven.icm.edu.pl/artifactory/simple/kdd-releases/pl/edu/icm/cermine/cermine-impl/>`_.
.. note::
        This fallback uses the \
`CERMINE API <http://cermine.ceon.pl/about.html>`_, and \
hence, uploads the PDF file (so uses network). Check out \
the CERMINE API terms.
:param pdf_file: Path to the PDF file to handle.
:param force_api: Force the use of the Cermine API \
(and do not try to use a local JAR file). Defaults to ``False``.
:param override_local: Use this specific JAR file, instead of the one at \
the default location (``libbmc/external/cermine.jar``).
:returns: Raw output from CERMINE API or ``None`` if an error occurred. \
No post-processing is done. | [
"Run",
"CERMINE",
"<https",
":",
"//",
"github",
".",
"com",
"/",
"CeON",
"/",
"CERMINE",
">",
"_",
"to",
"extract",
"metadata",
"from",
"\\",
"the",
"given",
"PDF",
"file",
"to",
"retrieve",
"citations",
"(",
"and",
"more",
")",
"from",
"the",
"\\",
... | python | train |
tensorflow/tensorboard | tensorboard/plugins/text/summary.py | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/summary.py#L79-L116 | def pb(name, data, display_name=None, description=None):
"""Create a legacy text summary protobuf.
Arguments:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
data: A Python bytestring (of type bytes), or Unicode string. Or a numpy
data array of those types.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Raises:
ValueError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
try:
tensor = tf.make_tensor_proto(data, dtype=tf.string)
except TypeError as e:
raise ValueError(e)
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString())
summary = tf.Summary()
summary.value.add(tag='%s/text_summary' % name,
metadata=tf_summary_metadata,
tensor=tensor)
return summary | [
"def",
"pb",
"(",
"name",
",",
"data",
",",
"display_name",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"# TODO(nickfelt): remove on-demand imports once dep situation is fixed.",
"import",
"tensorflow",
".",
"compat",
".",
"v1",
"as",
"tf",
"try",
":",... | Create a legacy text summary protobuf.
Arguments:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
data: A Python bytestring (of type bytes), or Unicode string. Or a numpy
data array of those types.
display_name: Optional name for this summary in TensorBoard, as a
`str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
`str`. Markdown is supported. Defaults to empty.
Raises:
ValueError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object. | [
"Create",
"a",
"legacy",
"text",
"summary",
"protobuf",
"."
] | python | train |
hyperledger/indy-node | indy_node/server/domain_req_handler.py | https://github.com/hyperledger/indy-node/blob/8fabd364eaf7d940a56df2911d9215b1e512a2de/indy_node/server/domain_req_handler.py#L86-L114 | def gen_txn_path(self, txn):
"""Return path to state as 'str' type or None"""
txn_type = get_type(txn)
if txn_type not in self.state_update_handlers:
logger.error('Cannot generate id for txn of type {}'.format(txn_type))
return None
if txn_type == NYM:
nym = get_payload_data(txn).get(TARGET_NYM)
binary_digest = domain.make_state_path_for_nym(nym)
return hexlify(binary_digest).decode()
elif txn_type == ATTRIB:
path = domain.prepare_attr_for_state(txn, path_only=True)
return path.decode()
elif txn_type == SCHEMA:
path = domain.prepare_schema_for_state(txn, path_only=True)
return path.decode()
elif txn_type == CLAIM_DEF:
path = domain.prepare_claim_def_for_state(txn, path_only=True)
return path.decode()
elif txn_type == REVOC_REG_DEF:
path = domain.prepare_revoc_def_for_state(txn, path_only=True)
return path.decode()
elif txn_type == REVOC_REG_ENTRY:
path = domain.prepare_revoc_reg_entry_for_state(txn, path_only=True)
return path.decode()
raise NotImplementedError("path construction is not implemented for type {}".format(txn_type)) | [
"def",
"gen_txn_path",
"(",
"self",
",",
"txn",
")",
":",
"txn_type",
"=",
"get_type",
"(",
"txn",
")",
"if",
"txn_type",
"not",
"in",
"self",
".",
"state_update_handlers",
":",
"logger",
".",
"error",
"(",
"'Cannot generate id for txn of type {}'",
".",
"form... | Return path to state as 'str' type or None | [
"Return",
"path",
"to",
"state",
"as",
"str",
"type",
"or",
"None"
] | python | train |
oauthlib/oauthlib | oauthlib/oauth1/rfc5849/endpoints/access_token.py | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/oauth1/rfc5849/endpoints/access_token.py#L34-L54 | def create_access_token(self, request, credentials):
"""Create and save a new access token.
Similar to OAuth 2, indication of granted scopes will be included as a
space separated list in ``oauth_authorized_realms``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The token as an urlencoded string.
"""
request.realms = self.request_validator.get_realms(
request.resource_owner_key, request)
token = {
'oauth_token': self.token_generator(),
'oauth_token_secret': self.token_generator(),
# Backport the authorized scopes indication used in OAuth2
'oauth_authorized_realms': ' '.join(request.realms)
}
token.update(credentials)
self.request_validator.save_access_token(token, request)
return urlencode(token.items()) | [
"def",
"create_access_token",
"(",
"self",
",",
"request",
",",
"credentials",
")",
":",
"request",
".",
"realms",
"=",
"self",
".",
"request_validator",
".",
"get_realms",
"(",
"request",
".",
"resource_owner_key",
",",
"request",
")",
"token",
"=",
"{",
"'... | Create and save a new access token.
Similar to OAuth 2, indication of granted scopes will be included as a
space separated list in ``oauth_authorized_realms``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The token as an urlencoded string. | [
"Create",
"and",
"save",
"a",
"new",
"access",
"token",
"."
] | python | train |
wuher/devil | devil/resource.py | https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/resource.py#L135-L171 | def _process_response(self, response, request):
""" Process the response.
If the response is ``HttpResponse``, does nothing. Otherwise,
serializes, formats and validates the response.
:param response: resource's response. This can be
- ``None``,
- django's ``HttpResponse``
- devil's ``Response``
- dictionary (or list of dictionaries)
- object (or list of objects) that are first serialized into dict
using ``self.factory``.
- plaintext
:returns: Django's ``HttpResponse``
"""
def coerce_response():
""" Coerce the response object into devil structure. """
if not isinstance(response, Response):
return Response(0, response)
return response
if isinstance(response, HttpResponse):
# we don't do anything if resource returns django's http response
return response
devil_res = coerce_response()
if devil_res.content and devil_res.get_code_num() in (0, 200, 201):
# serialize, format and validate
serialized_res = devil_res.content = self._serialize_object(devil_res.content, request)
formatted_res = self._format_response(request, devil_res)
self._validate_output_data(response, serialized_res, formatted_res, request)
else:
# no data -> format only
formatted_res = self._format_response(request, devil_res)
return formatted_res | [
"def",
"_process_response",
"(",
"self",
",",
"response",
",",
"request",
")",
":",
"def",
"coerce_response",
"(",
")",
":",
"\"\"\" Coerce the response object into devil structure. \"\"\"",
"if",
"not",
"isinstance",
"(",
"response",
",",
"Response",
")",
":",
"ret... | Process the response.
If the response is ``HttpResponse``, does nothing. Otherwise,
serializes, formats and validates the response.
:param response: resource's response. This can be
- ``None``,
- django's ``HttpResponse``
- devil's ``Response``
- dictionary (or list of dictionaries)
- object (or list of objects) that are first serialized into dict
using ``self.factory``.
- plaintext
:returns: Django's ``HttpResponse`` | [
"Process",
"the",
"response",
"."
] | python | train |
robertpeteuil/multi-cloud-control | mcc/cldcnct.py | https://github.com/robertpeteuil/multi-cloud-control/blob/f1565af1c0b6ed465ff312d3ccc592ba0609f4a2/mcc/cldcnct.py#L159-L169 | def adj_nodes_aws(aws_nodes):
"""Adjust details specific to AWS."""
for node in aws_nodes:
node.cloud = "aws"
node.cloud_disp = "AWS"
node.private_ips = ip_to_str(node.private_ips)
node.public_ips = ip_to_str(node.public_ips)
node.zone = node.extra['availability']
node.size = node.extra['instance_type']
node.type = node.extra['instance_lifecycle']
return aws_nodes | [
"def",
"adj_nodes_aws",
"(",
"aws_nodes",
")",
":",
"for",
"node",
"in",
"aws_nodes",
":",
"node",
".",
"cloud",
"=",
"\"aws\"",
"node",
".",
"cloud_disp",
"=",
"\"AWS\"",
"node",
".",
"private_ips",
"=",
"ip_to_str",
"(",
"node",
".",
"private_ips",
")",
... | Adjust details specific to AWS. | [
"Adjust",
"details",
"specific",
"to",
"AWS",
"."
] | python | train |
getsentry/raven-python | raven/base.py | https://github.com/getsentry/raven-python/blob/d891c20f0f930153f508e9d698d9de42e910face/raven/base.py#L806-L826 | def captureException(self, exc_info=None, **kwargs):
"""
Creates an event from an exception.
>>> try:
>>> exc_info = sys.exc_info()
>>> client.captureException(exc_info)
>>> finally:
>>> del exc_info
If exc_info is not provided, or is set to True, then this method will
perform the ``exc_info = sys.exc_info()`` and the requisite clean-up
for you.
``kwargs`` are passed through to ``.capture``.
"""
if exc_info is None or exc_info is True:
exc_info = sys.exc_info()
return self.capture(
'raven.events.Exception', exc_info=exc_info, **kwargs) | [
"def",
"captureException",
"(",
"self",
",",
"exc_info",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"exc_info",
"is",
"None",
"or",
"exc_info",
"is",
"True",
":",
"exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
"return",
"self",
".",
"c... | Creates an event from an exception.
>>> try:
>>> exc_info = sys.exc_info()
>>> client.captureException(exc_info)
>>> finally:
>>> del exc_info
If exc_info is not provided, or is set to True, then this method will
perform the ``exc_info = sys.exc_info()`` and the requisite clean-up
for you.
``kwargs`` are passed through to ``.capture``. | [
"Creates",
"an",
"event",
"from",
"an",
"exception",
"."
] | python | train |
PMEAL/OpenPNM | openpnm/materials/VoronoiFibers.py | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/materials/VoronoiFibers.py#L441-L496 | def inhull(self, xyz, pore, tol=1e-7):
r"""
Tests whether points lie within a convex hull or not.
Computes a tesselation of the hull works out the normals of the facets.
        Then tests whether dot(x.normals) < dot(a.normals) where a is the
first vertex of the facets
"""
xyz = np.around(xyz, 10)
# Work out range to span over for pore hull
xmin = xyz[:, 0].min()
xr = (np.ceil(xyz[:, 0].max())-np.floor(xmin)).astype(int)+1
ymin = xyz[:, 1].min()
yr = (np.ceil(xyz[:, 1].max())-np.floor(ymin)).astype(int)+1
zmin = xyz[:, 2].min()
zr = (np.ceil(xyz[:, 2].max())-np.floor(zmin)).astype(int)+1
origin = np.array([xmin, ymin, zmin])
# start index
si = np.floor(origin).astype(int)
xyz -= origin
dom = np.zeros([xr, yr, zr], dtype=np.uint8)
indx, indy, indz = np.indices((xr, yr, zr))
# Calculate the tesselation of the points
hull = sptl.ConvexHull(xyz)
# Assume 3d for now
# Calc normals from the vector cross product of the vectors defined
# by joining points in the simplices
vab = xyz[hull.simplices[:, 0]]-xyz[hull.simplices[:, 1]]
vac = xyz[hull.simplices[:, 0]]-xyz[hull.simplices[:, 2]]
nrmls = np.cross(vab, vac)
# Scale normal vectors to unit length
nrmlen = np.sum(nrmls**2, axis=-1)**(1./2)
nrmls = nrmls*np.tile((1/nrmlen), (3, 1)).T
# Center of Mass
center = np.mean(xyz, axis=0)
# Any point from each simplex
a = xyz[hull.simplices[:, 0]]
# Make sure all normals point inwards
dp = np.sum((np.tile(center, (len(a), 1))-a)*nrmls, axis=-1)
k = dp < 0
nrmls[k] = -nrmls[k]
# Now we want to test whether dot(x,N) >= dot(a,N)
aN = np.sum(nrmls*a, axis=-1)
for plane_index in range(len(a)):
eqx = nrmls[plane_index][0]*(indx)
eqy = nrmls[plane_index][1]*(indy)
eqz = nrmls[plane_index][2]*(indz)
xN = eqx + eqy + eqz
dom[xN - aN[plane_index] >= 0-tol] += 1
dom[dom < len(a)] = 0
dom[dom == len(a)] = 1
ds = np.shape(dom)
temp_arr = np.zeros_like(self._hull_image, dtype=bool)
temp_arr[si[0]:si[0]+ds[0], si[1]:si[1]+ds[1], si[2]:si[2]+ds[2]] = dom
self._hull_image[temp_arr] = pore
del temp_arr | [
"def",
"inhull",
"(",
"self",
",",
"xyz",
",",
"pore",
",",
"tol",
"=",
"1e-7",
")",
":",
"xyz",
"=",
"np",
".",
"around",
"(",
"xyz",
",",
"10",
")",
"# Work out range to span over for pore hull",
"xmin",
"=",
"xyz",
"[",
":",
",",
"0",
"]",
".",
... | r"""
Tests whether points lie within a convex hull or not.
Computes a tesselation of the hull works out the normals of the facets.
        Then tests whether dot(x.normals) < dot(a.normals) where a is the
first vertex of the facets | [
"r",
"Tests",
"whether",
"points",
"lie",
"within",
"a",
"convex",
"hull",
"or",
"not",
".",
"Computes",
"a",
"tesselation",
"of",
"the",
"hull",
"works",
"out",
"the",
"normals",
"of",
"the",
"facets",
".",
"Then",
"tests",
"whether",
"dot",
"(",
"x",
... | python | train |
facebook/pyre-check | sapp/sapp/interactive.py | https://github.com/facebook/pyre-check/blob/4a9604d943d28ef20238505a51acfb1f666328d7/sapp/sapp/interactive.py#L411-L420 | def show(self):
""" More details about the selected issue or trace frame.
"""
self._verify_entrypoint_selected()
if self.current_issue_instance_id != -1:
self._show_current_issue_instance()
return
self._show_current_trace_frame() | [
"def",
"show",
"(",
"self",
")",
":",
"self",
".",
"_verify_entrypoint_selected",
"(",
")",
"if",
"self",
".",
"current_issue_instance_id",
"!=",
"-",
"1",
":",
"self",
".",
"_show_current_issue_instance",
"(",
")",
"return",
"self",
".",
"_show_current_trace_fr... | More details about the selected issue or trace frame. | [
"More",
"details",
"about",
"the",
"selected",
"issue",
"or",
"trace",
"frame",
"."
] | python | train |
kennedyshead/aioasuswrt | aioasuswrt/connection.py | https://github.com/kennedyshead/aioasuswrt/blob/0c4336433727abbb7b324ee29e4c5382be9aaa2b/aioasuswrt/connection.py#L92-L118 | async def async_run_command(self, command, first_try=True):
"""Run a command through a Telnet connection.
Connect to the Telnet server if not currently connected, otherwise
use the existing connection.
"""
await self.async_connect()
try:
with (await self._io_lock):
self._writer.write('{}\n'.format(
"%s && %s" % (
_PATH_EXPORT_COMMAND, command)).encode('ascii'))
data = ((await asyncio.wait_for(self._reader.readuntil(
self._prompt_string), 9)).split(b'\n')[1:-1])
except (BrokenPipeError, LimitOverrunError):
if first_try:
return await self.async_run_command(command, False)
else:
_LOGGER.warning("connection is lost to host.")
                return []
except TimeoutError:
_LOGGER.error("Host timeout.")
return []
finally:
self._writer.close()
return [line.decode('utf-8') for line in data] | [
"async",
"def",
"async_run_command",
"(",
"self",
",",
"command",
",",
"first_try",
"=",
"True",
")",
":",
"await",
"self",
".",
"async_connect",
"(",
")",
"try",
":",
"with",
"(",
"await",
"self",
".",
"_io_lock",
")",
":",
"self",
".",
"_writer",
"."... | Run a command through a Telnet connection.
Connect to the Telnet server if not currently connected, otherwise
use the existing connection. | [
"Run",
"a",
"command",
"through",
"a",
"Telnet",
"connection",
".",
"Connect",
"to",
"the",
"Telnet",
"server",
"if",
"not",
"currently",
"connected",
"otherwise",
"use",
"the",
"existing",
"connection",
"."
] | python | train |
druids/django-chamber | chamber/utils/__init__.py | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/utils/__init__.py#L17-L28 | def get_class_method(cls_or_inst, method_name):
"""
Returns a method from a given class or instance. When the method doest not exist, it returns `None`. Also works with
properties and cached properties.
"""
cls = cls_or_inst if isinstance(cls_or_inst, type) else cls_or_inst.__class__
meth = getattr(cls, method_name, None)
if isinstance(meth, property):
meth = meth.fget
elif isinstance(meth, cached_property):
meth = meth.func
return meth | [
"def",
"get_class_method",
"(",
"cls_or_inst",
",",
"method_name",
")",
":",
"cls",
"=",
"cls_or_inst",
"if",
"isinstance",
"(",
"cls_or_inst",
",",
"type",
")",
"else",
"cls_or_inst",
".",
"__class__",
"meth",
"=",
"getattr",
"(",
"cls",
",",
"method_name",
... | Returns a method from a given class or instance. When the method doest not exist, it returns `None`. Also works with
properties and cached properties. | [
"Returns",
"a",
"method",
"from",
"a",
"given",
"class",
"or",
"instance",
".",
"When",
"the",
"method",
"doest",
"not",
"exist",
"it",
"returns",
"None",
".",
"Also",
"works",
"with",
"properties",
"and",
"cached",
"properties",
"."
] | python | train |
AlecAivazis/graphql-over-kafka | nautilus/management/scripts/events/publish.py | https://github.com/AlecAivazis/graphql-over-kafka/blob/70e2acef27a2f87355590be1a6ca60ce3ab4d09c/nautilus/management/scripts/events/publish.py#L15-L38 | def publish(type, payload):
"""
Publish a message with the specified action_type and payload over the
event system. Useful for debugging.
"""
async def _produce():
# fire an action with the given values
await producer.send(action_type=type, payload=payload)
# notify the user that we were successful
print("Successfully dispatched action with type {}.".format(type))
# create a producer
producer = ActionHandler()
# start the producer
producer.start()
# get the current event loop
loop = asyncio.get_event_loop()
# run the production sequence
loop.run_until_complete(_produce())
# start the producer
producer.stop() | [
"def",
"publish",
"(",
"type",
",",
"payload",
")",
":",
"async",
"def",
"_produce",
"(",
")",
":",
"# fire an action with the given values",
"await",
"producer",
".",
"send",
"(",
"action_type",
"=",
"type",
",",
"payload",
"=",
"payload",
")",
"# notify the ... | Publish a message with the specified action_type and payload over the
event system. Useful for debugging. | [
"Publish",
"a",
"message",
"with",
"the",
"specified",
"action_type",
"and",
"payload",
"over",
"the",
"event",
"system",
".",
"Useful",
"for",
"debugging",
"."
] | python | train |
vertexproject/synapse | synapse/lib/cli.py | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/cli.py#L310-L316 | def addCmdClass(self, ctor, **opts):
'''
Add a Cmd subclass to this cli.
'''
item = ctor(self, **opts)
name = item.getCmdName()
self.cmds[name] = item | [
"def",
"addCmdClass",
"(",
"self",
",",
"ctor",
",",
"*",
"*",
"opts",
")",
":",
"item",
"=",
"ctor",
"(",
"self",
",",
"*",
"*",
"opts",
")",
"name",
"=",
"item",
".",
"getCmdName",
"(",
")",
"self",
".",
"cmds",
"[",
"name",
"]",
"=",
"item"
... | Add a Cmd subclass to this cli. | [
"Add",
"a",
"Cmd",
"subclass",
"to",
"this",
"cli",
"."
] | python | train |
christian-oudard/htmltreediff | htmltreediff/diff_core.py | https://github.com/christian-oudard/htmltreediff/blob/0e28f56492ae7e69bb0f74f9a79a8909a5ad588d/htmltreediff/diff_core.py#L246-L253 | def match_blocks(hash_func, old_children, new_children):
"""Use difflib to find matching blocks."""
sm = difflib.SequenceMatcher(
_is_junk,
a=[hash_func(c) for c in old_children],
b=[hash_func(c) for c in new_children],
)
return sm | [
"def",
"match_blocks",
"(",
"hash_func",
",",
"old_children",
",",
"new_children",
")",
":",
"sm",
"=",
"difflib",
".",
"SequenceMatcher",
"(",
"_is_junk",
",",
"a",
"=",
"[",
"hash_func",
"(",
"c",
")",
"for",
"c",
"in",
"old_children",
"]",
",",
"b",
... | Use difflib to find matching blocks. | [
"Use",
"difflib",
"to",
"find",
"matching",
"blocks",
"."
] | python | train |
sentinel-hub/eo-learn | core/eolearn/core/utilities.py | https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/core/eolearn/core/utilities.py#L325-L337 | def get_common_timestamps(source, target):
"""Return indices of timestamps from source that are also found in target.
:param source: timestamps from source
:type source: list of datetime objects
:param target: timestamps from target
:type target: list of datetime objects
:return: indices of timestamps from source that are also found in target
:rtype: list of ints
"""
remove_from_source = set(source).difference(target)
remove_from_source_idxs = [source.index(rm_date) for rm_date in remove_from_source]
return [idx for idx, _ in enumerate(source) if idx not in remove_from_source_idxs] | [
"def",
"get_common_timestamps",
"(",
"source",
",",
"target",
")",
":",
"remove_from_source",
"=",
"set",
"(",
"source",
")",
".",
"difference",
"(",
"target",
")",
"remove_from_source_idxs",
"=",
"[",
"source",
".",
"index",
"(",
"rm_date",
")",
"for",
"rm_... | Return indices of timestamps from source that are also found in target.
:param source: timestamps from source
:type source: list of datetime objects
:param target: timestamps from target
:type target: list of datetime objects
:return: indices of timestamps from source that are also found in target
:rtype: list of ints | [
"Return",
"indices",
"of",
"timestamps",
"from",
"source",
"that",
"are",
"also",
"found",
"in",
"target",
".",
":",
"param",
"source",
":",
"timestamps",
"from",
"source",
":",
"type",
"source",
":",
"list",
"of",
"datetime",
"objects",
":",
"param",
"tar... | python | train |