repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
saltstack/salt | salt/thorium/reg.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/reg.py#L95-L133 | def mean(name, add, match):
'''
Accept a numeric value from the matched events and store a running average
of the values in the given register. If the specified value is not numeric
it will be skipped
USAGE:
.. code-block:: yaml
foo:
reg.mean:
- add: data_field
- match: my/custom/event
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if name not in __reg__:
__reg__[name] = {}
__reg__[name]['val'] = 0
__reg__[name]['total'] = 0
__reg__[name]['count'] = 0
for event in __events__:
try:
event_data = event['data']['data']
except KeyError:
event_data = event['data']
if salt.utils.stringutils.expr_match(event['tag'], match):
if add in event_data:
try:
comp = int(event_data)
except ValueError:
continue
__reg__[name]['total'] += comp
__reg__[name]['count'] += 1
__reg__[name]['val'] = __reg__[name]['total'] / __reg__[name]['count']
return ret | [
"def",
"mean",
"(",
"name",
",",
"add",
",",
"match",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'comment'",
":",
"''",
",",
"'result'",
":",
"True",
"}",
"if",
"name",
"not",
"in",
"__reg__",
":",
... | Accept a numeric value from the matched events and store a running average
of the values in the given register. If the specified value is not numeric
it will be skipped
USAGE:
.. code-block:: yaml
foo:
reg.mean:
- add: data_field
- match: my/custom/event | [
"Accept",
"a",
"numeric",
"value",
"from",
"the",
"matched",
"events",
"and",
"store",
"a",
"running",
"average",
"of",
"the",
"values",
"in",
"the",
"given",
"register",
".",
"If",
"the",
"specified",
"value",
"is",
"not",
"numeric",
"it",
"will",
"be",
... | python | train |
KenjiTakahashi/td | td/model.py | https://github.com/KenjiTakahashi/td/blob/7311eabc63efe6fe6600687c3026f0837454c2e4/td/model.py#L361-L371 | def setOptions(self, glob=False, **kwargs):
"""Set option(s).
:glob: If True, stores specified options globally.
:kwargs: Dictionary of options and values to set.
"""
if glob:
self.globalOptions.update(kwargs)
else:
self.options.update(kwargs) | [
"def",
"setOptions",
"(",
"self",
",",
"glob",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"glob",
":",
"self",
".",
"globalOptions",
".",
"update",
"(",
"kwargs",
")",
"else",
":",
"self",
".",
"options",
".",
"update",
"(",
"kwargs",
"... | Set option(s).
:glob: If True, stores specified options globally.
:kwargs: Dictionary of options and values to set. | [
"Set",
"option",
"(",
"s",
")",
"."
] | python | train |
klahnakoski/pyLibrary | jx_python/flat_list.py | https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_python/flat_list.py#L124-L138 | def _select1(data, field, depth, output):
"""
SELECT A SINGLE FIELD
"""
for d in data:
for i, f in enumerate(field[depth:]):
d = d[f]
if d == None:
output.append(None)
break
elif is_list(d):
_select1(d, field, i + 1, output)
break
else:
output.append(d) | [
"def",
"_select1",
"(",
"data",
",",
"field",
",",
"depth",
",",
"output",
")",
":",
"for",
"d",
"in",
"data",
":",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"field",
"[",
"depth",
":",
"]",
")",
":",
"d",
"=",
"d",
"[",
"f",
"]",
"if",
... | SELECT A SINGLE FIELD | [
"SELECT",
"A",
"SINGLE",
"FIELD"
] | python | train |
Clinical-Genomics/scout | scout/adapter/mongo/index.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/index.py#L54-L72 | def update_indexes(self):
"""Update the indexes
If there are any indexes that are not added to the database, add those.
"""
LOG.info("Updating indexes...")
nr_updated = 0
for collection_name in INDEXES:
existing_indexes = self.indexes(collection_name)
indexes = INDEXES[collection_name]
for index in indexes:
index_name = index.document.get('name')
if index_name not in existing_indexes:
nr_updated += 1
LOG.info("Adding index : %s" % index_name)
self.db[collection_name].create_indexes(indexes)
if nr_updated == 0:
LOG.info("All indexes in place") | [
"def",
"update_indexes",
"(",
"self",
")",
":",
"LOG",
".",
"info",
"(",
"\"Updating indexes...\"",
")",
"nr_updated",
"=",
"0",
"for",
"collection_name",
"in",
"INDEXES",
":",
"existing_indexes",
"=",
"self",
".",
"indexes",
"(",
"collection_name",
")",
"inde... | Update the indexes
If there are any indexes that are not added to the database, add those. | [
"Update",
"the",
"indexes",
"If",
"there",
"are",
"any",
"indexes",
"that",
"are",
"not",
"added",
"to",
"the",
"database",
"add",
"those",
"."
] | python | test |
coleifer/peewee | playhouse/sqlite_ext.py | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L719-L807 | def ClosureTable(model_class, foreign_key=None, referencing_class=None,
referencing_key=None):
"""Model factory for the transitive closure extension."""
if referencing_class is None:
referencing_class = model_class
if foreign_key is None:
for field_obj in model_class._meta.refs:
if field_obj.rel_model is model_class:
foreign_key = field_obj
break
else:
raise ValueError('Unable to find self-referential foreign key.')
source_key = model_class._meta.primary_key
if referencing_key is None:
referencing_key = source_key
class BaseClosureTable(VirtualModel):
depth = VirtualField(IntegerField)
id = VirtualField(IntegerField)
idcolumn = VirtualField(TextField)
parentcolumn = VirtualField(TextField)
root = VirtualField(IntegerField)
tablename = VirtualField(TextField)
class Meta:
extension_module = 'transitive_closure'
@classmethod
def descendants(cls, node, depth=None, include_node=False):
query = (model_class
.select(model_class, cls.depth.alias('depth'))
.join(cls, on=(source_key == cls.id))
.where(cls.root == node)
.objects())
if depth is not None:
query = query.where(cls.depth == depth)
elif not include_node:
query = query.where(cls.depth > 0)
return query
@classmethod
def ancestors(cls, node, depth=None, include_node=False):
query = (model_class
.select(model_class, cls.depth.alias('depth'))
.join(cls, on=(source_key == cls.root))
.where(cls.id == node)
.objects())
if depth:
query = query.where(cls.depth == depth)
elif not include_node:
query = query.where(cls.depth > 0)
return query
@classmethod
def siblings(cls, node, include_node=False):
if referencing_class is model_class:
# self-join
fk_value = node.__data__.get(foreign_key.name)
query = model_class.select().where(foreign_key == fk_value)
else:
# siblings as given in reference_class
siblings = (referencing_class
.select(referencing_key)
.join(cls, on=(foreign_key == cls.root))
.where((cls.id == node) & (cls.depth == 1)))
# the according models
query = (model_class
.select()
.where(source_key << siblings)
.objects())
if not include_node:
query = query.where(source_key != node)
return query
class Meta:
database = referencing_class._meta.database
options = {
'tablename': referencing_class._meta.table_name,
'idcolumn': referencing_key.column_name,
'parentcolumn': foreign_key.column_name}
primary_key = False
name = '%sClosure' % model_class.__name__
return type(name, (BaseClosureTable,), {'Meta': Meta}) | [
"def",
"ClosureTable",
"(",
"model_class",
",",
"foreign_key",
"=",
"None",
",",
"referencing_class",
"=",
"None",
",",
"referencing_key",
"=",
"None",
")",
":",
"if",
"referencing_class",
"is",
"None",
":",
"referencing_class",
"=",
"model_class",
"if",
"foreig... | Model factory for the transitive closure extension. | [
"Model",
"factory",
"for",
"the",
"transitive",
"closure",
"extension",
"."
] | python | train |
StackStorm/pybind | pybind/nos/v7_2_0/isns/isns_vrf/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/isns/isns_vrf/__init__.py#L143-L166 | def _set_isns_ipaddress(self, v, load=False):
"""
Setter method for isns_ipaddress, mapped from YANG variable /isns/isns_vrf/isns_ipaddress (inet:ip-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_isns_ipaddress is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_isns_ipaddress() directly.
YANG Description: This specifies the IP address of the vrf instance.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="isns-ipaddress", rest_name="isns-ipaddress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'iSNS VRF forwarding IP address A.B.C.D format', u'hidden': u'isns-ipaddress'}}, namespace='urn:brocade.com:mgmt:brocade-isns', defining_module='brocade-isns', yang_type='inet:ip-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """isns_ipaddress must be of a type compatible with inet:ip-address""",
'defined-type': "inet:ip-address",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}),], is_leaf=True, yang_name="isns-ipaddress", rest_name="isns-ipaddress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'iSNS VRF forwarding IP address A.B.C.D format', u'hidden': u'isns-ipaddress'}}, namespace='urn:brocade.com:mgmt:brocade-isns', defining_module='brocade-isns', yang_type='inet:ip-address', is_config=True)""",
})
self.__isns_ipaddress = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_isns_ipaddress",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
... | Setter method for isns_ipaddress, mapped from YANG variable /isns/isns_vrf/isns_ipaddress (inet:ip-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_isns_ipaddress is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_isns_ipaddress() directly.
YANG Description: This specifies the IP address of the vrf instance. | [
"Setter",
"method",
"for",
"isns_ipaddress",
"mapped",
"from",
"YANG",
"variable",
"/",
"isns",
"/",
"isns_vrf",
"/",
"isns_ipaddress",
"(",
"inet",
":",
"ip",
"-",
"address",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
... | python | train |
sods/ods | pods/notebook.py | https://github.com/sods/ods/blob/3995c659f25a0a640f6009ed7fcc2559ce659b1d/pods/notebook.py#L68-L98 | def code_toggle(start_show=False, message=None):
"""Toggling on and off code in a notebook.
:param start_show: Whether to display the code or not on first load (default is False).
:type start_show: bool
:param message: the message used to toggle display of the code.
:type message: string
The tip that this idea is
based on is from Damian Kao (http://blog.nextgenetics.net/?e=102)."""
html ='<script>\n'
if message is None:
message = u'The raw code for this jupyter notebook can be hidden for easier reading.'
if start_show:
html += u'code_show=true;\n'
else:
html += u'code_show=false;\n'
html+='''function code_toggle() {
if (code_show){
$('div.input').show();
} else {
$('div.input').hide();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
'''
html += message + ' To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.'
display(HTML(html)) | [
"def",
"code_toggle",
"(",
"start_show",
"=",
"False",
",",
"message",
"=",
"None",
")",
":",
"html",
"=",
"'<script>\\n'",
"if",
"message",
"is",
"None",
":",
"message",
"=",
"u'The raw code for this jupyter notebook can be hidden for easier reading.'",
"if",
"start_... | Toggling on and off code in a notebook.
:param start_show: Whether to display the code or not on first load (default is False).
:type start_show: bool
:param message: the message used to toggle display of the code.
:type message: string
The tip that this idea is
based on is from Damian Kao (http://blog.nextgenetics.net/?e=102). | [
"Toggling",
"on",
"and",
"off",
"code",
"in",
"a",
"notebook",
".",
":",
"param",
"start_show",
":",
"Whether",
"to",
"display",
"the",
"code",
"or",
"not",
"on",
"first",
"load",
"(",
"default",
"is",
"False",
")",
".",
":",
"type",
"start_show",
":",... | python | train |
GeospatialPython/pyshp | shapefile.py | https://github.com/GeospatialPython/pyshp/blob/71231ddc5aa54f155d4f0563c56006fffbfc84e7/shapefile.py#L1303-L1335 | def __dbfHeader(self):
"""Writes the dbf header and field descriptors."""
f = self.__getFileObj(self.dbf)
f.seek(0)
version = 3
year, month, day = time.localtime()[:3]
year -= 1900
# Remove deletion flag placeholder from fields
for field in self.fields:
if field[0].startswith("Deletion"):
self.fields.remove(field)
numRecs = self.recNum
numFields = len(self.fields)
headerLength = numFields * 32 + 33
if headerLength >= 65535:
raise ShapefileException(
"Shapefile dbf header length exceeds maximum length.")
recordLength = sum([int(field[2]) for field in self.fields]) + 1
header = pack('<BBBBLHH20x', version, year, month, day, numRecs,
headerLength, recordLength)
f.write(header)
# Field descriptors
for field in self.fields:
name, fieldType, size, decimal = field
name = b(name, self.encoding, self.encodingErrors)
name = name.replace(b' ', b'_')
name = name.ljust(11).replace(b' ', b'\x00')
fieldType = b(fieldType, 'ascii')
size = int(size)
fld = pack('<11sc4xBB14x', name, fieldType, size, decimal)
f.write(fld)
# Terminator
f.write(b'\r') | [
"def",
"__dbfHeader",
"(",
"self",
")",
":",
"f",
"=",
"self",
".",
"__getFileObj",
"(",
"self",
".",
"dbf",
")",
"f",
".",
"seek",
"(",
"0",
")",
"version",
"=",
"3",
"year",
",",
"month",
",",
"day",
"=",
"time",
".",
"localtime",
"(",
")",
"... | Writes the dbf header and field descriptors. | [
"Writes",
"the",
"dbf",
"header",
"and",
"field",
"descriptors",
"."
] | python | train |
theislab/scanpy | scanpy/neighbors/__init__.py | https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/neighbors/__init__.py#L862-L870 | def _compute_Lp_matrix(self):
"""See Fouss et al. (2006) and von Luxburg et al. (2007).
See Proposition 6 in von Luxburg (2007) and the inline equations
right in the text above.
"""
self.Lp = sum([1/self.eigen_values[i]
* np.outer(self.eigen_basis[:, i], self.eigen_basis[:, i])
for i in range(1, self.eigen_values.size)]) | [
"def",
"_compute_Lp_matrix",
"(",
"self",
")",
":",
"self",
".",
"Lp",
"=",
"sum",
"(",
"[",
"1",
"/",
"self",
".",
"eigen_values",
"[",
"i",
"]",
"*",
"np",
".",
"outer",
"(",
"self",
".",
"eigen_basis",
"[",
":",
",",
"i",
"]",
",",
"self",
"... | See Fouss et al. (2006) and von Luxburg et al. (2007).
See Proposition 6 in von Luxburg (2007) and the inline equations
right in the text above. | [
"See",
"Fouss",
"et",
"al",
".",
"(",
"2006",
")",
"and",
"von",
"Luxburg",
"et",
"al",
".",
"(",
"2007",
")",
"."
] | python | train |
clalancette/pycdlib | pycdlib/headervd.py | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/headervd.py#L83-L195 | def parse(self, vd, extent_loc):
# type: (bytes, int) -> None
'''
Parse a Volume Descriptor out of a string.
Parameters:
vd - The string containing the Volume Descriptor.
extent_loc - The location on the ISO of this Volume Descriptor.
Returns:
Nothing.
'''
################ PVD VERSION ######################
(descriptor_type, identifier, self.version, self.flags,
self.system_identifier, self.volume_identifier, unused1,
space_size_le, space_size_be, self.escape_sequences, set_size_le,
set_size_be, seqnum_le, seqnum_be, logical_block_size_le,
logical_block_size_be, path_table_size_le, path_table_size_be,
self.path_table_location_le, self.optional_path_table_location_le,
self.path_table_location_be, self.optional_path_table_location_be,
root_dir_record, self.volume_set_identifier, pub_ident_str,
prepare_ident_str, app_ident_str, self.copyright_file_identifier,
self.abstract_file_identifier, self.bibliographic_file_identifier,
vol_create_date_str, vol_mod_date_str, vol_expire_date_str,
vol_effective_date_str, self.file_structure_version, unused2,
self.application_use, zero_unused) = struct.unpack_from(self.FMT, vd, 0)
# According to Ecma-119, 8.4.1, the primary volume descriptor type
# should be 1.
if descriptor_type != self._vd_type:
raise pycdlibexception.PyCdlibInvalidISO('Invalid volume descriptor')
# According to Ecma-119, 8.4.2, the identifier should be 'CD001'.
if identifier != b'CD001':
raise pycdlibexception.PyCdlibInvalidISO('invalid CD isoIdentification')
# According to Ecma-119, 8.4.3, the version should be 1 (or 2 for
# ISO9660:1999)
expected_versions = [1]
if self._vd_type == VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY:
expected_versions.append(2)
if self.version not in expected_versions:
raise pycdlibexception.PyCdlibInvalidISO('Invalid volume descriptor version %d' % (self.version))
# According to Ecma-119, 8.4.4, the first flags field should be 0 for a Primary.
if self._vd_type == VOLUME_DESCRIPTOR_TYPE_PRIMARY and self.flags != 0:
raise pycdlibexception.PyCdlibInvalidISO('PVD flags field is not zero')
# According to Ecma-119, 8.4.5, the first unused field (after the
# system identifier and volume identifier) should be 0.
if unused1 != 0:
raise pycdlibexception.PyCdlibInvalidISO('data in 2nd unused field not zero')
# According to Ecma-119, 8.4.9, the escape sequences for a PVD should
# be 32 zero-bytes. However, we have seen ISOs in the wild (Fantastic
# Night Dreams - Cotton Original (Japan).cue from the psx redump
# collection) that don't have this set to 0, so allow anything here.
# According to Ecma-119, 8.4.30, the file structure version should be 1.
# However, we have seen ISOs in the wild that that don't have this
# properly set to one. In those cases, forcibly set it to one and let
# it pass.
if self._vd_type == VOLUME_DESCRIPTOR_TYPE_PRIMARY:
if self.file_structure_version != 1:
self.file_structure_version = 1
elif self._vd_type == VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY:
if self.file_structure_version not in (1, 2):
raise pycdlibexception.PyCdlibInvalidISO('File structure version expected to be 1')
# According to Ecma-119, 8.4.31, the second unused field should be 0.
if unused2 != 0:
raise pycdlibexception.PyCdlibInvalidISO('data in 2nd unused field not zero')
# According to Ecma-119, the last 653 bytes of the VD should be all 0.
# However, we have seen ISOs in the wild that do not follow this, so
# relax the check.
# Check to make sure that the little-endian and big-endian versions
# of the parsed data agree with each other.
if space_size_le != utils.swab_32bit(space_size_be):
raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian space size disagree')
self.space_size = space_size_le
if set_size_le != utils.swab_16bit(set_size_be):
raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian set size disagree')
self.set_size = set_size_le
if seqnum_le != utils.swab_16bit(seqnum_be):
raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian seqnum disagree')
self.seqnum = seqnum_le
if logical_block_size_le != utils.swab_16bit(logical_block_size_be):
raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian logical block size disagree')
self.log_block_size = logical_block_size_le
if path_table_size_le != utils.swab_32bit(path_table_size_be):
raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian path table size disagree')
self.path_tbl_size = path_table_size_le
self.path_table_num_extents = utils.ceiling_div(self.path_tbl_size, 4096) * 2
self.path_table_location_be = utils.swab_32bit(self.path_table_location_be)
self.publisher_identifier = FileOrTextIdentifier()
self.publisher_identifier.parse(pub_ident_str)
self.preparer_identifier = FileOrTextIdentifier()
self.preparer_identifier.parse(prepare_ident_str)
self.application_identifier = FileOrTextIdentifier()
self.application_identifier.parse(app_ident_str)
self.volume_creation_date = dates.VolumeDescriptorDate()
self.volume_creation_date.parse(vol_create_date_str)
self.volume_modification_date = dates.VolumeDescriptorDate()
self.volume_modification_date.parse(vol_mod_date_str)
self.volume_expiration_date = dates.VolumeDescriptorDate()
self.volume_expiration_date.parse(vol_expire_date_str)
self.volume_effective_date = dates.VolumeDescriptorDate()
self.volume_effective_date.parse(vol_effective_date_str)
self.root_dir_record.parse(self, root_dir_record, None)
self.orig_extent_loc = extent_loc
self._initialized = True | [
"def",
"parse",
"(",
"self",
",",
"vd",
",",
"extent_loc",
")",
":",
"# type: (bytes, int) -> None",
"################ PVD VERSION ######################",
"(",
"descriptor_type",
",",
"identifier",
",",
"self",
".",
"version",
",",
"self",
".",
"flags",
",",
"self"... | Parse a Volume Descriptor out of a string.
Parameters:
vd - The string containing the Volume Descriptor.
extent_loc - The location on the ISO of this Volume Descriptor.
Returns:
Nothing. | [
"Parse",
"a",
"Volume",
"Descriptor",
"out",
"of",
"a",
"string",
"."
] | python | train |
garethr/django-timelog | src/timelog/lib.py | https://github.com/garethr/django-timelog/blob/84c7015248a82faccb9d3fe4e6014645cc9ec103/src/timelog/lib.py#L72-L132 | def analyze_log_file(logfile, pattern, reverse_paths=True, progress=True):
"Given a log file and regex group and extract the performance data"
if progress:
lines = count_lines_in(logfile)
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=lines+1).start()
counter = 0
data = {}
compiled_pattern = compile(pattern)
for line in fileinput.input([logfile]):
if progress:
counter = counter + 1
parsed = compiled_pattern.findall(line)[0]
date = parsed[0]
method = parsed[1]
path = parsed[2]
status = parsed[3]
time = parsed[4]
sql = parsed[5]
sqltime = parsed[6]
try:
ignore = False
for ignored_path in IGNORE_PATHS:
compiled_path = compile(ignored_path)
if compiled_path.match(path):
ignore = True
if not ignore:
if reverse_paths:
view = view_name_from(path)
else:
view = path
key = "%s-%s-%s" % (view, status, method)
try:
data[key]['count'] = data[key]['count'] + 1
data[key]['times'].append(float(time))
data[key]['sql'].append(int(sql))
data[key]['sqltime'].append(float(sqltime))
except KeyError:
data[key] = {
'count': 1,
'status': status,
'view': view,
'method': method,
'times': [float(time)],
'sql': [int(sql)],
'sqltime': [float(sqltime)],
}
except Resolver404:
pass
if progress:
pbar.update(counter)
if progress:
pbar.finish()
return data | [
"def",
"analyze_log_file",
"(",
"logfile",
",",
"pattern",
",",
"reverse_paths",
"=",
"True",
",",
"progress",
"=",
"True",
")",
":",
"if",
"progress",
":",
"lines",
"=",
"count_lines_in",
"(",
"logfile",
")",
"pbar",
"=",
"ProgressBar",
"(",
"widgets",
"=... | Given a log file and regex group and extract the performance data | [
"Given",
"a",
"log",
"file",
"and",
"regex",
"group",
"and",
"extract",
"the",
"performance",
"data"
] | python | train |
Gandi/gandi.cli | gandi/cli/modules/forward.py | https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/forward.py#L28-L34 | def create(cls, domain, source, destinations):
"""Create a domain mail forward."""
cls.echo('Creating mail forward %s@%s' % (source, domain))
options = {'destinations': list(destinations)}
result = cls.call('domain.forward.create', domain, source, options)
return result | [
"def",
"create",
"(",
"cls",
",",
"domain",
",",
"source",
",",
"destinations",
")",
":",
"cls",
".",
"echo",
"(",
"'Creating mail forward %s@%s'",
"%",
"(",
"source",
",",
"domain",
")",
")",
"options",
"=",
"{",
"'destinations'",
":",
"list",
"(",
"des... | Create a domain mail forward. | [
"Create",
"a",
"domain",
"mail",
"forward",
"."
] | python | train |
LordDarkula/chess_py | chess_py/core/algebraic/converter.py | https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/converter.py#L308-L330 | def make_legal(move, position):
"""
Converts an incomplete move (initial ``Location`` not specified)
and the corresponding position into the a complete move
with the most likely starting point specified. If no moves match, ``None``
is returned.
:type: move: Move
:type: position: Board
:rtype: Move
"""
assert isinstance(move, Move)
for legal_move in position.all_possible_moves(move.color):
if move.status == notation_const.LONG_ALG:
if move.end_loc == legal_move.end_loc and \
move.start_loc == legal_move.start_loc:
return legal_move
elif move == legal_move:
return legal_move
raise ValueError("Move {} not legal in \n{}".format(repr(move), position)) | [
"def",
"make_legal",
"(",
"move",
",",
"position",
")",
":",
"assert",
"isinstance",
"(",
"move",
",",
"Move",
")",
"for",
"legal_move",
"in",
"position",
".",
"all_possible_moves",
"(",
"move",
".",
"color",
")",
":",
"if",
"move",
".",
"status",
"==",
... | Converts an incomplete move (initial ``Location`` not specified)
and the corresponding position into the a complete move
with the most likely starting point specified. If no moves match, ``None``
is returned.
:type: move: Move
:type: position: Board
:rtype: Move | [
"Converts",
"an",
"incomplete",
"move",
"(",
"initial",
"Location",
"not",
"specified",
")",
"and",
"the",
"corresponding",
"position",
"into",
"the",
"a",
"complete",
"move",
"with",
"the",
"most",
"likely",
"starting",
"point",
"specified",
".",
"If",
"no",
... | python | train |
henzk/django-productline | django_productline/tasks.py | https://github.com/henzk/django-productline/blob/24ff156924c1a8c07b99cbb8a1de0a42b8d81f60/django_productline/tasks.py#L260-L266 | def export_context(target_zip):
"""
Append context.json to target_zip
"""
from django_productline import utils
context_file = tasks.get_context_path()
return utils.create_or_append_to_zip(context_file, target_zip, 'context.json') | [
"def",
"export_context",
"(",
"target_zip",
")",
":",
"from",
"django_productline",
"import",
"utils",
"context_file",
"=",
"tasks",
".",
"get_context_path",
"(",
")",
"return",
"utils",
".",
"create_or_append_to_zip",
"(",
"context_file",
",",
"target_zip",
",",
... | Append context.json to target_zip | [
"Append",
"context",
".",
"json",
"to",
"target_zip"
] | python | train |
noxdafox/clipspy | clips/facts.py | https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/facts.py#L438-L441 | def multifield(self):
"""True if the slot is a multifield slot."""
return bool(lib.EnvDeftemplateSlotMultiP(
self._env, self._tpl, self._name)) | [
"def",
"multifield",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"lib",
".",
"EnvDeftemplateSlotMultiP",
"(",
"self",
".",
"_env",
",",
"self",
".",
"_tpl",
",",
"self",
".",
"_name",
")",
")"
] | True if the slot is a multifield slot. | [
"True",
"if",
"the",
"slot",
"is",
"a",
"multifield",
"slot",
"."
] | python | train |
pyQode/pyqode.core | pyqode/core/managers/backend.py | https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/managers/backend.py#L161-L194 | def send_request(self, worker_class_or_function, args, on_receive=None):
"""
Requests some work to be done by the backend. You can get notified of
the work results by passing a callback (on_receive).
:param worker_class_or_function: Worker class or function
:param args: worker args, any Json serializable objects
:param on_receive: an optional callback executed when we receive the
worker's results. The callback will be called with one arguments:
the results of the worker (object)
:raise: backend.NotRunning if the backend process is not running.
"""
if not self.running:
try:
# try to restart the backend if it crashed.
self.start(self.server_script, interpreter=self.interpreter,
args=self.args)
except AttributeError:
pass # not started yet
finally:
# caller should try again, later
raise NotRunning()
else:
comm('sending request, worker=%r' % worker_class_or_function)
# create a socket, the request will be send as soon as the socket
# has connected
socket = JsonTcpClient(
self.editor, self._port, worker_class_or_function, args,
on_receive=on_receive)
socket.finished.connect(self._rm_socket)
self._sockets.append(socket)
# restart heartbeat timer
self._heartbeat_timer.start() | [
"def",
"send_request",
"(",
"self",
",",
"worker_class_or_function",
",",
"args",
",",
"on_receive",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"running",
":",
"try",
":",
"# try to restart the backend if it crashed.",
"self",
".",
"start",
"(",
"self",
... | Requests some work to be done by the backend. You can get notified of
the work results by passing a callback (on_receive).
:param worker_class_or_function: Worker class or function
:param args: worker args, any Json serializable objects
:param on_receive: an optional callback executed when we receive the
worker's results. The callback will be called with one arguments:
the results of the worker (object)
:raise: backend.NotRunning if the backend process is not running. | [
"Requests",
"some",
"work",
"to",
"be",
"done",
"by",
"the",
"backend",
".",
"You",
"can",
"get",
"notified",
"of",
"the",
"work",
"results",
"by",
"passing",
"a",
"callback",
"(",
"on_receive",
")",
"."
] | python | train |
MDAnalysis/GridDataFormats | gridData/OpenDX.py | https://github.com/MDAnalysis/GridDataFormats/blob/3eeb0432f8cf856912436e4f3e7aba99d3c916be/gridData/OpenDX.py#L353-L393 | def write(self, file):
"""Write the *class array* section.
Parameters
----------
file : file
Raises
------
ValueError
If the `dxtype` is not a valid type, :exc:`ValueError` is
raised.
"""
if self.type not in self.dx_types:
raise ValueError(("DX type {} is not supported in the DX format. \n"
"Supported valus are: {}\n"
"Use the type=<type> keyword argument.").format(
self.type, list(self.dx_types.keys())))
typelabel = (self.typequote+self.type+self.typequote)
DXclass.write(self,file,
'type {0} rank 0 items {1} data follows'.format(
typelabel, self.array.size))
# grid data, serialized as a C array (z fastest varying)
# (flat iterator is equivalent to: for x: for y: for z: grid[x,y,z])
# VMD's DX reader requires exactly 3 values per line
fmt_string = "{:d}"
if (self.array.dtype.kind == 'f' or self.array.dtype.kind == 'c'):
precision = numpy.finfo(self.array.dtype).precision
fmt_string = "{:."+"{:d}".format(precision)+"f}"
values_per_line = 3
values = self.array.flat
while 1:
try:
for i in range(values_per_line):
file.write(fmt_string.format(next(values)) + "\t")
file.write('\n')
except StopIteration:
file.write('\n')
break
file.write('attribute "dep" string "positions"\n') | [
"def",
"write",
"(",
"self",
",",
"file",
")",
":",
"if",
"self",
".",
"type",
"not",
"in",
"self",
".",
"dx_types",
":",
"raise",
"ValueError",
"(",
"(",
"\"DX type {} is not supported in the DX format. \\n\"",
"\"Supported valus are: {}\\n\"",
"\"Use the type=<type>... | Write the *class array* section.
Parameters
----------
file : file
Raises
------
ValueError
If the `dxtype` is not a valid type, :exc:`ValueError` is
raised. | [
"Write",
"the",
"*",
"class",
"array",
"*",
"section",
"."
] | python | valid |
tehmaze/natural | natural/phone.py | https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/phone.py#L123-L142 | def enum(number, zone='e164.arpa'):
'''
Printable DNS ENUM (telephone number mapping) record.
:param number: string
:param zone: string
>>> print(enum('+31 20 5423 1567'))
7.6.5.1.3.2.4.5.0.2.1.3.e164.arpa.
>>> print(enum('+31 97 99 6642', zone='e164.spacephone.org'))
2.4.6.6.9.9.7.9.1.3.e164.spacephone.org.
'''
number = e164(number).lstrip('+')
return u'.'.join([
u'.'.join(number[::-1]),
zone.strip(u'.'),
'',
]) | [
"def",
"enum",
"(",
"number",
",",
"zone",
"=",
"'e164.arpa'",
")",
":",
"number",
"=",
"e164",
"(",
"number",
")",
".",
"lstrip",
"(",
"'+'",
")",
"return",
"u'.'",
".",
"join",
"(",
"[",
"u'.'",
".",
"join",
"(",
"number",
"[",
":",
":",
"-",
... | Printable DNS ENUM (telephone number mapping) record.
:param number: string
:param zone: string
>>> print(enum('+31 20 5423 1567'))
7.6.5.1.3.2.4.5.0.2.1.3.e164.arpa.
>>> print(enum('+31 97 99 6642', zone='e164.spacephone.org'))
2.4.6.6.9.9.7.9.1.3.e164.spacephone.org. | [
"Printable",
"DNS",
"ENUM",
"(",
"telephone",
"number",
"mapping",
")",
"record",
"."
] | python | train |
danilobellini/dose | dose/_legacy.py | https://github.com/danilobellini/dose/blob/141f48322f7812b7d32e3d5f065d4473a11102a4/dose/_legacy.py#L497-L510 | def call_after(lag):
"""
Parametrized decorator for calling a function after a time ``lag`` given
in milliseconds. This cancels simultaneous calls.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
wrapper.timer.cancel() # Debounce
wrapper.timer = threading.Timer(lag, func, args=args, kwargs=kwargs)
wrapper.timer.start()
wrapper.timer = threading.Timer(0, lambda: None) # timer.cancel now exists
return wrapper
return decorator | [
"def",
"call_after",
"(",
"lag",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"wrapper",
".",
"timer",
".",
"cancel",
"(",
")",
"# Debo... | Parametrized decorator for calling a function after a time ``lag`` given
in milliseconds. This cancels simultaneous calls. | [
"Parametrized",
"decorator",
"for",
"calling",
"a",
"function",
"after",
"a",
"time",
"lag",
"given",
"in",
"milliseconds",
".",
"This",
"cancels",
"simultaneous",
"calls",
"."
] | python | train |
cqparts/cqparts | src/cqparts/search.py | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/search.py#L89-L117 | def search(**criteria):
"""
Search registered *component* classes matching the given criteria.
:param criteria: search criteria of the form: ``a='1', b='x'``
:return: parts registered with the given criteria
:rtype: :class:`set`
Will return an empty :class:`set` if nothing is found.
::
from cqparts.search import search
import cqparts_motors # example of a 3rd party lib
# Get all DC motor classes
dc_motors = search(type='motor', current_class='dc')
# For more complex queries:
air_cooled = search(cooling='air')
non_aircooled_dcmotors = dc_motors - air_cooled
# will be all DC motors that aren't air-cooled
"""
# Find all parts that match the given criteria
results = copy(class_list) # start with full list
for (category, value) in criteria.items():
results &= index[category][value]
return results | [
"def",
"search",
"(",
"*",
"*",
"criteria",
")",
":",
"# Find all parts that match the given criteria",
"results",
"=",
"copy",
"(",
"class_list",
")",
"# start with full list",
"for",
"(",
"category",
",",
"value",
")",
"in",
"criteria",
".",
"items",
"(",
")",... | Search registered *component* classes matching the given criteria.
:param criteria: search criteria of the form: ``a='1', b='x'``
:return: parts registered with the given criteria
:rtype: :class:`set`
Will return an empty :class:`set` if nothing is found.
::
from cqparts.search import search
import cqparts_motors # example of a 3rd party lib
# Get all DC motor classes
dc_motors = search(type='motor', current_class='dc')
# For more complex queries:
air_cooled = search(cooling='air')
non_aircooled_dcmotors = dc_motors - air_cooled
# will be all DC motors that aren't air-cooled | [
"Search",
"registered",
"*",
"component",
"*",
"classes",
"matching",
"the",
"given",
"criteria",
"."
] | python | train |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/status.py | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/status.py#L242-L262 | def get_mapreduce_yaml(parse=parse_mapreduce_yaml):
"""Locates mapreduce.yaml, loads and parses its info.
Args:
parse: Used for testing.
Returns:
MapReduceYaml object.
Raises:
errors.BadYamlError: when contents is not a valid mapreduce.yaml file or the
file is missing.
"""
mr_yaml_path = find_mapreduce_yaml()
if not mr_yaml_path:
raise errors.MissingYamlError()
mr_yaml_file = open(mr_yaml_path)
try:
return parse(mr_yaml_file.read())
finally:
mr_yaml_file.close() | [
"def",
"get_mapreduce_yaml",
"(",
"parse",
"=",
"parse_mapreduce_yaml",
")",
":",
"mr_yaml_path",
"=",
"find_mapreduce_yaml",
"(",
")",
"if",
"not",
"mr_yaml_path",
":",
"raise",
"errors",
".",
"MissingYamlError",
"(",
")",
"mr_yaml_file",
"=",
"open",
"(",
"mr_... | Locates mapreduce.yaml, loads and parses its info.
Args:
parse: Used for testing.
Returns:
MapReduceYaml object.
Raises:
errors.BadYamlError: when contents is not a valid mapreduce.yaml file or the
file is missing. | [
"Locates",
"mapreduce",
".",
"yaml",
"loads",
"and",
"parses",
"its",
"info",
"."
] | python | train |
guaix-ucm/numina | numina/array/cosmetics.py | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/cosmetics.py#L225-L268 | def cosmetics(flat1, flat2 = None, mask=None, lowercut=6.0, uppercut=6.0, siglev=2.0):
"""Find cosmetic defects in a detector using two flat field images.
Two arrays representing flat fields of different exposure times are
required. Cosmetic defects are selected as points that deviate
significantly of the expected normal distribution of pixels in
the ratio between `flat2` and `flat1`.
The median of the ratio array is computed and subtracted to it.
The standard deviation of the distribution of pixels is computed
obtaining the percentiles nearest the pixel values corresponding to
`nsig` in the normal CDF. The standar deviation is then the distance
between the pixel values divided by two times `nsig`.
The ratio image is then normalized with this standard deviation.
The values in the ratio above `uppercut` are flagged as hot pixels,
and those below '-lowercut` are flagged as dead pixels in the output mask.
:parameter flat1: an array representing a flat illuminated exposure.
:parameter flat2: an array representing a flat illuminated exposure.
:parameter mask: an integer array representing initial mask.
:parameter lowercut: values bellow this sigma level are flagged as dead pixels.
:parameter uppercut: values above this sigma level are flagged as hot pixels.
:parameter siglev: level to estimate the standard deviation.
:returns: the updated mask
"""
if flat2 is None:
flat1, flat2 = flat2, flat1
flat1 = numpy.ones_like(flat2)
if type(mask) is not numpy.ndarray:
mask=numpy.zeros(flat1.shape,dtype='int')
ratio, mask = comp_ratio(flat1, flat2, mask)
fratio1 = ratio[~mask]
central = numpy.median(fratio1)
std = robust_std(fratio1, central, siglev)
mask_u = ratio > central + uppercut * std
mask_d = ratio < central - lowercut * std
mask_final = mask_u | mask_d | mask
return mask_final | [
"def",
"cosmetics",
"(",
"flat1",
",",
"flat2",
"=",
"None",
",",
"mask",
"=",
"None",
",",
"lowercut",
"=",
"6.0",
",",
"uppercut",
"=",
"6.0",
",",
"siglev",
"=",
"2.0",
")",
":",
"if",
"flat2",
"is",
"None",
":",
"flat1",
",",
"flat2",
"=",
"f... | Find cosmetic defects in a detector using two flat field images.
Two arrays representing flat fields of different exposure times are
required. Cosmetic defects are selected as points that deviate
significantly of the expected normal distribution of pixels in
the ratio between `flat2` and `flat1`.
The median of the ratio array is computed and subtracted to it.
The standard deviation of the distribution of pixels is computed
obtaining the percentiles nearest the pixel values corresponding to
`nsig` in the normal CDF. The standar deviation is then the distance
between the pixel values divided by two times `nsig`.
The ratio image is then normalized with this standard deviation.
The values in the ratio above `uppercut` are flagged as hot pixels,
and those below '-lowercut` are flagged as dead pixels in the output mask.
:parameter flat1: an array representing a flat illuminated exposure.
:parameter flat2: an array representing a flat illuminated exposure.
:parameter mask: an integer array representing initial mask.
:parameter lowercut: values bellow this sigma level are flagged as dead pixels.
:parameter uppercut: values above this sigma level are flagged as hot pixels.
:parameter siglev: level to estimate the standard deviation.
:returns: the updated mask | [
"Find",
"cosmetic",
"defects",
"in",
"a",
"detector",
"using",
"two",
"flat",
"field",
"images",
"."
] | python | train |
wmayner/pyphi | pyphi/models/fmt.py | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/models/fmt.py#L376-L398 | def fmt_sia(sia, ces=True):
"""Format a |SystemIrreducibilityAnalysis|."""
if ces:
body = (
'{ces}'
'{partitioned_ces}'.format(
ces=fmt_ces(
sia.ces,
'Cause-effect structure'),
partitioned_ces=fmt_ces(
sia.partitioned_ces,
'Partitioned cause-effect structure')))
center_header = True
else:
body = ''
center_header = False
title = 'System irreducibility analysis: {BIG_PHI} = {phi}'.format(
BIG_PHI=BIG_PHI, phi=fmt_number(sia.phi))
body = header(str(sia.subsystem), body, center=center_header)
body = header(str(sia.cut), body, center=center_header)
return box(header(title, body, center=center_header)) | [
"def",
"fmt_sia",
"(",
"sia",
",",
"ces",
"=",
"True",
")",
":",
"if",
"ces",
":",
"body",
"=",
"(",
"'{ces}'",
"'{partitioned_ces}'",
".",
"format",
"(",
"ces",
"=",
"fmt_ces",
"(",
"sia",
".",
"ces",
",",
"'Cause-effect structure'",
")",
",",
"partit... | Format a |SystemIrreducibilityAnalysis|. | [
"Format",
"a",
"|SystemIrreducibilityAnalysis|",
"."
] | python | train |
Oneiroe/PySimpleAutomata | PySimpleAutomata/NFA.py | https://github.com/Oneiroe/PySimpleAutomata/blob/0f9f2705fd8ddd5d8118bc31552a640f5d00c359/PySimpleAutomata/NFA.py#L232-L265 | def nfa_nonemptiness_check(nfa: dict) -> bool:
""" Checks if the input NFA reads any language other than the
empty one, returning True/False.
The language L(A) recognized by the automaton A is nonempty iff
there are states :math:`s ∈ S_0` and :math:`t ∈ F` such that
t is connected to s.
Thus, automata nonemptiness is equivalent to graph reachability.
A breadth-first-search algorithm can construct in linear time
the set of all states connected to a state in :math:`S_0`. A
is nonempty iff this set intersects F nontrivially.
:param dict nfa: input NFA.
:return: *(bool)*, True if the input nfa is nonempty, False
otherwise.
"""
# BFS
queue = list()
visited = set()
for state in nfa['initial_states']:
visited.add(state)
queue.append(state)
while queue:
state = queue.pop(0)
visited.add(state)
for a in nfa['alphabet']:
if (state, a) in nfa['transitions']:
for next_state in nfa['transitions'][state, a]:
if next_state in nfa['accepting_states']:
return True
if next_state not in visited:
queue.append(next_state)
return False | [
"def",
"nfa_nonemptiness_check",
"(",
"nfa",
":",
"dict",
")",
"->",
"bool",
":",
"# BFS",
"queue",
"=",
"list",
"(",
")",
"visited",
"=",
"set",
"(",
")",
"for",
"state",
"in",
"nfa",
"[",
"'initial_states'",
"]",
":",
"visited",
".",
"add",
"(",
"s... | Checks if the input NFA reads any language other than the
empty one, returning True/False.
The language L(A) recognized by the automaton A is nonempty iff
there are states :math:`s ∈ S_0` and :math:`t ∈ F` such that
t is connected to s.
Thus, automata nonemptiness is equivalent to graph reachability.
A breadth-first-search algorithm can construct in linear time
the set of all states connected to a state in :math:`S_0`. A
is nonempty iff this set intersects F nontrivially.
:param dict nfa: input NFA.
:return: *(bool)*, True if the input nfa is nonempty, False
otherwise. | [
"Checks",
"if",
"the",
"input",
"NFA",
"reads",
"any",
"language",
"other",
"than",
"the",
"empty",
"one",
"returning",
"True",
"/",
"False",
"."
] | python | train |
jbasko/configmanager | configmanager/items.py | https://github.com/jbasko/configmanager/blob/1d7229ce367143c7210d8e5f0782de03945a1721/configmanager/items.py#L315-L327 | def get_path(self):
"""
Calculate item's path in configuration tree.
Use this sparingly -- path is calculated by going up the configuration tree.
For a large number of items, it is more efficient to use iterators that return paths
as keys.
Path value is stable only once the configuration tree is completely initialised.
"""
if self.section:
return self.section.get_path() + (self.name,)
else:
return self.name, | [
"def",
"get_path",
"(",
"self",
")",
":",
"if",
"self",
".",
"section",
":",
"return",
"self",
".",
"section",
".",
"get_path",
"(",
")",
"+",
"(",
"self",
".",
"name",
",",
")",
"else",
":",
"return",
"self",
".",
"name",
","
] | Calculate item's path in configuration tree.
Use this sparingly -- path is calculated by going up the configuration tree.
For a large number of items, it is more efficient to use iterators that return paths
as keys.
Path value is stable only once the configuration tree is completely initialised. | [
"Calculate",
"item",
"s",
"path",
"in",
"configuration",
"tree",
".",
"Use",
"this",
"sparingly",
"--",
"path",
"is",
"calculated",
"by",
"going",
"up",
"the",
"configuration",
"tree",
".",
"For",
"a",
"large",
"number",
"of",
"items",
"it",
"is",
"more",
... | python | train |
belbio/bel | bel/lang/completion.py | https://github.com/belbio/bel/blob/60333e8815625b942b4836903f3b618cf44b3771/bel/lang/completion.py#L494-L582 | def add_completions(
replace_list: list, belstr: str, replace_span: Span, completion_text: str
) -> List[Mapping[str, Any]]:
"""Create completions to return given replacement list
Args:
replace_list: list of completion replacement values
belstr: BEL String
replace_span: start, stop of belstr to replace
completion_text: text to use for completion - used for creating highlight
Returns:
[{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": highlight,
"label": label,
}]
"""
completions = []
for r in replace_list:
# if '(' not in belstr:
# replacement = f'{r["replacement"]}()'
# cursor_loc = len(replacement) - 1 # inside parenthesis
# elif r['type'] == 'Function' and replace_span[1] == len(belstr) - 1:
if len(belstr) > 0:
belstr_end = len(belstr) - 1
else:
belstr_end = 0
log.debug(
f'Replace list {r} Replace_span {replace_span} BELstr: {belstr} Len: {belstr_end} Test1 {r["type"] == "Function"} Test2 {replace_span[1] + 1 == len(belstr)}'
)
# Put a space between comma and following function arg
if (
r["type"] == "Function"
and replace_span[0] > 0
and belstr[replace_span[0] - 1] == ","
):
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ f"{r['replacement']}()"
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()"
)
# Put a space between comman and following NSArg or StrArg
elif replace_span[0] > 0 and belstr[replace_span[0] - 1] == ",":
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(belstr[0 : replace_span[0]] + " " + r["replacement"])
# Add function to end of belstr
elif r["type"] == "Function" and replace_span[1] >= belstr_end:
replacement = belstr[0 : replace_span[0]] + f"{r['replacement']}()"
cursor_loc = len(replacement) - 1 # inside parenthesis
log.debug(f"Replacement: {replacement}")
# Insert replacement in beginning or middle of belstr
else:
replacement = (
belstr[0 : replace_span[0]]
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + r["replacement"]
) # move cursor just past replacement
completions.append(
{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": r["highlight"],
"label": r["label"],
}
)
return completions | [
"def",
"add_completions",
"(",
"replace_list",
":",
"list",
",",
"belstr",
":",
"str",
",",
"replace_span",
":",
"Span",
",",
"completion_text",
":",
"str",
")",
"->",
"List",
"[",
"Mapping",
"[",
"str",
",",
"Any",
"]",
"]",
":",
"completions",
"=",
"... | Create completions to return given replacement list
Args:
replace_list: list of completion replacement values
belstr: BEL String
replace_span: start, stop of belstr to replace
completion_text: text to use for completion - used for creating highlight
Returns:
[{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": highlight,
"label": label,
}] | [
"Create",
"completions",
"to",
"return",
"given",
"replacement",
"list"
] | python | train |
PGower/PyCanvas | pycanvas/apis/calendar_events.py | https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/calendar_events.py#L321-L396 | def update_calendar_event(self, id, calendar_event_child_event_data_X_context_code=None, calendar_event_child_event_data_X_end_at=None, calendar_event_child_event_data_X_start_at=None, calendar_event_context_code=None, calendar_event_description=None, calendar_event_end_at=None, calendar_event_location_address=None, calendar_event_location_name=None, calendar_event_start_at=None, calendar_event_time_zone_edited=None, calendar_event_title=None):
"""
Update a calendar event.
Update and return a calendar event
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - calendar_event[context_code]
"""Context code of the course/group/user to move this event to.
Scheduler appointments and events with section-specific times cannot be moved between calendars."""
if calendar_event_context_code is not None:
data["calendar_event[context_code]"] = calendar_event_context_code
# OPTIONAL - calendar_event[title]
"""Short title for the calendar event."""
if calendar_event_title is not None:
data["calendar_event[title]"] = calendar_event_title
# OPTIONAL - calendar_event[description]
"""Longer HTML description of the event."""
if calendar_event_description is not None:
data["calendar_event[description]"] = calendar_event_description
# OPTIONAL - calendar_event[start_at]
"""Start date/time of the event."""
if calendar_event_start_at is not None:
data["calendar_event[start_at]"] = calendar_event_start_at
# OPTIONAL - calendar_event[end_at]
"""End date/time of the event."""
if calendar_event_end_at is not None:
data["calendar_event[end_at]"] = calendar_event_end_at
# OPTIONAL - calendar_event[location_name]
"""Location name of the event."""
if calendar_event_location_name is not None:
data["calendar_event[location_name]"] = calendar_event_location_name
# OPTIONAL - calendar_event[location_address]
"""Location address"""
if calendar_event_location_address is not None:
data["calendar_event[location_address]"] = calendar_event_location_address
# OPTIONAL - calendar_event[time_zone_edited]
"""Time zone of the user editing the event. Allowed time zones are
{http://www.iana.org/time-zones IANA time zones} or friendlier
{http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}."""
if calendar_event_time_zone_edited is not None:
data["calendar_event[time_zone_edited]"] = calendar_event_time_zone_edited
# OPTIONAL - calendar_event[child_event_data][X][start_at]
"""Section-level start time(s) if this is a course event. X can be any
identifier, provided that it is consistent across the start_at, end_at
and context_code"""
if calendar_event_child_event_data_X_start_at is not None:
data["calendar_event[child_event_data][X][start_at]"] = calendar_event_child_event_data_X_start_at
# OPTIONAL - calendar_event[child_event_data][X][end_at]
"""Section-level end time(s) if this is a course event."""
if calendar_event_child_event_data_X_end_at is not None:
data["calendar_event[child_event_data][X][end_at]"] = calendar_event_child_event_data_X_end_at
# OPTIONAL - calendar_event[child_event_data][X][context_code]
"""Context code(s) corresponding to the section-level start and end time(s)."""
if calendar_event_child_event_data_X_context_code is not None:
data["calendar_event[child_event_data][X][context_code]"] = calendar_event_child_event_data_X_context_code
self.logger.debug("PUT /api/v1/calendar_events/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/calendar_events/{id}".format(**path), data=data, params=params, no_data=True) | [
"def",
"update_calendar_event",
"(",
"self",
",",
"id",
",",
"calendar_event_child_event_data_X_context_code",
"=",
"None",
",",
"calendar_event_child_event_data_X_end_at",
"=",
"None",
",",
"calendar_event_child_event_data_X_start_at",
"=",
"None",
",",
"calendar_event_context... | Update a calendar event.
Update and return a calendar event | [
"Update",
"a",
"calendar",
"event",
".",
"Update",
"and",
"return",
"a",
"calendar",
"event"
] | python | train |
kplindegaard/smbus2 | smbus2/smbus2.py | https://github.com/kplindegaard/smbus2/blob/a1088a03438dba84c266b73ad61b0c06750d0961/smbus2/smbus2.py#L409-L427 | def read_word_data(self, i2c_addr, register, force=None):
"""
Read a single word (2 bytes) from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: 2-byte word
:rtype: int
"""
self._set_address(i2c_addr, force=force)
msg = i2c_smbus_ioctl_data.create(
read_write=I2C_SMBUS_READ, command=register, size=I2C_SMBUS_WORD_DATA
)
ioctl(self.fd, I2C_SMBUS, msg)
return msg.data.contents.word | [
"def",
"read_word_data",
"(",
"self",
",",
"i2c_addr",
",",
"register",
",",
"force",
"=",
"None",
")",
":",
"self",
".",
"_set_address",
"(",
"i2c_addr",
",",
"force",
"=",
"force",
")",
"msg",
"=",
"i2c_smbus_ioctl_data",
".",
"create",
"(",
"read_write"... | Read a single word (2 bytes) from a given register.
:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read
:type register: int
:param force:
:type force: Boolean
:return: 2-byte word
:rtype: int | [
"Read",
"a",
"single",
"word",
"(",
"2",
"bytes",
")",
"from",
"a",
"given",
"register",
"."
] | python | train |
Kozea/wdb | client/wdb/__init__.py | https://github.com/Kozea/wdb/blob/6af7901b02e866d76f8b0a697a8c078e5b70d1aa/client/wdb/__init__.py#L813-L876 | def interaction(
self,
frame,
tb=None,
exception='Wdb',
exception_description='Stepping',
init=None,
shell=False,
shell_vars=None,
source=None,
iframe_mode=False,
timeout=None,
post_mortem=False
):
"""User interaction handling blocking on socket receive"""
log.info(
'Interaction %r %r %r %r' %
(frame, tb, exception, exception_description)
)
self.reconnect_if_needed()
self.stepping = not shell
if not iframe_mode:
opts = {}
if shell:
opts['type_'] = 'shell'
if post_mortem:
opts['type_'] = 'pm'
self.open_browser(**opts)
lvl = len(self.interaction_stack)
if lvl:
exception_description += ' [recursive%s]' % (
'^%d' % lvl if lvl > 1 else ''
)
interaction = Interaction(
self,
frame,
tb,
exception,
exception_description,
init=init,
shell=shell,
shell_vars=shell_vars,
source=source,
timeout=timeout
)
self.interaction_stack.append(interaction)
# For meta debugging purpose
self._ui = interaction
if self.begun:
# Each new state sends the trace and selects a frame
interaction.init()
else:
self.begun = True
interaction.loop()
self.interaction_stack.pop()
if lvl:
self.interaction_stack[-1].init() | [
"def",
"interaction",
"(",
"self",
",",
"frame",
",",
"tb",
"=",
"None",
",",
"exception",
"=",
"'Wdb'",
",",
"exception_description",
"=",
"'Stepping'",
",",
"init",
"=",
"None",
",",
"shell",
"=",
"False",
",",
"shell_vars",
"=",
"None",
",",
"source",... | User interaction handling blocking on socket receive | [
"User",
"interaction",
"handling",
"blocking",
"on",
"socket",
"receive"
] | python | train |
phn/lineid_plot | lineid_plot/lineid_plot.py | https://github.com/phn/lineid_plot/blob/7c7a1af53fe439b3a7c5a57f01680575837fb978/lineid_plot/lineid_plot.py#L124-L219 | def adjust_boxes(line_wave, box_widths, left_edge, right_edge,
max_iter=1000, adjust_factor=0.35,
factor_decrement=3.0, fd_p=0.75):
"""Ajdust given boxes so that they don't overlap.
Parameters
----------
line_wave: list or array of floats
Line wave lengths. These are assumed to be the initial y (wave
length) location of the boxes.
box_widths: list or array of floats
Width of box containing labels for each line identification.
left_edge: float
Left edge of valid data i.e., wave length minimum.
right_edge: float
Right edge of valid data i.e., wave lengths maximum.
max_iter: int
Maximum number of iterations to attempt.
adjust_factor: float
Gap between boxes are reduced or increased by this factor after
each iteration.
factor_decrement: float
The `adjust_factor` itself if reduced by this factor, after
certain number of iterations. This is useful for crowded
regions.
fd_p: float
Percentage, given as a fraction between 0 and 1, after which
adjust_factor must be reduced by a factor of
`factor_decrement`. Default is set to 0.75.
Returns
-------
wlp, niter, changed: (float, float, float)
The new y (wave length) location of the text boxes, the number
of iterations used and a flag to indicated whether any changes to
the input locations were made or not.
Notes
-----
This is a direct translation of the code in lineid_plot.pro file in
NASA IDLAstro library.
Positions are returned either when the boxes no longer overlap or
when `max_iter` number of iterations are completed. So if there are
many boxes, there is a possibility that the final box locations
overlap.
References
----------
+ http://idlastro.gsfc.nasa.gov/ftp/pro/plot/lineid_plot.pro
+ http://idlastro.gsfc.nasa.gov/
"""
# Adjust positions.
niter = 0
changed = True
nlines = len(line_wave)
wlp = line_wave[:]
while changed:
changed = False
for i in range(nlines):
if i > 0:
diff1 = wlp[i] - wlp[i - 1]
separation1 = (box_widths[i] + box_widths[i - 1]) / 2.0
else:
diff1 = wlp[i] - left_edge + box_widths[i] * 1.01
separation1 = box_widths[i]
if i < nlines - 2:
diff2 = wlp[i + 1] - wlp[i]
separation2 = (box_widths[i] + box_widths[i + 1]) / 2.0
else:
diff2 = right_edge + box_widths[i] * 1.01 - wlp[i]
separation2 = box_widths[i]
if diff1 < separation1 or diff2 < separation2:
if wlp[i] == left_edge:
diff1 = 0
if wlp[i] == right_edge:
diff2 = 0
if diff2 > diff1:
wlp[i] = wlp[i] + separation2 * adjust_factor
wlp[i] = wlp[i] if wlp[i] < right_edge else \
right_edge
else:
wlp[i] = wlp[i] - separation1 * adjust_factor
wlp[i] = wlp[i] if wlp[i] > left_edge else \
left_edge
changed = True
niter += 1
if niter == max_iter * fd_p:
adjust_factor /= factor_decrement
if niter >= max_iter:
break
return wlp, changed, niter | [
"def",
"adjust_boxes",
"(",
"line_wave",
",",
"box_widths",
",",
"left_edge",
",",
"right_edge",
",",
"max_iter",
"=",
"1000",
",",
"adjust_factor",
"=",
"0.35",
",",
"factor_decrement",
"=",
"3.0",
",",
"fd_p",
"=",
"0.75",
")",
":",
"# Adjust positions.",
... | Ajdust given boxes so that they don't overlap.
Parameters
----------
line_wave: list or array of floats
Line wave lengths. These are assumed to be the initial y (wave
length) location of the boxes.
box_widths: list or array of floats
Width of box containing labels for each line identification.
left_edge: float
Left edge of valid data i.e., wave length minimum.
right_edge: float
Right edge of valid data i.e., wave lengths maximum.
max_iter: int
Maximum number of iterations to attempt.
adjust_factor: float
Gap between boxes are reduced or increased by this factor after
each iteration.
factor_decrement: float
The `adjust_factor` itself if reduced by this factor, after
certain number of iterations. This is useful for crowded
regions.
fd_p: float
Percentage, given as a fraction between 0 and 1, after which
adjust_factor must be reduced by a factor of
`factor_decrement`. Default is set to 0.75.
Returns
-------
wlp, niter, changed: (float, float, float)
The new y (wave length) location of the text boxes, the number
of iterations used and a flag to indicated whether any changes to
the input locations were made or not.
Notes
-----
This is a direct translation of the code in lineid_plot.pro file in
NASA IDLAstro library.
Positions are returned either when the boxes no longer overlap or
when `max_iter` number of iterations are completed. So if there are
many boxes, there is a possibility that the final box locations
overlap.
References
----------
+ http://idlastro.gsfc.nasa.gov/ftp/pro/plot/lineid_plot.pro
+ http://idlastro.gsfc.nasa.gov/ | [
"Ajdust",
"given",
"boxes",
"so",
"that",
"they",
"don",
"t",
"overlap",
"."
] | python | train |
iLampard/x-utils | xutils/config_utils.py | https://github.com/iLampard/x-utils/blob/291d92832ee0e0c89bc22e10ecf2f44445e0d300/xutils/config_utils.py#L38-L73 | def merge_configs(to_be_merged, default):
"""Merges two configuration dictionaries by overwriting values with
same keys, with the priority on values given on the 'left' side, so
the to_be_merged dict.
Notice that with lists in the configuration, it skips from the default
(right side) the tuples in that which already exist in the left side
to_be_merged list. This is used to be able to override time intervals for
default values in the configuration.
Example:
In [1]: x = [["get_stats_disk_usage_for_database", 180],
["get_stats_tx_rate_for_database", 500]]
In [2]: y = [["get_stats_seconds_since_last_vacuum_per_table", 60],
["get_stats_tx_rate_for_database", 60]]
In [3]: merge_configs(x, y)
Out[3]:
[['get_stats_disk_usage_for_database', 180],
['get_stats_tx_rate_for_database', 500],
['get_stats_seconds_since_last_vacuum_per_table', 60]]
"""
if isinstance(to_be_merged, dict) and isinstance(default, dict):
for k, v in default.items():
if k not in to_be_merged:
to_be_merged[k] = v
else:
to_be_merged[k] = merge_configs(to_be_merged[k], v)
elif isinstance(to_be_merged, list) and isinstance(default, list):
same_keys = set()
for x in to_be_merged:
for y in default:
if isinstance(x, (list, set, tuple)) and isinstance(y, (list, set, tuple)) and len(
x) > 0 and len(y) > 0 and x[0] == y[0]:
same_keys.add(x[0])
for y in default:
if not isinstance(y, (list, set, tuple)) or y[0] not in same_keys:
to_be_merged.append(y)
return to_be_merged | [
"def",
"merge_configs",
"(",
"to_be_merged",
",",
"default",
")",
":",
"if",
"isinstance",
"(",
"to_be_merged",
",",
"dict",
")",
"and",
"isinstance",
"(",
"default",
",",
"dict",
")",
":",
"for",
"k",
",",
"v",
"in",
"default",
".",
"items",
"(",
")",... | Merges two configuration dictionaries by overwriting values with
same keys, with the priority on values given on the 'left' side, so
the to_be_merged dict.
Notice that with lists in the configuration, it skips from the default
(right side) the tuples in that which already exist in the left side
to_be_merged list. This is used to be able to override time intervals for
default values in the configuration.
Example:
In [1]: x = [["get_stats_disk_usage_for_database", 180],
["get_stats_tx_rate_for_database", 500]]
In [2]: y = [["get_stats_seconds_since_last_vacuum_per_table", 60],
["get_stats_tx_rate_for_database", 60]]
In [3]: merge_configs(x, y)
Out[3]:
[['get_stats_disk_usage_for_database', 180],
['get_stats_tx_rate_for_database', 500],
['get_stats_seconds_since_last_vacuum_per_table', 60]] | [
"Merges",
"two",
"configuration",
"dictionaries",
"by",
"overwriting",
"values",
"with",
"same",
"keys",
"with",
"the",
"priority",
"on",
"values",
"given",
"on",
"the",
"left",
"side",
"so",
"the",
"to_be_merged",
"dict",
".",
"Notice",
"that",
"with",
"lists... | python | train |
senaite/senaite.core | bika/lims/content/analysisprofile.py | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/analysisprofile.py#L212-L235 | def remove_service(self, service):
"""Removes the service passed in from the services offered by the
current Profile. If the Analysis Service passed in is not assigned to
this Analysis Profile, returns False.
:param service: the service to be removed from this Analysis Profile
:type service: AnalysisService
:return: True if the AnalysisService has been removed successfully
"""
obj = api.get_object(service)
uid = api.get_uid(obj)
# Remove the service from the referenced services
services = self.getService()
num_services = len(services)
services.remove(obj)
self.setService(services)
removed = len(services) < num_services
# Remove the service from the settings map
settings = self.getAnalysisServicesSettings()
settings = [item for item in settings if item.get('uid', '') != uid]
self.setAnalysisServicesSettings(settings)
return removed | [
"def",
"remove_service",
"(",
"self",
",",
"service",
")",
":",
"obj",
"=",
"api",
".",
"get_object",
"(",
"service",
")",
"uid",
"=",
"api",
".",
"get_uid",
"(",
"obj",
")",
"# Remove the service from the referenced services",
"services",
"=",
"self",
".",
... | Removes the service passed in from the services offered by the
current Profile. If the Analysis Service passed in is not assigned to
this Analysis Profile, returns False.
:param service: the service to be removed from this Analysis Profile
:type service: AnalysisService
:return: True if the AnalysisService has been removed successfully | [
"Removes",
"the",
"service",
"passed",
"in",
"from",
"the",
"services",
"offered",
"by",
"the",
"current",
"Profile",
".",
"If",
"the",
"Analysis",
"Service",
"passed",
"in",
"is",
"not",
"assigned",
"to",
"this",
"Analysis",
"Profile",
"returns",
"False",
"... | python | train |
async def unformat(self):
    """Remove the filesystem formatting from this block device.

    The handler's response replaces the locally cached data.
    """
    updated = await self._handler.unformat(
        system_id=self.node.system_id, id=self.id)
    self._data = updated
"async",
"def",
"unformat",
"(",
"self",
")",
":",
"self",
".",
"_data",
"=",
"await",
"self",
".",
"_handler",
".",
"unformat",
"(",
"system_id",
"=",
"self",
".",
"node",
".",
"system_id",
",",
"id",
"=",
"self",
".",
"id",
")"
] | Unformat this block device. | [
"Unformat",
"this",
"block",
"device",
"."
] | python | train |
def get_tmuxinator_dir():
    """
    Return tmuxinator configuration directory.
    Checks for ``TMUXINATOR_CONFIG`` environmental variable.
    Returns
    -------
    str :
        absolute path to tmuxinator config directory
    See Also
    --------
    :meth:`tmuxp.config.import_tmuxinator`
    """
    custom_dir = os.environ.get('TMUXINATOR_CONFIG')
    if custom_dir is not None:
        return os.path.expanduser(custom_dir)
    # Fall back to the conventional tmuxinator location.
    return os.path.expanduser('~/.tmuxinator/')
"def",
"get_tmuxinator_dir",
"(",
")",
":",
"if",
"'TMUXINATOR_CONFIG'",
"in",
"os",
".",
"environ",
":",
"return",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"environ",
"[",
"'TMUXINATOR_CONFIG'",
"]",
")",
"return",
"os",
".",
"path",
".",
"... | Return tmuxinator configuration directory.
Checks for ``TMUXINATOR_CONFIG`` environmental variable.
Returns
-------
str :
absolute path to tmuxinator config directory
See Also
--------
:meth:`tmuxp.config.import_tmuxinator` | [
"Return",
"tmuxinator",
"configuration",
"directory",
"."
] | python | train |
def check_cdtw(cls):
    """
    Check whether Python C extension ``cdtw`` can be imported.
    Return ``True`` on failure and ``False`` on success.
    :rtype: bool
    """
    # Handle the failure path first and bail out early.
    if not gf.can_run_c_extension("cdtw"):
        gf.print_warning(u"aeneas.cdtw NOT AVAILABLE")
        gf.print_info(u" You can still run aeneas but it will be significantly slower")
        gf.print_info(u" Please refer to the installation documentation for details")
        return True
    gf.print_success(u"aeneas.cdtw AVAILABLE")
    return False
"def",
"check_cdtw",
"(",
"cls",
")",
":",
"if",
"gf",
".",
"can_run_c_extension",
"(",
"\"cdtw\"",
")",
":",
"gf",
".",
"print_success",
"(",
"u\"aeneas.cdtw AVAILABLE\"",
")",
"return",
"False",
"gf",
".",
"print_warning",
"(",
"u\"aeneas.cdtw NOT AVAILABL... | Check whether Python C extension ``cdtw`` can be imported.
Return ``True`` on failure and ``False`` on success.
:rtype: bool | [
"Check",
"whether",
"Python",
"C",
"extension",
"cdtw",
"can",
"be",
"imported",
"."
] | python | train |
def run(self):
    """
    Runs prospector in the input files and returns a json with the analysis
    """
    # NOTE(review): the diff file list is interpolated into a shell string
    # (shell=True); paths containing shell metacharacters would be
    # interpreted by the shell — confirm diff_files() output is trusted.
    command = f'prospector --output-format json {self.repo.diff_files()}'
    completed = subprocess.run(command, stdout=subprocess.PIPE, shell=True)
    return json.loads(completed.stdout)
"def",
"run",
"(",
"self",
")",
":",
"arg_prospector",
"=",
"f'prospector --output-format json {self.repo.diff_files()}'",
"analysis",
"=",
"subprocess",
".",
"run",
"(",
"arg_prospector",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"shell",
"=",
"True",
"... | Runs prospector in the input files and returns a json with the analysis | [
"Runs",
"prospector",
"in",
"the",
"input",
"files",
"and",
"returns",
"a",
"json",
"with",
"the",
"analysis"
] | python | train |
def fill_in_arguments(config, modules, args):
    """Fill in configuration fields from command-line arguments.
    `config` is a dictionary holding the initial configuration,
    probably the result of :func:`assemble_default_config`. It reads
    through `modules`, and for each, fills in any configuration values
    that are provided in `args`.
    `config` is modified in place. `args` may be either a dictionary
    or an object (as the result of :mod:`argparse`).
    :param dict config: configuration tree to update
    :param modules: modules or Configurable instances to use
    :type modules: iterable of :class:`~yakonfig.configurable.Configurable`
    :param args: command-line objects
    :paramtype args: dict or object
    :return: config
    """
    # Normalize argparse namespaces into a plain mapping up front.
    if not isinstance(args, collections.Mapping):
        args = vars(args)

    def apply_runtime_keys(config, module, name):
        # Each module may declare a mapping of argparse attribute names
        # to configuration key names; copy any values that were provided.
        runtime_keys = getattr(module, 'runtime_keys', {})
        for attr, config_name in iteritems(runtime_keys):
            value = args.get(attr, None)
            if value is not None:
                config[config_name] = value

    return _walk_config(config, modules, apply_runtime_keys)
"def",
"fill_in_arguments",
"(",
"config",
",",
"modules",
",",
"args",
")",
":",
"def",
"work_in",
"(",
"config",
",",
"module",
",",
"name",
")",
":",
"rkeys",
"=",
"getattr",
"(",
"module",
",",
"'runtime_keys'",
",",
"{",
"}",
")",
"for",
"(",
"a... | Fill in configuration fields from command-line arguments.
`config` is a dictionary holding the initial configuration,
probably the result of :func:`assemble_default_config`. It reads
through `modules`, and for each, fills in any configuration values
that are provided in `args`.
`config` is modified in place. `args` may be either a dictionary
or an object (as the result of :mod:`argparse`).
:param dict config: configuration tree to update
:param modules: modules or Configurable instances to use
:type modules: iterable of :class:`~yakonfig.configurable.Configurable`
:param args: command-line objects
:paramtype args: dict or object
:return: config | [
"Fill",
"in",
"configuration",
"fields",
"from",
"command",
"-",
"line",
"arguments",
"."
] | python | train |
def distribute(self,
               volume: float,
               source: Well,
               dest: List[Well],
               *args, **kwargs) -> 'InstrumentContext':
    """
    Move a volume of liquid from one source to multiple destinations.
    :param volume: The amount of volume to distribute to each destination
                   well.
    :param source: A single well from where liquid will be aspirated.
    :param dest: List of Wells where liquid will be dispensed to.
    :param kwargs: See :py:meth:`transfer`.
    :returns: This instance
    """
    self._log.debug(
        "Distributing {} from {} to {}".format(volume, source, dest))
    # Delegate to transfer() in 'distribute' mode, reserving a disposal
    # volume (defaulting to the pipette minimum) for dispense accuracy.
    kwargs['mode'] = 'distribute'
    kwargs['disposal_volume'] = kwargs.get('disposal_vol', self.min_volume)
    return self.transfer(volume, source, dest, **kwargs)
"def",
"distribute",
"(",
"self",
",",
"volume",
":",
"float",
",",
"source",
":",
"Well",
",",
"dest",
":",
"List",
"[",
"Well",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"'InstrumentContext'",
":",
"self",
".",
"_log",
".",
"debu... | Move a volume of liquid from one source to multiple destinations.
:param volume: The amount of volume to distribute to each destination
well.
:param source: A single well from where liquid will be aspirated.
:param dest: List of Wells where liquid will be dispensed to.
:param kwargs: See :py:meth:`transfer`.
:returns: This instance | [
"Move",
"a",
"volume",
"of",
"liquid",
"from",
"one",
"source",
"to",
"multiple",
"destinations",
"."
] | python | train |
def connect(connect_spec=None):
    '''Connect and optionally bind to an LDAP server.
    :param connect_spec:
        This can be an LDAP connection object returned by a previous
        call to :py:func:`connect` (in which case the argument is
        simply returned), ``None`` (in which case an empty dict is
        used), or a dict with the following keys:
        * ``'backend'``
            Optional; default depends on which Python LDAP modules are
            installed. Name of the Python LDAP module to use. Only
            ``'ldap'`` is supported at the moment.
        * ``'url'``
            Optional; defaults to ``'ldapi:///'``. URL to the LDAP
            server.
        * ``'bind'``
            Optional; defaults to ``None``. Describes how to bind an
            identity to the LDAP connection. If ``None``, an
            anonymous connection is made. Valid keys:
            * ``'method'``
                Optional; defaults to ``None``. The authentication
                method to use. Valid values include but are not
                necessarily limited to ``'simple'``, ``'sasl'``, and
                ``None``. If ``None``, an anonymous connection is
                made. Available methods depend on the chosen backend.
            * ``'mechanism'``
                Optional; defaults to ``'EXTERNAL'``. The SASL
                mechanism to use. Ignored unless the method is
                ``'sasl'``. Available methods depend on the chosen
                backend and the server's capabilities.
            * ``'credentials'``
                Optional; defaults to ``None``. An object specific to
                the chosen SASL mechanism and backend that represents
                the authentication credentials. Ignored unless the
                method is ``'sasl'``.
                For the ``'ldap'`` backend, this is a dictionary. If
                ``None``, an empty dict is used. Keys:
                * ``'args'``
                    Optional; defaults to an empty list. A list of
                    arguments to pass to the SASL mechanism
                    constructor. See the SASL mechanism constructor
                    documentation in the ``ldap.sasl`` Python module.
                * ``'kwargs'``
                    Optional; defaults to an empty dict. A dict of
                    keyword arguments to pass to the SASL mechanism
                    constructor. See the SASL mechanism constructor
                    documentation in the ``ldap.sasl`` Python module.
            * ``'dn'``
                Optional; defaults to an empty string. The
                distinguished name to bind.
            * ``'password'``
                Optional; defaults to an empty string. Password for
                binding. Ignored if the method is ``'sasl'``.
        * ``'tls'``
            Optional; defaults to ``None``. A backend-specific object
            containing settings to override default TLS behavior.
            For the ``'ldap'`` backend, this is a dictionary. Not all
            settings in this dictionary are supported by all versions
            of ``python-ldap`` or the underlying TLS library. If
            ``None``, an empty dict is used. Possible keys:
            * ``'starttls'``
                If present, initiate a TLS connection using StartTLS.
                (The value associated with this key is ignored.)
            * ``'cacertdir'``
                Set the path of the directory containing CA
                certificates.
            * ``'cacertfile'``
                Set the pathname of the CA certificate file.
            * ``'certfile'``
                Set the pathname of the certificate file.
            * ``'cipher_suite'``
                Set the allowed cipher suite.
            * ``'crlcheck'``
                Set the CRL evaluation strategy. Valid values are
                ``'none'``, ``'peer'``, and ``'all'``.
            * ``'crlfile'``
                Set the pathname of the CRL file.
            * ``'dhfile'``
                Set the pathname of the file containing the parameters
                for Diffie-Hellman ephemeral key exchange.
            * ``'keyfile'``
                Set the pathname of the certificate key file.
            * ``'newctx'``
                If present, instruct the underlying TLS library to
                create a new TLS context. (The value associated with
                this key is ignored.)
            * ``'protocol_min'``
                Set the minimum protocol version.
            * ``'random_file'``
                Set the pathname of the random file when
                ``/dev/random`` and ``/dev/urandom`` are not
                available.
            * ``'require_cert'``
                Set the certificate validation policy. Valid values
                are ``'never'``, ``'hard'``, ``'demand'``,
                ``'allow'``, and ``'try'``.
        * ``'opts'``
            Optional; defaults to ``None``. A backend-specific object
            containing options for the backend.
            For the ``'ldap'`` backend, this is a dictionary of
            OpenLDAP options to set. If ``None``, an empty dict is
            used. Each key is a the name of an OpenLDAP option
            constant without the ``'LDAP_OPT_'`` prefix, then
            converted to lower case.
    :returns:
        an object representing an LDAP connection that can be used as
        the ``connect_spec`` argument to any of the functions in this
        module (to avoid the overhead of making and terminating
        multiple connections).
        This object should be used as a context manager. It is safe
        to nest ``with`` statements.
    CLI example:
    .. code-block:: bash
        salt '*' ldap3.connect "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'dn': 'cn=admin,dc=example,dc=com',
                'password': 'secret'}
        }"
    '''
    # An already-open connection context is reused as-is.
    if isinstance(connect_spec, _connect_ctx):
        return connect_spec
    if connect_spec is None:
        connect_spec = {}
    backend_name = connect_spec.get('backend', 'ldap')
    if backend_name not in available_backends:
        raise ValueError('unsupported backend or required Python module'
                         + ' unavailable: {0}'.format(backend_name))
    url = connect_spec.get('url', 'ldapi:///')
    try:
        l = ldap.initialize(url)
        l.protocol_version = ldap.VERSION3
        # set up tls
        tls = connect_spec.get('tls', None)
        if tls is None:
            tls = {}
        # Flag-style keys ('starttls', 'newctx') are only collected here;
        # they are acted on after all other TLS options have been applied.
        vars = {}
        for k, v in six.iteritems(tls):
            if k in ('starttls', 'newctx'):
                vars[k] = True
            elif k in ('crlcheck', 'require_cert'):
                # For these two options the *value* is also the name of an
                # OPT_X_TLS_* constant (e.g. 'peer' -> ldap.OPT_X_TLS_PEER).
                l.set_option(getattr(ldap, 'OPT_X_TLS_' + k.upper()),
                             getattr(ldap, 'OPT_X_TLS_' + v.upper()))
            else:
                l.set_option(getattr(ldap, 'OPT_X_TLS_' + k.upper()), v)
        if vars.get('starttls', False):
            l.start_tls_s()
        if vars.get('newctx', False):
            # NOTE(review): OPT_X_TLS_NEWCTX appears to be deliberately set
            # last so the new TLS context picks up the options above —
            # confirm against python-ldap documentation before reordering.
            l.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
        # set up other options
        l.set_option(ldap.OPT_REFERRALS, 0)
        opts = connect_spec.get('opts', None)
        if opts is None:
            opts = {}
        for k, v in six.iteritems(opts):
            opt = getattr(ldap, 'OPT_' + k.upper())
            l.set_option(opt, v)
        _bind(l, connect_spec.get('bind', None))
    except ldap.LDAPError as e:
        # NOTE(review): _convert_exception is presumably a re-raising
        # translator; if it ever returned normally, 'l' below could be
        # unbound — confirm.
        _convert_exception(e)
    return _connect_ctx(l)
"def",
"connect",
"(",
"connect_spec",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"connect_spec",
",",
"_connect_ctx",
")",
":",
"return",
"connect_spec",
"if",
"connect_spec",
"is",
"None",
":",
"connect_spec",
"=",
"{",
"}",
"backend_name",
"=",
"conn... | Connect and optionally bind to an LDAP server.
:param connect_spec:
This can be an LDAP connection object returned by a previous
call to :py:func:`connect` (in which case the argument is
simply returned), ``None`` (in which case an empty dict is
used), or a dict with the following keys:
* ``'backend'``
Optional; default depends on which Python LDAP modules are
installed. Name of the Python LDAP module to use. Only
``'ldap'`` is supported at the moment.
* ``'url'``
Optional; defaults to ``'ldapi:///'``. URL to the LDAP
server.
* ``'bind'``
Optional; defaults to ``None``. Describes how to bind an
identity to the LDAP connection. If ``None``, an
anonymous connection is made. Valid keys:
* ``'method'``
Optional; defaults to ``None``. The authentication
method to use. Valid values include but are not
necessarily limited to ``'simple'``, ``'sasl'``, and
``None``. If ``None``, an anonymous connection is
made. Available methods depend on the chosen backend.
* ``'mechanism'``
Optional; defaults to ``'EXTERNAL'``. The SASL
mechanism to use. Ignored unless the method is
``'sasl'``. Available methods depend on the chosen
backend and the server's capabilities.
* ``'credentials'``
Optional; defaults to ``None``. An object specific to
the chosen SASL mechanism and backend that represents
the authentication credentials. Ignored unless the
method is ``'sasl'``.
For the ``'ldap'`` backend, this is a dictionary. If
``None``, an empty dict is used. Keys:
* ``'args'``
Optional; defaults to an empty list. A list of
arguments to pass to the SASL mechanism
constructor. See the SASL mechanism constructor
documentation in the ``ldap.sasl`` Python module.
* ``'kwargs'``
Optional; defaults to an empty dict. A dict of
keyword arguments to pass to the SASL mechanism
constructor. See the SASL mechanism constructor
documentation in the ``ldap.sasl`` Python module.
* ``'dn'``
Optional; defaults to an empty string. The
distinguished name to bind.
* ``'password'``
Optional; defaults to an empty string. Password for
binding. Ignored if the method is ``'sasl'``.
* ``'tls'``
Optional; defaults to ``None``. A backend-specific object
containing settings to override default TLS behavior.
For the ``'ldap'`` backend, this is a dictionary. Not all
settings in this dictionary are supported by all versions
of ``python-ldap`` or the underlying TLS library. If
``None``, an empty dict is used. Possible keys:
* ``'starttls'``
If present, initiate a TLS connection using StartTLS.
(The value associated with this key is ignored.)
* ``'cacertdir'``
Set the path of the directory containing CA
certificates.
* ``'cacertfile'``
Set the pathname of the CA certificate file.
* ``'certfile'``
Set the pathname of the certificate file.
* ``'cipher_suite'``
Set the allowed cipher suite.
* ``'crlcheck'``
Set the CRL evaluation strategy. Valid values are
``'none'``, ``'peer'``, and ``'all'``.
* ``'crlfile'``
Set the pathname of the CRL file.
* ``'dhfile'``
Set the pathname of the file containing the parameters
for Diffie-Hellman ephemeral key exchange.
* ``'keyfile'``
Set the pathname of the certificate key file.
* ``'newctx'``
If present, instruct the underlying TLS library to
create a new TLS context. (The value associated with
this key is ignored.)
* ``'protocol_min'``
Set the minimum protocol version.
* ``'random_file'``
Set the pathname of the random file when
``/dev/random`` and ``/dev/urandom`` are not
available.
* ``'require_cert'``
Set the certificate validation policy. Valid values
are ``'never'``, ``'hard'``, ``'demand'``,
``'allow'``, and ``'try'``.
* ``'opts'``
Optional; defaults to ``None``. A backend-specific object
containing options for the backend.
For the ``'ldap'`` backend, this is a dictionary of
OpenLDAP options to set. If ``None``, an empty dict is
used. Each key is a the name of an OpenLDAP option
constant without the ``'LDAP_OPT_'`` prefix, then
converted to lower case.
:returns:
an object representing an LDAP connection that can be used as
the ``connect_spec`` argument to any of the functions in this
module (to avoid the overhead of making and terminating
multiple connections).
This object should be used as a context manager. It is safe
to nest ``with`` statements.
CLI example:
.. code-block:: bash
salt '*' ldap3.connect "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'dn': 'cn=admin,dc=example,dc=com',
'password': 'secret'}
}" | [
"Connect",
"and",
"optionally",
"bind",
"to",
"an",
"LDAP",
"server",
"."
] | python | train |
def palette_image(self):
    """ PIL weird interface for making a paletted image: create an image which
    already has the palette, and use that in Image.quantize. This function
    returns this palette image. """
    if self.pimage is None:
        # Flatten the colormap into [r, g, b, r, g, b, ...] and pad the
        # remaining palette slots (up to 256 entries) with zeros.
        flat_palette = []
        for index in range(self.NETSIZE):
            flat_palette.extend(self.colormap[index][:3])
        flat_palette.extend([0] * (256 - self.NETSIZE) * 3)
        # a palette image to use for quant
        pimage = Image.new("P", (1, 1), 0)
        pimage.putpalette(flat_palette)
        self.pimage = pimage
    return self.pimage
"def",
"palette_image",
"(",
"self",
")",
":",
"if",
"self",
".",
"pimage",
"is",
"None",
":",
"palette",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NETSIZE",
")",
":",
"palette",
".",
"extend",
"(",
"self",
".",
"colormap",
"[",
... | PIL weird interface for making a paletted image: create an image which
already has the palette, and use that in Image.quantize. This function
returns this palette image. | [
"PIL",
"weird",
"interface",
"for",
"making",
"a",
"paletted",
"image",
":",
"create",
"an",
"image",
"which",
"already",
"has",
"the",
"palette",
"and",
"use",
"that",
"in",
"Image",
".",
"quantize",
".",
"This",
"function",
"returns",
"this",
"palette",
... | python | train |
def multi_way_partitioning(items, bin_count): #TODO rename bin_count -> bins
    '''
    Greedily divide weighted items equally across bins.
    This approximately solves a multi-way partition problem, minimising the
    difference between the largest and smallest sum of weights in a bin.
    Parameters
    ----------
    items : ~typing.Iterable[~typing.Tuple[~typing.Any, float]]
        Weighted items as ``(item, weight)`` tuples.
    bin_count : int
        Number of bins.
    Returns
    -------
    bins : ~collections_extended.frozenbag[~collections_extended.frozenbag[~typing.Any]]
        Item bins as a bag of item bags.
    Notes
    ----------
    - `A greedy solution <http://stackoverflow.com/a/6855546/1031434>`_
    - `Problem definition and solutions <http://ijcai.org/Proceedings/09/Papers/096.pdf>`_
    '''
    bins = [_Bin() for _ in range(bin_count)]
    # Greedy heuristic: place heaviest items first, each into the bin
    # with the currently smallest total weight.
    heaviest_first = sorted(items, key=lambda pair: pair[1], reverse=True)
    for item, weight in heaviest_first:
        lightest = min(bins, key=lambda candidate: candidate.weights_sum)
        lightest.add(item, weight)
    return frozenbag(frozenbag(bin_.items) for bin_ in bins)
"def",
"multi_way_partitioning",
"(",
"items",
",",
"bin_count",
")",
":",
"#TODO rename bin_count -> bins",
"bins",
"=",
"[",
"_Bin",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"bin_count",
")",
"]",
"for",
"item",
",",
"weight",
"in",
"sorted",
"(",
"item... | Greedily divide weighted items equally across bins.
This approximately solves a multi-way partition problem, minimising the
difference between the largest and smallest sum of weights in a bin.
Parameters
----------
items : ~typing.Iterable[~typing.Tuple[~typing.Any, float]]
Weighted items as ``(item, weight)`` tuples.
bin_count : int
Number of bins.
Returns
-------
bins : ~collections_extended.frozenbag[~collections_extended.frozenbag[~typing.Any]]
Item bins as a bag of item bags.
Notes
----------
- `A greedy solution <http://stackoverflow.com/a/6855546/1031434>`_
- `Problem definition and solutions <http://ijcai.org/Proceedings/09/Papers/096.pdf>`_ | [
"Greedily",
"divide",
"weighted",
"items",
"equally",
"across",
"bins",
"."
] | python | train |
def __get_stack_trace_manually(self, depth = 16, bUseLabels = True,
                               bMakePretty = True):
    """
    Tries to get a stack trace for the current function.
    Only works for functions with standard prologue and epilogue.
    @type depth: int
    @param depth: Maximum depth of stack trace.
    @type bUseLabels: bool
    @param bUseLabels: C{True} to use labels, C{False} to use addresses.
    @type bMakePretty: bool
    @param bMakePretty:
        C{True} for user readable labels,
        C{False} for labels that can be passed to L{Process.resolve_label}.
        "Pretty" labels look better when producing output for the user to
        read, while pure labels are more useful programatically.
    @rtype: tuple of tuple( int, int, str )
    @return: Stack trace of the thread as a tuple of
        ( return address, frame pointer address, module filename )
        when C{bUseLabels} is C{True}, or a tuple of
        ( return address, frame pointer label )
        when C{bUseLabels} is C{False}.
    @raise WindowsError: Raises an exception on error.
    """
    aProcess = self.get_process()
    st, sb = self.get_stack_range() # top, bottom
    fp = self.get_fp()
    trace = list()
    if aProcess.get_module_count() == 0:
        aProcess.scan_modules()
    bits = aProcess.get_bits()
    while depth > 0:
        # Stop when the frame chain ends or escapes the stack region.
        if fp == 0:
            break
        if not st <= fp < sb:
            break
        # Return address sits just above the saved frame pointer.
        # NOTE(review): the +4 offset assumes 32-bit frames — confirm
        # intended use on 64-bit targets.
        ra = aProcess.peek_pointer(fp + 4)
        if ra == 0:
            break
        lib = aProcess.get_module_at_address(ra)
        if lib is None:
            lib = ""
        else:
            if lib.fileName:
                lib = lib.fileName
            else:
                lib = "%s" % HexDump.address(lib.lpBaseOfDll, bits)
        if bUseLabels:
            label = aProcess.get_label_at_address(ra)
            if bMakePretty:
                label = '%s (%s)' % (HexDump.address(ra, bits), label)
            trace.append( (fp, label) )
        else:
            trace.append( (fp, ra, lib) )
        # Follow the saved frame pointer to the caller's frame.
        fp = aProcess.peek_pointer(fp)
        # BUGFIX: enforce the documented depth limit. Previously 'depth'
        # was never decremented, so the limit was ignored and a cyclic
        # frame-pointer chain would loop forever.
        depth -= 1
    return tuple(trace)
"def",
"__get_stack_trace_manually",
"(",
"self",
",",
"depth",
"=",
"16",
",",
"bUseLabels",
"=",
"True",
",",
"bMakePretty",
"=",
"True",
")",
":",
"aProcess",
"=",
"self",
".",
"get_process",
"(",
")",
"st",
",",
"sb",
"=",
"self",
".",
"get_stack_ran... | Tries to get a stack trace for the current function.
Only works for functions with standard prologue and epilogue.
@type depth: int
@param depth: Maximum depth of stack trace.
@type bUseLabels: bool
@param bUseLabels: C{True} to use labels, C{False} to use addresses.
@type bMakePretty: bool
@param bMakePretty:
C{True} for user readable labels,
C{False} for labels that can be passed to L{Process.resolve_label}.
"Pretty" labels look better when producing output for the user to
read, while pure labels are more useful programatically.
@rtype: tuple of tuple( int, int, str )
@return: Stack trace of the thread as a tuple of
( return address, frame pointer address, module filename )
when C{bUseLabels} is C{True}, or a tuple of
( return address, frame pointer label )
when C{bUseLabels} is C{False}.
@raise WindowsError: Raises an exception on error. | [
"Tries",
"to",
"get",
"a",
"stack",
"trace",
"for",
"the",
"current",
"function",
".",
"Only",
"works",
"for",
"functions",
"with",
"standard",
"prologue",
"and",
"epilogue",
"."
] | python | train |
def is_open(table_name=None, table_key=None, gsi_name=None, gsi_key=None):
    """ Checks whether the circuit breaker is open
    :param table_name: Name of the table being checked
    :param table_key: Configuration key for table
    :param gsi_name: Name of the GSI being checked
    :param gsi_key: Configuration key for the GSI
    :returns: bool -- True if the circuit is open
    """
    logger.debug('Checking circuit breaker status')
    # Parse the URL to make sure it is OK
    # Captures optional basic-auth credentials embedded in the URL as
    # scheme://user:password@host...
    pattern = re.compile(
        r'^(?P<scheme>http(s)?://)'
        r'((?P<username>.+):(?P<password>.+)@){0,1}'
        r'(?P<url>.*)$'
    )
    # Resolve URL/timeout with precedence: GSI config > table config > global.
    url = timeout = None
    if gsi_name:
        url = get_gsi_option(table_key, gsi_key, 'circuit_breaker_url')
        timeout = get_gsi_option(table_key, gsi_key, 'circuit_breaker_timeout')
    elif table_name:
        url = get_table_option(table_key, 'circuit_breaker_url')
        timeout = get_table_option(table_key, 'circuit_breaker_timeout')
    if not url:
        url = get_global_option('circuit_breaker_url')
        timeout = get_global_option('circuit_breaker_timeout')
    match = pattern.match(url)
    if not match:
        # Misconfiguration is fatal: abort the whole process.
        logger.error('Malformatted URL: {0}'.format(url))
        sys.exit(1)
    use_basic_auth = False
    if match.group('username') and match.group('password'):
        use_basic_auth = True
    # Make the actual URL to call
    # When credentials were embedded in the URL, strip them out and pass
    # them to requests as HTTP basic auth instead.
    auth = ()
    if use_basic_auth:
        url = '{scheme}{url}'.format(
            scheme=match.group('scheme'),
            url=match.group('url'))
        auth = (match.group('username'), match.group('password'))
    headers = {}
    if table_name:
        headers["x-table-name"] = table_name
    if gsi_name:
        headers["x-gsi-name"] = gsi_name
    # Make the actual request
    try:
        # timeout is configured in milliseconds; requests expects seconds.
        response = requests.get(
            url,
            auth=auth,
            timeout=timeout / 1000.00,
            headers=headers)
        # Any 2xx response closes the circuit (scaling may proceed).
        if int(response.status_code) >= 200 and int(response.status_code) < 300:
            logger.info('Circuit breaker is closed')
            return False
        else:
            logger.warning(
                'Circuit breaker returned with status code {0:d}'.format(
                    response.status_code))
    # SSLError is caught before ConnectionError (it is a subclass); each
    # failure mode is logged but all of them leave the circuit open.
    except requests.exceptions.SSLError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.Timeout as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.HTTPError as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.warning('Circuit breaker: {0}'.format(error))
    except Exception as error:
        logger.error('Unhandled exception: {0}'.format(error))
        logger.error(
            'Please file a bug at '
            'https://github.com/sebdah/dynamic-dynamodb/issues')
    # Fail safe: any non-2xx response or error keeps the circuit open.
    return True
"def",
"is_open",
"(",
"table_name",
"=",
"None",
",",
"table_key",
"=",
"None",
",",
"gsi_name",
"=",
"None",
",",
"gsi_key",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'Checking circuit breaker status'",
")",
"# Parse the URL to make sure it is OK",
... | Checks whether the circuit breaker is open
:param table_name: Name of the table being checked
:param table_key: Configuration key for table
:param gsi_name: Name of the GSI being checked
:param gsi_key: Configuration key for the GSI
:returns: bool -- True if the circuit is open | [
"Checks",
"whether",
"the",
"circuit",
"breaker",
"is",
"open"
] | python | train |
def sizes(args):
    """
    %prog sizes gffile
    Given a gff file of features, calculate the sizes of chosen parent feature
    based on summation of sizes of child features.
    For example, for parent 'mRNA' and child 'CDS' feature types, calcuate sizes of
    mRNA by summing the sizes of the disjoint CDS parts.
    """
    p = OptionParser(sizes.__doc__)
    p.set_outfile()
    p.add_option("--parents", dest="parents", default="mRNA",
                 help="parent feature(s) for which size is to be calculated")
    p.add_option("--child", dest="child", default="CDS",
                 help="child feature to use for size calculations")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    gffile, = args
    parents, cftype = set(opts.parents.split(",")), opts.child
    gff = make_index(gffile)
    fw = must_open(opts.outfile, "w")
    for parent in parents:
        for feat in gff.features_of_type(parent, order_by=('seqid', 'start')):
            # If the child type equals the parent type, use the feature's
            # own span; otherwise sum the disjoint child feature lengths.
            # (Removed a dead "fsize = 0" assignment that was immediately
            # overwritten.)
            fsize = feat.end - feat.start + 1 \
                if cftype == parent else \
                gff.children_bp(feat, child_featuretype=cftype)
            print("\t".join(str(x) for x in (feat.id, fsize)), file=fw)
    fw.close()
"def",
"sizes",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"sizes",
".",
"__doc__",
")",
"p",
".",
"set_outfile",
"(",
")",
"p",
".",
"add_option",
"(",
"\"--parents\"",
",",
"dest",
"=",
"\"parents\"",
",",
"default",
"=",
"\"mRNA\"",
",",
... | %prog sizes gffile
Given a gff file of features, calculate the sizes of chosen parent feature
based on summation of sizes of child features.
For example, for parent 'mRNA' and child 'CDS' feature types, calcuate sizes of
mRNA by summing the sizes of the disjoint CDS parts. | [
"%prog",
"sizes",
"gffile"
] | python | train |
def flatten_maps(*args, **kwargs):
    '''
    flatten_maps(*args, **kwags) yields a tuple of the maps in the given arguments; this flattens
    over lists and iterables so long as all elements eventually yield True to is_map(el). The
    optional keyword arguments passed make up the final map.
    This funtion does not evaluate any values of any of the maps and thus implicitly respects the
    laziness of the provided maps.
    '''
    collected = []
    def _gather(obj):
        # Maps are collected directly; anything non-iterable (or a string)
        # is rejected; other iterables are flattened recursively.
        if is_map(obj):
            collected.append(obj)
        elif is_str(obj) or not hasattr(obj, '__iter__'):
            raise ValueError('Non-map given to flatten_maps')
        else:
            for element in obj:
                _gather(element)
    for arg in args:
        _gather(arg)
    if len(kwargs) > 0:
        collected.append(kwargs)
    return tuple(collected)
"def",
"flatten_maps",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"_recur",
"(",
"arg",
",",
"work",
")",
":",
"if",
"is_map",
"(",
"arg",
")",
":",
"work",
".",
"append",
"(",
"arg",
")",
"elif",
"is_str",
"(",
"arg",
")",
"or... | flatten_maps(*args, **kwags) yields a tuple of the maps in the given arguments; this flattens
over lists and iterables so long as all elements eventually yield True to is_map(el). The
optional keyword arguments passed make up the final map.
This funtion does not evaluate any values of any of the maps and thus implicitly respects the
laziness of the provided maps. | [
"flatten_maps",
"(",
"*",
"args",
"**",
"kwags",
")",
"yields",
"a",
"tuple",
"of",
"the",
"maps",
"in",
"the",
"given",
"arguments",
";",
"this",
"flattens",
"over",
"lists",
"and",
"iterables",
"so",
"long",
"as",
"all",
"elements",
"eventually",
"yield"... | python | train |
jreese/dotlink | dotlink/dotlink.py | https://github.com/jreese/dotlink/blob/5e48c1493c20fc6df4ad0144e80563915ce339b6/dotlink/dotlink.py#L366-L400 | def deploy_local(self, dotfiles, target_root=None):
"""Deploy dotfiles to a local path."""
if target_root is None:
target_root = self.args.path
for source_path, target_path in dotfiles.items():
source_path = path.join(self.source, source_path)
target_path = path.join(target_root, target_path)
if path.isfile(target_path) or path.islink(target_path):
self.log.debug('Removing existing file at %s', target_path)
os.unlink(target_path)
elif path.isdir(target_path):
self.log.debug('Removing existing dir at %s', target_path)
shutil.rmtree(target_path)
parent_dir = path.dirname(target_path)
if not path.isdir(parent_dir):
self.log.debug('Creating parent dir %s', parent_dir)
os.makedirs(parent_dir)
if self.args.copy:
if path.isdir(source_path):
self.log.debug('Copying file %s to %s',
source_path, target_path)
shutil.copytree(source_path, target_path)
else:
self.log.debug('Copying dir %s to %s',
source_path, target_path)
shutil.copy(source_path, target_path)
else:
self.log.debug('Symlinking %s -> %s', target_path, source_path)
os.symlink(source_path, target_path) | [
"def",
"deploy_local",
"(",
"self",
",",
"dotfiles",
",",
"target_root",
"=",
"None",
")",
":",
"if",
"target_root",
"is",
"None",
":",
"target_root",
"=",
"self",
".",
"args",
".",
"path",
"for",
"source_path",
",",
"target_path",
"in",
"dotfiles",
".",
... | Deploy dotfiles to a local path. | [
"Deploy",
"dotfiles",
"to",
"a",
"local",
"path",
"."
] | python | train |
eddiejessup/metropack | metropack/draw.py | https://github.com/eddiejessup/metropack/blob/528b47d0f2f70f39e1490e41433f2da8c8b9d63c/metropack/draw.py#L6-L37 | def _unwrap_one_layer(r, L, n):
"""For a set of points in a 2 dimensional periodic system, extend the set of
points to tile the points at a given period.
Parameters
----------
r: float array, shape (:, 2).
Set of points.
L: float array, shape (2,)
System lengths.
n: integer.
Period to unwrap.
Returns
-------
rcu: float array, shape (:, 2).
The set of points. tiled at the periods at a distance `n` from the
origin.
"""
try:
L[0]
except (TypeError, IndexError):
L = np.ones([r.shape[1]]) * L
if n == 0:
return list(r)
rcu = []
for x, y in r:
for ix in range(-n, n + 1):
for iy in range(-n, n + 1):
if abs(ix) == n or abs(iy) == n:
rcu.append(np.array([x + ix * L[0], y + iy * L[1]]))
return rcu | [
"def",
"_unwrap_one_layer",
"(",
"r",
",",
"L",
",",
"n",
")",
":",
"try",
":",
"L",
"[",
"0",
"]",
"except",
"(",
"TypeError",
",",
"IndexError",
")",
":",
"L",
"=",
"np",
".",
"ones",
"(",
"[",
"r",
".",
"shape",
"[",
"1",
"]",
"]",
")",
... | For a set of points in a 2 dimensional periodic system, extend the set of
points to tile the points at a given period.
Parameters
----------
r: float array, shape (:, 2).
Set of points.
L: float array, shape (2,)
System lengths.
n: integer.
Period to unwrap.
Returns
-------
rcu: float array, shape (:, 2).
The set of points. tiled at the periods at a distance `n` from the
origin. | [
"For",
"a",
"set",
"of",
"points",
"in",
"a",
"2",
"dimensional",
"periodic",
"system",
"extend",
"the",
"set",
"of",
"points",
"to",
"tile",
"the",
"points",
"at",
"a",
"given",
"period",
"."
] | python | train |
KelSolaar/Umbra | umbra/components/addons/projects_explorer/projects_explorer.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/addons/projects_explorer/projects_explorer.py#L1111-L1140 | def add_new_directory(self, node):
"""
Adds a new directory next to given Node associated path.
:param node: Node.
:type node: ProjectNode or DirectoryNode or FileNode
:return: Method success.
:rtype: bool
"""
if self.__script_editor.model.is_authoring_node(node):
return False
directory, state = QInputDialog.getText(self, "Add Directory", "Enter your new directory name:")
if not state:
return False
if node.family in ("Project", "Directory"):
parent_directory = node.path
elif node.family == "File":
parent_directory = os.path.dirname(node.path)
directory = foundations.strings.to_string(directory)
if not directory in os.listdir(parent_directory):
directory = os.path.join(parent_directory, directory)
LOGGER.info("{0} | Adding '{1}' directory!".format(self.__class__.__name__, directory))
os.makedirs(directory)
else:
self.__raise_file_system_exception(file, parent_directory)
return True | [
"def",
"add_new_directory",
"(",
"self",
",",
"node",
")",
":",
"if",
"self",
".",
"__script_editor",
".",
"model",
".",
"is_authoring_node",
"(",
"node",
")",
":",
"return",
"False",
"directory",
",",
"state",
"=",
"QInputDialog",
".",
"getText",
"(",
"se... | Adds a new directory next to given Node associated path.
:param node: Node.
:type node: ProjectNode or DirectoryNode or FileNode
:return: Method success.
:rtype: bool | [
"Adds",
"a",
"new",
"directory",
"next",
"to",
"given",
"Node",
"associated",
"path",
"."
] | python | train |
aiidateam/aiida-codtools | aiida_codtools/workflows/cif_clean.py | https://github.com/aiidateam/aiida-codtools/blob/da5e4259b7a2e86cf0cc3f997e11dd36d445fa94/aiida_codtools/workflows/cif_clean.py#L112-L126 | def run_select_calculation(self):
"""Run the CifSelectCalculation on the CifData output node of the CifFilterCalculation."""
inputs = {
'cif': self.ctx.cif,
'code': self.inputs.cif_select,
'parameters': self.inputs.cif_select_parameters,
'metadata': {
'options': self.inputs.options.get_dict(),
}
}
calculation = self.submit(CifSelectCalculation, **inputs)
self.report('submitted {}<{}>'.format(CifSelectCalculation.__name__, calculation.uuid))
return ToContext(cif_select=calculation) | [
"def",
"run_select_calculation",
"(",
"self",
")",
":",
"inputs",
"=",
"{",
"'cif'",
":",
"self",
".",
"ctx",
".",
"cif",
",",
"'code'",
":",
"self",
".",
"inputs",
".",
"cif_select",
",",
"'parameters'",
":",
"self",
".",
"inputs",
".",
"cif_select_para... | Run the CifSelectCalculation on the CifData output node of the CifFilterCalculation. | [
"Run",
"the",
"CifSelectCalculation",
"on",
"the",
"CifData",
"output",
"node",
"of",
"the",
"CifFilterCalculation",
"."
] | python | train |
saltstack/salt | salt/modules/win_lgpo.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_lgpo.py#L5637-L5695 | def _checkListItem(policy_element, policy_name, policy_key, xpath_object, policy_file_data, test_items=True):
'''
helper function to process an enabled/disabled/true/falseList set
if test_items is True, it will determine if the policy is enabled or
disabled returning True if all items are configured in the registry.pol file
and false if they are not
if test_items is False, the expected strings for the items will be returned
as a list
returns True if the enabled/disabledList is 100% configured in the
registry.pol file, otherwise returns False
'''
xpath_string = ('.//*[local-name() = "decimal" or local-name() = "delete"'
' or local-name() = "longDecimal" or local-name() = "string"]')
value_item_child_xpath = etree.XPath(xpath_string)
expected_strings = []
for list_element in xpath_object(policy_element):
configured_items = 0
required_items = 0
for item in list_element.getchildren():
required_items = required_items + 1
if 'key' in item.attrib:
item_key = item.attrib['key']
else:
item_key = policy_key
if 'valueName' in item.attrib:
item_valuename = item.attrib['valueName']
else:
log.error('%s item with attributes %s in policy %s does not '
'have the required "valueName" attribute',
etree.QName(list_element).localname,
item.attrib, policy_element.attrib)
break
for value_item in value_item_child_xpath(item):
search_string = _processValueItem(value_item,
item_key,
item_valuename,
policy_element,
item)
if test_items:
if _regexSearchRegPolData(re.escape(search_string), policy_file_data):
configured_items = configured_items + 1
log.debug('found the search string in the pol file,'
'%s of %s items for policy %s are '
'configured in registry.pol',
configured_items, required_items,
policy_name)
else:
expected_strings.append(search_string)
if test_items:
if required_items > 0 and required_items == configured_items:
log.debug('%s all items are set', policy_name)
return True
if test_items:
return False
else:
return expected_strings | [
"def",
"_checkListItem",
"(",
"policy_element",
",",
"policy_name",
",",
"policy_key",
",",
"xpath_object",
",",
"policy_file_data",
",",
"test_items",
"=",
"True",
")",
":",
"xpath_string",
"=",
"(",
"'.//*[local-name() = \"decimal\" or local-name() = \"delete\"'",
"' or... | helper function to process an enabled/disabled/true/falseList set
if test_items is True, it will determine if the policy is enabled or
disabled returning True if all items are configured in the registry.pol file
and false if they are not
if test_items is False, the expected strings for the items will be returned
as a list
returns True if the enabled/disabledList is 100% configured in the
registry.pol file, otherwise returns False | [
"helper",
"function",
"to",
"process",
"an",
"enabled",
"/",
"disabled",
"/",
"true",
"/",
"falseList",
"set"
] | python | train |
pandemicsyn/statsdpy | statsdpy/statsd.py | https://github.com/pandemicsyn/statsdpy/blob/9cfccf89121fd6a12df20f17fa3eb8f618a36455/statsdpy/statsd.py#L124-L144 | def stats_flush(self):
"""
Periodically flush stats to graphite
"""
while True:
try:
eventlet.sleep(self.flush_interval)
if self.debug:
print "seen %d stats so far." % self.stats_seen
print "current counters: %s" % self.counters
if self.pickle_proto:
payload = self.pickle_payload()
if payload:
for batch in payload:
self.report_stats(batch)
else:
payload = self.plain_payload()
if payload:
self.report_stats(payload)
except: # safety net
self.logger.critical('Encountered error in stats_flush loop') | [
"def",
"stats_flush",
"(",
"self",
")",
":",
"while",
"True",
":",
"try",
":",
"eventlet",
".",
"sleep",
"(",
"self",
".",
"flush_interval",
")",
"if",
"self",
".",
"debug",
":",
"print",
"\"seen %d stats so far.\"",
"%",
"self",
".",
"stats_seen",
"print"... | Periodically flush stats to graphite | [
"Periodically",
"flush",
"stats",
"to",
"graphite"
] | python | train |
PinLin/KCOJ_api | KCOJ_api/api.py | https://github.com/PinLin/KCOJ_api/blob/64f6ef0f9e64dc1efd692cbe6d5738ee7cfb78ec/KCOJ_api/api.py#L66-L107 | def get_question(self, number: str=None) -> dict:
"""
取得課程中的所有題目資訊
"""
try:
# 取得資料
response = self.__session.get(
self.__url + '/HomeworkBoard', timeout=0.5, verify=False)
soup = BeautifulSoup(response.text, 'html.parser')
# 整理題目資訊
questions = {}
for tag in soup.find_all('tr'):
# 去除標題列
if tag.find('a') == None:
continue
# 儲存題目資訊
questions[tag.find('a').get_text().strip()] = {
# 繳交期限
'deadline': tag.find_all('td')[3].get_text().strip(),
# 是否已經過期限
'expired': tag.find_all('td')[4].get_text().strip() == '期限已過',
# 是否繳交
'status': tag.find_all('td')[6].get_text().strip() == '已繳',
# 程式語言種類
'language': tag.find_all('td')[5].get_text().strip(),
}
# 回傳結果
if number != None:
return questions.get(number)
else:
return questions
except requests.exceptions.Timeout:
return {
"Timeout": {
'deadline': "Timeout",
'expired': False,
'status': False,
'language': "Timeout",
}
} | [
"def",
"get_question",
"(",
"self",
",",
"number",
":",
"str",
"=",
"None",
")",
"->",
"dict",
":",
"try",
":",
"# 取得資料",
"response",
"=",
"self",
".",
"__session",
".",
"get",
"(",
"self",
".",
"__url",
"+",
"'/HomeworkBoard'",
",",
"timeout",
"=",
... | 取得課程中的所有題目資訊 | [
"取得課程中的所有題目資訊"
] | python | train |
exosite-labs/pyonep | pyonep/portals/endpoints.py | https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/portals/endpoints.py#L212-L242 | def add_device(self, model, serial):
"""
Returns 'device object' of newly created device.
http://docs.exosite.com/portals/#create-device
http://docs.exosite.com/portals/#device-object
"""
device = {
'model': model,
'vendor': self.vendor(),
'sn': serial,
'type': 'vendor'
}
headers = {
'User-Agent': self.user_agent(),
}
headers.update(self.headers())
r = requests.post( self.portals_url()+'/portals/'+self.portal_id()+'/devices',
data=json.dumps(device),
headers=headers,
auth=self.auth())
if HTTP_STATUS.ADDED == r.status_code:
# fix the 'meta' to be dictionary instead of string
device_obj = r.json()
return dictify_device_meta(device_obj)
else:
print("add_device: Something went wrong: <{0}>: {1}".format(
r.status_code, r.reason))
r.raise_for_status() | [
"def",
"add_device",
"(",
"self",
",",
"model",
",",
"serial",
")",
":",
"device",
"=",
"{",
"'model'",
":",
"model",
",",
"'vendor'",
":",
"self",
".",
"vendor",
"(",
")",
",",
"'sn'",
":",
"serial",
",",
"'type'",
":",
"'vendor'",
"}",
"headers",
... | Returns 'device object' of newly created device.
http://docs.exosite.com/portals/#create-device
http://docs.exosite.com/portals/#device-object | [
"Returns",
"device",
"object",
"of",
"newly",
"created",
"device",
"."
] | python | train |
dcramer/peek | peek/tracer.py | https://github.com/dcramer/peek/blob/da7c086660fc870c6632c4dc5ccb2ff9bfbee52e/peek/tracer.py#L237-L248 | def start(self, origin):
"""
Start this Tracer.
Return a Python function suitable for use with sys.settrace().
"""
self.start_time = time.time()
self.pause_until = None
self.data.update(self._get_struct(origin, 'origin'))
self.data_stack.append(self.data)
sys.settrace(self._trace)
return self._trace | [
"def",
"start",
"(",
"self",
",",
"origin",
")",
":",
"self",
".",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"pause_until",
"=",
"None",
"self",
".",
"data",
".",
"update",
"(",
"self",
".",
"_get_struct",
"(",
"origin",
",",
"... | Start this Tracer.
Return a Python function suitable for use with sys.settrace(). | [
"Start",
"this",
"Tracer",
"."
] | python | train |
frascoweb/frasco | frasco/expression.py | https://github.com/frascoweb/frasco/blob/ea519d69dd5ca6deaf3650175692ee4a1a02518f/frasco/expression.py#L348-L368 | def eval_expr(expr, context):
"""Recursively evaluates a compiled expression using the specified context.
Dict instances can contain a "__kwargs" key which will be used to update the
dict with its content
"""
if isinstance(expr, list):
rv = []
for item in expr:
rv.append(eval_expr(item, context))
return rv
if isinstance(expr, dict):
rv = {}
for k, v in expr.iteritems():
rv[k] = eval_expr(v, context)
kwargs = rv.pop("__kwargs", None)
if kwargs:
rv.update(kwargs)
return rv
if isinstance(expr, Expression):
return expr.eval(context)
return expr | [
"def",
"eval_expr",
"(",
"expr",
",",
"context",
")",
":",
"if",
"isinstance",
"(",
"expr",
",",
"list",
")",
":",
"rv",
"=",
"[",
"]",
"for",
"item",
"in",
"expr",
":",
"rv",
".",
"append",
"(",
"eval_expr",
"(",
"item",
",",
"context",
")",
")"... | Recursively evaluates a compiled expression using the specified context.
Dict instances can contain a "__kwargs" key which will be used to update the
dict with its content | [
"Recursively",
"evaluates",
"a",
"compiled",
"expression",
"using",
"the",
"specified",
"context",
".",
"Dict",
"instances",
"can",
"contain",
"a",
"__kwargs",
"key",
"which",
"will",
"be",
"used",
"to",
"update",
"the",
"dict",
"with",
"its",
"content"
] | python | train |
kmike/django-generic-images | generic_images/admin.py | https://github.com/kmike/django-generic-images/blob/4e45068ed219ac35396758eb6b6e1fe5306147df/generic_images/admin.py#L10-L29 | def attachedimage_form_factory(lang='en', debug=False):
''' Returns ModelForm class to be used in admin.
'lang' is the language for GearsUploader (can be 'en' and 'ru' at the
moment).
'''
yui = '' if debug else '.yui'
class _AttachedImageAdminForm(forms.ModelForm):
caption = forms.CharField(label=_('Caption'), required=False)
class Media:
js = [
'generic_images/js/mootools-1.2.4-core-yc.js',
'generic_images/js/GearsUploader.%s%s.js' % (lang, yui,),
'generic_images/js/AttachedImageInline.js',
]
class Meta:
model = AttachedImage
return _AttachedImageAdminForm | [
"def",
"attachedimage_form_factory",
"(",
"lang",
"=",
"'en'",
",",
"debug",
"=",
"False",
")",
":",
"yui",
"=",
"''",
"if",
"debug",
"else",
"'.yui'",
"class",
"_AttachedImageAdminForm",
"(",
"forms",
".",
"ModelForm",
")",
":",
"caption",
"=",
"forms",
"... | Returns ModelForm class to be used in admin.
'lang' is the language for GearsUploader (can be 'en' and 'ru' at the
moment). | [
"Returns",
"ModelForm",
"class",
"to",
"be",
"used",
"in",
"admin",
".",
"lang",
"is",
"the",
"language",
"for",
"GearsUploader",
"(",
"can",
"be",
"en",
"and",
"ru",
"at",
"the",
"moment",
")",
"."
] | python | train |
apache/incubator-mxnet | python/mxnet/image/image.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/image/image.py#L1409-L1428 | def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
def locate():
"""Locate the image file/index if decode fails."""
if self.seq is not None:
idx = self.seq[(self.cur % self.num_image) - 1]
else:
idx = (self.cur % self.num_image) - 1
if self.imglist is not None:
_, fname = self.imglist[idx]
msg = "filename: {}".format(fname)
else:
msg = "index: {}".format(idx)
return "Broken image " + msg
try:
img = imdecode(s)
except Exception as e:
raise RuntimeError("{}, {}".format(locate(), e))
return img | [
"def",
"imdecode",
"(",
"self",
",",
"s",
")",
":",
"def",
"locate",
"(",
")",
":",
"\"\"\"Locate the image file/index if decode fails.\"\"\"",
"if",
"self",
".",
"seq",
"is",
"not",
"None",
":",
"idx",
"=",
"self",
".",
"seq",
"[",
"(",
"self",
".",
"cu... | Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details. | [
"Decodes",
"a",
"string",
"or",
"byte",
"string",
"to",
"an",
"NDArray",
".",
"See",
"mx",
".",
"img",
".",
"imdecode",
"for",
"more",
"details",
"."
] | python | train |
vberlier/nbtlib | nbtlib/literal/serializer.py | https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L142-L150 | def serialize_list(self, tag):
"""Return the literal representation of a list tag."""
separator, fmt = self.comma, '[{}]'
with self.depth():
if self.should_expand(tag):
separator, fmt = self.expand(separator, fmt)
return fmt.format(separator.join(map(self.serialize, tag))) | [
"def",
"serialize_list",
"(",
"self",
",",
"tag",
")",
":",
"separator",
",",
"fmt",
"=",
"self",
".",
"comma",
",",
"'[{}]'",
"with",
"self",
".",
"depth",
"(",
")",
":",
"if",
"self",
".",
"should_expand",
"(",
"tag",
")",
":",
"separator",
",",
... | Return the literal representation of a list tag. | [
"Return",
"the",
"literal",
"representation",
"of",
"a",
"list",
"tag",
"."
] | python | train |
amzn/ion-python | amazon/ion/reader_text.py | https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_text.py#L2152-L2176 | def _skip_trampoline(handler):
"""Intercepts events from container handlers, emitting them only if they should not be skipped."""
data_event, self = (yield None)
delegate = handler
event = None
depth = 0
while True:
def pass_through():
_trans = delegate.send(Transition(data_event, delegate))
return _trans, _trans.delegate, _trans.event
if data_event is not None and data_event.type is ReadEventType.SKIP:
while True:
trans, delegate, event = pass_through()
if event is not None:
if event.event_type is IonEventType.CONTAINER_END and event.depth <= depth:
break
if event is None or event.event_type is IonEventType.INCOMPLETE:
data_event, _ = yield Transition(event, self)
else:
trans, delegate, event = pass_through()
if event is not None and (event.event_type is IonEventType.CONTAINER_START or
event.event_type is IonEventType.CONTAINER_END):
depth = event.depth
data_event, _ = yield Transition(event, self) | [
"def",
"_skip_trampoline",
"(",
"handler",
")",
":",
"data_event",
",",
"self",
"=",
"(",
"yield",
"None",
")",
"delegate",
"=",
"handler",
"event",
"=",
"None",
"depth",
"=",
"0",
"while",
"True",
":",
"def",
"pass_through",
"(",
")",
":",
"_trans",
"... | Intercepts events from container handlers, emitting them only if they should not be skipped. | [
"Intercepts",
"events",
"from",
"container",
"handlers",
"emitting",
"them",
"only",
"if",
"they",
"should",
"not",
"be",
"skipped",
"."
] | python | train |
jason-weirather/pythologist | pythologist/__init__.py | https://github.com/jason-weirather/pythologist/blob/6eb4082be9dffa9570e4ceaa06d97845eac4c006/pythologist/__init__.py#L223-L237 | def is_uniform(self,verbose=True):
"""
Check to make sure phenotype calls, or scored calls are consistent across all images / samples
"""
uni = pd.Series(self['phenotype_calls'].apply(lambda x: json.dumps(x)).unique()).\
apply(lambda x: json.loads(x)).apply(lambda x: tuple(sorted(x.keys()))).unique()
if len(uni) > 1:
if verbose: sys.stderr.write("WARNING: phenotypes differ across the dataframe \n"+str(uni)+"\n")
return False
uni = pd.Series(self['scored_calls'].apply(lambda x: json.dumps(x)).unique()).\
apply(lambda x: json.loads(x)).apply(lambda x: tuple(sorted(x.keys()))).unique()
if len(uni) > 1:
if verbose: sys.stderr.write("WARNING: scored_calls differ across the dataframe \n"+str(uni)+"\n")
return False
return True | [
"def",
"is_uniform",
"(",
"self",
",",
"verbose",
"=",
"True",
")",
":",
"uni",
"=",
"pd",
".",
"Series",
"(",
"self",
"[",
"'phenotype_calls'",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"json",
".",
"dumps",
"(",
"x",
")",
")",
".",
"unique",
... | Check to make sure phenotype calls, or scored calls are consistent across all images / samples | [
"Check",
"to",
"make",
"sure",
"phenotype",
"calls",
"or",
"scored",
"calls",
"are",
"consistent",
"across",
"all",
"images",
"/",
"samples"
] | python | train |
secdev/scapy | scapy/contrib/isotp.py | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/isotp.py#L347-L370 | def pop(self, identifier=None, ext_addr=None, basecls=ISOTP):
"""
Returns a built ISOTP message
:param identifier: if not None, only return isotp messages with this
destination
:param ext_addr: if identifier is not None, only return isotp messages
with this extended address for destination
:param basecls: the class of packets that will be returned, defautls to
ISOTP
:return: an ISOTP packet, or None if no message is ready
"""
if identifier is not None:
for i in range(len(self.ready)):
b = self.ready[i]
identifier = b[0]
ea = b[1]
if identifier == identifier and ext_addr == ea:
return ISOTPMessageBuilder._build(self.ready.pop(i))
return None
if len(self.ready) > 0:
return ISOTPMessageBuilder._build(self.ready.pop(0))
return None | [
"def",
"pop",
"(",
"self",
",",
"identifier",
"=",
"None",
",",
"ext_addr",
"=",
"None",
",",
"basecls",
"=",
"ISOTP",
")",
":",
"if",
"identifier",
"is",
"not",
"None",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"ready",
")",
... | Returns a built ISOTP message
:param identifier: if not None, only return isotp messages with this
destination
:param ext_addr: if identifier is not None, only return isotp messages
with this extended address for destination
:param basecls: the class of packets that will be returned, defautls to
ISOTP
:return: an ISOTP packet, or None if no message is ready | [
"Returns",
"a",
"built",
"ISOTP",
"message",
":",
"param",
"identifier",
":",
"if",
"not",
"None",
"only",
"return",
"isotp",
"messages",
"with",
"this",
"destination",
":",
"param",
"ext_addr",
":",
"if",
"identifier",
"is",
"not",
"None",
"only",
"return",... | python | train |
tango-controls/pytango | tango/utils.py | https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/utils.py#L1264-L1271 | def insert(self, i, x):
"""s.insert(i, x) same as s[i:i] = [x]
Raises TypeError if x isn't a string."""
if not isinstance(x, str):
raise TypeError(
'Members of this object must be strings. '
'You supplied \"%s\"' % type(x))
list.insert(self, i, x) | [
"def",
"insert",
"(",
"self",
",",
"i",
",",
"x",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'Members of this object must be strings. '",
"'You supplied \\\"%s\\\"'",
"%",
"type",
"(",
"x",
")",
")",
"l... | s.insert(i, x) same as s[i:i] = [x]
Raises TypeError if x isn't a string. | [
"s",
".",
"insert",
"(",
"i",
"x",
")",
"same",
"as",
"s",
"[",
"i",
":",
"i",
"]",
"=",
"[",
"x",
"]",
"Raises",
"TypeError",
"if",
"x",
"isn",
"t",
"a",
"string",
"."
] | python | train |
tamasgal/km3pipe | km3pipe/utils/streamds.py | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/streamds.py#L165-L203 | def convert_runsummary_to_json(
df, comment='Uploaded via km3pipe.StreamDS', prefix='TEST_'
):
"""Convert a Pandas DataFrame with runsummary to JSON for DB upload"""
data_field = []
comment += ", by {}".format(getpass.getuser())
for det_id, det_data in df.groupby('det_id'):
runs_field = []
data_field.append({"DetectorId": det_id, "Runs": runs_field})
for run, run_data in det_data.groupby('run'):
parameters_field = []
runs_field.append({
"Run": int(run),
"Parameters": parameters_field
})
parameter_dict = {}
for row in run_data.itertuples():
for parameter_name in run_data.columns:
if parameter_name in REQUIRED_COLUMNS:
continue
if parameter_name not in parameter_dict:
entry = {'Name': prefix + parameter_name, 'Data': []}
parameter_dict[parameter_name] = entry
data_value = getattr(row, parameter_name)
try:
data_value = float(data_value)
except ValueError as e:
log.critical("Data values has to be floats!")
raise ValueError(e)
value = {'S': str(getattr(row, 'source')), 'D': data_value}
parameter_dict[parameter_name]['Data'].append(value)
for parameter_data in parameter_dict.values():
parameters_field.append(parameter_data)
data_to_upload = {"Comment": comment, "Data": data_field}
file_data_to_upload = json.dumps(data_to_upload)
return file_data_to_upload | [
"def",
"convert_runsummary_to_json",
"(",
"df",
",",
"comment",
"=",
"'Uploaded via km3pipe.StreamDS'",
",",
"prefix",
"=",
"'TEST_'",
")",
":",
"data_field",
"=",
"[",
"]",
"comment",
"+=",
"\", by {}\"",
".",
"format",
"(",
"getpass",
".",
"getuser",
"(",
")... | Convert a Pandas DataFrame with runsummary to JSON for DB upload | [
"Convert",
"a",
"Pandas",
"DataFrame",
"with",
"runsummary",
"to",
"JSON",
"for",
"DB",
"upload"
] | python | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py#L2612-L2657 | def add_resize_bilinear(self, name, input_name, output_name, target_height=1, target_width=1,
mode='ALIGN_ENDPOINTS_MODE'):
"""
Add resize bilinear layer to the model. A layer that resizes the input to a given spatial size using bilinear interpolation.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
target_height: int
Output height dimension.
target_width: int
Output width dimension.
mode: str
Following values are supported: 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.
This parameter determines the sampling grid used for bilinear interpolation. Kindly refer to NeuralNetwork.proto for details.
See Also
--------
add_upsample
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new inner-product layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.resizeBilinear
spec_layer_params.targetSize.append(target_height)
spec_layer_params.targetSize.append(target_width)
if mode == 'ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ALIGN_ENDPOINTS_MODE')
elif mode == 'STRICT_ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('STRICT_ALIGN_ENDPOINTS_MODE')
elif mode == 'UPSAMPLE_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('UPSAMPLE_MODE')
elif mode == 'ROI_ALIGN_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ROI_ALIGN_MODE')
else:
raise ValueError("Unspported resize bilinear mode %s" % mode) | [
"def",
"add_resize_bilinear",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"target_height",
"=",
"1",
",",
"target_width",
"=",
"1",
",",
"mode",
"=",
"'ALIGN_ENDPOINTS_MODE'",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
... | Add resize bilinear layer to the model. A layer that resizes the input to a given spatial size using bilinear interpolation.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
target_height: int
Output height dimension.
target_width: int
Output width dimension.
mode: str
Following values are supported: 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.
This parameter determines the sampling grid used for bilinear interpolation. Kindly refer to NeuralNetwork.proto for details.
See Also
--------
add_upsample | [
"Add",
"resize",
"bilinear",
"layer",
"to",
"the",
"model",
".",
"A",
"layer",
"that",
"resizes",
"the",
"input",
"to",
"a",
"given",
"spatial",
"size",
"using",
"bilinear",
"interpolation",
"."
] | python | train |
sio2project/filetracker | filetracker/client/data_store.py | https://github.com/sio2project/filetracker/blob/359b474850622e3d0c25ee2596d7242c02f84efb/filetracker/client/data_store.py#L80-L98 | def get_file(self, name, filename):
"""Saves the content of file named ``name`` to ``filename``.
Works like :meth:`get_stream`, but ``filename`` is the name of
a file which will be created (or overwritten).
Returns the full versioned name of the retrieved file.
"""
stream, vname = self.get_stream(name)
path, version = split_name(vname)
dir_path = os.path.dirname(filename)
if dir_path:
mkdir(dir_path)
with open(filename, 'wb') as f:
shutil.copyfileobj(stream, f)
return vname | [
"def",
"get_file",
"(",
"self",
",",
"name",
",",
"filename",
")",
":",
"stream",
",",
"vname",
"=",
"self",
".",
"get_stream",
"(",
"name",
")",
"path",
",",
"version",
"=",
"split_name",
"(",
"vname",
")",
"dir_path",
"=",
"os",
".",
"path",
".",
... | Saves the content of file named ``name`` to ``filename``.
Works like :meth:`get_stream`, but ``filename`` is the name of
a file which will be created (or overwritten).
Returns the full versioned name of the retrieved file. | [
"Saves",
"the",
"content",
"of",
"file",
"named",
"name",
"to",
"filename",
"."
] | python | train |
google/grr | grr/core/grr_response_core/lib/parsers/linux_file_parser.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/linux_file_parser.py#L679-L711 | def ParseFileset(self, fileset=None):
"""Process linux system login files.
Orchestrates collection of account entries from /etc/passwd and
/etc/shadow. The passwd and shadow entries are reconciled and group
memberships are mapped to the account.
Args:
fileset: A dict of files mapped from path to an open file.
Yields:
- A series of User entries, each of which is populated with
group memberships and indications of the shadow state of the account.
- A series of anomalies in cases where there are mismatches between passwd
and shadow state.
"""
self.AddPassword(fileset)
self.AddShadow(fileset)
self.ReconcileShadow(self.shadow_store)
# Get group memberships using the files that were already collected.
# Separate out groups and anomalies.
for rdf in LinuxSystemGroupParser().ParseFileset(fileset):
if isinstance(rdf, rdf_client.Group):
self.groups[rdf.name] = rdf
else:
yield rdf
self.AddGroupMemberships()
for user in itervalues(self.entry):
yield user
for grp in itervalues(self.groups):
yield grp
for anom in self.FindAnomalies():
yield anom | [
"def",
"ParseFileset",
"(",
"self",
",",
"fileset",
"=",
"None",
")",
":",
"self",
".",
"AddPassword",
"(",
"fileset",
")",
"self",
".",
"AddShadow",
"(",
"fileset",
")",
"self",
".",
"ReconcileShadow",
"(",
"self",
".",
"shadow_store",
")",
"# Get group m... | Process linux system login files.
Orchestrates collection of account entries from /etc/passwd and
/etc/shadow. The passwd and shadow entries are reconciled and group
memberships are mapped to the account.
Args:
fileset: A dict of files mapped from path to an open file.
Yields:
- A series of User entries, each of which is populated with
group memberships and indications of the shadow state of the account.
- A series of anomalies in cases where there are mismatches between passwd
and shadow state. | [
"Process",
"linux",
"system",
"login",
"files",
"."
] | python | train |
Autodesk/aomi | aomi/model/context.py | https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/model/context.py#L155-L159 | def thaw(self, tmp_dir):
"""Will thaw every secret into an appropriate temporary location"""
for resource in self.resources():
if resource.present:
resource.thaw(tmp_dir) | [
"def",
"thaw",
"(",
"self",
",",
"tmp_dir",
")",
":",
"for",
"resource",
"in",
"self",
".",
"resources",
"(",
")",
":",
"if",
"resource",
".",
"present",
":",
"resource",
".",
"thaw",
"(",
"tmp_dir",
")"
] | Will thaw every secret into an appropriate temporary location | [
"Will",
"thaw",
"every",
"secret",
"into",
"an",
"appropriate",
"temporary",
"location"
] | python | train |
bwohlberg/sporco | sporco/linalg.py | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/linalg.py#L1379-L1413 | def rfl2norm2(xf, xs, axis=(0, 1)):
r"""
Compute the squared :math:`\ell_2` norm in the DFT domain, taking
into account the unnormalised DFT scaling, i.e. given the DFT of a
multi-dimensional array computed via :func:`rfftn`, return the
squared :math:`\ell_2` norm of the original array.
Parameters
----------
xf : array_like
Input array
xs : sequence of ints
Shape of original array to which :func:`rfftn` was applied to
obtain the input array
axis : sequence of ints, optional (default (0,1))
Axes on which the input is in the frequency domain
Returns
-------
x : float
:math:`\|\mathbf{x}\|_2^2` where the input array is the result of
applying :func:`rfftn` to the specified axes of multi-dimensional
array :math:`\mathbf{x}`
"""
scl = 1.0 / np.prod(np.array([xs[k] for k in axis]))
slc0 = (slice(None),) * axis[-1]
nrm0 = np.linalg.norm(xf[slc0 + (0,)])
idx1 = (xs[axis[-1]] + 1) // 2
nrm1 = np.linalg.norm(xf[slc0 + (slice(1, idx1),)])
if xs[axis[-1]] % 2 == 0:
nrm2 = np.linalg.norm(xf[slc0 + (slice(-1, None),)])
else:
nrm2 = 0.0
return scl*(nrm0**2 + 2.0*nrm1**2 + nrm2**2) | [
"def",
"rfl2norm2",
"(",
"xf",
",",
"xs",
",",
"axis",
"=",
"(",
"0",
",",
"1",
")",
")",
":",
"scl",
"=",
"1.0",
"/",
"np",
".",
"prod",
"(",
"np",
".",
"array",
"(",
"[",
"xs",
"[",
"k",
"]",
"for",
"k",
"in",
"axis",
"]",
")",
")",
"... | r"""
Compute the squared :math:`\ell_2` norm in the DFT domain, taking
into account the unnormalised DFT scaling, i.e. given the DFT of a
multi-dimensional array computed via :func:`rfftn`, return the
squared :math:`\ell_2` norm of the original array.
Parameters
----------
xf : array_like
Input array
xs : sequence of ints
Shape of original array to which :func:`rfftn` was applied to
obtain the input array
axis : sequence of ints, optional (default (0,1))
Axes on which the input is in the frequency domain
Returns
-------
x : float
:math:`\|\mathbf{x}\|_2^2` where the input array is the result of
applying :func:`rfftn` to the specified axes of multi-dimensional
array :math:`\mathbf{x}` | [
"r",
"Compute",
"the",
"squared",
":",
"math",
":",
"\\",
"ell_2",
"norm",
"in",
"the",
"DFT",
"domain",
"taking",
"into",
"account",
"the",
"unnormalised",
"DFT",
"scaling",
"i",
".",
"e",
".",
"given",
"the",
"DFT",
"of",
"a",
"multi",
"-",
"dimensio... | python | train |
iotile/coretools | transport_plugins/bled112/iotile_transport_bled112/bled112_cmd.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/bled112/iotile_transport_bled112/bled112_cmd.py#L163-L215 | def _probe_services(self, handle):
"""Probe for all primary services and characteristics in those services
Args:
handle (int): the connection handle to probe
"""
code = 0x2800
def event_filter_func(event):
if (event.command_class == 4 and event.command == 2):
event_handle, = unpack("B", event.payload[0:1])
return event_handle == handle
return False
def end_filter_func(event):
if (event.command_class == 4 and event.command == 1):
event_handle, = unpack("B", event.payload[0:1])
return event_handle == handle
return False
payload = struct.pack('<BHHBH', handle, 1, 0xFFFF, 2, code)
try:
response = self._send_command(4, 1, payload)
except InternalTimeoutError:
return False, {'reason': 'Timeout waiting for command response'}
handle, result = unpack("<BH", response.payload)
if result != 0:
return False, None
events = self._wait_process_events(0.5, event_filter_func, end_filter_func)
gatt_events = [x for x in events if event_filter_func(x)]
end_events = [x for x in events if end_filter_func(x)]
if len(end_events) == 0:
return False, None
#Make sure we successfully probed the gatt table
end_event = end_events[0]
_, result, _ = unpack("<BHH", end_event.payload)
if result != 0:
self._logger.warn("Error enumerating GATT table, protocol error code = %d (0x%X)" % (result, result))
return False, None
services = {}
for event in gatt_events:
process_gatt_service(services, event)
return True, {'services': services} | [
"def",
"_probe_services",
"(",
"self",
",",
"handle",
")",
":",
"code",
"=",
"0x2800",
"def",
"event_filter_func",
"(",
"event",
")",
":",
"if",
"(",
"event",
".",
"command_class",
"==",
"4",
"and",
"event",
".",
"command",
"==",
"2",
")",
":",
"event_... | Probe for all primary services and characteristics in those services
Args:
handle (int): the connection handle to probe | [
"Probe",
"for",
"all",
"primary",
"services",
"and",
"characteristics",
"in",
"those",
"services"
] | python | train |
datadesk/django-bakery | bakery/management/commands/publish.py | https://github.com/datadesk/django-bakery/blob/e2feb13a66552a388fbcfaaacdd504bba08d3c69/bakery/management/commands/publish.py#L313-L319 | def get_md5(self, filename):
"""
Returns the md5 checksum of the provided file name.
"""
with open(filename, 'rb') as f:
m = hashlib.md5(f.read())
return m.hexdigest() | [
"def",
"get_md5",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"m",
"=",
"hashlib",
".",
"md5",
"(",
"f",
".",
"read",
"(",
")",
")",
"return",
"m",
".",
"hexdigest",
"(",
")"
] | Returns the md5 checksum of the provided file name. | [
"Returns",
"the",
"md5",
"checksum",
"of",
"the",
"provided",
"file",
"name",
"."
] | python | train |
markovmodel/PyEMMA | pyemma/coordinates/pipelines.py | https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/pipelines.py#L74-L90 | def add_element(self, e):
r""" Appends a pipeline stage.
Appends the given element to the end of the current chain.
"""
if not isinstance(e, Iterable):
raise TypeError("given element {} is not iterable in terms of "
"PyEMMAs coordinate pipeline.".format(e))
# only if we have more than one element
if not e.is_reader and len(self._chain) >= 1:
data_producer = self._chain[-1]
# avoid calling the setter of StreamingTransformer.data_producer, since this
# triggers a re-parametrization even on readers (where it makes not sense)
e._data_producer = data_producer
e.chunksize = self.chunksize
self._chain.append(e) | [
"def",
"add_element",
"(",
"self",
",",
"e",
")",
":",
"if",
"not",
"isinstance",
"(",
"e",
",",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"\"given element {} is not iterable in terms of \"",
"\"PyEMMAs coordinate pipeline.\"",
".",
"format",
"(",
"e",
")",... | r""" Appends a pipeline stage.
Appends the given element to the end of the current chain. | [
"r",
"Appends",
"a",
"pipeline",
"stage",
"."
] | python | train |
vsoch/helpme | helpme/action/submit.py | https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/action/submit.py#L24-L64 | def upload_asciinema(filename):
'''a wrapper around generation of an asciinema.api.Api to call the
upload command given an already existing asciinema file.
Parameters
==========
filename: the asciinema file to upload, can be generated with
function record_asciinema in record.py
'''
if os.path.exists(filename):
import asciinema.config as aconfig
from asciinema.api import Api
# Load the API class
cfg = aconfig.load()
api = Api(cfg.api_url, os.environ.get("USER"), cfg.install_id)
# Perform the upload, return the url
uploader = UploadCommand(api, filename)
try:
url, warn = uploader.api.upload_asciicast(filename)
if warn:
uploader.print_warning(warn)
# Extract just the url, if provided (always is https)
if url:
match = re.search('https://.+', url)
if match:
url = match.group()
return url
except:
bot.error('Problem with upload, skipping')
else:
bot.warning('Cannot find %s, skipping submission.' %filename) | [
"def",
"upload_asciinema",
"(",
"filename",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"import",
"asciinema",
".",
"config",
"as",
"aconfig",
"from",
"asciinema",
".",
"api",
"import",
"Api",
"# Load the API class",
"cfg",
... | a wrapper around generation of an asciinema.api.Api to call the
upload command given an already existing asciinema file.
Parameters
==========
filename: the asciinema file to upload, can be generated with
function record_asciinema in record.py | [
"a",
"wrapper",
"around",
"generation",
"of",
"an",
"asciinema",
".",
"api",
".",
"Api",
"to",
"call",
"the",
"upload",
"command",
"given",
"an",
"already",
"existing",
"asciinema",
"file",
"."
] | python | train |
barrust/pyspellchecker | spellchecker/spellchecker.py | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L398-L404 | def load_words(self, words):
""" Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded """
self._dictionary.update([word.lower() for word in words])
self._update_dictionary() | [
"def",
"load_words",
"(",
"self",
",",
"words",
")",
":",
"self",
".",
"_dictionary",
".",
"update",
"(",
"[",
"word",
".",
"lower",
"(",
")",
"for",
"word",
"in",
"words",
"]",
")",
"self",
".",
"_update_dictionary",
"(",
")"
] | Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded | [
"Load",
"a",
"list",
"of",
"words",
"from",
"which",
"to",
"generate",
"a",
"word",
"frequency",
"list"
] | python | train |
suds-community/suds | suds/client.py | https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/client.py#L852-L868 | def __headers(self):
"""
Get HTTP headers for a HTTP/HTTPS SOAP request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
if isinstance(action, unicode):
action = action.encode("utf-8")
result = {
"Content-Type": "text/xml; charset=utf-8",
"SOAPAction": action}
result.update(**self.options.headers)
log.debug("headers = %s", result)
return result | [
"def",
"__headers",
"(",
"self",
")",
":",
"action",
"=",
"self",
".",
"method",
".",
"soap",
".",
"action",
"if",
"isinstance",
"(",
"action",
",",
"unicode",
")",
":",
"action",
"=",
"action",
".",
"encode",
"(",
"\"utf-8\"",
")",
"result",
"=",
"{... | Get HTTP headers for a HTTP/HTTPS SOAP request.
@return: A dictionary of header/values.
@rtype: dict | [
"Get",
"HTTP",
"headers",
"for",
"a",
"HTTP",
"/",
"HTTPS",
"SOAP",
"request",
"."
] | python | train |
ntucllab/libact | libact/base/dataset.py | https://github.com/ntucllab/libact/blob/e37e9ed6c36febe701d84b2d495c958ab02f0bc8/libact/base/dataset.py#L104-L119 | def update(self, entry_id, new_label):
"""
Updates an entry with entry_id with the given label
Parameters
----------
entry_id : int
entry id of the sample to update.
label : {int, None}
Label of the sample to be update.
"""
self.data[entry_id] = (self.data[entry_id][0], new_label)
self.modified = True
for callback in self._update_callback:
callback(entry_id, new_label) | [
"def",
"update",
"(",
"self",
",",
"entry_id",
",",
"new_label",
")",
":",
"self",
".",
"data",
"[",
"entry_id",
"]",
"=",
"(",
"self",
".",
"data",
"[",
"entry_id",
"]",
"[",
"0",
"]",
",",
"new_label",
")",
"self",
".",
"modified",
"=",
"True",
... | Updates an entry with entry_id with the given label
Parameters
----------
entry_id : int
entry id of the sample to update.
label : {int, None}
Label of the sample to be update. | [
"Updates",
"an",
"entry",
"with",
"entry_id",
"with",
"the",
"given",
"label"
] | python | train |
jgorset/facebook | facebook/user.py | https://github.com/jgorset/facebook/blob/90f035ae1828e4eeb7af428964fedf0ee99ec2ad/facebook/user.py#L85-L106 | def work(self):
"""
A list of :class:`Employment` instances describing the user's work history.
Each structure has attributes ``employer``, ``position``, ``started_at`` and ``ended_at``.
``employer`` and ``position`` reference ``Page`` instances, while ``started_at`` and ``ended_at``
are datetime objects.
"""
employments = []
for work in self.cache['work']:
employment = Employment(
employer = work.get('employer'),
position = work.get('position'),
started_at = work.get('start_date'),
ended_at = work.get('end_date')
)
employments.append(employment)
return employments | [
"def",
"work",
"(",
"self",
")",
":",
"employments",
"=",
"[",
"]",
"for",
"work",
"in",
"self",
".",
"cache",
"[",
"'work'",
"]",
":",
"employment",
"=",
"Employment",
"(",
"employer",
"=",
"work",
".",
"get",
"(",
"'employer'",
")",
",",
"position"... | A list of :class:`Employment` instances describing the user's work history.
Each structure has attributes ``employer``, ``position``, ``started_at`` and ``ended_at``.
``employer`` and ``position`` reference ``Page`` instances, while ``started_at`` and ``ended_at``
are datetime objects. | [
"A",
"list",
"of",
":",
"class",
":",
"Employment",
"instances",
"describing",
"the",
"user",
"s",
"work",
"history",
"."
] | python | train |
realitix/vulkan | generator/generate.py | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L325-L445 | def model_functions(vk, model):
"""Fill the model with functions"""
def get_vk_extension_functions():
names = set()
for extension in get_extensions_filtered(vk):
for req in extension['require']:
if 'command' not in req:
continue
for command in req['command']:
cn = command['@name']
names.add(cn)
# add alias command too
for alias, n in model['alias'].items():
if n == cn:
names.add(alias)
return names
def get_count_param(command):
for param in command['param']:
if param['type'] + param.get('#text', '') == 'uint32_t*':
return param
return None
def member_has_str(name):
c = next(iter([x for x in model['constructors']
if x['name'] == name]), None)
if c and any(['char' in x['type'] for x in c['members']]):
return True
return False
def format_member(member):
type_name = member['type']
if '#text' in member:
text = member['#text'].replace('const ', '').strip()
type_name += ' ' + text
return {'name': member['name'],
'type': member['type'],
'none': member['name'] in NULL_MEMBERS,
'force_array': True if '@len' in member else False,
'to_create': False,
'has_str': member_has_str(member['type'])}
def format_return_member(member):
t = member['type']
static_count = None
if '@len' in member and '::' in member['@len']:
lens = member['@len'].split('::')
static_count = {'key': lens[0], 'value': lens[1]}
is_handle = t in get_handle_names(vk)
is_enum = t in get_enum_names(vk)
is_struct = t in get_struct_names(vk)
return {'name': member['name'],
'type': t,
'handle': is_handle,
'enum': is_enum,
'struct': is_struct,
'static_count': static_count,
'has_str': member_has_str(member['type'])}
ALLOCATE_PREFIX = ('vkCreate', 'vkGet', 'vkEnumerate', 'vkAllocate',
'vkMap', 'vkAcquire')
ALLOCATE_EXCEPTION = ('vkGetFenceStatus', 'vkGetEventStatus',
'vkGetQueryPoolResults',
'vkGetPhysicalDeviceXlibPresentationSupportKHR')
COUNT_EXCEPTION = ('vkAcquireNextImageKHR', 'vkEnumerateInstanceVersion')
model['functions'] = []
model['extension_functions'] = []
functions = [f for f in vk['registry']['commands']['command']]
extension_function_names = get_vk_extension_functions()
for function in functions:
if '@alias' in function:
continue
fname = function['proto']['name']
ftype = function['proto']['type']
if fname in CUSTOM_FUNCTIONS:
continue
if type(function['param']) is not list:
function['param'] = [function['param']]
count_param = get_count_param(function)
if fname in COUNT_EXCEPTION:
count_param = None
is_allocate = any([fname.startswith(a) for a in ALLOCATE_PREFIX])
is_count = is_allocate and count_param is not None
if fname in ALLOCATE_EXCEPTION or ftype == 'VkBool32':
is_allocate = is_count = False
members = []
for member in function['param']:
members.append(format_member(member))
return_member = None
if is_allocate:
return_member = format_return_member(function['param'][-1])
members[-1]['to_create'] = True
if is_count:
members[-2]['to_create'] = True
f = {
'name': fname,
'members': members,
'allocate': is_allocate,
'count': is_count,
'return_boolean': True if ftype == 'VkBool32' else False,
'return_result': True if ftype == 'VkResult' else False,
'return_member': return_member,
'is_extension': fname in extension_function_names
}
model['functions'].append(f) | [
"def",
"model_functions",
"(",
"vk",
",",
"model",
")",
":",
"def",
"get_vk_extension_functions",
"(",
")",
":",
"names",
"=",
"set",
"(",
")",
"for",
"extension",
"in",
"get_extensions_filtered",
"(",
"vk",
")",
":",
"for",
"req",
"in",
"extension",
"[",
... | Fill the model with functions | [
"Fill",
"the",
"model",
"with",
"functions"
] | python | train |
quantopian/zipline | zipline/finance/commission.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/commission.py#L364-L369 | def calculate(self, order, transaction):
"""
Pay commission based on dollar value of shares.
"""
cost_per_share = transaction.price * self.cost_per_dollar
return abs(transaction.amount) * cost_per_share | [
"def",
"calculate",
"(",
"self",
",",
"order",
",",
"transaction",
")",
":",
"cost_per_share",
"=",
"transaction",
".",
"price",
"*",
"self",
".",
"cost_per_dollar",
"return",
"abs",
"(",
"transaction",
".",
"amount",
")",
"*",
"cost_per_share"
] | Pay commission based on dollar value of shares. | [
"Pay",
"commission",
"based",
"on",
"dollar",
"value",
"of",
"shares",
"."
] | python | train |
zxylvlp/PingPHP | pingphp/grammar.py | https://github.com/zxylvlp/PingPHP/blob/2e9a5f1ef4b5b13310e3f8ff350fa91032357bc5/pingphp/grammar.py#L1095-L1103 | def p_InterfaceDefList(p):
'''
InterfaceDefList : InterfaceDef
| InterfaceDefList InterfaceDef
'''
if len(p) < 3:
p[0] = InterfaceDefList(None, p[1])
else:
p[0] = InterfaceDefList(p[1], p[2]) | [
"def",
"p_InterfaceDefList",
"(",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"<",
"3",
":",
"p",
"[",
"0",
"]",
"=",
"InterfaceDefList",
"(",
"None",
",",
"p",
"[",
"1",
"]",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"InterfaceDefList",
"(",
... | InterfaceDefList : InterfaceDef
| InterfaceDefList InterfaceDef | [
"InterfaceDefList",
":",
"InterfaceDef",
"|",
"InterfaceDefList",
"InterfaceDef"
] | python | train |
astropy/astropy-healpix | astropy_healpix/high_level.py | https://github.com/astropy/astropy-healpix/blob/c7fbe36305aadda9946dd37969d5dcb9ff6b1440/astropy_healpix/high_level.py#L272-L298 | def healpix_to_skycoord(self, healpix_index, dx=None, dy=None):
"""
Convert HEALPix indices (optionally with offsets) to celestial coordinates.
Note that this method requires that a celestial frame was specified when
initializing HEALPix. If you don't know or need the celestial frame, you
can instead use :meth:`~astropy_healpix.HEALPix.healpix_to_lonlat`.
Parameters
----------
healpix_index : `~numpy.ndarray`
1-D array of HEALPix indices
dx, dy : `~numpy.ndarray`, optional
1-D arrays of offsets inside the HEALPix pixel, which must be in
the range [0:1] (0.5 is the center of the HEALPix pixels). If not
specified, the position at the center of the pixel is used.
Returns
-------
coord : :class:`~astropy.coordinates.SkyCoord`
The resulting celestial coordinates
"""
if self.frame is None:
raise NoFrameError("healpix_to_skycoord")
lon, lat = self.healpix_to_lonlat(healpix_index, dx=dx, dy=dy)
representation = UnitSphericalRepresentation(lon, lat, copy=False)
return SkyCoord(self.frame.realize_frame(representation)) | [
"def",
"healpix_to_skycoord",
"(",
"self",
",",
"healpix_index",
",",
"dx",
"=",
"None",
",",
"dy",
"=",
"None",
")",
":",
"if",
"self",
".",
"frame",
"is",
"None",
":",
"raise",
"NoFrameError",
"(",
"\"healpix_to_skycoord\"",
")",
"lon",
",",
"lat",
"="... | Convert HEALPix indices (optionally with offsets) to celestial coordinates.
Note that this method requires that a celestial frame was specified when
initializing HEALPix. If you don't know or need the celestial frame, you
can instead use :meth:`~astropy_healpix.HEALPix.healpix_to_lonlat`.
Parameters
----------
healpix_index : `~numpy.ndarray`
1-D array of HEALPix indices
dx, dy : `~numpy.ndarray`, optional
1-D arrays of offsets inside the HEALPix pixel, which must be in
the range [0:1] (0.5 is the center of the HEALPix pixels). If not
specified, the position at the center of the pixel is used.
Returns
-------
coord : :class:`~astropy.coordinates.SkyCoord`
The resulting celestial coordinates | [
"Convert",
"HEALPix",
"indices",
"(",
"optionally",
"with",
"offsets",
")",
"to",
"celestial",
"coordinates",
"."
] | python | train |
obriencj/python-javatools | javatools/__init__.py | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/__init__.py#L2226-L2277 | def _pretty_type(s, offset=0):
# pylint: disable=R0911, R0912
# too many returns, too many branches. Not converting this to a
# dict lookup. Waiving instead.
"""
returns the pretty version of a type code
"""
tc = s[offset]
if tc == "V":
return "void"
elif tc == "Z":
return "boolean"
elif tc == "C":
return "char"
elif tc == "B":
return "byte"
elif tc == "S":
return "short"
elif tc == "I":
return "int"
elif tc == "J":
return "long"
elif tc == "D":
return "double"
elif tc == "F":
return "float"
elif tc == "L":
return _pretty_class(s[offset + 1:-1])
elif tc == "[":
return "%s[]" % _pretty_type(s, offset + 1)
elif tc == "(":
return "(%s)" % ",".join(_pretty_typeseq(s[offset + 1:-1]))
elif tc == "T":
return "generic " + s[offset + 1:]
else:
raise Unimplemented("unknown type, %r" % tc) | [
"def",
"_pretty_type",
"(",
"s",
",",
"offset",
"=",
"0",
")",
":",
"# pylint: disable=R0911, R0912",
"# too many returns, too many branches. Not converting this to a",
"# dict lookup. Waiving instead.",
"tc",
"=",
"s",
"[",
"offset",
"]",
"if",
"tc",
"==",
"\"V\"",
":"... | returns the pretty version of a type code | [
"returns",
"the",
"pretty",
"version",
"of",
"a",
"type",
"code"
] | python | train |
theonion/django-bulbs | bulbs/api/views.py | https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/api/views.py#L204-L216 | def status(self, request, **kwargs):
"""This endpoint returns a status text, currently one of:
- "Draft" (If no publish date is set, and no item exists in the editor queue)
- "Waiting for Editor" (If no publish date is set, and an item exists in the editor queue)
- "Published" (The published date is in the past)
- "Scheduled" (The published date is set in the future)
:param request: a WSGI request object
:param kwargs: keyword arguments (optional)
:return: `rest_framework.response.Response`
"""
content = self.get_object()
return Response({"status": content.get_status()}) | [
"def",
"status",
"(",
"self",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"content",
"=",
"self",
".",
"get_object",
"(",
")",
"return",
"Response",
"(",
"{",
"\"status\"",
":",
"content",
".",
"get_status",
"(",
")",
"}",
")"
] | This endpoint returns a status text, currently one of:
- "Draft" (If no publish date is set, and no item exists in the editor queue)
- "Waiting for Editor" (If no publish date is set, and an item exists in the editor queue)
- "Published" (The published date is in the past)
- "Scheduled" (The published date is set in the future)
:param request: a WSGI request object
:param kwargs: keyword arguments (optional)
:return: `rest_framework.response.Response` | [
"This",
"endpoint",
"returns",
"a",
"status",
"text",
"currently",
"one",
"of",
":",
"-",
"Draft",
"(",
"If",
"no",
"publish",
"date",
"is",
"set",
"and",
"no",
"item",
"exists",
"in",
"the",
"editor",
"queue",
")",
"-",
"Waiting",
"for",
"Editor",
"("... | python | train |
timothydmorton/isochrones | isochrones/observation.py | https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/observation.py#L827-L856 | def save_hdf(self, filename, path='', overwrite=False, append=False):
"""
Writes all info necessary to recreate object to HDF file
Saves table of photometry in DataFrame
Saves model specification, spectroscopy, parallax to attrs
"""
if os.path.exists(filename):
store = pd.HDFStore(filename)
if path in store:
store.close()
if overwrite:
os.remove(filename)
elif not append:
raise IOError('{} in {} exists. Set either overwrite or append option.'.format(path,filename))
else:
store.close()
df = self.to_df()
df.to_hdf(filename, path+'/df')
with pd.HDFStore(filename) as store:
# store = pd.HDFStore(filename)
attrs = store.get_storer(path+'/df').attrs
attrs.spectroscopy = self.spectroscopy
attrs.parallax = self.parallax
attrs.N = self._N
attrs.index = self._index
store.close() | [
"def",
"save_hdf",
"(",
"self",
",",
"filename",
",",
"path",
"=",
"''",
",",
"overwrite",
"=",
"False",
",",
"append",
"=",
"False",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"store",
"=",
"pd",
".",
"HDFStore",
... | Writes all info necessary to recreate object to HDF file
Saves table of photometry in DataFrame
Saves model specification, spectroscopy, parallax to attrs | [
"Writes",
"all",
"info",
"necessary",
"to",
"recreate",
"object",
"to",
"HDF",
"file"
] | python | train |
satellogic/telluric | telluric/georaster.py | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L838-L934 | def save(self, filename, tags=None, **kwargs):
"""
Save GeoRaster to a file.
:param filename: url
:param tags: tags to add to default namespace
optional parameters:
* GDAL_TIFF_INTERNAL_MASK: specifies whether mask is within image file, or additional .msk
* overviews: if True, will save with previews. default: True
* factors: list of factors for the overview, default: calculated based on raster width and height
* resampling: to build overviews. default: cubic
* tiled: if True raster will be saved tiled, default: False
* compress: any supported rasterio.enums.Compression value, default to LZW
* blockxsize: int, tile x size, default:256
* blockysize: int, tile y size, default:256
* creation_options: dict, key value of additional creation options
* nodata: if passed, will save with nodata value (e.g. useful for qgis)
"""
if not filename.startswith("/vsi"):
folder = os.path.abspath(os.path.join(filename, os.pardir))
os.makedirs(folder, exist_ok=True)
internal_mask = kwargs.get('GDAL_TIFF_INTERNAL_MASK', True)
nodata_value = kwargs.get('nodata', None)
compression = kwargs.get('compression', Compression.lzw)
rasterio_envs = {'GDAL_TIFF_INTERNAL_MASK': internal_mask}
if os.environ.get('DEBUG', False):
rasterio_envs['CPL_DEBUG'] = True
with rasterio.Env(**rasterio_envs):
try:
size = self.shape
extension = os.path.splitext(filename)[1].lower()[1:]
driver = gdal_drivers[extension]
# tiled
tiled = kwargs.get('tiled', False)
blockxsize = kwargs.get('blockxsize', 256)
blockysize = kwargs.get('blockysize', 256)
params = {
'mode': "w", 'transform': self.affine, 'crs': self.crs,
'driver': driver, 'width': size[2], 'height': size[1], 'count': size[0],
'dtype': dtype_map[self.dtype.type],
'nodata': nodata_value,
'masked': True,
'blockxsize': min(blockxsize, size[2]),
'blockysize': min(blockysize, size[1]),
'tiled': tiled,
'compress': compression.name if compression in Compression else compression,
}
# additional creation options
# -co COPY_SRC_OVERVIEWS=YES -co COMPRESS=DEFLATE -co PHOTOMETRIC=MINISBLACK
creation_options = kwargs.get('creation_options', {})
if creation_options:
params.update(**creation_options)
if self._image is None and self._filename is not None:
creation_options["blockxsize"] = params["blockxsize"]
creation_options["blockysize"] = params["blockysize"]
creation_options["tiled"] = params["tiled"]
creation_options["compress"] = params["compress"]
rasterio.shutil.copy(self.source_file, filename, creation_options=creation_options)
self._cleanup()
with GeoRaster2._raster_opener(filename, "r+",) as r:
self._add_overviews_and_tags(r, tags, kwargs)
else:
with GeoRaster2._raster_opener(filename, **params) as r:
# write data:
for band in range(self.shape[0]):
img = self.image.data
r.write_band(1 + band, img[band, :, :])
# write mask:
if not (
np.ma.getmaskarray(self.image) ==
np.ma.getmaskarray(self.image)[0]
).all():
warnings.warn(
"Saving different masks per band is not supported, "
"the union of the masked values will be performed.", GeoRaster2Warning
)
if params.get('masked'):
mask = _mask_from_masked_array(self.image)
r.write_mask(mask)
self._add_overviews_and_tags(r, tags, kwargs)
return GeoRaster2.open(filename)
except (rasterio.errors.RasterioIOError, rasterio._err.CPLE_BaseError, KeyError) as e:
raise GeoRaster2IOError(e) | [
"def",
"save",
"(",
"self",
",",
"filename",
",",
"tags",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"filename",
".",
"startswith",
"(",
"\"/vsi\"",
")",
":",
"folder",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"pa... | Save GeoRaster to a file.
:param filename: url
:param tags: tags to add to default namespace
optional parameters:
* GDAL_TIFF_INTERNAL_MASK: specifies whether mask is within image file, or additional .msk
* overviews: if True, will save with previews. default: True
* factors: list of factors for the overview, default: calculated based on raster width and height
* resampling: to build overviews. default: cubic
* tiled: if True raster will be saved tiled, default: False
* compress: any supported rasterio.enums.Compression value, default to LZW
* blockxsize: int, tile x size, default:256
* blockysize: int, tile y size, default:256
* creation_options: dict, key value of additional creation options
* nodata: if passed, will save with nodata value (e.g. useful for qgis) | [
"Save",
"GeoRaster",
"to",
"a",
"file",
"."
] | python | train |
saltstack/salt | salt/utils/cache.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/cache.py#L244-L248 | def put_cache(self, minions):
'''
published the given minions to the ConCache
'''
self.cupd_out.send(self.serial.dumps(minions)) | [
"def",
"put_cache",
"(",
"self",
",",
"minions",
")",
":",
"self",
".",
"cupd_out",
".",
"send",
"(",
"self",
".",
"serial",
".",
"dumps",
"(",
"minions",
")",
")"
] | published the given minions to the ConCache | [
"published",
"the",
"given",
"minions",
"to",
"the",
"ConCache"
] | python | train |
openvax/pepdata | pepdata/iedb/alleles.py | https://github.com/openvax/pepdata/blob/2f1bad79f8084545227f4a7f895bbf08a6fb6fdc/pepdata/iedb/alleles.py#L25-L31 | def local_path(force_download=False):
"""Downloads allele database from IEDB, returns local path to XML file."""
return cache.fetch(
filename=ALLELE_XML_FILENAME,
url=ALLELE_XML_URL,
decompress=ALLELE_XML_DECOMPRESS,
force=force_download) | [
"def",
"local_path",
"(",
"force_download",
"=",
"False",
")",
":",
"return",
"cache",
".",
"fetch",
"(",
"filename",
"=",
"ALLELE_XML_FILENAME",
",",
"url",
"=",
"ALLELE_XML_URL",
",",
"decompress",
"=",
"ALLELE_XML_DECOMPRESS",
",",
"force",
"=",
"force_downlo... | Downloads allele database from IEDB, returns local path to XML file. | [
"Downloads",
"allele",
"database",
"from",
"IEDB",
"returns",
"local",
"path",
"to",
"XML",
"file",
"."
] | python | train |
petl-developers/petl | petl/util/vis.py | https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/util/vis.py#L155-L162 | def lookstr(table, limit=0, **kwargs):
"""Like :func:`petl.util.vis.look` but use str() rather than repr() for data
values.
"""
kwargs['vrepr'] = str
return look(table, limit=limit, **kwargs) | [
"def",
"lookstr",
"(",
"table",
",",
"limit",
"=",
"0",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'vrepr'",
"]",
"=",
"str",
"return",
"look",
"(",
"table",
",",
"limit",
"=",
"limit",
",",
"*",
"*",
"kwargs",
")"
] | Like :func:`petl.util.vis.look` but use str() rather than repr() for data
values. | [
"Like",
":",
"func",
":",
"petl",
".",
"util",
".",
"vis",
".",
"look",
"but",
"use",
"str",
"()",
"rather",
"than",
"repr",
"()",
"for",
"data",
"values",
"."
] | python | train |
yandex/yandex-tank | yandextank/common/interfaces.py | https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/common/interfaces.py#L98-L102 | def publish(self, key, value):
"""publish value to status"""
self.log.debug(
"Publishing status: %s/%s: %s", self.__class__.__name__, key, value)
self.core.publish(self.__class__.__name__, key, value) | [
"def",
"publish",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Publishing status: %s/%s: %s\"",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"key",
",",
"value",
")",
"self",
".",
"core",
".",
"publ... | publish value to status | [
"publish",
"value",
"to",
"status"
] | python | test |
rosenbrockc/fortpy | fortpy/msg.py | https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/msg.py#L27-L40 | def printer(text, color=None, **kwargs):
"""Prints using color or standard print() depending on the value
of 'nocolor'.
"""
if nocolor:
# import sys
# sys.stdout.write(text + "" if ("end" in kwargs and kwargs["end"] == "") else '\n')
# sys.stdout.flush()
print(text, **kwargs)
else:
if color is None:
cprint(text, **kwargs)
else:
cprint(text, color, **kwargs) | [
"def",
"printer",
"(",
"text",
",",
"color",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"nocolor",
":",
"# import sys",
"# sys.stdout.write(text + \"\" if (\"end\" in kwargs and kwargs[\"end\"] == \"\") else '\\n')",
"# sys.stdout.flush()",
"print",
"(",
"text"... | Prints using color or standard print() depending on the value
of 'nocolor'. | [
"Prints",
"using",
"color",
"or",
"standard",
"print",
"()",
"depending",
"on",
"the",
"value",
"of",
"nocolor",
"."
] | python | train |
xapple/plumbing | plumbing/slurm/job.py | https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/slurm/job.py#L121-L136 | def set_paths(self, base_dir, script_path):
"""Set the directory, the script path and the outfile path"""
# Make absolute paths #
if 'change_dir' in self.kwargs:
self.kwargs['change_dir'] = DirectoryPath(os.path.abspath(self.kwargs['change_dir']))
if 'out_file' in self.kwargs:
self.kwargs['out_file'] = FilePath(os.path.abspath(self.kwargs['out_file']))
# In case there is a base directory #
if base_dir is not None:
self.base_dir = DirectoryPath(os.path.abspath(base_dir))
self.script_path = FilePath(base_dir + "run." + self.extensions[self.language])
self.kwargs['change_dir'] = base_dir
self.kwargs['out_file'] = FilePath(base_dir + "run.out")
# Other cases #
if base_dir is None and script_path is None: self.script_path = FilePath(new_temp_path())
if script_path is not None: self.script_path = FilePath(os.path.abspath(script_path)) | [
"def",
"set_paths",
"(",
"self",
",",
"base_dir",
",",
"script_path",
")",
":",
"# Make absolute paths #",
"if",
"'change_dir'",
"in",
"self",
".",
"kwargs",
":",
"self",
".",
"kwargs",
"[",
"'change_dir'",
"]",
"=",
"DirectoryPath",
"(",
"os",
".",
"path",
... | Set the directory, the script path and the outfile path | [
"Set",
"the",
"directory",
"the",
"script",
"path",
"and",
"the",
"outfile",
"path"
] | python | train |
MacHu-GWU/uszipcode-project | uszipcode/search.py | https://github.com/MacHu-GWU/uszipcode-project/blob/96282b779a3efb422802de83c48ca284598ba952/uszipcode/search.py#L616-L631 | def by_state(self,
state,
zipcode_type=ZipcodeType.Standard,
sort_by=SimpleZipcode.zipcode.name,
ascending=True,
returns=DEFAULT_LIMIT):
"""
Search zipcode information by fuzzy State name.
My engine use fuzzy match and guess what is the state you want.
"""
return self.query(
state=state,
sort_by=sort_by, zipcode_type=zipcode_type,
ascending=ascending, returns=returns,
) | [
"def",
"by_state",
"(",
"self",
",",
"state",
",",
"zipcode_type",
"=",
"ZipcodeType",
".",
"Standard",
",",
"sort_by",
"=",
"SimpleZipcode",
".",
"zipcode",
".",
"name",
",",
"ascending",
"=",
"True",
",",
"returns",
"=",
"DEFAULT_LIMIT",
")",
":",
"retur... | Search zipcode information by fuzzy State name.
My engine use fuzzy match and guess what is the state you want. | [
"Search",
"zipcode",
"information",
"by",
"fuzzy",
"State",
"name",
"."
] | python | train |
fhamborg/news-please | newsplease/helper_classes/url_extractor.py | https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L68-L107 | def get_sitemap_url(url, allow_subdomains):
"""
Determines the domain's robot.txt
:param str url: the url to work on
:param bool allow_subdomains: Determines if the robot.txt may be the
subdomain's
:return: the robot.txt's address
:raises Exception: if there's no robot.txt on the site's domain
"""
if allow_subdomains:
redirect = UrlExtractor.follow_redirects(
"http://" + UrlExtractor.get_allowed_domain(url)
)
else:
redirect = UrlExtractor.follow_redirects(
"http://" +
UrlExtractor.get_allowed_domain(url, False)
)
redirect = UrlExtractor.follow_redirects(url)
# Get robots.txt
parsed = urlparse(redirect)
if allow_subdomains:
url_netloc = parsed.netloc
else:
url_netloc = UrlExtractor.get_allowed_domain(
parsed.netloc, False)
robots = '{url.scheme}://{url_netloc}/robots.txt'.format(
url=parsed, url_netloc=url_netloc)
try:
urllib2.urlopen(robots)
return robots
except:
if allow_subdomains:
return UrlExtractor.get_sitemap_url(url, False)
else:
raise Exception('Fatal: no robots.txt found.') | [
"def",
"get_sitemap_url",
"(",
"url",
",",
"allow_subdomains",
")",
":",
"if",
"allow_subdomains",
":",
"redirect",
"=",
"UrlExtractor",
".",
"follow_redirects",
"(",
"\"http://\"",
"+",
"UrlExtractor",
".",
"get_allowed_domain",
"(",
"url",
")",
")",
"else",
":... | Determines the domain's robot.txt
:param str url: the url to work on
:param bool allow_subdomains: Determines if the robot.txt may be the
subdomain's
:return: the robot.txt's address
:raises Exception: if there's no robot.txt on the site's domain | [
"Determines",
"the",
"domain",
"s",
"robot",
".",
"txt"
] | python | train |
openstack/networking-cisco | networking_cisco/plugins/cisco/device_manager/config.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/device_manager/config.py#L148-L199 | def verify_resource_dict(res_dict, is_create, attr_info):
"""Verifies required attributes are in resource dictionary, res_dict.
Also checking that an attribute is only specified if it is allowed
for the given operation (create/update).
Attribute with default values are considered to be optional.
This function contains code taken from function 'prepare_request_body' in
attributes.py.
"""
if ((bc.NEUTRON_VERSION >= bc.NEUTRON_NEWTON_VERSION) and 'tenant_id'
in res_dict):
res_dict['project_id'] = res_dict['tenant_id']
if is_create: # POST
for attr, attr_vals in six.iteritems(attr_info):
if attr_vals['allow_post']:
if 'default' not in attr_vals and attr not in res_dict:
msg = _("Failed to parse request. Required attribute '%s' "
"not specified") % attr
raise webob.exc.HTTPBadRequest(msg)
res_dict[attr] = res_dict.get(attr, attr_vals.get('default'))
else:
if attr in res_dict:
msg = _("Attribute '%s' not allowed in POST") % attr
raise webob.exc.HTTPBadRequest(msg)
else: # PUT
for attr, attr_vals in six.iteritems(attr_info):
if attr in res_dict and not attr_vals['allow_put']:
msg = _("Cannot update read-only attribute %s") % attr
raise webob.exc.HTTPBadRequest(msg)
for attr, attr_vals in six.iteritems(attr_info):
if (attr not in res_dict or
res_dict[attr] is bc.constants.ATTR_NOT_SPECIFIED):
continue
# Convert values if necessary
if 'convert_to' in attr_vals:
res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
# Check that configured values are correct
if 'validate' not in attr_vals:
continue
for rule in attr_vals['validate']:
_ensure_format(rule, attr, res_dict)
res = bc.validators[rule](res_dict[attr],
attr_vals['validate'][rule])
if res:
msg_dict = dict(attr=attr, reason=res)
msg = (_("Invalid input for %(attr)s. Reason: %(reason)s.") %
msg_dict)
raise webob.exc.HTTPBadRequest(msg)
return res_dict | [
"def",
"verify_resource_dict",
"(",
"res_dict",
",",
"is_create",
",",
"attr_info",
")",
":",
"if",
"(",
"(",
"bc",
".",
"NEUTRON_VERSION",
">=",
"bc",
".",
"NEUTRON_NEWTON_VERSION",
")",
"and",
"'tenant_id'",
"in",
"res_dict",
")",
":",
"res_dict",
"[",
"'p... | Verifies required attributes are in resource dictionary, res_dict.
Also checking that an attribute is only specified if it is allowed
for the given operation (create/update).
Attribute with default values are considered to be optional.
This function contains code taken from function 'prepare_request_body' in
attributes.py. | [
"Verifies",
"required",
"attributes",
"are",
"in",
"resource",
"dictionary",
"res_dict",
"."
] | python | train |
mlperf/training | object_detection/pytorch/maskrcnn_benchmark/structures/boxlist_ops.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/object_detection/pytorch/maskrcnn_benchmark/structures/boxlist_ops.py#L102-L128 | def cat_boxlist(bboxes):
"""
Concatenates a list of BoxList (having the same image size) into a
single BoxList
Arguments:
bboxes (list[BoxList])
"""
assert isinstance(bboxes, (list, tuple))
assert all(isinstance(bbox, BoxList) for bbox in bboxes)
size = bboxes[0].size
assert all(bbox.size == size for bbox in bboxes)
mode = bboxes[0].mode
assert all(bbox.mode == mode for bbox in bboxes)
fields = set(bboxes[0].fields())
assert all(set(bbox.fields()) == fields for bbox in bboxes)
cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode)
for field in fields:
data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0)
cat_boxes.add_field(field, data)
return cat_boxes | [
"def",
"cat_boxlist",
"(",
"bboxes",
")",
":",
"assert",
"isinstance",
"(",
"bboxes",
",",
"(",
"list",
",",
"tuple",
")",
")",
"assert",
"all",
"(",
"isinstance",
"(",
"bbox",
",",
"BoxList",
")",
"for",
"bbox",
"in",
"bboxes",
")",
"size",
"=",
"bb... | Concatenates a list of BoxList (having the same image size) into a
single BoxList
Arguments:
bboxes (list[BoxList]) | [
"Concatenates",
"a",
"list",
"of",
"BoxList",
"(",
"having",
"the",
"same",
"image",
"size",
")",
"into",
"a",
"single",
"BoxList"
] | python | train |
globality-corp/microcosm-postgres | microcosm_postgres/store.py | https://github.com/globality-corp/microcosm-postgres/blob/43dd793b1fc9b84e4056700f350e79e0df5ff501/microcosm_postgres/store.py#L211-L217 | def _filter(self, query, **kwargs):
"""
Filter a query with user-supplied arguments.
"""
query = self._auto_filter(query, **kwargs)
return query | [
"def",
"_filter",
"(",
"self",
",",
"query",
",",
"*",
"*",
"kwargs",
")",
":",
"query",
"=",
"self",
".",
"_auto_filter",
"(",
"query",
",",
"*",
"*",
"kwargs",
")",
"return",
"query"
] | Filter a query with user-supplied arguments. | [
"Filter",
"a",
"query",
"with",
"user",
"-",
"supplied",
"arguments",
"."
] | python | train |
Cadasta/django-tutelary | tutelary/models.py | https://github.com/Cadasta/django-tutelary/blob/66bb05de7098777c0a383410c287bf48433cde87/tutelary/models.py#L363-L377 | def assign_user_policies(user, *policies_roles):
"""Assign a sequence of policies to a user (or the anonymous user is
``user`` is ``None``). (Also installed as ``assign_policies``
method on ``User`` model.
"""
clear_user_policies(user)
pset = PermissionSet.objects.by_policies_and_roles(policies_roles)
pset.refresh()
if user is None:
pset.anonymous_user = True
else:
pset.users.add(user)
pset.save()
cache.set(user_cache_key(user), None) | [
"def",
"assign_user_policies",
"(",
"user",
",",
"*",
"policies_roles",
")",
":",
"clear_user_policies",
"(",
"user",
")",
"pset",
"=",
"PermissionSet",
".",
"objects",
".",
"by_policies_and_roles",
"(",
"policies_roles",
")",
"pset",
".",
"refresh",
"(",
")",
... | Assign a sequence of policies to a user (or the anonymous user is
``user`` is ``None``). (Also installed as ``assign_policies``
method on ``User`` model. | [
"Assign",
"a",
"sequence",
"of",
"policies",
"to",
"a",
"user",
"(",
"or",
"the",
"anonymous",
"user",
"is",
"user",
"is",
"None",
")",
".",
"(",
"Also",
"installed",
"as",
"assign_policies",
"method",
"on",
"User",
"model",
"."
] | python | train |
materialsproject/pymatgen | pymatgen/io/abinit/pseudos.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/pseudos.py#L1786-L1797 | def print_table(self, stream=sys.stdout, filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some filter_function.
Args:
stream: file-like object
filter_function:
A filtering function that take a Pseudo as input and returns a boolean.
For example, setting filter_function = lambda p: p.Z_val > 2 will print
a periodic table containing only pseudos with Z_val > 2.
"""
print(self.to_table(filter_function=filter_function), file=stream) | [
"def",
"print_table",
"(",
"self",
",",
"stream",
"=",
"sys",
".",
"stdout",
",",
"filter_function",
"=",
"None",
")",
":",
"print",
"(",
"self",
".",
"to_table",
"(",
"filter_function",
"=",
"filter_function",
")",
",",
"file",
"=",
"stream",
")"
] | A pretty ASCII printer for the periodic table, based on some filter_function.
Args:
stream: file-like object
filter_function:
A filtering function that take a Pseudo as input and returns a boolean.
For example, setting filter_function = lambda p: p.Z_val > 2 will print
a periodic table containing only pseudos with Z_val > 2. | [
"A",
"pretty",
"ASCII",
"printer",
"for",
"the",
"periodic",
"table",
"based",
"on",
"some",
"filter_function",
"."
] | python | train |
dmlc/xgboost | python-package/xgboost/core.py | https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/core.py#L1072-L1087 | def set_param(self, params, value=None):
"""Set parameters into the Booster.
Parameters
----------
params: dict/list/str
list of key,value pairs, dict of key to value or simply str key
value: optional
value of the specified parameter, when params is str key
"""
if isinstance(params, Mapping):
params = params.items()
elif isinstance(params, STRING_TYPES) and value is not None:
params = [(params, value)]
for key, val in params:
_check_call(_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val)))) | [
"def",
"set_param",
"(",
"self",
",",
"params",
",",
"value",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"params",
",",
"Mapping",
")",
":",
"params",
"=",
"params",
".",
"items",
"(",
")",
"elif",
"isinstance",
"(",
"params",
",",
"STRING_TYPES",... | Set parameters into the Booster.
Parameters
----------
params: dict/list/str
list of key,value pairs, dict of key to value or simply str key
value: optional
value of the specified parameter, when params is str key | [
"Set",
"parameters",
"into",
"the",
"Booster",
"."
] | python | train |
akfullfo/taskforce | taskforce/utils.py | https://github.com/akfullfo/taskforce/blob/bc6dd744bd33546447d085dbd18a350532220193/taskforce/utils.py#L282-L307 | def module_description(module__name__, module__doc__, module__file__):
"""
Return formatted text that lists the module-level and class-level
embedded documentation. The function should be called exactly
as:
taskforce.utils.module_description(__name__, __doc__, __file__)
The most common use for this function is to produce the help
message for test code in a library module, which might look
something like:
if __name__ == "__main__":
import ns_utils, argparse
p = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=taskforce.utils.module_description(__name__, __doc__, __file__))
"""
mod_name = os.path.splitext(os.path.basename(module__file__))[0]
mod_desc = (lambda x: x + '\n' + '='*len(x) + '\n')('Module '+mod_name) if mod_name else ''
for name, obj in inspect.getmembers(sys.modules[module__name__]):
if inspect.isclass(obj) and '__doc__' in dir(obj) and obj.__doc__:
mod_desc += '\n' + (lambda x: x + '\n' + '-'*len(x) + '\n')('Class '+name)
mod_desc += obj.__doc__.lstrip()
return mod_desc | [
"def",
"module_description",
"(",
"module__name__",
",",
"module__doc__",
",",
"module__file__",
")",
":",
"mod_name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"module__file__",
")",
")",
"[",
"0",
"]",
"mod_de... | Return formatted text that lists the module-level and class-level
embedded documentation. The function should be called exactly
as:
taskforce.utils.module_description(__name__, __doc__, __file__)
The most common use for this function is to produce the help
message for test code in a library module, which might look
something like:
if __name__ == "__main__":
import ns_utils, argparse
p = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=taskforce.utils.module_description(__name__, __doc__, __file__)) | [
"Return",
"formatted",
"text",
"that",
"lists",
"the",
"module",
"-",
"level",
"and",
"class",
"-",
"level",
"embedded",
"documentation",
".",
"The",
"function",
"should",
"be",
"called",
"exactly",
"as",
":"
] | python | train |
EliotBerriot/django-dynamic-preferences | dynamic_preferences/managers.py | https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/managers.py#L47-L49 | def by_name(self):
"""Return a dictionary with preferences identifiers and values, but without the section name in the identifier"""
return {key.split(preferences_settings.SECTION_KEY_SEPARATOR)[-1]: value for key, value in self.all().items()} | [
"def",
"by_name",
"(",
"self",
")",
":",
"return",
"{",
"key",
".",
"split",
"(",
"preferences_settings",
".",
"SECTION_KEY_SEPARATOR",
")",
"[",
"-",
"1",
"]",
":",
"value",
"for",
"key",
",",
"value",
"in",
"self",
".",
"all",
"(",
")",
".",
"items... | Return a dictionary with preferences identifiers and values, but without the section name in the identifier | [
"Return",
"a",
"dictionary",
"with",
"preferences",
"identifiers",
"and",
"values",
"but",
"without",
"the",
"section",
"name",
"in",
"the",
"identifier"
] | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.