repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
edelbluth/blackred | src/blackred/blackred.py | https://github.com/edelbluth/blackred/blob/57a655e4d4eca60ce16e7b338079355049a87b49/src/blackred/blackred.py#L245-L262 | def _encode_item(self, item: str) -> str:
"""
If anonymization is on, an item gets salted and hashed here.
:param str item:
:return: Hashed item, if anonymization is on; the unmodified item otherwise
:rtype: str
"""
assert item is not None
if not self.__redis_conf['anonymization']:
return item
connection = self.__get_connection()
salt = connection.get(self.__redis_conf['salt_key'])
if salt is None:
salt = create_salt()
connection.set(self.__redis_conf['salt_key'], salt)
BlackRed.__release_connection(connection)
return sha512(salt + item.encode()).hexdigest() | [
"def",
"_encode_item",
"(",
"self",
",",
"item",
":",
"str",
")",
"->",
"str",
":",
"assert",
"item",
"is",
"not",
"None",
"if",
"not",
"self",
".",
"__redis_conf",
"[",
"'anonymization'",
"]",
":",
"return",
"item",
"connection",
"=",
"self",
".",
"__... | If anonymization is on, an item gets salted and hashed here.
:param str item:
:return: Hashed item, if anonymization is on; the unmodified item otherwise
:rtype: str | [
"If",
"anonymization",
"is",
"on",
"an",
"item",
"gets",
"salted",
"and",
"hashed",
"here",
"."
] | python | train |
google/grr | grr/server/grr_response_server/flows/general/audit.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flows/general/audit.py#L82-L89 | def _CurrentAuditLog():
"""Get the rdfurn of the current audit log."""
now_sec = rdfvalue.RDFDatetime.Now().AsSecondsSinceEpoch()
rollover_seconds = AUDIT_ROLLOVER_TIME.seconds
# This gives us a filename that only changes every
# AUDIT_ROLLOVER_TIfilME seconds, but is still a valid timestamp.
current_log = (now_sec // rollover_seconds) * rollover_seconds
return _AuditLogBase().Add(str(current_log)) | [
"def",
"_CurrentAuditLog",
"(",
")",
":",
"now_sec",
"=",
"rdfvalue",
".",
"RDFDatetime",
".",
"Now",
"(",
")",
".",
"AsSecondsSinceEpoch",
"(",
")",
"rollover_seconds",
"=",
"AUDIT_ROLLOVER_TIME",
".",
"seconds",
"# This gives us a filename that only changes every",
... | Get the rdfurn of the current audit log. | [
"Get",
"the",
"rdfurn",
"of",
"the",
"current",
"audit",
"log",
"."
] | python | train |
lesscpy/lesscpy | lesscpy/plib/call.py | https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/plib/call.py#L195-L204 | def round(self, value, *args):
""" Round number
args:
value (str): target
returns:
str
"""
n, u = utility.analyze_number(value)
return utility.with_unit(
int(utility.away_from_zero_round(float(n))), u) | [
"def",
"round",
"(",
"self",
",",
"value",
",",
"*",
"args",
")",
":",
"n",
",",
"u",
"=",
"utility",
".",
"analyze_number",
"(",
"value",
")",
"return",
"utility",
".",
"with_unit",
"(",
"int",
"(",
"utility",
".",
"away_from_zero_round",
"(",
"float"... | Round number
args:
value (str): target
returns:
str | [
"Round",
"number",
"args",
":",
"value",
"(",
"str",
")",
":",
"target",
"returns",
":",
"str"
] | python | valid |
spacetelescope/drizzlepac | drizzlepac/imgclasses.py | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imgclasses.py#L331-L338 | def get_wcs(self):
""" Helper method to return a list of all the input WCS objects associated
with this image.
"""
wcslist = []
for chip in self.chip_catalogs:
wcslist.append(self.chip_catalogs[chip]['wcs'])
return wcslist | [
"def",
"get_wcs",
"(",
"self",
")",
":",
"wcslist",
"=",
"[",
"]",
"for",
"chip",
"in",
"self",
".",
"chip_catalogs",
":",
"wcslist",
".",
"append",
"(",
"self",
".",
"chip_catalogs",
"[",
"chip",
"]",
"[",
"'wcs'",
"]",
")",
"return",
"wcslist"
] | Helper method to return a list of all the input WCS objects associated
with this image. | [
"Helper",
"method",
"to",
"return",
"a",
"list",
"of",
"all",
"the",
"input",
"WCS",
"objects",
"associated",
"with",
"this",
"image",
"."
] | python | train |
Kitware/tangelo | tangelo/tangelo/pkgdata/plugin/watch/python/__init__.py | https://github.com/Kitware/tangelo/blob/470034ee9b3d7a01becc1ce5fddc7adc1d5263ef/tangelo/tangelo/pkgdata/plugin/watch/python/__init__.py#L151-L189 | def reload_recent_submodules(module, mtime=0, processed=[]):
"""
Recursively reload submodules which are more recent than a specified
timestamp. To be called from a thread that has acquired the import lock to
be thread safe.
:param module: the module name. The WatchList is checked for modules that
list this as a parent.
:param mtime: the latest module time known to this point.
:param processed: a list of modules that were processed (to avoid infinite
recursion).
:returns: True if any submodule was reloaded.
"""
if module.endswith(".py"):
module = module[:-3]
if module in processed:
return False
any_reloaded = False
for key in WatchList:
if WatchList[key]["parent"] == module:
reloaded = reload_recent_submodules(key, mtime, processed)
filemtime = module_getmtime(WatchList[key]["file"])
filemtime = latest_submodule_time(key, filemtime)
any_reloaded = any_reloaded or reloaded
if reloaded or filemtime > WatchList[key]["time"]:
WatchList[key]["time"] = filemtime
for second in WatchList:
if second != key and WatchList[second]["file"] == WatchList[key]["file"]:
WatchList[second]["time"] = filemtime
modkey = module_sys_modules_key(key)
if modkey:
try:
reload_including_local(sys.modules[modkey])
tangelo.log("Reloaded %s" % modkey)
except ImportError:
del sys.modules[modkey]
tangelo.log("Asking %s to reimport" % modkey)
any_reloaded = True
return any_reloaded | [
"def",
"reload_recent_submodules",
"(",
"module",
",",
"mtime",
"=",
"0",
",",
"processed",
"=",
"[",
"]",
")",
":",
"if",
"module",
".",
"endswith",
"(",
"\".py\"",
")",
":",
"module",
"=",
"module",
"[",
":",
"-",
"3",
"]",
"if",
"module",
"in",
... | Recursively reload submodules which are more recent than a specified
timestamp. To be called from a thread that has acquired the import lock to
be thread safe.
:param module: the module name. The WatchList is checked for modules that
list this as a parent.
:param mtime: the latest module time known to this point.
:param processed: a list of modules that were processed (to avoid infinite
recursion).
:returns: True if any submodule was reloaded. | [
"Recursively",
"reload",
"submodules",
"which",
"are",
"more",
"recent",
"than",
"a",
"specified",
"timestamp",
".",
"To",
"be",
"called",
"from",
"a",
"thread",
"that",
"has",
"acquired",
"the",
"import",
"lock",
"to",
"be",
"thread",
"safe",
"."
] | python | train |
fracpete/python-weka-wrapper3 | python/weka/flow/base.py | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/base.py#L99-L123 | def unique_name(self, name):
"""
Generates a unique name.
:param name: the name to check
:type name: str
:return: the unique name
:rtype: str
"""
result = name
if self.parent is not None:
index = self.index
bname = re.sub(r'-[0-9]+$', '', name)
names = []
for idx, actor in enumerate(self.parent.actors):
if idx != index:
names.append(actor.name)
result = bname
count = 0
while result in names:
count += 1
result = bname + "-" + str(count)
return result | [
"def",
"unique_name",
"(",
"self",
",",
"name",
")",
":",
"result",
"=",
"name",
"if",
"self",
".",
"parent",
"is",
"not",
"None",
":",
"index",
"=",
"self",
".",
"index",
"bname",
"=",
"re",
".",
"sub",
"(",
"r'-[0-9]+$'",
",",
"''",
",",
"name",
... | Generates a unique name.
:param name: the name to check
:type name: str
:return: the unique name
:rtype: str | [
"Generates",
"a",
"unique",
"name",
"."
] | python | train |
hyperledger/indy-plenum | plenum/common/ledger_uncommitted_tracker.py | https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/common/ledger_uncommitted_tracker.py#L45-L60 | def reject_batch(self):
"""
Return hash reverting for and calculate count of reverted txns
:return: root_hash, for reverting to (needed in revertToHead method) and count of reverted txns
"""
prev_size = 0
if len(self.un_committed) == 0:
raise LogicError("No items to return")
if len(self.un_committed) > 0:
_, _, prev_size = self.un_committed.pop()
if len(self.un_committed) == 0:
committed_hash, committed_root, committed_size = self.last_committed
return committed_hash, committed_root, prev_size - committed_size
else:
lhash, ltxn_root, lsize = self.un_committed[-1]
return lhash, ltxn_root, prev_size - lsize | [
"def",
"reject_batch",
"(",
"self",
")",
":",
"prev_size",
"=",
"0",
"if",
"len",
"(",
"self",
".",
"un_committed",
")",
"==",
"0",
":",
"raise",
"LogicError",
"(",
"\"No items to return\"",
")",
"if",
"len",
"(",
"self",
".",
"un_committed",
")",
">",
... | Return hash reverting for and calculate count of reverted txns
:return: root_hash, for reverting to (needed in revertToHead method) and count of reverted txns | [
"Return",
"hash",
"reverting",
"for",
"and",
"calculate",
"count",
"of",
"reverted",
"txns",
":",
"return",
":",
"root_hash",
"for",
"reverting",
"to",
"(",
"needed",
"in",
"revertToHead",
"method",
")",
"and",
"count",
"of",
"reverted",
"txns"
] | python | train |
inveniosoftware/invenio-files-rest | invenio_files_rest/models.py | https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/models.py#L102-L110 | def as_object_version(value):
"""Get an object version object from an object version ID or an object version.
:param value: A :class:`invenio_files_rest.models.ObjectVersion` or an
object version ID.
:returns: A :class:`invenio_files_rest.models.ObjectVersion` instance.
"""
return value if isinstance(value, ObjectVersion) \
else ObjectVersion.query.filter_by(version_id=value).one_or_none() | [
"def",
"as_object_version",
"(",
"value",
")",
":",
"return",
"value",
"if",
"isinstance",
"(",
"value",
",",
"ObjectVersion",
")",
"else",
"ObjectVersion",
".",
"query",
".",
"filter_by",
"(",
"version_id",
"=",
"value",
")",
".",
"one_or_none",
"(",
")"
] | Get an object version object from an object version ID or an object version.
:param value: A :class:`invenio_files_rest.models.ObjectVersion` or an
object version ID.
:returns: A :class:`invenio_files_rest.models.ObjectVersion` instance. | [
"Get",
"an",
"object",
"version",
"object",
"from",
"an",
"object",
"version",
"ID",
"or",
"an",
"object",
"version",
"."
] | python | train |
log2timeline/plaso | plaso/filters/file_entry.py | https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/filters/file_entry.py#L147-L172 | def Print(self, output_writer):
"""Prints a human readable version of the filter.
Args:
output_writer (CLIOutputWriter): output writer.
"""
if self._date_time_ranges:
for date_time_range in self._date_time_ranges:
if date_time_range.start_date_time is None:
end_time_string = date_time_range.end_date_time.CopyToDateTimeString()
output_writer.Write('\t{0:s} after {1:s}\n'.format(
date_time_range.time_value, end_time_string))
elif date_time_range.end_date_time is None:
start_time_string = (
date_time_range.start_date_time.CopyToDateTimeString())
output_writer.Write('\t{0:s} before {1:s}\n'.format(
date_time_range.time_value, start_time_string))
else:
start_time_string = (
date_time_range.start_date_time.CopyToDateTimeString())
end_time_string = date_time_range.end_date_time.CopyToDateTimeString()
output_writer.Write('\t{0:s} between {1:s} and {2:s}\n'.format(
date_time_range.time_value, start_time_string,
end_time_string)) | [
"def",
"Print",
"(",
"self",
",",
"output_writer",
")",
":",
"if",
"self",
".",
"_date_time_ranges",
":",
"for",
"date_time_range",
"in",
"self",
".",
"_date_time_ranges",
":",
"if",
"date_time_range",
".",
"start_date_time",
"is",
"None",
":",
"end_time_string"... | Prints a human readable version of the filter.
Args:
output_writer (CLIOutputWriter): output writer. | [
"Prints",
"a",
"human",
"readable",
"version",
"of",
"the",
"filter",
"."
] | python | train |
saltstack/salt | salt/states/boto_sns.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_sns.py#L210-L287 | def absent(
name,
region=None,
key=None,
keyid=None,
profile=None,
unsubscribe=False):
'''
Ensure the named sns topic is deleted.
name
Name of the SNS topic.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
unsubscribe
If True, unsubscribe all subcriptions to the SNS topic before
deleting the SNS topic
.. versionadded:: 2016.11.0
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
is_present = __salt__['boto_sns.exists'](
name, region=region, key=key, keyid=keyid, profile=profile
)
if is_present:
subscriptions = __salt__['boto_sns.get_all_subscriptions_by_topic'](
name, region=region, key=key, keyid=keyid, profile=profile
) if unsubscribe else []
failed_unsubscribe_subscriptions = []
if __opts__.get('test'):
ret['comment'] = (
'AWS SNS topic {0} is set to be removed. '
'{1} subscription(s) will be removed.'.format(name, len(subscriptions))
)
ret['result'] = None
return ret
for subscription in subscriptions:
unsubscribed = __salt__['boto_sns.unsubscribe'](
name, subscription['SubscriptionArn'], region=region,
key=key, keyid=keyid, profile=profile
)
if unsubscribed is False:
failed_unsubscribe_subscriptions.append(subscription)
deleted = __salt__['boto_sns.delete'](
name, region=region, key=key, keyid=keyid, profile=profile)
if deleted:
ret['comment'] = 'AWS SNS topic {0} deleted.'.format(name)
ret['changes']['new'] = None
if unsubscribe is False:
ret['changes']['old'] = {'topic': name}
else:
ret['changes']['old'] = {'topic': name, 'subscriptions': subscriptions}
if failed_unsubscribe_subscriptions:
ret['changes']['new'] = {'subscriptions': failed_unsubscribe_subscriptions}
else:
ret['result'] = False
ret['comment'] = 'Failed to delete {0} AWS SNS topic.'.format(name)
else:
ret['comment'] = 'AWS SNS topic {0} does not exist.'.format(name)
return ret | [
"def",
"absent",
"(",
"name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"unsubscribe",
"=",
"False",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"... | Ensure the named sns topic is deleted.
name
Name of the SNS topic.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
unsubscribe
If True, unsubscribe all subcriptions to the SNS topic before
deleting the SNS topic
.. versionadded:: 2016.11.0 | [
"Ensure",
"the",
"named",
"sns",
"topic",
"is",
"deleted",
"."
] | python | train |
toastdriven/restless | restless/fl.py | https://github.com/toastdriven/restless/blob/661593b7b43c42d1bc508dec795356297991255e/restless/fl.py#L59-L82 | def build_endpoint_name(cls, name, endpoint_prefix=None):
"""
Given a ``name`` & an optional ``endpoint_prefix``, this generates a name
for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param endpoint_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blogpost_list``
:type endpoint_prefix: string
:returns: The final name
:rtype: string
"""
if endpoint_prefix is None:
endpoint_prefix = 'api_{}'.format(
cls.__name__.replace('Resource', '').lower()
)
endpoint_prefix = endpoint_prefix.rstrip('_')
return '_'.join([endpoint_prefix, name]) | [
"def",
"build_endpoint_name",
"(",
"cls",
",",
"name",
",",
"endpoint_prefix",
"=",
"None",
")",
":",
"if",
"endpoint_prefix",
"is",
"None",
":",
"endpoint_prefix",
"=",
"'api_{}'",
".",
"format",
"(",
"cls",
".",
"__name__",
".",
"replace",
"(",
"'Resource'... | Given a ``name`` & an optional ``endpoint_prefix``, this generates a name
for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param endpoint_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blogpost_list``
:type endpoint_prefix: string
:returns: The final name
:rtype: string | [
"Given",
"a",
"name",
"&",
"an",
"optional",
"endpoint_prefix",
"this",
"generates",
"a",
"name",
"for",
"a",
"URL",
"."
] | python | train |
shoppimon/figcan | figcan/figcan.py | https://github.com/shoppimon/figcan/blob/bdfa59ceed33277c060fc009fbf44c41b9852681/figcan/figcan.py#L133-L143 | def _create_flat_pointers(dct, key_stack=()):
# type: (Dict[str, Any], Tuple[str, ...]) -> Generator[Tuple[Tuple[str, ...], Dict[str, Any], str], None, None]
"""Create a flattened dictionary of "key stacks" -> (value container, key)
"""
for k in dct.keys():
current_key = key_stack + (k,)
if isinstance(dct[k], BaseMapping):
for flat_ptr in _create_flat_pointers(dct[k], current_key):
yield flat_ptr
else:
yield (current_key, dct, k) | [
"def",
"_create_flat_pointers",
"(",
"dct",
",",
"key_stack",
"=",
"(",
")",
")",
":",
"# type: (Dict[str, Any], Tuple[str, ...]) -> Generator[Tuple[Tuple[str, ...], Dict[str, Any], str], None, None]",
"for",
"k",
"in",
"dct",
".",
"keys",
"(",
")",
":",
"current_key",
"=... | Create a flattened dictionary of "key stacks" -> (value container, key) | [
"Create",
"a",
"flattened",
"dictionary",
"of",
"key",
"stacks",
"-",
">",
"(",
"value",
"container",
"key",
")"
] | python | train |
gsi-upm/soil | soil/history.py | https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/soil/history.py#L114-L123 | def flush_cache(self):
'''
Use a cache to save state changes to avoid opening a session for every change.
The cache will be flushed at the end of the simulation, and when history is accessed.
'''
logger.debug('Flushing cache {}'.format(self.db_path))
with self.db:
for rec in self._tups:
self.db.execute("replace into history(agent_id, t_step, key, value) values (?, ?, ?, ?)", (rec.agent_id, rec.t_step, rec.key, rec.value))
self._tups = list() | [
"def",
"flush_cache",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Flushing cache {}'",
".",
"format",
"(",
"self",
".",
"db_path",
")",
")",
"with",
"self",
".",
"db",
":",
"for",
"rec",
"in",
"self",
".",
"_tups",
":",
"self",
".",
"db",
... | Use a cache to save state changes to avoid opening a session for every change.
The cache will be flushed at the end of the simulation, and when history is accessed. | [
"Use",
"a",
"cache",
"to",
"save",
"state",
"changes",
"to",
"avoid",
"opening",
"a",
"session",
"for",
"every",
"change",
".",
"The",
"cache",
"will",
"be",
"flushed",
"at",
"the",
"end",
"of",
"the",
"simulation",
"and",
"when",
"history",
"is",
"acces... | python | train |
PiotrDabkowski/Js2Py | js2py/internals/code.py | https://github.com/PiotrDabkowski/Js2Py/blob/c0fa43f5679cf91ca8986c5747fcb07a433dc584/js2py/internals/code.py#L34-L36 | def emit(self, op_code, *args):
''' Adds op_code with specified args to tape '''
self.tape.append(OP_CODES[op_code](*args)) | [
"def",
"emit",
"(",
"self",
",",
"op_code",
",",
"*",
"args",
")",
":",
"self",
".",
"tape",
".",
"append",
"(",
"OP_CODES",
"[",
"op_code",
"]",
"(",
"*",
"args",
")",
")"
] | Adds op_code with specified args to tape | [
"Adds",
"op_code",
"with",
"specified",
"args",
"to",
"tape"
] | python | valid |
ActiveState/simplealchemy | simplealchemy.py | https://github.com/ActiveState/simplealchemy/blob/f745847793f57701776a804ec74791a1f6a66947/simplealchemy.py#L183-L196 | def __assert_field_mapping(self, mapping):
"""Assert that mapping.keys() == FIELDS.
The programmer is not supposed to pass extra/less number of fields
"""
passed_keys = set(mapping.keys())
class_fields = set(self.FIELDS)
if passed_keys != class_fields:
raise ValueError('\n'.join([
"{0} got different fields from expected".format(
self.__class__),
" got : {0}".format(list(sorted(passed_keys))),
" expected: {0}".format(list(sorted(class_fields)))])) | [
"def",
"__assert_field_mapping",
"(",
"self",
",",
"mapping",
")",
":",
"passed_keys",
"=",
"set",
"(",
"mapping",
".",
"keys",
"(",
")",
")",
"class_fields",
"=",
"set",
"(",
"self",
".",
"FIELDS",
")",
"if",
"passed_keys",
"!=",
"class_fields",
":",
"r... | Assert that mapping.keys() == FIELDS.
The programmer is not supposed to pass extra/less number of fields | [
"Assert",
"that",
"mapping",
".",
"keys",
"()",
"==",
"FIELDS",
"."
] | python | train |
justquick/django-activity-stream | actstream/managers.py | https://github.com/justquick/django-activity-stream/blob/a1e06f2e6429cc5fc321e7801440dd7c5b9d5a35/actstream/managers.py#L24-L30 | def actor(self, obj, **kwargs):
"""
Stream of most recent actions where obj is the actor.
Keyword arguments will be passed to Action.objects.filter
"""
check(obj)
return obj.actor_actions.public(**kwargs) | [
"def",
"actor",
"(",
"self",
",",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"check",
"(",
"obj",
")",
"return",
"obj",
".",
"actor_actions",
".",
"public",
"(",
"*",
"*",
"kwargs",
")"
] | Stream of most recent actions where obj is the actor.
Keyword arguments will be passed to Action.objects.filter | [
"Stream",
"of",
"most",
"recent",
"actions",
"where",
"obj",
"is",
"the",
"actor",
".",
"Keyword",
"arguments",
"will",
"be",
"passed",
"to",
"Action",
".",
"objects",
".",
"filter"
] | python | train |
kata198/AdvancedHTMLParser | AdvancedHTMLParser/Parser.py | https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L849-L860 | def feed(self, contents):
'''
feed - Feed contents. Use parseStr or parseFile instead.
@param contents - Contents
'''
contents = stripIEConditionals(contents)
try:
HTMLParser.feed(self, contents)
except MultipleRootNodeException:
self.reset()
HTMLParser.feed(self, "%s%s" %(addStartTag(contents, INVISIBLE_ROOT_TAG_START), INVISIBLE_ROOT_TAG_END)) | [
"def",
"feed",
"(",
"self",
",",
"contents",
")",
":",
"contents",
"=",
"stripIEConditionals",
"(",
"contents",
")",
"try",
":",
"HTMLParser",
".",
"feed",
"(",
"self",
",",
"contents",
")",
"except",
"MultipleRootNodeException",
":",
"self",
".",
"reset",
... | feed - Feed contents. Use parseStr or parseFile instead.
@param contents - Contents | [
"feed",
"-",
"Feed",
"contents",
".",
"Use",
"parseStr",
"or",
"parseFile",
"instead",
"."
] | python | train |
zhexiao/ezhost | ezhost/ServerCommon.py | https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L34-L44 | def common_update_sys(self):
"""
update system package
"""
try:
sudo('apt-get update -y --fix-missing')
except Exception as e:
print(e)
print(green('System package is up to date.'))
print() | [
"def",
"common_update_sys",
"(",
"self",
")",
":",
"try",
":",
"sudo",
"(",
"'apt-get update -y --fix-missing'",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"print",
"(",
"green",
"(",
"'System package is up to date.'",
")",
")",
"print... | update system package | [
"update",
"system",
"package"
] | python | train |
abe-winter/pg13-py | pg13/diff.py | https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/diff.py#L42-L61 | def seqingroups(groups,seq):
'helper for contigsub. takes the list of lists returned by groupelts and an array to check.\
returns (groupindex,indexingroup,matchlen) of longest match or None if no match'
if not (groups and seq): return None
bestmatch=None,None,0
if any(len(g)<2 for g in groups): raise ValueError('some subgroups have length < 2')
for i,g in filter(lambda x:x[1][0],enumerate(groups)): # i.e. we're only interested in groups with common elements
# begin starts at 0 so begin+1 starts at 1. (first elt of each group is the bool indicator)
begin=0
while 1:
try: begin=g.index(seq[0],begin+1)
except ValueError: break
jmax=min(len(g)-begin,len(seq))
for j in range(jmax):
if g[begin+j]!=seq[j]: break
else: j+=1 # so matchlen works below
matchlen=min(j,jmax)
if matchlen<bestmatch[2]: continue
bestmatch=[i,begin,matchlen] # note: begin is an offset including the initial bool
return bestmatch if bestmatch[2] else None | [
"def",
"seqingroups",
"(",
"groups",
",",
"seq",
")",
":",
"if",
"not",
"(",
"groups",
"and",
"seq",
")",
":",
"return",
"None",
"bestmatch",
"=",
"None",
",",
"None",
",",
"0",
"if",
"any",
"(",
"len",
"(",
"g",
")",
"<",
"2",
"for",
"g",
"in"... | helper for contigsub. takes the list of lists returned by groupelts and an array to check.\
returns (groupindex,indexingroup,matchlen) of longest match or None if no match | [
"helper",
"for",
"contigsub",
".",
"takes",
"the",
"list",
"of",
"lists",
"returned",
"by",
"groupelts",
"and",
"an",
"array",
"to",
"check",
".",
"\\",
"returns",
"(",
"groupindex",
"indexingroup",
"matchlen",
")",
"of",
"longest",
"match",
"or",
"None",
... | python | train |
TylerTemp/docpie | docpie/parser.py | https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/parser.py#L413-L476 | def parse_content(self, text):
"""parse section to formal format
raw_content: {title: section(with title)}. For `help` access.
formal_content: {title: section} but the section has been dedented
without title. For parse instance"""
raw_content = self.raw_content
raw_content.clear()
formal_collect = {}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
split = self.visible_empty_line_re.split(text)
except ValueError: # python >= 3.5
split = [text]
option_split_re = self.option_split_re
name = re.compile(re.escape(self.option_name), re.IGNORECASE)
for text in filter(lambda x: x and x.strip(), split):
# logger.warning('get options group:\n%r', text)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
split_options = option_split_re.split(text)
except ValueError: # python >= 3.5
continue
split_options.pop(0)
for title, section in zip(split_options[::2], split_options[1::2]):
prefix, end = name.split(title)
prefix = prefix.strip()
section = section.rstrip()
if end.endswith('\n'):
formal = section
else:
formal = ' ' * len(title) + section
formal_collect.setdefault(prefix, []).append(formal)
# logger.error((title, section))
if prefix in raw_content:
# TODO: better handling way?
if self.namedoptions:
log = logger.warning
else:
log = logger.debug
log('duplicated options section %s', prefix)
raw_content[prefix] += '\n%s%s' % (title, section)
else:
raw_content[prefix] = title + section
if formal_collect:
for each_title, values in formal_collect.items():
value = '\n'.join(map(textwrap.dedent, values))
formal_collect[each_title] = value
self.formal_content = formal_collect | [
"def",
"parse_content",
"(",
"self",
",",
"text",
")",
":",
"raw_content",
"=",
"self",
".",
"raw_content",
"raw_content",
".",
"clear",
"(",
")",
"formal_collect",
"=",
"{",
"}",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"warnings",
".",
... | parse section to formal format
raw_content: {title: section(with title)}. For `help` access.
formal_content: {title: section} but the section has been dedented
without title. For parse instance | [
"parse",
"section",
"to",
"formal",
"format",
"raw_content",
":",
"{",
"title",
":",
"section",
"(",
"with",
"title",
")",
"}",
".",
"For",
"help",
"access",
".",
"formal_content",
":",
"{",
"title",
":",
"section",
"}",
"but",
"the",
"section",
"has",
... | python | train |
UDST/urbansim | urbansim/models/regression.py | https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/regression.py#L644-L666 | def predict(self, data):
"""
Predict new data for each group in the segmentation.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must have a column with the
same name as `segmentation_col`.
Returns
-------
predicted : pandas.Series
Predicted data in a pandas Series. Will have the index of `data`
after applying filters and minus any groups that do not have
models.
"""
with log_start_finish(
'predicting models in group {}'.format(self.name), logger):
results = [self.models[name].predict(df)
for name, df in self._iter_groups(data)]
return pd.concat(results) | [
"def",
"predict",
"(",
"self",
",",
"data",
")",
":",
"with",
"log_start_finish",
"(",
"'predicting models in group {}'",
".",
"format",
"(",
"self",
".",
"name",
")",
",",
"logger",
")",
":",
"results",
"=",
"[",
"self",
".",
"models",
"[",
"name",
"]",... | Predict new data for each group in the segmentation.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must have a column with the
same name as `segmentation_col`.
Returns
-------
predicted : pandas.Series
Predicted data in a pandas Series. Will have the index of `data`
after applying filters and minus any groups that do not have
models. | [
"Predict",
"new",
"data",
"for",
"each",
"group",
"in",
"the",
"segmentation",
"."
] | python | train |
klahnakoski/pyLibrary | jx_base/query.py | https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_base/query.py#L705-L757 | def _where_terms(master, where, schema):
"""
USE THE SCHEMA TO CONVERT DIMENSION NAMES TO ES FILTERS
master - TOP LEVEL WHERE (FOR PLACING NESTED FILTERS)
"""
if is_data(where):
if where.term:
# MAP TERM
try:
output = _map_term_using_schema(master, [], where.term, schema.edges)
return output
except Exception as e:
Log.error("programmer problem?", e)
elif where.terms:
# MAP TERM
output = FlatList()
for k, v in where.terms.items():
if not is_container(v):
Log.error("terms filter expects list of values")
edge = schema.edges[k]
if not edge:
output.append({"terms": {k: v}})
else:
if is_text(edge):
# DIRECT FIELD REFERENCE
return {"terms": {edge: v}}
try:
domain = edge.getDomain()
except Exception as e:
Log.error("programmer error", e)
fields = domain.dimension.fields
if is_data(fields):
or_agg = []
for vv in v:
and_agg = []
for local_field, es_field in fields.items():
vvv = vv[local_field]
if vvv != None:
and_agg.append({"term": {es_field: vvv}})
or_agg.append({"and": and_agg})
output.append({"or": or_agg})
elif is_list(fields) and len(fields) == 1 and is_variable_name(fields[0]):
output.append({"terms": {fields[0]: v}})
elif domain.partitions:
output.append({"or": [domain.getPartByKey(vv).esfilter for vv in v]})
return {"and": output}
elif where["or"]:
return {"or": [unwrap(_where_terms(master, vv, schema)) for vv in where["or"]]}
elif where["and"]:
return {"and": [unwrap(_where_terms(master, vv, schema)) for vv in where["and"]]}
elif where["not"]:
return {"not": unwrap(_where_terms(master, where["not"], schema))}
return where | [
"def",
"_where_terms",
"(",
"master",
",",
"where",
",",
"schema",
")",
":",
"if",
"is_data",
"(",
"where",
")",
":",
"if",
"where",
".",
"term",
":",
"# MAP TERM",
"try",
":",
"output",
"=",
"_map_term_using_schema",
"(",
"master",
",",
"[",
"]",
",",... | USE THE SCHEMA TO CONVERT DIMENSION NAMES TO ES FILTERS
master - TOP LEVEL WHERE (FOR PLACING NESTED FILTERS) | [
"USE",
"THE",
"SCHEMA",
"TO",
"CONVERT",
"DIMENSION",
"NAMES",
"TO",
"ES",
"FILTERS",
"master",
"-",
"TOP",
"LEVEL",
"WHERE",
"(",
"FOR",
"PLACING",
"NESTED",
"FILTERS",
")"
] | python | train |
def refresh_actions(self):
    """Rebuild the plugin's options menu from its current action list."""
    self.options_menu.clear()
    # The trailing menu entries depend on docking state: a docked plugin
    # offers undock/close, an undocked one offers re-dock.
    if self.undocked_window is None:
        extra = [MENU_SEPARATOR, self.undock_action, self.close_plugin_action]
    else:
        extra = [MENU_SEPARATOR, self.dock_action]
    self.plugin_actions = self.get_plugin_actions() + extra
    add_actions(self.options_menu, self.plugin_actions)
add_actions(self.options_menu, self.plugin_actions) | [
"def",
"refresh_actions",
"(",
"self",
")",
":",
"self",
".",
"options_menu",
".",
"clear",
"(",
")",
"# Decide what additional actions to show",
"if",
"self",
".",
"undocked_window",
"is",
"None",
":",
"additional_actions",
"=",
"[",
"MENU_SEPARATOR",
",",
"self"... | Create options menu. | [
"Create",
"options",
"menu",
"."
] | python | train |
def add_ip_range(self, id_environment, id_ip_config):
    """Associate an environment with an IP configuration.

    :param id_environment: Environment ID.
    :param id_ip_config: IP Configuration ID.
    :return: Dictionary in the form
        {'config_do_ambiente': {'id_config_do_ambiente': < id_config_do_ambiente >}}
    :raise InvalidParameterError: Some parameter was invalid.
    :raise ConfigEnvironmentDuplicateError: Error saving duplicate Environment Configuration.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    payload = {
        'id_environment': id_environment,
        'id_ip_config': id_ip_config,
    }
    code, xml = self.submit(
        {'ambiente': payload}, 'POST', 'ipconfig/')
    return self.response(code, xml)
"def",
"add_ip_range",
"(",
"self",
",",
"id_environment",
",",
"id_ip_config",
")",
":",
"environment_map",
"=",
"dict",
"(",
")",
"environment_map",
"[",
"'id_environment'",
"]",
"=",
"id_environment",
"environment_map",
"[",
"'id_ip_config'",
"]",
"=",
"id_ip_c... | Makes relationship of environment with ip config and returns your id.
:param id_environment: Environment ID.
:param id_ip_config: IP Configuration ID.
:return: Following dictionary:
{'config_do_ambiente': {'id_config_do_ambiente': < id_config_do_ambiente >}}
:raise InvalidParameterError: Some parameter was invalid.
:raise ConfigEnvironmentDuplicateError: Error saving duplicate Environment Configuration.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. | [
"Makes",
"relationship",
"of",
"environment",
"with",
"ip",
"config",
"and",
"returns",
"your",
"id",
"."
] | python | train |
def clear_line(mode=2):
    '''Erase text on the current line without moving the cursor.

    Arguments:
        mode: 0 | 'forward'  | 'right' - clear from cursor to end of line.
              1 | 'backward' | 'left'  - clear from cursor to start of line.
              2 | 'full'               - clear the entire line.
    Returns:
        The escape sequence that was written out.
    '''
    # Translate a symbolic mode name to its numeric code; unknown values
    # pass through unchanged.
    sequence = sc.erase_line(_mode_map.get(mode, mode))
    _write(sequence)
    return sequence
"def",
"clear_line",
"(",
"mode",
"=",
"2",
")",
":",
"text",
"=",
"sc",
".",
"erase_line",
"(",
"_mode_map",
".",
"get",
"(",
"mode",
",",
"mode",
")",
")",
"_write",
"(",
"text",
")",
"return",
"text"
] | Clear the current line.
Arguments:
mode: | 0 | 'forward' | 'right' - Clear cursor to end of line.
| 1 | 'backward' | 'left' - Clear cursor to beginning of line.
| 2 | 'full' - Clear entire line.
Note:
Cursor position does not change. | [
"Clear",
"the",
"current",
"line",
"."
] | python | train |
def is_prev_free(self):
    """Report whether the previous chunk is free, concretizing the P flag.

    The P flag is read from the low bits of this chunk's size field. If it
    is symbolic with multiple solutions, a warning is logged and the minimum
    solution is used (i.e. the flag is assumed unset).

    :returns: True if the previous chunk is free; False otherwise
    """
    size_field = self.state.memory.load(self.base + self._chunk_size_t_size,
                                        self._chunk_size_t_size)
    flag = size_field & CHUNK_P_MASK

    def handle_symbolic(bits):
        l.warning("A chunk's P flag is symbolic; assuming it is not set")
        return self.state.solver.min_int(bits)

    flag = concretize(flag, self.state.solver, handle_symbolic)
    return not flag
"def",
"is_prev_free",
"(",
"self",
")",
":",
"flag",
"=",
"self",
".",
"state",
".",
"memory",
".",
"load",
"(",
"self",
".",
"base",
"+",
"self",
".",
"_chunk_size_t_size",
",",
"self",
".",
"_chunk_size_t_size",
")",
"&",
"CHUNK_P_MASK",
"def",
"sym_f... | Returns a concrete state of the flag indicating whether the previous chunk is free or not. Issues a warning if
that flag is symbolic and has multiple solutions, and then assumes that the previous chunk is free.
:returns: True if the previous chunk is free; False otherwise | [
"Returns",
"a",
"concrete",
"state",
"of",
"the",
"flag",
"indicating",
"whether",
"the",
"previous",
"chunk",
"is",
"free",
"or",
"not",
".",
"Issues",
"a",
"warning",
"if",
"that",
"flag",
"is",
"symbolic",
"and",
"has",
"multiple",
"solutions",
"and",
"... | python | train |
def block_header_to_hex( block_data, prev_hash ):
    """Serialize a block header to its hex form.

    :param block_data: dict as returned by bitcoind's getblock RPC call.
    :param prev_hash: hash of the previous block (supplied separately since
        it is keyed differently in the getblock payload).
    :return: hex string of the serialized block header.
    """
    return block_header_serialize({
        "version": block_data['version'],
        "prevhash": prev_hash,
        "merkle_root": block_data['merkleroot'],
        "timestamp": block_data['time'],
        "bits": int(block_data['bits'], 16),
        "nonce": block_data['nonce'],
        "hash": block_data['hash'],
    })
"def",
"block_header_to_hex",
"(",
"block_data",
",",
"prev_hash",
")",
":",
"header_info",
"=",
"{",
"\"version\"",
":",
"block_data",
"[",
"'version'",
"]",
",",
"\"prevhash\"",
":",
"prev_hash",
",",
"\"merkle_root\"",
":",
"block_data",
"[",
"'merkleroot'",
... | Calculate the hex form of a block's header, given its getblock information from bitcoind. | [
"Calculate",
"the",
"hex",
"form",
"of",
"a",
"block",
"s",
"header",
"given",
"its",
"getblock",
"information",
"from",
"bitcoind",
"."
] | python | train |
def make_pkgng_aware(jname):
    '''
    Make jail ``jname`` pkgng aware

    CLI Example:

    .. code-block:: bash

        salt '*' poudriere.make_pkgng_aware <jail name>
    '''
    ret = {'changes': {}}
    cdir = _config_dir()

    # Make sure the poudriere make-file directory exists before writing.
    if not os.path.isdir(cdir):
        os.makedirs(cdir)
    if not os.path.isdir(cdir):
        return 'Could not create or find required directory {0}'.format(
            cdir)
    ret['changes'] = 'Created poudriere make file dir {0}'.format(cdir)

    # Write the per-jail make.conf flag that switches the build to pkgng.
    __salt__['file.write']('{0}-make.conf'.format(os.path.join(cdir, jname)),
                           'WITH_PKGNG=yes')

    if not os.path.isfile(os.path.join(cdir, jname) + '-make.conf'):
        return 'Looks like file {0} could not be created'.format(
            os.path.join(cdir, jname + '-make.conf')
        )
    ret['changes'] = 'Created {0}'.format(
        os.path.join(cdir, '{0}-make.conf'.format(jname))
    )
    return ret
"def",
"make_pkgng_aware",
"(",
"jname",
")",
":",
"ret",
"=",
"{",
"'changes'",
":",
"{",
"}",
"}",
"cdir",
"=",
"_config_dir",
"(",
")",
"# ensure cdir is there",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"cdir",
")",
":",
"os",
".",
"make... | Make jail ``jname`` pkgng aware
CLI Example:
.. code-block:: bash
salt '*' poudriere.make_pkgng_aware <jail name> | [
"Make",
"jail",
"jname",
"pkgng",
"aware"
] | python | train |
async def seek(self, pos, *, device: Optional[SomeDevice] = None):
    """Move playback of the currently playing track to a new position.

    Parameters
    ----------
    pos : int
        Position in milliseconds to seek to; must be a positive number.
        A value past the end of the track makes the player start the
        next song.
    device : Optional[:obj:`SomeDevice`]
        The Device object or id this command targets. If not supplied,
        the user's currently active device is the target.
    """
    target = str(device)
    await self._user.http.seek_playback(pos, device_id=target)
"async",
"def",
"seek",
"(",
"self",
",",
"pos",
",",
"*",
",",
"device",
":",
"Optional",
"[",
"SomeDevice",
"]",
"=",
"None",
")",
":",
"await",
"self",
".",
"_user",
".",
"http",
".",
"seek_playback",
"(",
"pos",
",",
"device_id",
"=",
"str",
"(... | Seeks to the given position in the user’s currently playing track.
Parameters
----------
pos : int
The position in milliseconds to seek to.
Must be a positive number.
Passing in a position that is greater than the length of the track will cause the player to start playing the next song.
device : Optional[:obj:`SomeDevice`]
The Device object or id of the device this command is targeting.
If not supplied, the user’s currently active device is the target. | [
"Seeks",
"to",
"the",
"given",
"position",
"in",
"the",
"user’s",
"currently",
"playing",
"track",
"."
] | python | test |
def _validate_filenames(self):
    """Ensure the f_* parameters are compatible with ``dirname``.

    ``dirname`` only makes sense when every f_* target is a plain file
    name (a string); combining it with file-like objects is an error.
    """
    if not self.dirname:
        return

    def non_string(candidate):
        # Truthy non-strings are file-like objects or similar.
        return candidate and not isinstance(candidate, str)

    targets = (self.f_optimizer, self.f_params, self.f_history, self.f_pickle)
    if any(non_string(t) for t in targets):
        raise SkorchException(
            'dirname can only be used when f_* are strings')
"def",
"_validate_filenames",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"dirname",
":",
"return",
"def",
"_is_truthy_and_not_str",
"(",
"f",
")",
":",
"return",
"f",
"and",
"not",
"isinstance",
"(",
"f",
",",
"str",
")",
"if",
"(",
"_is_truthy_and... | Checks if passed filenames are valid.
Specifically, f_* parameter should not be passed in
conjunction with dirname. | [
"Checks",
"if",
"passed",
"filenames",
"are",
"valid",
"."
] | python | train |
def getone(self, key):
    """Return the single value stored under *key*.

    Raises KeyError when the key is absent or maps to more than one
    value.
    """
    matches = self.getall(key)
    if len(matches) > 1:
        raise KeyError('Multiple values match %r: %r' % (key, matches))
    if not matches:
        raise KeyError('Key not found: %r' % key)
    return matches[0]
"def",
"getone",
"(",
"self",
",",
"key",
")",
":",
"v",
"=",
"self",
".",
"getall",
"(",
"key",
")",
"if",
"not",
"v",
":",
"raise",
"KeyError",
"(",
"'Key not found: %r'",
"%",
"key",
")",
"if",
"len",
"(",
"v",
")",
">",
"1",
":",
"raise",
"... | Get one value matching the key, raising a KeyError if multiple
values were found. | [
"Get",
"one",
"value",
"matching",
"the",
"key",
"raising",
"a",
"KeyError",
"if",
"multiple",
"values",
"were",
"found",
"."
] | python | train |
def parse(self, input_text, syncmap):
    """
    Read from SMIL file.

    Limitations:
    1. parses only ``<par>`` elements, in order
    2. timings must have ``hh:mm:ss.mmm`` or ``ss.mmm`` format (autodetected)
    3. both ``clipBegin`` and ``clipEnd`` attributes of ``<audio>`` must be populated
    """
    from lxml import etree
    smil_ns = "{http://www.w3.org/ns/SMIL}"

    def to_time(value):
        # "hh:mm:ss.mmm" when a colon is present, plain "ss.mmm" otherwise.
        if ":" in value:
            return gf.time_from_hhmmssmmm(value)
        return gf.time_from_ssmmm(value)

    root = etree.fromstring(gf.safe_bytes(input_text))
    for par in root.iter(smil_ns + "par"):
        for child in par:
            if child.tag == (smil_ns + "text"):
                # The fragment id is the URL fragment of the text src.
                identifier = gf.safe_unicode(gf.split_url(child.get("src"))[1])
            elif child.tag == (smil_ns + "audio"):
                begin = to_time(child.get("clipBegin"))
                end = to_time(child.get("clipEnd"))
                # TODO read text from additional text_file?
                self._add_fragment(
                    syncmap=syncmap,
                    identifier=identifier,
                    lines=[u""],
                    begin=begin,
                    end=end
                )
"def",
"parse",
"(",
"self",
",",
"input_text",
",",
"syncmap",
")",
":",
"from",
"lxml",
"import",
"etree",
"smil_ns",
"=",
"\"{http://www.w3.org/ns/SMIL}\"",
"root",
"=",
"etree",
".",
"fromstring",
"(",
"gf",
".",
"safe_bytes",
"(",
"input_text",
")",
")"... | Read from SMIL file.
Limitations:
1. parses only ``<par>`` elements, in order
2. timings must have ``hh:mm:ss.mmm`` or ``ss.mmm`` format (autodetected)
3. both ``clipBegin`` and ``clipEnd`` attributes of ``<audio>`` must be populated | [
"Read",
"from",
"SMIL",
"file",
"."
] | python | train |
def _process_pod_rate(self, metric_name, metric, scraper_config):
    """
    Submit a per-pod metric as a rate, summing all series that belong to
    the same pod before submission. Network metrics are skipped for pods
    running with host networking.
    """
    if metric.type not in METRIC_TYPES:
        self.log.error("Metric type %s unsupported for metric %s" % (metric.type, metric.name))
        return
    # Loop-invariant: whether this is a network metric depends only on the name.
    is_network_metric = '.network.' in metric_name
    per_pod = self._sum_values_by_context(metric, self._get_pod_uid_if_pod_metric)
    for pod_uid, sample in iteritems(per_pod):
        if is_network_metric and self._is_pod_host_networked(pod_uid):
            continue
        tags = tagger.tag('kubernetes_pod://%s' % pod_uid, tagger.HIGH)
        tags += scraper_config['custom_tags']
        self.rate(metric_name, sample[self.SAMPLE_VALUE], tags)
"def",
"_process_pod_rate",
"(",
"self",
",",
"metric_name",
",",
"metric",
",",
"scraper_config",
")",
":",
"if",
"metric",
".",
"type",
"not",
"in",
"METRIC_TYPES",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Metric type %s unsupported for metric %s\"",
"%"... | Takes a simple metric about a pod, reports it as a rate.
If several series are found for a given pod, values are summed before submission. | [
"Takes",
"a",
"simple",
"metric",
"about",
"a",
"pod",
"reports",
"it",
"as",
"a",
"rate",
".",
"If",
"several",
"series",
"are",
"found",
"for",
"a",
"given",
"pod",
"values",
"are",
"summed",
"before",
"submission",
"."
] | python | train |
def update_stat(self, mode='open', infostr='', stat=''):
    """Dispatch an operation-status log update to the handler for *mode*.

    :param mode: 'open', 'saveas' or 'listtree'
    :param infostr: string to put into info_st
    :param stat: 'OK' or 'ERR'
    """
    handler = self._update_stat[mode]
    handler(mode, infostr, stat)
"def",
"update_stat",
"(",
"self",
",",
"mode",
"=",
"'open'",
",",
"infostr",
"=",
"''",
",",
"stat",
"=",
"''",
")",
":",
"self",
".",
"_update_stat",
"[",
"mode",
"]",
"(",
"mode",
",",
"infostr",
",",
"stat",
")"
] | write operation stats to log
:param mode: 'open', 'saveas', 'listtree'
:param infostr: string to put into info_st
:param stat: 'OK' or 'ERR' | [
"write",
"operation",
"stats",
"to",
"log",
":",
"param",
"mode",
":",
"open",
"saveas",
"listtree",
":",
"param",
"infostr",
":",
"string",
"to",
"put",
"into",
"info_st",
":",
"param",
"stat",
":",
"OK",
"or",
"ERR"
] | python | train |
def multihead_self_attention_memory_compressed(x,
                                               mask_right,
                                               compression_factor,
                                               kv_channels,
                                               heads,
                                               dropout=0.0,
                                               dropout_broadcast_dims=None,
                                               master_dtype=tf.float32,
                                               slice_dtype=tf.float32,
                                               name="multihead_attention"):
  """Multihead self-attention over a strided-average-pooled memory.

  Keys and values are computed from a copy of the input that has been
  average-pooled (strided) down by ``compression_factor``; queries are
  computed from the full-length input.

  Args:
    x: a mtf.Tensor with shape
      [<batch_dims>, query_length, io_channels]
    mask_right: a boolean
    compression_factor: an integer, pooling stride for the memory
    kv_channels: a mtf.Dimension (the size of the key and value vectors)
    heads: a mtf.Dimension (the number of heads)
    dropout: a floating point value
    dropout_broadcast_dims: an optional list of mtf.Dimension
    master_dtype: a tf.dtype
    slice_dtype: a tf.dtype
    name: an optional string.

  Returns:
    A mtf.Tensor with shape [batch, query_length, io_channels]

  Raises:
    ValueError: if the dimensions do not match.
  """
  batch_dims = x.shape.dims[:-2]
  length, io_channels = x.shape.dims[-2:]
  with tf.variable_scope(name,
                         default_name="compressed_attention",
                         values=[x]):
    wq, wk, wv, wo = multihead_attention_vars(
        x.mesh, heads, io_channels, kv_channels,
        master_dtype, slice_dtype, x.dtype)
    # Keys/values attend over a shorter, pooled copy of the input.
    memory = compress_mean(x, length, compression_factor)
    memory = rename_length_to_memory_length(memory)
    memory_length = memory.shape.dims[-2]
    q = mtf.einsum(
        [x, wq],
        mtf.Shape(batch_dims + [heads, length, kv_channels]))
    k = mtf.einsum(
        [memory, wk],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    v = mtf.einsum(
        [memory, wv],
        mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
    if mask_right:
      # A memory position covers a pooled window whose last input index is
      # (index * factor + factor - 1); mask windows ending past the query.
      query_pos = mtf.range(x.mesh, length, dtype=tf.int32)
      memory_pos = (
          mtf.range(x.mesh, memory_length, dtype=tf.int32) * compression_factor
          + (compression_factor - 1))
      mask = mtf.cast(mtf.greater(memory_pos, query_pos), x.dtype) * -1e9
    else:
      mask = None
    o = dot_product_attention(
        q, k, v, mask, dropout, dropout_broadcast_dims, extra_logit=0.0)
    return mtf.einsum(
        [o, wo], mtf.Shape(batch_dims + [length, io_channels]))
"def",
"multihead_self_attention_memory_compressed",
"(",
"x",
",",
"mask_right",
",",
"compression_factor",
",",
"kv_channels",
",",
"heads",
",",
"dropout",
"=",
"0.0",
",",
"dropout_broadcast_dims",
"=",
"None",
",",
"master_dtype",
"=",
"tf",
".",
"float32",
"... | Memory-compressed self-attention.
The memory is first average-pooled (strided) to make it shorter by
a factor of compression_factor.
Args:
x: a mtf.Tensor with shape
[<batch_dims>, query_length, io_channels]
mask_right: a boolean
compression_factor: an integer
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
dropout: a floating point value
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
A mtf.Tensor with shape [batch, query_length, io_channels]
Raises:
ValueError: if the dimensions do not match. | [
"Memory",
"-",
"compressed",
"self",
"-",
"attention",
"."
] | python | train |
def clear(self):
    """Remove all sources from this configuration."""
    # Drop sources already loaded by the parent implementation, then
    # forget any sources still queued for lazy loading.
    super(LazyConfig, self).clear()
    self._lazy_prefix = []
    self._lazy_suffix = []
"def",
"clear",
"(",
"self",
")",
":",
"super",
"(",
"LazyConfig",
",",
"self",
")",
".",
"clear",
"(",
")",
"self",
".",
"_lazy_suffix",
"=",
"[",
"]",
"self",
".",
"_lazy_prefix",
"=",
"[",
"]"
] | Remove all sources from this configuration. | [
"Remove",
"all",
"sources",
"from",
"this",
"configuration",
"."
] | python | train |
def p_list_terminator(p):
    # NOTE: the docstring below is a PLY grammar production, not
    # documentation -- PLY reads it to build the parser tables, so its
    # wording must not be changed.
    '''list_terminator : NEWLINE
                       | SEMICOLON
                       | EOF'''
    # Only an explicit ';' produces an AST operator node (with its source
    # span); NEWLINE and EOF terminators leave p[0] as None.
    if p[1] == ';':
        p[0] = ast.node(kind='operator', op=';', pos=p.lexspan(1))
"def",
"p_list_terminator",
"(",
"p",
")",
":",
"if",
"p",
"[",
"1",
"]",
"==",
"';'",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"node",
"(",
"kind",
"=",
"'operator'",
",",
"op",
"=",
"';'",
",",
"pos",
"=",
"p",
".",
"lexspan",
"(",
"1",
... | list_terminator : NEWLINE
| SEMICOLON
| EOF | [
"list_terminator",
":",
"NEWLINE",
"|",
"SEMICOLON",
"|",
"EOF"
] | python | train |
def save_migration(connection, basename):
    """Record that migration *basename* was applied, in `migrations_applied`.

    :param connection: open DB-API connection providing context-manager
        cursors (PyMySQL-style).
    :param basename: file name of the migration that was applied.
    :return: True once the insert has been committed.
    """
    query = "INSERT INTO migrations_applied (name, date) VALUES (%s, NOW())"
    with connection.cursor() as cursor:
        cursor.execute(query, (basename,))
        connection.commit()
    return True
"def",
"save_migration",
"(",
"connection",
",",
"basename",
")",
":",
"# Prepare query",
"sql",
"=",
"\"INSERT INTO migrations_applied (name, date) VALUES (%s, NOW())\"",
"# Run",
"with",
"connection",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"exe... | Save a migration in `migrations_applied` table | [
"Save",
"a",
"migration",
"in",
"migrations_applied",
"table"
] | python | train |
def ids2strids(ids: Iterable[int]) -> str:
    """
    Join a sequence of integer ids into a token-separated string.

    :param ids: Sequence of integers.
    :return: String sequence
    """
    return C.TOKEN_SEPARATOR.join(str(token_id) for token_id in ids)
"def",
"ids2strids",
"(",
"ids",
":",
"Iterable",
"[",
"int",
"]",
")",
"->",
"str",
":",
"return",
"C",
".",
"TOKEN_SEPARATOR",
".",
"join",
"(",
"map",
"(",
"str",
",",
"ids",
")",
")"
] | Returns a string representation of a sequence of integers.
:param ids: Sequence of integers.
:return: String sequence | [
"Returns",
"a",
"string",
"representation",
"of",
"a",
"sequence",
"of",
"integers",
"."
] | python | train |
def on_service_arrival(self, svc_ref):
    """
    Called when a service has been registered in the framework

    :param svc_ref: A service reference
    :return: True if the service was bound to this handler, None otherwise
    """
    with self._lock:
        if svc_ref not in self.services:
            # Get the key property
            prop_value = svc_ref.get_property(self._key)
            # NOTE(review): 'and' binds tighter than 'or', so this reads as
            # "(not-yet-mapped and not-None) or allow_none".  When
            # _allow_none is True the first two tests are bypassed, so a
            # key already present in _future_value gets overwritten --
            # confirm whether "A and (B or C)" was the intent.
            if (
                prop_value not in self._future_value
                and prop_value is not None
                or self._allow_none
            ):
                # Matching new property value
                service = self._context.get_service(svc_ref)
                # Store the information, keyed both by property value and
                # by service reference
                self._future_value[prop_value] = service
                self.services[svc_ref] = service
                # Call back iPOPO
                self._ipopo_instance.bind(self, service, svc_ref)
                return True
        return None
"def",
"on_service_arrival",
"(",
"self",
",",
"svc_ref",
")",
":",
"with",
"self",
".",
"_lock",
":",
"if",
"svc_ref",
"not",
"in",
"self",
".",
"services",
":",
"# Get the key property",
"prop_value",
"=",
"svc_ref",
".",
"get_property",
"(",
"self",
".",
... | Called when a service has been registered in the framework
:param svc_ref: A service reference | [
"Called",
"when",
"a",
"service",
"has",
"been",
"registered",
"in",
"the",
"framework"
] | python | train |
def clear(self):
    """Reset every piece of cached state on this Node.

    Lets interfaces that do continuous-integration style builds
    re-evaluate the Node from scratch.
    """
    # del_binfo() is only needed for interactive mode, where the same
    # target may be rebuilt and must start from a clean slate.
    self.del_binfo()
    self.clear_memoized_values()
    self.ninfo = self.new_ninfo()
    self.executor_cleanup()
    # The cached signature may or may not have been computed yet.
    try:
        del self._calculated_sig
    except AttributeError:
        pass
    self.includes = None
"def",
"clear",
"(",
"self",
")",
":",
"# The del_binfo() call here isn't necessary for normal execution,",
"# but is for interactive mode, where we might rebuild the same",
"# target and need to start from scratch.",
"self",
".",
"del_binfo",
"(",
")",
"self",
".",
"clear_memoized_v... | Completely clear a Node of all its cached state (so that it
can be re-evaluated by interfaces that do continuous integration
builds). | [
"Completely",
"clear",
"a",
"Node",
"of",
"all",
"its",
"cached",
"state",
"(",
"so",
"that",
"it",
"can",
"be",
"re",
"-",
"evaluated",
"by",
"interfaces",
"that",
"do",
"continuous",
"integration",
"builds",
")",
"."
] | python | train |
def walk(textRoot, currentTag, level, prefix=None, postfix=None, unwrapUntilPara=False):
    '''
    .. note::
        This method does not cover all possible input doxygen types! This means that
        when an unsupported / unrecognized doxygen tag appears in the xml listing, the
        **raw xml will appear on the file page being documented**. This traverser is
        greedily designed to work for what testing revealed as the *bare minimum*
        required. **Please** see the :ref:`Doxygen ALIASES <doxygen_aliases>` section
        for how to bypass invalid documentation coming form Exhale.
    Recursive traverser method to parse the input parsed xml tree and convert the nodes
    into raw reStructuredText from the input doxygen format. **Not all doxygen markup
    types are handled**. The current supported doxygen xml markup tags are:
    - ``para``
    - ``orderedlist``
    - ``itemizedlist``
    - ``verbatim`` (specifically: ``embed:rst:leading-asterisk``)
    - ``formula``
    - ``ref``
    - ``emphasis`` (e.g., using `em`_)
    - ``computeroutput`` (e.g., using `c`_)
    - ``bold`` (e.g., using `b`_)
    .. _em: http://www.doxygen.nl/manual/commands.html#cmdem
    .. _c: http://www.doxygen.nl/manual/commands.html#cmdc
    .. _b: http://www.doxygen.nl/manual/commands.html#cmdb
    The goal of this method is to "explode" input ``xml`` data into raw reStructuredText
    to put at the top of the file pages. Wielding beautiful soup, this essentially
    means that you need to expand every non ``para`` tag into a ``para``. So if an
    ordered list appears in the xml, then the raw listing must be built up from the
    child nodes. After this is finished, though, the :meth:`bs4.BeautifulSoup.get_text`
    method will happily remove all remaining ``para`` tags to produce the final
    reStructuredText **provided that** the original "exploded" tags (such as the ordered
    list definition and its ``listitem`` children) have been *removed* from the soup.
    **Parameters**
    ``textRoot`` (:class:`~exhale.graph.ExhaleRoot`)
        The text root object that is calling this method. This parameter is
        necessary in order to retrieve / convert the doxygen ``\\ref SomeClass`` tag
        and link it to the appropriate node page. The ``textRoot`` object is not
        modified by executing this method.
    ``currentTag`` (:class:`bs4.element.Tag`)
        The current xml tag being processed, either to have its contents directly
        modified or unraveled.
    ``level`` (int)
        .. warning::
           This variable does **not** represent "recursion depth" (as one would
           typically see with a variable like this)!
        The **block** level of indentation currently being parsed. Because we are
        parsing a tree in order to generate raw reStructuredText code, we need to
        maintain a notion of "block level". This means tracking when there are
        nested structures such as a list within a list:
        .. code-block:: rst
           1. This is an outer ordered list.
               - There is a nested unordered list.
               - It is a child of the outer list.
           2. This is another item in the outer list.
        The outer ordered (numbers ``1`` and ``2``) list is at indentation level
        ``0``, and the inner unordered (``-``) list is at indentation level ``1``.
        Meaning that level is used as
        .. code-block:: py
           indent = "    " * level
           # ... later ...
           some_text = "\\n{indent}{text}".format(indent=indent, text=some_text)
        to indent the ordered / unordered lists accordingly.
    '''
    # Recursion bottoms out on a missing tag.
    if not currentTag:
        return
    if prefix:
        currentTag.insert_before(prefix)
    if postfix:
        currentTag.insert_after(postfix)
    # Snapshot only the direct children; descendants are handled recursively.
    children = currentTag.findChildren(recursive=False)
    indent = "    " * level
    if currentTag.name == "orderedlist":
        # Recurse into each item at the next block level with an "N. " prefix,
        # then unwrap the list scaffolding so only the generated text remains.
        idx = 1
        for child in children:
            walk(textRoot, child, level + 1, "\n{0}{1}. ".format(indent, idx), None, True)
            idx += 1
            child.unwrap()
        currentTag.unwrap()
    elif currentTag.name == "itemizedlist":
        # Same as orderedlist, but every item gets a "- " bullet prefix.
        for child in children:
            walk(textRoot, child, level + 1, "\n{0}- ".format(indent), None, True)
            child.unwrap()
        currentTag.unwrap()
    elif currentTag.name == "verbatim":
        # TODO: find relevant section in breathe.sphinxrenderer and include the versions
        # for both leading /// as well as just plain embed:rst.
        leading_asterisk = "embed:rst:leading-asterisk\n*"
        if currentTag.string.startswith(leading_asterisk):
            cont = currentTag.string.replace(leading_asterisk, "")
            cont = textwrap.dedent(cont.replace("\n*", "\n"))
            currentTag.string = cont
    elif currentTag.name == "formula":
        # Strip the surrounding $...$ delimiters and wrap as an inline math role.
        currentTag.string = ":math:`{0}`".format(currentTag.string[1:-1])
    elif currentTag.name == "ref":
        # Convert a doxygen \ref into a Sphinx :ref: pointing at the node's
        # generated page; unknown refids are logged, not raised.
        signal = None
        if "refid" not in currentTag.attrs:
            signal = "No 'refid' in `ref` tag attributes of file documentation. Attributes were: {0}".format(
                currentTag.attrs
            )
        else:
            refid = currentTag.attrs["refid"]
            if refid not in textRoot.node_by_refid:
                signal = "Found unknown 'refid' of [{0}] in file level documentation.".format(refid)
            else:
                currentTag.string = ":ref:`{0}`".format(textRoot.node_by_refid[refid].link_name)
        if signal:
            # << verboseBuild
            utils.verbose_log(signal, utils.AnsiColors.BOLD_YELLOW)
    elif currentTag.name == "emphasis":
        currentTag.string = "*{0}*".format(currentTag.string)
    elif currentTag.name == "computeroutput":
        currentTag.string = "``{0}``".format(currentTag.string)
    elif currentTag.name == "bold":
        currentTag.string = "**{0}**".format(currentTag.string)
    else:
        # Unrecognized / container tags: recurse into children at the same
        # level, prefixing every paragraph after the first with a newline at
        # the current indent.
        ctr = 0
        for child in children:
            c_prefix = None
            c_postfix = None
            if ctr > 0 and child.name == "para":
                c_prefix = "\n{0}".format(indent)
            walk(textRoot, child, level, c_prefix, c_postfix)
            ctr += 1
"def",
"walk",
"(",
"textRoot",
",",
"currentTag",
",",
"level",
",",
"prefix",
"=",
"None",
",",
"postfix",
"=",
"None",
",",
"unwrapUntilPara",
"=",
"False",
")",
":",
"if",
"not",
"currentTag",
":",
"return",
"if",
"prefix",
":",
"currentTag",
".",
... | .. note::
This method does not cover all possible input doxygen types! This means that
when an unsupported / unrecognized doxygen tag appears in the xml listing, the
**raw xml will appear on the file page being documented**. This traverser is
greedily designed to work for what testing revealed as the *bare minimum*
required. **Please** see the :ref:`Doxygen ALIASES <doxygen_aliases>` section
for how to bypass invalid documentation coming form Exhale.
Recursive traverser method to parse the input parsed xml tree and convert the nodes
into raw reStructuredText from the input doxygen format. **Not all doxygen markup
types are handled**. The current supported doxygen xml markup tags are:
- ``para``
- ``orderedlist``
- ``itemizedlist``
- ``verbatim`` (specifically: ``embed:rst:leading-asterisk``)
- ``formula``
- ``ref``
- ``emphasis`` (e.g., using `em`_)
- ``computeroutput`` (e.g., using `c`_)
- ``bold`` (e.g., using `b`_)
.. _em: http://www.doxygen.nl/manual/commands.html#cmdem
.. _c: http://www.doxygen.nl/manual/commands.html#cmdc
.. _b: http://www.doxygen.nl/manual/commands.html#cmdb
The goal of this method is to "explode" input ``xml`` data into raw reStructuredText
to put at the top of the file pages. Wielding beautiful soup, this essentially
means that you need to expand every non ``para`` tag into a ``para``. So if an
ordered list appears in the xml, then the raw listing must be built up from the
child nodes. After this is finished, though, the :meth:`bs4.BeautifulSoup.get_text`
method will happily remove all remaining ``para`` tags to produce the final
reStructuredText **provided that** the original "exploded" tags (such as the ordered
list definition and its ``listitem`` children) have been *removed* from the soup.
**Parameters**
``textRoot`` (:class:`~exhale.graph.ExhaleRoot`)
The text root object that is calling this method. This parameter is
necessary in order to retrieve / convert the doxygen ``\\ref SomeClass`` tag
and link it to the appropriate node page. The ``textRoot`` object is not
modified by executing this method.
``currentTag`` (:class:`bs4.element.Tag`)
The current xml tag being processed, either to have its contents directly
modified or unraveled.
``level`` (int)
.. warning::
This variable does **not** represent "recursion depth" (as one would
typically see with a variable like this)!
The **block** level of indentation currently being parsed. Because we are
parsing a tree in order to generate raw reStructuredText code, we need to
maintain a notion of "block level". This means tracking when there are
nested structures such as a list within a list:
.. code-block:: rst
1. This is an outer ordered list.
- There is a nested unordered list.
- It is a child of the outer list.
2. This is another item in the outer list.
The outer ordered (numbers ``1`` and ``2``) list is at indentation level
``0``, and the inner unordered (``-``) list is at indentation level ``1``.
Meaning that level is used as
.. code-block:: py
indent = " " * level
# ... later ...
some_text = "\\n{indent}{text}".format(indent=indent, text=some_text)
to indent the ordered / unordered lists accordingly. | [
"..",
"note",
"::"
] | python | train |
def add_accent(components, accent):
    """Return *components* with *accent* applied to its vowel part.

    ``components`` is the (initial consonant, vowel, final consonant)
    triple produced by separate().
    """
    vowel = components[1]
    last_consonant = components[2]
    if accent == Accent.NONE:
        # Stripping accents needs no position logic.
        return [components[0], remove_accent_string(vowel), last_consonant]
    if vowel == "":
        return components
    bare = remove_accent_string(vowel).lower()
    # ê and ơ take the accent in preference to any positional rule.
    marked = max(bare.find("ê"), bare.find("ơ"))
    if marked != -1:
        pos = marked
    elif len(vowel) == 1 or (len(vowel) == 2 and last_consonant == ""):
        pos = 0
    else:
        pos = 1
    new_vowel = vowel[:pos] + add_accent_char(vowel[pos], accent) + vowel[pos + 1:]
    return [components[0], new_vowel, components[2]]
"def",
"add_accent",
"(",
"components",
",",
"accent",
")",
":",
"vowel",
"=",
"components",
"[",
"1",
"]",
"last_consonant",
"=",
"components",
"[",
"2",
"]",
"if",
"accent",
"==",
"Accent",
".",
"NONE",
":",
"vowel",
"=",
"remove_accent_string",
"(",
"... | Add accent to the given components. The parameter components is
the result of function separate() | [
"Add",
"accent",
"to",
"the",
"given",
"components",
".",
"The",
"parameter",
"components",
"is",
"the",
"result",
"of",
"function",
"separate",
"()"
] | python | train |
Alir3z4/django-databrowse | django_databrowse/plugins/calendars.py | https://github.com/Alir3z4/django-databrowse/blob/4469495cd47a0da506ddf4e8cc752c2f453e0339/django_databrowse/plugins/calendars.py#L51-L64 | def field_dict(self, model):
"""
Helper function that returns a dictionary of all DateFields or
DateTimeFields in the given model. If self.field_names is set,
it takes that into account when building the dictionary.
"""
if self.field_names is None:
return dict([(f.name, f) for f in model._meta.fields
if isinstance(f, models.DateField)])
else:
return dict([(f.name, f)
for f in model._meta.fields
if isinstance(f, models.DateField) and
(f.name in self.field_names)]) | [
"def",
"field_dict",
"(",
"self",
",",
"model",
")",
":",
"if",
"self",
".",
"field_names",
"is",
"None",
":",
"return",
"dict",
"(",
"[",
"(",
"f",
".",
"name",
",",
"f",
")",
"for",
"f",
"in",
"model",
".",
"_meta",
".",
"fields",
"if",
"isinst... | Helper function that returns a dictionary of all DateFields or
DateTimeFields in the given model. If self.field_names is set,
it takes that into account when building the dictionary. | [
"Helper",
"function",
"that",
"returns",
"a",
"dictionary",
"of",
"all",
"DateFields",
"or",
"DateTimeFields",
"in",
"the",
"given",
"model",
".",
"If",
"self",
".",
"field_names",
"is",
"set",
"it",
"takes",
"that",
"into",
"account",
"when",
"building",
"t... | python | train |
HydraChain/hydrachain | hydrachain/consensus/synchronizer.py | https://github.com/HydraChain/hydrachain/blob/6c0919b0575dc8aa481f3a8c703e1a7f0575ecc3/hydrachain/consensus/synchronizer.py#L38-L76 | def request(self):
"""
sync the missing blocks between:
head
highest height with signing lockset
we get these locksets by collecting votes on all heights
"""
missing = self.missing
self.cm.log('sync.request', missing=len(missing), requested=len(self.requested),
received=len(self.received))
if self.requested:
self.cm.log('waiting for requested')
return
if len(self.received) + self.max_getproposals_count >= self.max_queued:
self.cm.log('queue is full')
return
if not missing:
self.cm.log('insync')
return
if self.last_active_protocol is None: # FIXME, check if it is active
self.cm.log('no active protocol', last_active_protocol=self.last_active_protocol)
return
self.cm.log('collecting')
blocknumbers = []
for h in missing:
if h not in self.received and h not in self.requested:
blocknumbers.append(h)
self.requested.add(h)
if len(blocknumbers) == self.max_getproposals_count:
break
self.cm.log('collected', num=len(blocknumbers))
if not blocknumbers:
return
self.cm.log('requesting', num=len(blocknumbers),
requesting_range=(blocknumbers[0], blocknumbers[-1]))
self.last_active_protocol.send_getblockproposals(*blocknumbers)
# setup alarm
self.cm.chainservice.setup_alarm(self.timeout, self.on_alarm, blocknumbers) | [
"def",
"request",
"(",
"self",
")",
":",
"missing",
"=",
"self",
".",
"missing",
"self",
".",
"cm",
".",
"log",
"(",
"'sync.request'",
",",
"missing",
"=",
"len",
"(",
"missing",
")",
",",
"requested",
"=",
"len",
"(",
"self",
".",
"requested",
")",
... | sync the missing blocks between:
head
highest height with signing lockset
we get these locksets by collecting votes on all heights | [
"sync",
"the",
"missing",
"blocks",
"between",
":",
"head",
"highest",
"height",
"with",
"signing",
"lockset"
] | python | test |
gusutabopb/aioinflux | aioinflux/client.py | https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/client.py#L25-L34 | def runner(coro):
"""Function execution decorator."""
@wraps(coro)
def inner(self, *args, **kwargs):
if self.mode == 'async':
return coro(self, *args, **kwargs)
return self._loop.run_until_complete(coro(self, *args, **kwargs))
return inner | [
"def",
"runner",
"(",
"coro",
")",
":",
"@",
"wraps",
"(",
"coro",
")",
"def",
"inner",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"mode",
"==",
"'async'",
":",
"return",
"coro",
"(",
"self",
",",
"*",
... | Function execution decorator. | [
"Function",
"execution",
"decorator",
"."
] | python | train |
tdryer/hangups | hangups/http_utils.py | https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/http_utils.py#L129-L141 | def _get_authorization_headers(sapisid_cookie):
"""Return authorization headers for API request."""
# It doesn't seem to matter what the url and time are as long as they are
# consistent.
time_msec = int(time.time() * 1000)
auth_string = '{} {} {}'.format(time_msec, sapisid_cookie, ORIGIN_URL)
auth_hash = hashlib.sha1(auth_string.encode()).hexdigest()
sapisidhash = 'SAPISIDHASH {}_{}'.format(time_msec, auth_hash)
return {
'authorization': sapisidhash,
'x-origin': ORIGIN_URL,
'x-goog-authuser': '0',
} | [
"def",
"_get_authorization_headers",
"(",
"sapisid_cookie",
")",
":",
"# It doesn't seem to matter what the url and time are as long as they are",
"# consistent.",
"time_msec",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
"auth_string",
"=",
"'{} {} {... | Return authorization headers for API request. | [
"Return",
"authorization",
"headers",
"for",
"API",
"request",
"."
] | python | valid |
phaethon/kamene | kamene/contrib/gsm_um.py | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L471-L477 | def configurationChangeAcknowledge():
"""CONFIGURATION CHANGE ACKNOWLEDGE Section 9.1.12c"""
a = TpPd(pd=0x6)
b = MessageType(mesType=0x31) # 00110001
c = MobileId()
packet = a / b / c
return packet | [
"def",
"configurationChangeAcknowledge",
"(",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"0x6",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"0x31",
")",
"# 00110001",
"c",
"=",
"MobileId",
"(",
")",
"packet",
"=",
"a",
"/",
"b",
"/",
"c",
"... | CONFIGURATION CHANGE ACKNOWLEDGE Section 9.1.12c | [
"CONFIGURATION",
"CHANGE",
"ACKNOWLEDGE",
"Section",
"9",
".",
"1",
".",
"12c"
] | python | train |
tanghaibao/jcvi | jcvi/variation/snp.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/variation/snp.py#L84-L171 | def gatk(args):
"""
%prog gatk bamfile reference.fasta
Call SNPs based on GATK best practices.
"""
p = OptionParser(gatk.__doc__)
p.add_option("--indelrealign", default=False, action="store_true",
help="Perform indel realignment")
p.set_home("gatk")
p.set_home("picard")
p.set_phred()
p.set_cpus(cpus=24)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, ref = args
pf = bamfile.rsplit(".", 1)[0]
mm = MakeManager()
picard = "java -Xmx32g -jar {0}/picard.jar".format(opts.picard_home)
tk = "java -Xmx32g -jar {0}/GenomeAnalysisTK.jar".format(opts.gatk_home)
tk += " -R {0}".format(ref)
# Step 0 - build reference
dictfile = ref.rsplit(".", 1)[0] + ".dict"
cmd1 = picard + " CreateSequenceDictionary"
cmd1 += " R={0} O={1}".format(ref, dictfile)
cmd2 = "samtools faidx {0}".format(ref)
mm.add(ref, dictfile, (cmd1, cmd2))
# Step 1 - sort bam
sortedbamfile = pf + ".sorted.bam"
cmd = picard + " SortSam"
cmd += " INPUT={0} OUTPUT={1}".format(bamfile, sortedbamfile)
cmd += " SORT_ORDER=coordinate CREATE_INDEX=true"
mm.add(bamfile, sortedbamfile, cmd)
# Step 2 - mark duplicates
dedupbamfile = pf + ".dedup.bam"
cmd = picard + " MarkDuplicates"
cmd += " INPUT={0} OUTPUT={1}".format(sortedbamfile, dedupbamfile)
cmd += " METRICS_FILE=dedup.log CREATE_INDEX=true"
mm.add(sortedbamfile, dedupbamfile, cmd)
if opts.indelrealign:
# Step 3 - create indel realignment targets
intervals = pf + ".intervals"
cmd = tk + " -T RealignerTargetCreator"
cmd += " -I {0} -o {1}".format(dedupbamfile, intervals)
mm.add(dedupbamfile, intervals, cmd)
# Step 4 - indel realignment
realignedbamfile = pf + ".realigned.bam"
cmd = tk + " -T IndelRealigner"
cmd += " -targetIntervals {0}".format(intervals)
cmd += " -I {0} -o {1}".format(dedupbamfile, realignedbamfile)
mm.add((dictfile, intervals), realignedbamfile, cmd)
else:
realignedbamfile = dedupbamfile
# Step 5 - SNP calling
vcf = pf + ".vcf"
cmd = tk + " -T HaplotypeCaller"
cmd += " -I {0}".format(realignedbamfile)
cmd += " --genotyping_mode DISCOVERY"
cmd += " -stand_emit_conf 10 -stand_call_conf 30"
cmd += " -nct {0}".format(opts.cpus)
cmd += " -o {0}".format(vcf)
if opts.phred == "64":
cmd += " --fix_misencoded_quality_scores"
mm.add(realignedbamfile, vcf, cmd)
# Step 6 - SNP filtering
filtered_vcf = pf + ".filtered.vcf"
cmd = tk + " -T VariantFiltration"
cmd += " -V {0}".format(vcf)
cmd += ' --filterExpression "DP < 10 || DP > 300 || QD < 2.0 || FS > 60.0 || MQ < 40.0"'
cmd += ' --filterName "LOWQUAL"'
cmd += ' --genotypeFilterExpression "isHomVar == 1"'
cmd += ' --genotypeFilterName "HOMOVAR"'
cmd += ' --genotypeFilterExpression "isHet == 1"'
cmd += ' --genotypeFilterName "HET"'
cmd += " -o {0}".format(filtered_vcf)
mm.add(vcf, filtered_vcf, cmd)
mm.write() | [
"def",
"gatk",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"gatk",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--indelrealign\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Perform indel real... | %prog gatk bamfile reference.fasta
Call SNPs based on GATK best practices. | [
"%prog",
"gatk",
"bamfile",
"reference",
".",
"fasta"
] | python | train |
Bernardo-MG/tox-test-command | setup.py | https://github.com/Bernardo-MG/tox-test-command/blob/b8412adae08fa4399fc8b1a33b277aa96dec35c8/setup.py#L35-L65 | def extract_version(path):
"""
Reads the file at the specified path and returns the version contained in it.
This is meant for reading the __init__.py file inside a package, and so it
expects a version field like:
__version__ = '1.0.0'
:param path: path to the Python file
:return: the version inside the file
"""
# Regular expression for the version
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open(path + '__init__.py', 'r', encoding='utf-8') as f:
version = f.read()
if version:
version = _version_re.search(version)
if version:
version = version.group(1)
version = str(ast.literal_eval(version.rstrip()))
extracted = version
else:
extracted = None
else:
extracted = None
return extracted | [
"def",
"extract_version",
"(",
"path",
")",
":",
"# Regular expression for the version",
"_version_re",
"=",
"re",
".",
"compile",
"(",
"r'__version__\\s+=\\s+(.*)'",
")",
"with",
"open",
"(",
"path",
"+",
"'__init__.py'",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8... | Reads the file at the specified path and returns the version contained in it.
This is meant for reading the __init__.py file inside a package, and so it
expects a version field like:
__version__ = '1.0.0'
:param path: path to the Python file
:return: the version inside the file | [
"Reads",
"the",
"file",
"at",
"the",
"specified",
"path",
"and",
"returns",
"the",
"version",
"contained",
"in",
"it",
"."
] | python | train |
poulp/zenipy | zenipy/zenipy.py | https://github.com/poulp/zenipy/blob/fd1de3c268bb1cffcb35b4f8186893c492dd6eaf/zenipy/zenipy.py#L440-L457 | def error(title="", text="", width=DEFAULT_WIDTH,
height=DEFAULT_HEIGHT, timeout=None):
"""
Display a simple error
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
"""
return _simple_dialog(Gtk.MessageType.ERROR,
text, title, width, height, timeout) | [
"def",
"error",
"(",
"title",
"=",
"\"\"",
",",
"text",
"=",
"\"\"",
",",
"width",
"=",
"DEFAULT_WIDTH",
",",
"height",
"=",
"DEFAULT_HEIGHT",
",",
"timeout",
"=",
"None",
")",
":",
"return",
"_simple_dialog",
"(",
"Gtk",
".",
"MessageType",
".",
"ERROR"... | Display a simple error
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int | [
"Display",
"a",
"simple",
"error"
] | python | train |
florianholzapfel/panasonic-viera | panasonic_viera/__init__.py | https://github.com/florianholzapfel/panasonic-viera/blob/bf912ff6eb03b59e3dde30b994a0fb1d883eb873/panasonic_viera/__init__.py#L193-L200 | def get_volume(self):
"""Return the current volume level."""
params = '<InstanceID>0</InstanceID><Channel>Master</Channel>'
res = self.soap_request(URL_CONTROL_DMR, URN_RENDERING_CONTROL,
'GetVolume', params)
root = ET.fromstring(res)
el_volume = root.find('.//CurrentVolume')
return int(el_volume.text) | [
"def",
"get_volume",
"(",
"self",
")",
":",
"params",
"=",
"'<InstanceID>0</InstanceID><Channel>Master</Channel>'",
"res",
"=",
"self",
".",
"soap_request",
"(",
"URL_CONTROL_DMR",
",",
"URN_RENDERING_CONTROL",
",",
"'GetVolume'",
",",
"params",
")",
"root",
"=",
"E... | Return the current volume level. | [
"Return",
"the",
"current",
"volume",
"level",
"."
] | python | train |
openstack/networking-cisco | networking_cisco/apps/saf/server/dfa_server.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/dfa_server.py#L1629-L1649 | def listener_create_event(self, listener_info):
"""Process listener create event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted
"""
listener_data = listener_info.get('listener')
lb_list = listener_data.get('loadbalancers')
for lb in lb_list:
lb_id = lb.get('id')
req = dict(instance_id=(lb_id.replace('-', '')))
instances = self.get_vms_for_this_req(**req)
if not instances:
lb_info = self.neutronclient.show_loadbalancer(lb_id)
if lb_info:
port_id = lb_info["loadbalancer"]["vip_port_id"]
self.add_lbaas_port(port_id, lb_id)
else:
LOG.info("lbaas port for lb %s already added" % lb_id) | [
"def",
"listener_create_event",
"(",
"self",
",",
"listener_info",
")",
":",
"listener_data",
"=",
"listener_info",
".",
"get",
"(",
"'listener'",
")",
"lb_list",
"=",
"listener_data",
".",
"get",
"(",
"'loadbalancers'",
")",
"for",
"lb",
"in",
"lb_list",
":",... | Process listener create event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted | [
"Process",
"listener",
"create",
"event",
"."
] | python | train |
rackerlabs/fleece | fleece/xray.py | https://github.com/rackerlabs/fleece/blob/42d79dfa0777e99dbb09bc46105449a9be5dbaa9/fleece/xray.py#L61-L94 | def get_trace_id():
"""Parse X-Ray Trace ID environment variable.
The value looks something like this:
Root=1-5901e3bc-8da3814a5f3ccbc864b66ecc;Parent=328f72132deac0ce;Sampled=1
`Root` is the main X-Ray Trace ID, `Parent` points to the top-level
segment, and `Sampled` shows whether the current request should be traced
or not.
If the environment variable doesn't exist, just return an `XRayTraceID`
instance with default values, which means that tracing will be skipped
due to `sampled` being set to `False`.
"""
raw_trace_id = os.environ.get('_X_AMZN_TRACE_ID', '')
trace_id_parts = raw_trace_id.split(';')
trace_kwargs = {
'trace_id': None,
'parent_id': None,
'sampled': False,
}
if trace_id_parts[0] != '':
# This means the trace ID environment variable is not empty
for part in trace_id_parts:
name, value = part.split('=')
if name == 'Root':
trace_kwargs['trace_id'] = value
elif name == 'Parent':
trace_kwargs['parent_id'] = value
elif name == 'Sampled':
trace_kwargs['sampled'] = bool(int(value))
return XRayTraceID(**trace_kwargs) | [
"def",
"get_trace_id",
"(",
")",
":",
"raw_trace_id",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'_X_AMZN_TRACE_ID'",
",",
"''",
")",
"trace_id_parts",
"=",
"raw_trace_id",
".",
"split",
"(",
"';'",
")",
"trace_kwargs",
"=",
"{",
"'trace_id'",
":",
"None... | Parse X-Ray Trace ID environment variable.
The value looks something like this:
Root=1-5901e3bc-8da3814a5f3ccbc864b66ecc;Parent=328f72132deac0ce;Sampled=1
`Root` is the main X-Ray Trace ID, `Parent` points to the top-level
segment, and `Sampled` shows whether the current request should be traced
or not.
If the environment variable doesn't exist, just return an `XRayTraceID`
instance with default values, which means that tracing will be skipped
due to `sampled` being set to `False`. | [
"Parse",
"X",
"-",
"Ray",
"Trace",
"ID",
"environment",
"variable",
"."
] | python | train |
xtuml/pyxtuml | bridgepoint/oal.py | https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/oal.py#L1045-L1048 | def t_DOT(self, t):
r"\."
t.endlexpos = t.lexpos + len(t.value)
return t | [
"def",
"t_DOT",
"(",
"self",
",",
"t",
")",
":",
"t",
".",
"endlexpos",
"=",
"t",
".",
"lexpos",
"+",
"len",
"(",
"t",
".",
"value",
")",
"return",
"t"
] | r"\. | [
"r",
"\\",
"."
] | python | test |
saltstack/salt | salt/states/rabbitmq_policy.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/rabbitmq_policy.py#L37-L124 | def present(name,
pattern,
definition,
priority=0,
vhost='/',
runas=None,
apply_to=None):
'''
Ensure the RabbitMQ policy exists.
Reference: http://www.rabbitmq.com/ha.html
name
Policy name
pattern
A regex of queues to apply the policy to
definition
A json dict describing the policy
priority
Priority (defaults to 0)
vhost
Virtual host to apply to (defaults to '/')
runas
Name of the user to run the command as
apply_to
Apply policy to 'queues', 'exchanges' or 'all' (default to 'all')
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
result = {}
policies = __salt__['rabbitmq.list_policies'](vhost=vhost, runas=runas)
policy = policies.get(vhost, {}).get(name)
updates = []
if policy:
if policy.get('pattern') != pattern:
updates.append('Pattern')
current_definition = policy.get('definition')
current_definition = json.loads(current_definition) if current_definition else ''
new_definition = json.loads(definition) if definition else ''
if current_definition != new_definition:
updates.append('Definition')
if apply_to and (policy.get('apply-to') != apply_to):
updates.append('Applyto')
if int(policy.get('priority')) != priority:
updates.append('Priority')
if policy and not updates:
ret['comment'] = 'Policy {0} {1} is already present'.format(vhost, name)
return ret
if not policy:
ret['changes'].update({'old': {}, 'new': name})
if __opts__['test']:
ret['comment'] = 'Policy {0} {1} is set to be created'.format(vhost, name)
else:
log.debug('Policy doesn\'t exist - Creating')
result = __salt__['rabbitmq.set_policy'](vhost,
name,
pattern,
definition,
priority=priority,
runas=runas,
apply_to=apply_to)
elif updates:
ret['changes'].update({'old': policy, 'new': updates})
if __opts__['test']:
ret['comment'] = 'Policy {0} {1} is set to be updated'.format(vhost, name)
else:
log.debug('Policy exists but needs updating')
result = __salt__['rabbitmq.set_policy'](vhost,
name,
pattern,
definition,
priority=priority,
runas=runas,
apply_to=apply_to)
if 'Error' in result:
ret['result'] = False
ret['comment'] = result['Error']
elif ret['changes'] == {}:
ret['comment'] = '\'{0}\' is already in the desired state.'.format(name)
elif __opts__['test']:
ret['result'] = None
elif 'Set' in result:
ret['comment'] = result['Set']
return ret | [
"def",
"present",
"(",
"name",
",",
"pattern",
",",
"definition",
",",
"priority",
"=",
"0",
",",
"vhost",
"=",
"'/'",
",",
"runas",
"=",
"None",
",",
"apply_to",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
... | Ensure the RabbitMQ policy exists.
Reference: http://www.rabbitmq.com/ha.html
name
Policy name
pattern
A regex of queues to apply the policy to
definition
A json dict describing the policy
priority
Priority (defaults to 0)
vhost
Virtual host to apply to (defaults to '/')
runas
Name of the user to run the command as
apply_to
Apply policy to 'queues', 'exchanges' or 'all' (default to 'all') | [
"Ensure",
"the",
"RabbitMQ",
"policy",
"exists",
"."
] | python | train |
caffeinehit/django-oauth2-provider | provider/scope.py | https://github.com/caffeinehit/django-oauth2-provider/blob/6b5bc0d3ad706d2aaa47fa476f38406cddd01236/provider/scope.py#L18-L63 | def check(wants, has):
"""
Check if a desired scope ``wants`` is part of an available scope ``has``.
Returns ``False`` if not, return ``True`` if yes.
:example:
If a list of scopes such as
::
READ = 1 << 1
WRITE = 1 << 2
READ_WRITE = READ | WRITE
SCOPES = (
(READ, 'read'),
(WRITE, 'write'),
(READ_WRITE, 'read+write'),
)
is defined, we can check if a given scope is part of another:
::
>>> from provider import scope
>>> scope.check(READ, READ)
True
>>> scope.check(WRITE, READ)
False
>>> scope.check(WRITE, WRITE)
True
>>> scope.check(READ, WRITE)
False
>>> scope.check(READ, READ_WRITE)
True
>>> scope.check(WRITE, READ_WRITE)
True
"""
if wants & has == 0:
return False
if wants & has < wants:
return False
return True | [
"def",
"check",
"(",
"wants",
",",
"has",
")",
":",
"if",
"wants",
"&",
"has",
"==",
"0",
":",
"return",
"False",
"if",
"wants",
"&",
"has",
"<",
"wants",
":",
"return",
"False",
"return",
"True"
] | Check if a desired scope ``wants`` is part of an available scope ``has``.
Returns ``False`` if not, return ``True`` if yes.
:example:
If a list of scopes such as
::
READ = 1 << 1
WRITE = 1 << 2
READ_WRITE = READ | WRITE
SCOPES = (
(READ, 'read'),
(WRITE, 'write'),
(READ_WRITE, 'read+write'),
)
is defined, we can check if a given scope is part of another:
::
>>> from provider import scope
>>> scope.check(READ, READ)
True
>>> scope.check(WRITE, READ)
False
>>> scope.check(WRITE, WRITE)
True
>>> scope.check(READ, WRITE)
False
>>> scope.check(READ, READ_WRITE)
True
>>> scope.check(WRITE, READ_WRITE)
True | [
"Check",
"if",
"a",
"desired",
"scope",
"wants",
"is",
"part",
"of",
"an",
"available",
"scope",
"has",
"."
] | python | train |
sanger-pathogens/circlator | circlator/clean.py | https://github.com/sanger-pathogens/circlator/blob/a4befb8c9dbbcd4b3ad1899a95aa3e689d58b638/circlator/clean.py#L82-L98 | def _load_nucmer_hits(self, infile):
'''Returns two dictionaries:
1) name=>contig length.
2) Second is dictionary of nucmer hits (ignoring self matches).
contig name => list of hits'''
hits = {}
lengths = {}
file_reader = pymummer.coords_file.reader(infile)
for al in file_reader:
if al.qry_name == al.ref_name:
continue
elif al.qry_name not in hits:
hits[al.qry_name] = []
hits[al.qry_name].append(al)
lengths[al.qry_name] = al.qry_length
lengths[al.ref_name] = al.ref_length
return lengths, hits | [
"def",
"_load_nucmer_hits",
"(",
"self",
",",
"infile",
")",
":",
"hits",
"=",
"{",
"}",
"lengths",
"=",
"{",
"}",
"file_reader",
"=",
"pymummer",
".",
"coords_file",
".",
"reader",
"(",
"infile",
")",
"for",
"al",
"in",
"file_reader",
":",
"if",
"al",... | Returns two dictionaries:
1) name=>contig length.
2) Second is dictionary of nucmer hits (ignoring self matches).
contig name => list of hits | [
"Returns",
"two",
"dictionaries",
":",
"1",
")",
"name",
"=",
">",
"contig",
"length",
".",
"2",
")",
"Second",
"is",
"dictionary",
"of",
"nucmer",
"hits",
"(",
"ignoring",
"self",
"matches",
")",
".",
"contig",
"name",
"=",
">",
"list",
"of",
"hits"
] | python | train |
Alignak-monitoring/alignak | alignak/external_command.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L2869-L2883 | def enable_passive_svc_checks(self, service):
"""Enable passive checks for a service
Format of the line that triggers function call::
ENABLE_PASSIVE_SVC_CHECKS;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
if not service.passive_checks_enabled:
service.modified_attributes |= \
DICT_MODATTR["MODATTR_PASSIVE_CHECKS_ENABLED"].value
service.passive_checks_enabled = True
self.send_an_element(service.get_update_status_brok()) | [
"def",
"enable_passive_svc_checks",
"(",
"self",
",",
"service",
")",
":",
"if",
"not",
"service",
".",
"passive_checks_enabled",
":",
"service",
".",
"modified_attributes",
"|=",
"DICT_MODATTR",
"[",
"\"MODATTR_PASSIVE_CHECKS_ENABLED\"",
"]",
".",
"value",
"service",... | Enable passive checks for a service
Format of the line that triggers function call::
ENABLE_PASSIVE_SVC_CHECKS;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None | [
"Enable",
"passive",
"checks",
"for",
"a",
"service",
"Format",
"of",
"the",
"line",
"that",
"triggers",
"function",
"call",
"::"
] | python | train |
ralphbean/bugwarrior | bugwarrior/services/__init__.py | https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/services/__init__.py#L483-L513 | def _aggregate_issues(conf, main_section, target, queue, service_name):
""" This worker function is separated out from the main
:func:`aggregate_issues` func only so that we can use multiprocessing
on it for speed reasons.
"""
start = time.time()
try:
service = get_service(service_name)(conf, main_section, target)
issue_count = 0
for issue in service.issues():
queue.put(issue)
issue_count += 1
except SystemExit as e:
log.critical(str(e))
queue.put((SERVICE_FINISHED_ERROR, (target, e)))
except BaseException as e:
if hasattr(e, 'request') and e.request:
# Exceptions raised by requests library have the HTTP request
# object stored as attribute. The request can have hooks attached
# to it, and we need to remove them, as there can be unpickleable
# methods. There is no one left to call these hooks anyway.
e.request.hooks = {}
log.exception("Worker for [%s] failed: %s" % (target, e))
queue.put((SERVICE_FINISHED_ERROR, (target, e)))
else:
queue.put((SERVICE_FINISHED_OK, (target, issue_count, )))
finally:
duration = time.time() - start
log.info("Done with [%s] in %fs" % (target, duration)) | [
"def",
"_aggregate_issues",
"(",
"conf",
",",
"main_section",
",",
"target",
",",
"queue",
",",
"service_name",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"try",
":",
"service",
"=",
"get_service",
"(",
"service_name",
")",
"(",
"conf",
",",
... | This worker function is separated out from the main
:func:`aggregate_issues` func only so that we can use multiprocessing
on it for speed reasons. | [
"This",
"worker",
"function",
"is",
"separated",
"out",
"from",
"the",
"main",
":",
"func",
":",
"aggregate_issues",
"func",
"only",
"so",
"that",
"we",
"can",
"use",
"multiprocessing",
"on",
"it",
"for",
"speed",
"reasons",
"."
] | python | test |
usc-isi-i2/dig-sandpaper | digsandpaper/coarse/postprocess/similarity_score_rerank_component.py | https://github.com/usc-isi-i2/dig-sandpaper/blob/c7a905ceec28ad0cc9e7da7ede2fd3d2fc93c3d6/digsandpaper/coarse/postprocess/similarity_score_rerank_component.py#L85-L110 | def add_highlights_docs(docs):
"""
"highlight": {
"knowledge_graph.title.value": [
"Before 1 January 2018, will <em>South</em> <em>Korea</em> file a World Trade Organization dispute against the United States related to solar panels?"
]
}
"""
if not isinstance(docs, list):
docs = [docs]
for doc in docs:
if 'matched_sentence' in doc['_source']:
matched_sentences = doc['_source']['matched_sentence']
for sentence in matched_sentences:
# also add matched sentence to knowledge graph
doc['_source']['knowledge_graph']['matched_sentence'] = [{'key': sentence, 'value': sentence}]
paragraph = SimilarityScoreRerank.get_description(doc)
if paragraph:
high_para = SimilarityScoreRerank.create_highlighted_sentences(matched_sentences, paragraph)
if high_para:
if 'highlight' not in doc:
doc['highlight'] = dict()
doc['highlight']['knowledge_graph.description.value'] = [high_para]
return docs | [
"def",
"add_highlights_docs",
"(",
"docs",
")",
":",
"if",
"not",
"isinstance",
"(",
"docs",
",",
"list",
")",
":",
"docs",
"=",
"[",
"docs",
"]",
"for",
"doc",
"in",
"docs",
":",
"if",
"'matched_sentence'",
"in",
"doc",
"[",
"'_source'",
"]",
":",
"... | "highlight": {
"knowledge_graph.title.value": [
"Before 1 January 2018, will <em>South</em> <em>Korea</em> file a World Trade Organization dispute against the United States related to solar panels?"
]
} | [
"highlight",
":",
"{",
"knowledge_graph",
".",
"title",
".",
"value",
":",
"[",
"Before",
"1",
"January",
"2018",
"will",
"<em",
">",
"South<",
"/",
"em",
">",
"<em",
">",
"Korea<",
"/",
"em",
">",
"file",
"a",
"World",
"Trade",
"Organization",
"disput... | python | train |
thesharp/htpasswd | htpasswd/group.py | https://github.com/thesharp/htpasswd/blob/8bf5cee0bd5362af586729f4c9cea8131eedd74f/htpasswd/group.py#L63-L67 | def add_user(self, user, group):
""" Adds user to a group """
if self.is_user_in(user, group):
raise UserAlreadyInAGroup
self.new_groups.add(group, user) | [
"def",
"add_user",
"(",
"self",
",",
"user",
",",
"group",
")",
":",
"if",
"self",
".",
"is_user_in",
"(",
"user",
",",
"group",
")",
":",
"raise",
"UserAlreadyInAGroup",
"self",
".",
"new_groups",
".",
"add",
"(",
"group",
",",
"user",
")"
] | Adds user to a group | [
"Adds",
"user",
"to",
"a",
"group"
] | python | train |
achiku/jungle | jungle/ec2.py | https://github.com/achiku/jungle/blob/fb63f845cfa9e9c0dfbabd8cfa3ebca8177a11ca/jungle/ec2.py#L10-L22 | def format_output(instances, flag):
"""return formatted string for instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}\t{4}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len) + '}{1:<16}{2:<21}{3:<16}{4:<16}'
for i in instances:
tag_name = get_tag_value(i.tags, 'Name')
out.append(line_format.format(
tag_name, i.state['Name'], i.id, i.private_ip_address, str(i.public_ip_address)))
return out | [
"def",
"format_output",
"(",
"instances",
",",
"flag",
")",
":",
"out",
"=",
"[",
"]",
"line_format",
"=",
"'{0}\\t{1}\\t{2}\\t{3}\\t{4}'",
"name_len",
"=",
"_get_max_name_len",
"(",
"instances",
")",
"+",
"3",
"if",
"flag",
":",
"line_format",
"=",
"'{0:<'",
... | return formatted string for instance | [
"return",
"formatted",
"string",
"for",
"instance"
] | python | train |
PedalPi/PluginsManager | pluginsmanager/model/pedalboard.py | https://github.com/PedalPi/PluginsManager/blob/2dcc9f6a79b48e9c9be82efffd855352fa15c5c7/pluginsmanager/model/pedalboard.py#L193-L212 | def connect(self, output_port, input_port):
"""
Connect two :class:`.Effect` instances in this pedalboard.
For this, is necessary informs the output port origin and the input port destination::
>>> pedalboard.append(driver)
>>> pedalboard.append(reverb)
>>> driver_output = driver.outputs[0]
>>> reverb_input = reverb.inputs[0]
>>> Connection(driver_output, reverb_input) in driver.connections
False
>>> pedalboard.connect(driver_output, reverb_input)
>>> Connection(driver_output, reverb_input) in driver.connections
True
:param Port output_port: Effect output port
:param Port input_port: Effect input port
"""
ConnectionClass = output_port.connection_class
self.connections.append(ConnectionClass(output_port, input_port)) | [
"def",
"connect",
"(",
"self",
",",
"output_port",
",",
"input_port",
")",
":",
"ConnectionClass",
"=",
"output_port",
".",
"connection_class",
"self",
".",
"connections",
".",
"append",
"(",
"ConnectionClass",
"(",
"output_port",
",",
"input_port",
")",
")"
] | Connect two :class:`.Effect` instances in this pedalboard.
For this, is necessary informs the output port origin and the input port destination::
>>> pedalboard.append(driver)
>>> pedalboard.append(reverb)
>>> driver_output = driver.outputs[0]
>>> reverb_input = reverb.inputs[0]
>>> Connection(driver_output, reverb_input) in driver.connections
False
>>> pedalboard.connect(driver_output, reverb_input)
>>> Connection(driver_output, reverb_input) in driver.connections
True
:param Port output_port: Effect output port
:param Port input_port: Effect input port | [
"Connect",
"two",
":",
"class",
":",
".",
"Effect",
"instances",
"in",
"this",
"pedalboard",
".",
"For",
"this",
"is",
"necessary",
"informs",
"the",
"output",
"port",
"origin",
"and",
"the",
"input",
"port",
"destination",
"::"
] | python | train |
kavdev/ldap-groups | ldap_groups/groups.py | https://github.com/kavdev/ldap-groups/blob/0dd3a7d9eafa3903127364839b12a4b3dd3ca521/ldap_groups/groups.py#L547-L563 | def add_child(self, group_lookup_attribute_value):
""" Attempts to add a child to the AD group.
:param group_lookup_attribute_value: The value for the LDAP_GROUPS_GROUP_LOOKUP_ATTRIBUTE.
:type group_lookup_attribute_value: str
:raises: **GroupDoesNotExist** if the provided group doesn't exist in the active directory.
(inherited from _get_group_dn)
:raises: **EntryAlreadyExists** if the child already exists in this group. (subclass of ModificationFailed)
:raises: **InsufficientPermissions** if the bind user does not have permission to modify this group.
(subclass of ModificationFailed)
:raises: **ModificationFailed** if the modification could not be performed for an unforseen reason.
"""
add_child = {'member': (MODIFY_ADD, [self._get_group_dn(group_lookup_attribute_value)])}
self._attempt_modification("child", group_lookup_attribute_value, add_child) | [
"def",
"add_child",
"(",
"self",
",",
"group_lookup_attribute_value",
")",
":",
"add_child",
"=",
"{",
"'member'",
":",
"(",
"MODIFY_ADD",
",",
"[",
"self",
".",
"_get_group_dn",
"(",
"group_lookup_attribute_value",
")",
"]",
")",
"}",
"self",
".",
"_attempt_m... | Attempts to add a child to the AD group.
:param group_lookup_attribute_value: The value for the LDAP_GROUPS_GROUP_LOOKUP_ATTRIBUTE.
:type group_lookup_attribute_value: str
:raises: **GroupDoesNotExist** if the provided group doesn't exist in the active directory.
(inherited from _get_group_dn)
:raises: **EntryAlreadyExists** if the child already exists in this group. (subclass of ModificationFailed)
:raises: **InsufficientPermissions** if the bind user does not have permission to modify this group.
(subclass of ModificationFailed)
:raises: **ModificationFailed** if the modification could not be performed for an unforseen reason. | [
"Attempts",
"to",
"add",
"a",
"child",
"to",
"the",
"AD",
"group",
".",
":",
"param",
"group_lookup_attribute_value",
":",
"The",
"value",
"for",
"the",
"LDAP_GROUPS_GROUP_LOOKUP_ATTRIBUTE",
".",
":",
"type",
"group_lookup_attribute_value",
":",
"str",
":",
"raise... | python | train |
goldhand/django-nupages | nupages/managers.py | https://github.com/goldhand/django-nupages/blob/4e54fae7e057f9530c22dc30c03812fd660cb7f4/nupages/managers.py#L9-L18 | def published(self, **kwargs):
''' Returns pages that are both status: Active and have a
non-conflicting activate_date / deactivate_date
'''
return self.filter(
Q(activate_date__lte=timezone.now()) | Q(
activate_date__isnull=True),
Q(deactivate_date__gte=timezone.now()) | Q(
deactivate_date__isnull=True),
Q(status=1), **kwargs) | [
"def",
"published",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"filter",
"(",
"Q",
"(",
"activate_date__lte",
"=",
"timezone",
".",
"now",
"(",
")",
")",
"|",
"Q",
"(",
"activate_date__isnull",
"=",
"True",
")",
",",
"Q",
... | Returns pages that are both status: Active and have a
non-conflicting activate_date / deactivate_date | [
"Returns",
"pages",
"that",
"are",
"both",
"status",
":",
"Active",
"and",
"have",
"a",
"non",
"-",
"conflicting",
"activate_date",
"/",
"deactivate_date"
] | python | train |
inveniosoftware/invenio-celery | invenio_celery/ext.py | https://github.com/inveniosoftware/invenio-celery/blob/4d075d5dbdb7ee849abdb0c8d7e7a49cb7973474/invenio_celery/ext.py#L33-L39 | def init_app(self, app, entry_point_group='invenio_celery.tasks',
**kwargs):
"""Initialize application object."""
self.init_config(app)
self.celery = FlaskCeleryExt(app).celery
self.entry_point_group = entry_point_group
app.extensions['invenio-celery'] = self | [
"def",
"init_app",
"(",
"self",
",",
"app",
",",
"entry_point_group",
"=",
"'invenio_celery.tasks'",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"init_config",
"(",
"app",
")",
"self",
".",
"celery",
"=",
"FlaskCeleryExt",
"(",
"app",
")",
".",
"cele... | Initialize application object. | [
"Initialize",
"application",
"object",
"."
] | python | train |
saltstack/salt | salt/modules/freebsdports.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/freebsdports.py#L480-L512 | def search(name):
'''
Search for matches in the ports tree. Globs are supported, and the category
is optional
CLI Examples:
.. code-block:: bash
salt '*' ports.search 'security/*'
salt '*' ports.search 'security/n*'
salt '*' ports.search nmap
.. warning::
Takes a while to run
'''
name = six.text_type(name)
all_ports = list_all()
if '/' in name:
if name.count('/') > 1:
raise SaltInvocationError(
'Invalid search string \'{0}\'. Port names cannot have more '
'than one slash'
)
else:
return fnmatch.filter(all_ports, name)
else:
ret = []
for port in all_ports:
if fnmatch.fnmatch(port.rsplit('/')[-1], name):
ret.append(port)
return ret | [
"def",
"search",
"(",
"name",
")",
":",
"name",
"=",
"six",
".",
"text_type",
"(",
"name",
")",
"all_ports",
"=",
"list_all",
"(",
")",
"if",
"'/'",
"in",
"name",
":",
"if",
"name",
".",
"count",
"(",
"'/'",
")",
">",
"1",
":",
"raise",
"SaltInvo... | Search for matches in the ports tree. Globs are supported, and the category
is optional
CLI Examples:
.. code-block:: bash
salt '*' ports.search 'security/*'
salt '*' ports.search 'security/n*'
salt '*' ports.search nmap
.. warning::
Takes a while to run | [
"Search",
"for",
"matches",
"in",
"the",
"ports",
"tree",
".",
"Globs",
"are",
"supported",
"and",
"the",
"category",
"is",
"optional"
] | python | train |
gabstopper/smc-python | smc/examples/ip_lists.py | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/examples/ip_lists.py#L57-L68 | def upload_as_text(name, filename):
"""
Upload the IPList as text from a file.
:param str name: name of IPList
:param str filename: name of text file to upload
:return: None
"""
location = list(IPList.objects.filter(name))
if location:
iplist = location[0]
return iplist.upload(filename=filename, as_type='txt') | [
"def",
"upload_as_text",
"(",
"name",
",",
"filename",
")",
":",
"location",
"=",
"list",
"(",
"IPList",
".",
"objects",
".",
"filter",
"(",
"name",
")",
")",
"if",
"location",
":",
"iplist",
"=",
"location",
"[",
"0",
"]",
"return",
"iplist",
".",
"... | Upload the IPList as text from a file.
:param str name: name of IPList
:param str filename: name of text file to upload
:return: None | [
"Upload",
"the",
"IPList",
"as",
"text",
"from",
"a",
"file",
"."
] | python | train |
edx/edx-django-sites-extensions | django_sites_extensions/models.py | https://github.com/edx/edx-django-sites-extensions/blob/d4fc18cb4831b7b95ccd1b2f9afc9afa1f19b096/django_sites_extensions/models.py#L79-L98 | def patched_get_site_by_request(self, request):
"""
Monkey patched version of Django's SiteManager._get_site_by_request() function.
Adds a configurable timeout to the in-memory SITE_CACHE for each cached Site.
This allows for the use of an in-memory cache for Site models, avoiding one
or more DB hits on every request made to the Django application, but also allows
for changes made to models associated with the Site model and accessed via the
Site model's relationship accessors to take effect without having to manual
recycle all Django worker processes active in an application environment.
"""
host = request.get_host()
now = datetime.datetime.utcnow()
site = models.SITE_CACHE.get(host)
cache_timeout = SITE_CACHE_TIMEOUTS.get(host, now)
if not site or cache_timeout <= now:
site = self.get(domain__iexact=host)
models.SITE_CACHE[host] = site
SITE_CACHE_TIMEOUTS[host] = now + get_site_cache_ttl()
return models.SITE_CACHE[host] | [
"def",
"patched_get_site_by_request",
"(",
"self",
",",
"request",
")",
":",
"host",
"=",
"request",
".",
"get_host",
"(",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"site",
"=",
"models",
".",
"SITE_CACHE",
".",
"get",
"(",
... | Monkey patched version of Django's SiteManager._get_site_by_request() function.
Adds a configurable timeout to the in-memory SITE_CACHE for each cached Site.
This allows for the use of an in-memory cache for Site models, avoiding one
or more DB hits on every request made to the Django application, but also allows
for changes made to models associated with the Site model and accessed via the
Site model's relationship accessors to take effect without having to manual
recycle all Django worker processes active in an application environment. | [
"Monkey",
"patched",
"version",
"of",
"Django",
"s",
"SiteManager",
".",
"_get_site_by_request",
"()",
"function",
"."
] | python | train |
nwilming/ocupy | ocupy/utils.py | https://github.com/nwilming/ocupy/blob/a0bd64f822576feaa502939d6bafd1183b237d16/ocupy/utils.py#L63-L70 | def randsample(vec, nr_samples, with_replacement = False):
"""
Draws nr_samples random samples from vec.
"""
if not with_replacement:
return np.random.permutation(vec)[0:nr_samples]
else:
return np.asarray(vec)[np.random.randint(0, len(vec), nr_samples)] | [
"def",
"randsample",
"(",
"vec",
",",
"nr_samples",
",",
"with_replacement",
"=",
"False",
")",
":",
"if",
"not",
"with_replacement",
":",
"return",
"np",
".",
"random",
".",
"permutation",
"(",
"vec",
")",
"[",
"0",
":",
"nr_samples",
"]",
"else",
":",
... | Draws nr_samples random samples from vec. | [
"Draws",
"nr_samples",
"random",
"samples",
"from",
"vec",
"."
] | python | train |
Azure/msrest-for-python | msrest/universal_http/requests.py | https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/universal_http/requests.py#L231-L305 | def _configure_send(self, request, **kwargs):
# type: (ClientRequest, Any) -> Dict[str, str]
"""Configure the kwargs to use with requests.
See "send" for kwargs details.
:param ClientRequest request: The request object to be sent.
:returns: The requests.Session.request kwargs
:rtype: dict[str,str]
"""
requests_kwargs = {} # type: Any
session = kwargs.pop('session', self.session)
# If custom session was not create here
if session is not self.session:
self._init_session(session)
session.max_redirects = int(self.config.redirect_policy())
session.trust_env = bool(self.config.proxies.use_env_settings)
# Initialize requests_kwargs with "config" value
requests_kwargs.update(self.config.connection())
requests_kwargs['allow_redirects'] = bool(self.config.redirect_policy)
requests_kwargs['headers'] = self.config.headers.copy()
proxies = self.config.proxies()
if proxies:
requests_kwargs['proxies'] = proxies
# Replace by operation level kwargs
# We allow some of them, since some like stream or json are controled by msrest
for key in kwargs:
if key in self._REQUESTS_KWARGS:
requests_kwargs[key] = kwargs[key]
# Hooks. Deprecated, should be a policy
def make_user_hook_cb(user_hook, session):
def user_hook_cb(r, *args, **kwargs):
kwargs.setdefault("msrest", {})['session'] = session
return user_hook(r, *args, **kwargs)
return user_hook_cb
hooks = []
for user_hook in self.config.hooks:
hooks.append(make_user_hook_cb(user_hook, self.session))
if hooks:
requests_kwargs['hooks'] = {'response': hooks}
# Configuration callback. Deprecated, should be a policy
output_kwargs = self.config.session_configuration_callback(
session,
self.config,
kwargs,
**requests_kwargs
)
if output_kwargs is not None:
requests_kwargs = output_kwargs
# If custom session was not create here
if session is not self.session:
requests_kwargs['session'] = session
### Autorest forced kwargs now ###
# If Autorest needs this response to be streamable. True for compat.
requests_kwargs['stream'] = kwargs.get('stream', True)
if request.files:
requests_kwargs['files'] = request.files
elif request.data:
requests_kwargs['data'] = request.data
requests_kwargs['headers'].update(request.headers)
return requests_kwargs | [
"def",
"_configure_send",
"(",
"self",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"# type: (ClientRequest, Any) -> Dict[str, str]",
"requests_kwargs",
"=",
"{",
"}",
"# type: Any",
"session",
"=",
"kwargs",
".",
"pop",
"(",
"'session'",
",",
"self",
".",... | Configure the kwargs to use with requests.
See "send" for kwargs details.
:param ClientRequest request: The request object to be sent.
:returns: The requests.Session.request kwargs
:rtype: dict[str,str] | [
"Configure",
"the",
"kwargs",
"to",
"use",
"with",
"requests",
"."
] | python | train |
f3at/feat | src/feat/models/model.py | https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/models/model.py#L177-L205 | def create(name, *effects, **kwargs):
"""
Annotate a non-idempotent create action to the model being defined.
Should really be::
create(name, *effects, value=None, params=None, label=None, desc=None)
but it is not supported by python < 3.
@param name: item name unique for the model being defined.
@type name: str or unicode
@param effects:
@type effects: str or unicode
@param value: input value information or None if not required.
@type value: IValuInfo or None
@param params: action paremeter or list of action parameters.
@type params: IActionPram or list of IActionParam
@param label: the action label or None.
@type label: str or unicode or None
@param desc: the action description or None if not documented.
@type desc: str or unicode or None
"""
value_info = kwargs.pop("value", None)
params = kwargs.pop("params", None)
label = kwargs.pop("label", None)
desc = kwargs.pop("desc", None)
if kwargs:
raise TypeError("create() got an unexpected keyword '%s'"
% kwargs.keys()[0])
_annotate("create", name, value_info=value_info, params=params,
effects=effects, label=label, desc=desc) | [
"def",
"create",
"(",
"name",
",",
"*",
"effects",
",",
"*",
"*",
"kwargs",
")",
":",
"value_info",
"=",
"kwargs",
".",
"pop",
"(",
"\"value\"",
",",
"None",
")",
"params",
"=",
"kwargs",
".",
"pop",
"(",
"\"params\"",
",",
"None",
")",
"label",
"=... | Annotate a non-idempotent create action to the model being defined.
Should really be::
create(name, *effects, value=None, params=None, label=None, desc=None)
but it is not supported by python < 3.
@param name: item name unique for the model being defined.
@type name: str or unicode
@param effects:
@type effects: str or unicode
@param value: input value information or None if not required.
@type value: IValuInfo or None
@param params: action paremeter or list of action parameters.
@type params: IActionPram or list of IActionParam
@param label: the action label or None.
@type label: str or unicode or None
@param desc: the action description or None if not documented.
@type desc: str or unicode or None | [
"Annotate",
"a",
"non",
"-",
"idempotent",
"create",
"action",
"to",
"the",
"model",
"being",
"defined",
".",
"Should",
"really",
"be",
"::",
"create",
"(",
"name",
"*",
"effects",
"value",
"=",
"None",
"params",
"=",
"None",
"label",
"=",
"None",
"desc"... | python | train |
allelos/vectors | vectors/vectors.py | https://github.com/allelos/vectors/blob/55db2a7e489ae5f4380e70b3c5b7a6ce39de5cee/vectors/vectors.py#L199-L220 | def rotate(self, angle, axis=(0, 0, 1)):
"""Returns the rotated vector. Assumes angle is in radians"""
if not all(isinstance(a, int) for a in axis):
raise ValueError
x, y, z = self.x, self.y, self.z
# Z axis rotation
if(axis[2]):
x = (self.x * math.cos(angle) - self.y * math.sin(angle))
y = (self.x * math.sin(angle) + self.y * math.cos(angle))
# Y axis rotation
if(axis[1]):
x = self.x * math.cos(angle) + self.z * math.sin(angle)
z = -self.x * math.sin(angle) + self.z * math.cos(angle)
# X axis rotation
if(axis[0]):
y = self.y * math.cos(angle) - self.z * math.sin(angle)
z = self.y * math.sin(angle) + self.z * math.cos(angle)
return Vector(x, y, z) | [
"def",
"rotate",
"(",
"self",
",",
"angle",
",",
"axis",
"=",
"(",
"0",
",",
"0",
",",
"1",
")",
")",
":",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"a",
",",
"int",
")",
"for",
"a",
"in",
"axis",
")",
":",
"raise",
"ValueError",
"x",
",",
... | Returns the rotated vector. Assumes angle is in radians | [
"Returns",
"the",
"rotated",
"vector",
".",
"Assumes",
"angle",
"is",
"in",
"radians"
] | python | train |
HPENetworking/PYHPEIMC | build/lib/pyhpeimc/plat/perf.py | https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/build/lib/pyhpeimc/plat/perf.py#L56-L92 | def get_perf_task(task_name, auth, url):
"""
function takes the a str object containing the name of an existing performance tasks and issues a RESTFUL call
to the IMC REST service. It will return a list
:param task_name: str containing the name of the performance task
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: 204
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.perf import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> selected_task = get_perf_task('Cisco_Temperature', auth.creds, auth.url)
>>> assert type(selected_task) is dict
>>> assert 'taskName' in selected_task
"""
get_perf_task_url = "/imcrs/perf/task?name="+task_name+"&orderBy=taskId&desc=false"
f_url = url + get_perf_task_url
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=headers)
try:
if r.status_code == 200:
perf_task_info = (json.loads(r.text))['task']
return perf_task_info
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + ' get_dev_alarms: An Error has occured' | [
"def",
"get_perf_task",
"(",
"task_name",
",",
"auth",
",",
"url",
")",
":",
"get_perf_task_url",
"=",
"\"/imcrs/perf/task?name=\"",
"+",
"task_name",
"+",
"\"&orderBy=taskId&desc=false\"",
"f_url",
"=",
"url",
"+",
"get_perf_task_url",
"# creates the URL using the payloa... | function takes the a str object containing the name of an existing performance tasks and issues a RESTFUL call
to the IMC REST service. It will return a list
:param task_name: str containing the name of the performance task
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: 204
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.perf import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> selected_task = get_perf_task('Cisco_Temperature', auth.creds, auth.url)
>>> assert type(selected_task) is dict
>>> assert 'taskName' in selected_task | [
"function",
"takes",
"the",
"a",
"str",
"object",
"containing",
"the",
"name",
"of",
"an",
"existing",
"performance",
"tasks",
"and",
"issues",
"a",
"RESTFUL",
"call",
"to",
"the",
"IMC",
"REST",
"service",
".",
"It",
"will",
"return",
"a",
"list"
] | python | train |
twisted/mantissa | xmantissa/websession.py | https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/websession.py#L34-L47 | def usernameFromRequest(request):
"""
Take an HTTP request and return a username of the form <user>@<domain>.
@type request: L{inevow.IRequest}
@param request: A HTTP request
@return: A C{str}
"""
username = request.args.get('username', [''])[0]
if '@' not in username:
username = '%s@%s' % (
username, request.getHeader('host').split(':')[0])
return username | [
"def",
"usernameFromRequest",
"(",
"request",
")",
":",
"username",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'username'",
",",
"[",
"''",
"]",
")",
"[",
"0",
"]",
"if",
"'@'",
"not",
"in",
"username",
":",
"username",
"=",
"'%s@%s'",
"%",
"(",
... | Take an HTTP request and return a username of the form <user>@<domain>.
@type request: L{inevow.IRequest}
@param request: A HTTP request
@return: A C{str} | [
"Take",
"an",
"HTTP",
"request",
"and",
"return",
"a",
"username",
"of",
"the",
"form",
"<user",
">",
"@<domain",
">",
"."
] | python | train |
Fantomas42/django-blog-zinnia | zinnia/templatetags/zinnia.py | https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/templatetags/zinnia.py#L314-L327 | def zinnia_breadcrumbs(context, root_name='',
template='zinnia/tags/breadcrumbs.html',):
"""
Return a breadcrumb for the application.
"""
path = context['request'].path
context_object = get_context_first_object(
context, ['object', 'category', 'tag', 'author'])
context_page = context.get('page_obj')
breadcrumbs = retrieve_breadcrumbs(
path, context_object, context_page, root_name)
return {'template': template,
'breadcrumbs': breadcrumbs} | [
"def",
"zinnia_breadcrumbs",
"(",
"context",
",",
"root_name",
"=",
"''",
",",
"template",
"=",
"'zinnia/tags/breadcrumbs.html'",
",",
")",
":",
"path",
"=",
"context",
"[",
"'request'",
"]",
".",
"path",
"context_object",
"=",
"get_context_first_object",
"(",
"... | Return a breadcrumb for the application. | [
"Return",
"a",
"breadcrumb",
"for",
"the",
"application",
"."
] | python | train |
uyar/pygenstub | pygenstub.py | https://github.com/uyar/pygenstub/blob/a6b18a823382d3c6be29c411fb33c58b6090d22c/pygenstub.py#L448-L480 | def visit_Assign(self, node):
"""Visit an assignment node."""
line = self._code_lines[node.lineno - 1]
if SIG_COMMENT in line:
line = _RE_COMMENT_IN_STRING.sub("", line)
if (SIG_COMMENT not in line) and (not self.generic):
return
if SIG_COMMENT in line:
_, signature = line.split(SIG_COMMENT)
_, return_type, requires = parse_signature(signature)
self.required_types |= requires
parent = self._parents[-1]
for var in node.targets:
if isinstance(var, ast.Name):
name, p = var.id, parent
elif (
isinstance(var, ast.Attribute)
and isinstance(var.value, ast.Name)
and (var.value.id == "self")
):
name, p = var.attr, parent.parent
else:
name, p = None, None
if name is not None:
if self.generic:
return_type = "Any"
self.required_types.add(return_type)
stub_node = VariableNode(name, return_type)
p.add_variable(stub_node) | [
"def",
"visit_Assign",
"(",
"self",
",",
"node",
")",
":",
"line",
"=",
"self",
".",
"_code_lines",
"[",
"node",
".",
"lineno",
"-",
"1",
"]",
"if",
"SIG_COMMENT",
"in",
"line",
":",
"line",
"=",
"_RE_COMMENT_IN_STRING",
".",
"sub",
"(",
"\"\"",
",",
... | Visit an assignment node. | [
"Visit",
"an",
"assignment",
"node",
"."
] | python | train |
JoelBender/bacpypes | py34/bacpypes/pdu.py | https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py34/bacpypes/pdu.py#L385-L389 | def pack_ip_addr(addr):
"""Given an IP address tuple like ('1.2.3.4', 47808) return the six-octet string
useful for a BACnet address."""
addr, port = addr
return socket.inet_aton(addr) + struct.pack('!H', port & _short_mask) | [
"def",
"pack_ip_addr",
"(",
"addr",
")",
":",
"addr",
",",
"port",
"=",
"addr",
"return",
"socket",
".",
"inet_aton",
"(",
"addr",
")",
"+",
"struct",
".",
"pack",
"(",
"'!H'",
",",
"port",
"&",
"_short_mask",
")"
] | Given an IP address tuple like ('1.2.3.4', 47808) return the six-octet string
useful for a BACnet address. | [
"Given",
"an",
"IP",
"address",
"tuple",
"like",
"(",
"1",
".",
"2",
".",
"3",
".",
"4",
"47808",
")",
"return",
"the",
"six",
"-",
"octet",
"string",
"useful",
"for",
"a",
"BACnet",
"address",
"."
] | python | train |
rbaier/python-urltools | urltools/urltools.py | https://github.com/rbaier/python-urltools/blob/76bf599aeb4cb463df8e38367aa40a7d8ec7d9a1/urltools/urltools.py#L217-L234 | def normalize_query(query):
"""Normalize query: sort params by name, remove params without value.
>>> normalize_query('z=3&y=&x=1')
'x=1&z=3'
"""
if query == '' or len(query) <= 2:
return ''
nquery = unquote(query, exceptions=QUOTE_EXCEPTIONS['query'])
params = nquery.split('&')
nparams = []
for param in params:
if '=' in param:
k, v = param.split('=', 1)
if k and v:
nparams.append("%s=%s" % (k, v))
nparams.sort()
return '&'.join(nparams) | [
"def",
"normalize_query",
"(",
"query",
")",
":",
"if",
"query",
"==",
"''",
"or",
"len",
"(",
"query",
")",
"<=",
"2",
":",
"return",
"''",
"nquery",
"=",
"unquote",
"(",
"query",
",",
"exceptions",
"=",
"QUOTE_EXCEPTIONS",
"[",
"'query'",
"]",
")",
... | Normalize query: sort params by name, remove params without value.
>>> normalize_query('z=3&y=&x=1')
'x=1&z=3' | [
"Normalize",
"query",
":",
"sort",
"params",
"by",
"name",
"remove",
"params",
"without",
"value",
"."
] | python | train |
ericsomdahl/python-bittrex | bittrex/bittrex.py | https://github.com/ericsomdahl/python-bittrex/blob/2dbc08e3221e07a9e618eaa025d98ed197d28e31/bittrex/bittrex.py#L299-L329 | def get_market_history(self, market):
"""
Used to retrieve the latest trades that have occurred for a
specific market.
Endpoint:
1.1 /market/getmarkethistory
2.0 NO Equivalent
Example ::
{'success': True,
'message': '',
'result': [ {'Id': 5625015,
'TimeStamp': '2017-08-31T01:29:50.427',
'Quantity': 7.31008193,
'Price': 0.00177639,
'Total': 0.01298555,
'FillType': 'FILL',
'OrderType': 'BUY'},
...
]
}
:param market: String literal for the market (ex: BTC-LTC)
:type market: str
:return: Market history in JSON
:rtype : dict
"""
return self._api_query(path_dict={
API_V1_1: '/public/getmarkethistory',
}, options={'market': market, 'marketname': market}, protection=PROTECTION_PUB) | [
"def",
"get_market_history",
"(",
"self",
",",
"market",
")",
":",
"return",
"self",
".",
"_api_query",
"(",
"path_dict",
"=",
"{",
"API_V1_1",
":",
"'/public/getmarkethistory'",
",",
"}",
",",
"options",
"=",
"{",
"'market'",
":",
"market",
",",
"'marketnam... | Used to retrieve the latest trades that have occurred for a
specific market.
Endpoint:
1.1 /market/getmarkethistory
2.0 NO Equivalent
Example ::
{'success': True,
'message': '',
'result': [ {'Id': 5625015,
'TimeStamp': '2017-08-31T01:29:50.427',
'Quantity': 7.31008193,
'Price': 0.00177639,
'Total': 0.01298555,
'FillType': 'FILL',
'OrderType': 'BUY'},
...
]
}
:param market: String literal for the market (ex: BTC-LTC)
:type market: str
:return: Market history in JSON
:rtype : dict | [
"Used",
"to",
"retrieve",
"the",
"latest",
"trades",
"that",
"have",
"occurred",
"for",
"a",
"specific",
"market",
"."
] | python | train |
tamasgal/km3pipe | km3pipe/utils/i3shower2hdf5.py | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/i3shower2hdf5.py#L348-L354 | def main():
"""Entry point when running as script from commandline."""
from docopt import docopt
args = docopt(__doc__)
infile = args['INFILE']
outfile = args['OUTFILE']
i3extract(infile, outfile) | [
"def",
"main",
"(",
")",
":",
"from",
"docopt",
"import",
"docopt",
"args",
"=",
"docopt",
"(",
"__doc__",
")",
"infile",
"=",
"args",
"[",
"'INFILE'",
"]",
"outfile",
"=",
"args",
"[",
"'OUTFILE'",
"]",
"i3extract",
"(",
"infile",
",",
"outfile",
")"
... | Entry point when running as script from commandline. | [
"Entry",
"point",
"when",
"running",
"as",
"script",
"from",
"commandline",
"."
] | python | train |
Calysto/calysto | calysto/ai/conx.py | https://github.com/Calysto/calysto/blob/20813c0f48096317aa775d03a5c6b20f12fafc93/calysto/ai/conx.py#L507-L515 | def setActivations(self, value):
"""
Sets all activations to the value of the argument. Value should be in the range [0,1].
"""
#if self.verify and not self.activationSet == 0:
# raise LayerError, \
# ('Activation flag not reset. Activations may have been set multiple times without any intervening call to propagate().', self.activationSet)
Numeric.put(self.activation, Numeric.arange(len(self.activation)), value)
self.activationSet = 1 | [
"def",
"setActivations",
"(",
"self",
",",
"value",
")",
":",
"#if self.verify and not self.activationSet == 0:",
"# raise LayerError, \\",
"# ('Activation flag not reset. Activations may have been set multiple times without any intervening call to propagate().', self.activationSet)"... | Sets all activations to the value of the argument. Value should be in the range [0,1]. | [
"Sets",
"all",
"activations",
"to",
"the",
"value",
"of",
"the",
"argument",
".",
"Value",
"should",
"be",
"in",
"the",
"range",
"[",
"0",
"1",
"]",
"."
] | python | train |
datacats/datacats | datacats/userprofile.py | https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/userprofile.py#L77-L87 | def generate_ssh_key(self):
"""
Generate a new ssh private and public key
"""
web_command(
command=["ssh-keygen", "-q", "-t", "rsa", "-N", "", "-C",
"datacats generated {0}@{1}".format(
getuser(), gethostname()),
"-f", "/output/id_rsa"],
rw={self.profiledir: '/output'},
) | [
"def",
"generate_ssh_key",
"(",
"self",
")",
":",
"web_command",
"(",
"command",
"=",
"[",
"\"ssh-keygen\"",
",",
"\"-q\"",
",",
"\"-t\"",
",",
"\"rsa\"",
",",
"\"-N\"",
",",
"\"\"",
",",
"\"-C\"",
",",
"\"datacats generated {0}@{1}\"",
".",
"format",
"(",
"... | Generate a new ssh private and public key | [
"Generate",
"a",
"new",
"ssh",
"private",
"and",
"public",
"key"
] | python | train |
crs4/pydoop | pydoop/hdfs/__init__.py | https://github.com/crs4/pydoop/blob/f375be2a06f9c67eaae3ce6f605195dbca143b2b/pydoop/hdfs/__init__.py#L146-L159 | def load(hdfs_path, **kwargs):
"""\
Read the content of ``hdfs_path`` and return it.
Keyword arguments are passed to :func:`open`. The `"mode"` kwarg
must be readonly.
"""
m, _ = common.parse_mode(kwargs.get("mode", "r"))
if m != "r":
raise ValueError("opening mode must be readonly")
with open(hdfs_path, **kwargs) as fi:
data = fi.read()
fi.fs.close()
return data | [
"def",
"load",
"(",
"hdfs_path",
",",
"*",
"*",
"kwargs",
")",
":",
"m",
",",
"_",
"=",
"common",
".",
"parse_mode",
"(",
"kwargs",
".",
"get",
"(",
"\"mode\"",
",",
"\"r\"",
")",
")",
"if",
"m",
"!=",
"\"r\"",
":",
"raise",
"ValueError",
"(",
"\... | \
Read the content of ``hdfs_path`` and return it.
Keyword arguments are passed to :func:`open`. The `"mode"` kwarg
must be readonly. | [
"\\",
"Read",
"the",
"content",
"of",
"hdfs_path",
"and",
"return",
"it",
"."
] | python | train |
sassoftware/epdb | epdb/epdb_server.py | https://github.com/sassoftware/epdb/blob/5a8375aa59862d787e6496810a508297a5522967/epdb/epdb_server.py#L104-L118 | def handle(self):
"""
Performs endless processing of socket input/output, passing
cooked information onto the local process.
"""
while True:
toRead = select.select([self.local, self.remote], [], [], 0.1)[0]
if self.local in toRead:
data = os.read(self.local, 4096)
self.sock.sendall(data)
continue
if self.remote in toRead or self.rawq:
buf = self.read_eager()
os.write(self.local, buf)
continue | [
"def",
"handle",
"(",
"self",
")",
":",
"while",
"True",
":",
"toRead",
"=",
"select",
".",
"select",
"(",
"[",
"self",
".",
"local",
",",
"self",
".",
"remote",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"0.1",
")",
"[",
"0",
"]",
"if",
"self",... | Performs endless processing of socket input/output, passing
cooked information onto the local process. | [
"Performs",
"endless",
"processing",
"of",
"socket",
"input",
"/",
"output",
"passing",
"cooked",
"information",
"onto",
"the",
"local",
"process",
"."
] | python | train |
pyupio/dparse | dparse/parser.py | https://github.com/pyupio/dparse/blob/0cd5aa7eb1f78c39da78b6c63dde6b49a1732cd2/dparse/parser.py#L221-L278 | def parse(self):
"""
Parses a requirements.txt-like file
"""
index_server = None
for num, line in enumerate(self.iter_lines()):
line = line.rstrip()
if not line:
continue
if line.startswith('#'):
# comments are lines that start with # only
continue
if line.startswith('-i') or \
line.startswith('--index-url') or \
line.startswith('--extra-index-url'):
# this file is using a private index server, try to parse it
index_server = self.parse_index_server(line)
continue
elif self.obj.path and (line.startswith('-r') or line.startswith('--requirement')):
self.obj.resolved_files.append(self.resolve_file(self.obj.path, line))
elif line.startswith('-f') or line.startswith('--find-links') or \
line.startswith('--no-index') or line.startswith('--allow-external') or \
line.startswith('--allow-unverified') or line.startswith('-Z') or \
line.startswith('--always-unzip'):
continue
elif self.is_marked_line(line):
continue
else:
try:
parseable_line = line
# multiline requirements are not parseable
if "\\" in line:
parseable_line = line.replace("\\", "")
for next_line in self.iter_lines(num + 1):
parseable_line += next_line.strip().replace("\\", "")
line += "\n" + next_line
if "\\" in next_line:
continue
break
# ignore multiline requirements if they are marked
if self.is_marked_line(parseable_line):
continue
hashes = []
if "--hash" in parseable_line:
parseable_line, hashes = Parser.parse_hashes(parseable_line)
req = RequirementsTXTLineParser.parse(parseable_line)
if req:
req.hashes = hashes
req.index_server = index_server
# replace the requirements line with the 'real' line
req.line = line
self.obj.dependencies.append(req)
except ValueError:
continue | [
"def",
"parse",
"(",
"self",
")",
":",
"index_server",
"=",
"None",
"for",
"num",
",",
"line",
"in",
"enumerate",
"(",
"self",
".",
"iter_lines",
"(",
")",
")",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
")",
"if",
"not",
"line",
":",
"continue"... | Parses a requirements.txt-like file | [
"Parses",
"a",
"requirements",
".",
"txt",
"-",
"like",
"file"
] | python | train |
santoshphilip/eppy | eppy/modeleditor.py | https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L190-L199 | def addobject1(bunchdt, data, commdct, key, **kwargs):
"""add an object to the eplus model"""
obj = newrawobject(data, commdct, key)
abunch = obj2bunch(data, commdct, obj)
data.dt[key].append(obj)
bunchdt[key].append(abunch)
# adict = getnamedargs(*args, **kwargs)
for kkey, value in iteritems(kwargs):
abunch[kkey] = value
return abunch | [
"def",
"addobject1",
"(",
"bunchdt",
",",
"data",
",",
"commdct",
",",
"key",
",",
"*",
"*",
"kwargs",
")",
":",
"obj",
"=",
"newrawobject",
"(",
"data",
",",
"commdct",
",",
"key",
")",
"abunch",
"=",
"obj2bunch",
"(",
"data",
",",
"commdct",
",",
... | add an object to the eplus model | [
"add",
"an",
"object",
"to",
"the",
"eplus",
"model"
] | python | train |
briancappello/flask-unchained | flask_unchained/bundles/controller/route.py | https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/controller/route.py#L179-L186 | def rule(self):
"""
The (partial) url rule for this route.
"""
if self._rule:
return self._rule
return self._make_rule(member_param=self._member_param,
unique_member_param=self._unique_member_param) | [
"def",
"rule",
"(",
"self",
")",
":",
"if",
"self",
".",
"_rule",
":",
"return",
"self",
".",
"_rule",
"return",
"self",
".",
"_make_rule",
"(",
"member_param",
"=",
"self",
".",
"_member_param",
",",
"unique_member_param",
"=",
"self",
".",
"_unique_membe... | The (partial) url rule for this route. | [
"The",
"(",
"partial",
")",
"url",
"rule",
"for",
"this",
"route",
"."
] | python | train |
wummel/linkchecker | linkcheck/log.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/log.py#L37-L59 | def _stack_format (stack):
"""Format a stack trace to a message.
@return: formatted stack message
@rtype: string
"""
s = StringIO()
s.write('Traceback:')
s.write(os.linesep)
for frame, fname, lineno, method, lines, dummy in reversed(stack):
s.write(' File %r, line %d, in %s' % (fname, lineno, method))
s.write(os.linesep)
s.write(' %s' % lines[0].lstrip())
if PRINT_LOCALVARS:
for key, value in frame.f_locals.items():
s.write(" %s = " % key)
# be careful not to cause a new error in the error output
try:
s.write(repr(value))
s.write(os.linesep)
except Exception:
s.write("error in repr() call%s" % os.linesep)
return s.getvalue() | [
"def",
"_stack_format",
"(",
"stack",
")",
":",
"s",
"=",
"StringIO",
"(",
")",
"s",
".",
"write",
"(",
"'Traceback:'",
")",
"s",
".",
"write",
"(",
"os",
".",
"linesep",
")",
"for",
"frame",
",",
"fname",
",",
"lineno",
",",
"method",
",",
"lines"... | Format a stack trace to a message.
@return: formatted stack message
@rtype: string | [
"Format",
"a",
"stack",
"trace",
"to",
"a",
"message",
"."
] | python | train |
sixpack/sixpack | sixpack/models.py | https://github.com/sixpack/sixpack/blob/fec044a35eea79dd7b9af73fafe1b7d15f1d9ef8/sixpack/models.py#L322-L341 | def get_alternative(self, client, dt=None, prefetch=False):
"""Returns and records an alternative according to the following
precedence:
1. An existing alternative
2. A server-chosen alternative
"""
if self.is_archived() or self.is_paused():
return self.control
if self.is_client_excluded(client):
return self.control
chosen_alternative = self.existing_alternative(client)
if not chosen_alternative:
chosen_alternative, participate = self.choose_alternative(client)
if participate and not prefetch:
chosen_alternative.record_participation(client, dt=dt)
return chosen_alternative | [
"def",
"get_alternative",
"(",
"self",
",",
"client",
",",
"dt",
"=",
"None",
",",
"prefetch",
"=",
"False",
")",
":",
"if",
"self",
".",
"is_archived",
"(",
")",
"or",
"self",
".",
"is_paused",
"(",
")",
":",
"return",
"self",
".",
"control",
"if",
... | Returns and records an alternative according to the following
precedence:
1. An existing alternative
2. A server-chosen alternative | [
"Returns",
"and",
"records",
"an",
"alternative",
"according",
"to",
"the",
"following",
"precedence",
":",
"1",
".",
"An",
"existing",
"alternative",
"2",
".",
"A",
"server",
"-",
"chosen",
"alternative"
] | python | train |
saltstack/salt | salt/cloud/clouds/linode.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L647-L673 | def create_swap_disk(vm_, linode_id, swap_size=None):
r'''
Creates the disk for the specified Linode.
vm\_
The VM profile to create the swap disk for.
linode_id
The ID of the Linode to create the swap disk for.
swap_size
The size of the disk, in MB.
'''
kwargs = {}
if not swap_size:
swap_size = get_swap_size(vm_)
kwargs.update({'LinodeID': linode_id,
'Label': vm_['name'],
'Type': 'swap',
'Size': swap_size
})
result = _query('linode', 'disk.create', args=kwargs)
return _clean_data(result) | [
"def",
"create_swap_disk",
"(",
"vm_",
",",
"linode_id",
",",
"swap_size",
"=",
"None",
")",
":",
"kwargs",
"=",
"{",
"}",
"if",
"not",
"swap_size",
":",
"swap_size",
"=",
"get_swap_size",
"(",
"vm_",
")",
"kwargs",
".",
"update",
"(",
"{",
"'LinodeID'",... | r'''
Creates the disk for the specified Linode.
vm\_
The VM profile to create the swap disk for.
linode_id
The ID of the Linode to create the swap disk for.
swap_size
The size of the disk, in MB. | [
"r",
"Creates",
"the",
"disk",
"for",
"the",
"specified",
"Linode",
"."
] | python | train |
bird-house/birdhousebuilder.recipe.conda | birdhousebuilder/recipe/conda/__init__.py | https://github.com/bird-house/birdhousebuilder.recipe.conda/blob/a5c0224ca4424c0c5cb1c302ba220c43cbc7ab3d/birdhousebuilder/recipe/conda/__init__.py#L172-L211 | def install_pkgs(self, offline=False):
"""
TODO: maybe use conda as python package
"""
if not offline and self.pkgs:
self.logger.info("Installing conda packages ...")
cmd = [join(self.anaconda_home, 'bin', 'conda')]
cmd.append('install')
# if offline:
# cmd.append('--offline')
# self.logger.info("... offline mode ...")
if not self.newest:
cmd.append('--no-update-deps')
self.logger.info("... no update dependencies ...")
if self.env:
self.logger.info("... in conda environment %s ...", self.env)
cmd.extend(['-n', self.env])
cmd.append('--yes')
if self.no_pin:
cmd.append('--no-pin')
self.logger.info("... no pin ...")
if self.channel_priority:
self.logger.info("... channel priority ...")
cmd.append('--channel-priority')
if self.channels:
if self.override_channels:
self.logger.info('... override channels ...')
cmd.append('--override-channels')
self.logger.info("... with conda channels: %s ...",
', '.join(self.channels))
for channel in self.channels:
cmd.append('-c')
cmd.append(channel)
cmd.extend(self.pkgs)
try:
self.logger.debug("install_pkgs cmd: %s", cmd)
check_call(cmd)
except CalledProcessError as err:
self.logger.error("Conda exited with errors: %s", err.output)
return self.pkgs | [
"def",
"install_pkgs",
"(",
"self",
",",
"offline",
"=",
"False",
")",
":",
"if",
"not",
"offline",
"and",
"self",
".",
"pkgs",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Installing conda packages ...\"",
")",
"cmd",
"=",
"[",
"join",
"(",
"self",
... | TODO: maybe use conda as python package | [
"TODO",
":",
"maybe",
"use",
"conda",
"as",
"python",
"package"
] | python | train |
cykl/infoqscraper | infoqscraper/cache.py | https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/cache.py#L158-L165 | def size(self):
"""Returns the size of the cache in bytes."""
total_size = 0
for dir_path, dir_names, filenames in os.walk(self.dir):
for f in filenames:
fp = os.path.join(dir_path, f)
total_size += os.path.getsize(fp)
return total_size | [
"def",
"size",
"(",
"self",
")",
":",
"total_size",
"=",
"0",
"for",
"dir_path",
",",
"dir_names",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"self",
".",
"dir",
")",
":",
"for",
"f",
"in",
"filenames",
":",
"fp",
"=",
"os",
".",
"path",
"."... | Returns the size of the cache in bytes. | [
"Returns",
"the",
"size",
"of",
"the",
"cache",
"in",
"bytes",
"."
] | python | train |
secnot/rectpack | rectpack/packer.py | https://github.com/secnot/rectpack/blob/21d46be48fd453500ea49de699bc9eabc427bdf7/rectpack/packer.py#L227-L261 | def _new_open_bin(self, width=None, height=None, rid=None):
"""
Extract the next empty bin and append it to open bins
Returns:
PackingAlgorithm: Initialized empty packing bin.
None: No bin big enough for the rectangle was found
"""
factories_to_delete = set() #
new_bin = None
for key, binfac in self._empty_bins.items():
# Only return the new bin if the rect fits.
# (If width or height is None, caller doesn't know the size.)
if not binfac.fits_inside(width, height):
continue
# Create bin and add to open_bins
new_bin = binfac.new_bin()
if new_bin is None:
continue
self._open_bins.append(new_bin)
# If the factory was depleted mark for deletion
if binfac.is_empty():
factories_to_delete.add(key)
break
# Delete marked factories
for f in factories_to_delete:
del self._empty_bins[f]
return new_bin | [
"def",
"_new_open_bin",
"(",
"self",
",",
"width",
"=",
"None",
",",
"height",
"=",
"None",
",",
"rid",
"=",
"None",
")",
":",
"factories_to_delete",
"=",
"set",
"(",
")",
"#",
"new_bin",
"=",
"None",
"for",
"key",
",",
"binfac",
"in",
"self",
".",
... | Extract the next empty bin and append it to open bins
Returns:
PackingAlgorithm: Initialized empty packing bin.
None: No bin big enough for the rectangle was found | [
"Extract",
"the",
"next",
"empty",
"bin",
"and",
"append",
"it",
"to",
"open",
"bins"
] | python | train |
diffeo/rejester | rejester/_queue.py | https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_queue.py#L91-L120 | def dump_queue(self, *names):
"""Debug-log some of the queues.
``names`` may include any of "worker", "available", "priorities",
"expiration", "workers", or "reservations_ITEM" filling in some
specific item.
"""
conn = redis.StrictRedis(connection_pool=self.pool)
for name in names:
if name == 'worker':
logger.debug('last worker: ' + conn.get(self._key_worker()))
elif name == 'available':
logger.debug('available: ' +
str(conn.zrevrange(self._key_available(), 0, -1,
withscores=True)))
elif name == 'priorities':
logger.debug('priorities: ' +
str(conn.hgetall(self._key_priorities())))
elif name == 'expiration':
logger.debug('expiration: ' +
str(conn.zrevrange(self._key_expiration(), 0, -1,
withscores=True)))
elif name == 'workers':
logger.debug('workers: ' +
str(conn.hgetall(self._key_workers())))
elif name.startswith('reservations_'):
item = name[len('reservations_'):]
logger.debug('reservations for ' + item + ': ' +
str(conn.smembers(self._key_reservations(item)))) | [
"def",
"dump_queue",
"(",
"self",
",",
"*",
"names",
")",
":",
"conn",
"=",
"redis",
".",
"StrictRedis",
"(",
"connection_pool",
"=",
"self",
".",
"pool",
")",
"for",
"name",
"in",
"names",
":",
"if",
"name",
"==",
"'worker'",
":",
"logger",
".",
"de... | Debug-log some of the queues.
``names`` may include any of "worker", "available", "priorities",
"expiration", "workers", or "reservations_ITEM" filling in some
specific item. | [
"Debug",
"-",
"log",
"some",
"of",
"the",
"queues",
"."
] | python | train |
Kentzo/Power | power/freebsd.py | https://github.com/Kentzo/Power/blob/2c99b156546225e448f7030681af3df5cd345e4b/power/freebsd.py#L13-L30 | def power_source_type():
"""
FreeBSD use sysctl hw.acpi.acline to tell if Mains (1) is used or Battery (0).
Beware, that on a Desktop machines this hw.acpi.acline oid may not exist.
@return: One of common.POWER_TYPE_*
@raise: Runtime error if type of power source is not supported
"""
try:
supply=int(subprocess.check_output(["sysctl","-n","hw.acpi.acline"]))
except:
return common.POWER_TYPE_AC
if supply == 1:
return common.POWER_TYPE_AC
elif supply == 0:
return common.POWER_TYPE_BATTERY
else:
raise RuntimeError("Unknown power source type!") | [
"def",
"power_source_type",
"(",
")",
":",
"try",
":",
"supply",
"=",
"int",
"(",
"subprocess",
".",
"check_output",
"(",
"[",
"\"sysctl\"",
",",
"\"-n\"",
",",
"\"hw.acpi.acline\"",
"]",
")",
")",
"except",
":",
"return",
"common",
".",
"POWER_TYPE_AC",
"... | FreeBSD use sysctl hw.acpi.acline to tell if Mains (1) is used or Battery (0).
Beware, that on a Desktop machines this hw.acpi.acline oid may not exist.
@return: One of common.POWER_TYPE_*
@raise: Runtime error if type of power source is not supported | [
"FreeBSD",
"use",
"sysctl",
"hw",
".",
"acpi",
".",
"acline",
"to",
"tell",
"if",
"Mains",
"(",
"1",
")",
"is",
"used",
"or",
"Battery",
"(",
"0",
")",
".",
"Beware",
"that",
"on",
"a",
"Desktop",
"machines",
"this",
"hw",
".",
"acpi",
".",
"acline... | python | train |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/utils/strdispatch.py | https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/strdispatch.py#L63-L68 | def flat_matches(self, key):
""" Yield all 'value' targets, without priority """
for val in self.dispatch(key):
for el in val:
yield el[1] # only value, no priority
return | [
"def",
"flat_matches",
"(",
"self",
",",
"key",
")",
":",
"for",
"val",
"in",
"self",
".",
"dispatch",
"(",
"key",
")",
":",
"for",
"el",
"in",
"val",
":",
"yield",
"el",
"[",
"1",
"]",
"# only value, no priority",
"return"
] | Yield all 'value' targets, without priority | [
"Yield",
"all",
"value",
"targets",
"without",
"priority"
] | python | test |
aconrad/pycobertura | pycobertura/cobertura.py | https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L371-L418 | def file_source(self, filename):
"""
Return a list of namedtuple `Line` for each line of code found in the
given file `filename`.
"""
if self.cobertura1.has_file(filename) and \
self.cobertura1.filesystem.has_file(filename):
lines1 = self.cobertura1.source_lines(filename)
line_statuses1 = dict(self.cobertura1.line_statuses(
filename))
else:
lines1 = []
line_statuses1 = {}
lines2 = self.cobertura2.source_lines(filename)
line_statuses2 = dict(self.cobertura2.line_statuses(filename))
# Build a dict of lineno2 -> lineno1
lineno_map = reconcile_lines(lines2, lines1)
lines = []
for lineno, source in enumerate(lines2, start=1):
status = None
reason = None
if lineno not in lineno_map:
# line was added or removed, just use whatever coverage status
# is available as there is nothing to compare against.
status = line_statuses2.get(lineno)
reason = 'line-edit'
else:
other_lineno = lineno_map[lineno]
line_status1 = line_statuses1.get(other_lineno)
line_status2 = line_statuses2.get(lineno)
if line_status1 is line_status2:
status = None # unchanged
reason = None
elif line_status1 is True and line_status2 is False:
status = False # decreased
reason = 'cov-down'
elif line_status1 is False and line_status2 is True:
status = True # increased
reason = 'cov-up'
line = Line(lineno, source, status, reason)
lines.append(line)
return lines | [
"def",
"file_source",
"(",
"self",
",",
"filename",
")",
":",
"if",
"self",
".",
"cobertura1",
".",
"has_file",
"(",
"filename",
")",
"and",
"self",
".",
"cobertura1",
".",
"filesystem",
".",
"has_file",
"(",
"filename",
")",
":",
"lines1",
"=",
"self",
... | Return a list of namedtuple `Line` for each line of code found in the
given file `filename`. | [
"Return",
"a",
"list",
"of",
"namedtuple",
"Line",
"for",
"each",
"line",
"of",
"code",
"found",
"in",
"the",
"given",
"file",
"filename",
"."
] | python | train |
datacats/datacats | datacats/cli/shell.py | https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/cli/shell.py#L10-L28 | def shell(environment, opts):
"""Run a command or interactive shell within this environment
Usage:
datacats [-d] [-s NAME] shell [ENVIRONMENT [COMMAND...]]
Options:
-d --detach Run the resulting container in the background
-s --site=NAME Specify a site to run the shell on [default: primary]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
environment.require_data()
environment.start_supporting_containers()
return environment.interactive_shell(
opts['COMMAND'],
detach=opts['--detach']
) | [
"def",
"shell",
"(",
"environment",
",",
"opts",
")",
":",
"environment",
".",
"require_data",
"(",
")",
"environment",
".",
"start_supporting_containers",
"(",
")",
"return",
"environment",
".",
"interactive_shell",
"(",
"opts",
"[",
"'COMMAND'",
"]",
",",
"d... | Run a command or interactive shell within this environment
Usage:
datacats [-d] [-s NAME] shell [ENVIRONMENT [COMMAND...]]
Options:
-d --detach Run the resulting container in the background
-s --site=NAME Specify a site to run the shell on [default: primary]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.' | [
"Run",
"a",
"command",
"or",
"interactive",
"shell",
"within",
"this",
"environment"
] | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.