repo (string) | path (string) | url (string) | code (string) | code_tokens (list) | docstring (string) | docstring_tokens (list) | language (string, 1 distinct value) | partition (string, 3 distinct values) |
|---|---|---|---|---|---|---|---|---|
michael-lazar/rtv | rtv/terminal.py | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/terminal.py#L878-L910 | def strip_textpad(text):
"""
Attempt to intelligently strip excess whitespace from the output of a
curses textpad.
"""
if text is None:
return text
# Trivial case where the textbox is only one line long.
if '\n' not in text:
return text.rstrip()
# Allow one space at the end of the line. If there is more than one
# space, assume that a newline operation was intended by the user
stack, current_line = [], ''
for line in text.split('\n'):
if line.endswith(' ') or not line:
stack.append(current_line + line.rstrip())
current_line = ''
else:
current_line += line
stack.append(current_line)
# Prune empty lines at the bottom of the textbox.
for item in stack[::-1]:
if not item:
stack.pop()
else:
break
out = '\n'.join(stack)
return out | [
"def",
"strip_textpad",
"(",
"text",
")",
":",
"if",
"text",
"is",
"None",
":",
"return",
"text",
"# Trivial case where the textbox is only one line long.",
"if",
"'\\n'",
"not",
"in",
"text",
":",
"return",
"text",
".",
"rstrip",
"(",
")",
"# Allow one space at t... | Attempt to intelligently strip excess whitespace from the output of a
curses textpad. | [
"Attempt",
"to",
"intelligently",
"strip",
"excess",
"whitespace",
"from",
"the",
"output",
"of",
"a",
"curses",
"textpad",
"."
] | python | train |
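Illustrative note on the `strip_textpad` record above (an editorial sketch, not part of the dataset row): the function body is adapted from the record and the sample textpad output is invented, to show how soft-wrapped lines are re-joined while a trailing space is treated as an intentional newline.

```python
def strip_textpad(text):
    """Strip excess whitespace from curses textpad output (logic adapted from the record)."""
    if text is None:
        return text
    if '\n' not in text:
        return text.rstrip()
    # A trailing space (or an empty line) marks an intentional line break;
    # otherwise consecutive lines were soft-wrapped and are joined back together.
    stack, current_line = [], ''
    for line in text.split('\n'):
        if line.endswith(' ') or not line:
            stack.append(current_line + line.rstrip())
            current_line = ''
        else:
            current_line += line
    stack.append(current_line)
    while stack and not stack[-1]:   # prune empty lines at the bottom
        stack.pop()
    return '\n'.join(stack)

# Soft-wrapped first line ('Hello wor' + 'ld'); the trailing space after 'ld '
# marks an intended newline; the blank lines at the end are textbox padding.
sample = "Hello wor\nld \nsecond line\n\n"
print(repr(strip_textpad(sample)))   # -> 'Hello world\nsecond line'
```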
astrorafael/twisted-mqtt | examples/pubsubs.py | https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/examples/pubsubs.py#L72-L89 | def connectToBroker(self, protocol):
'''
Connect to MQTT broker
'''
self.protocol = protocol
self.protocol.onPublish = self.onPublish
self.protocol.onDisconnection = self.onDisconnection
self.protocol.setWindowSize(3)
self.task = task.LoopingCall(self.publish)
self.task.start(5.0, now=False)
try:
yield self.protocol.connect("TwistedMQTT-pubsubs", keepalive=60)
yield self.subscribe()
except Exception as e:
log.error("Connecting to {broker} raised {excp!s}",
broker=BROKER, excp=e)
else:
log.info("Connected and subscribed to {broker}", broker=BROKER) | [
"def",
"connectToBroker",
"(",
"self",
",",
"protocol",
")",
":",
"self",
".",
"protocol",
"=",
"protocol",
"self",
".",
"protocol",
".",
"onPublish",
"=",
"self",
".",
"onPublish",
"self",
".",
"protocol",
".",
"onDisconnection",
"=",
"self",
".",
"onDisc... | Connect to MQTT broker | [
"Connect",
"to",
"MQTT",
"broker"
] | python | test |
Fantomas42/django-blog-zinnia | zinnia/xmlrpc/metaweblog.py | https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/xmlrpc/metaweblog.py#L205-L212 | def get_post(post_id, username, password):
"""
metaWeblog.getPost(post_id, username, password)
=> post structure
"""
user = authenticate(username, password)
site = Site.objects.get_current()
return post_structure(Entry.objects.get(id=post_id, authors=user), site) | [
"def",
"get_post",
"(",
"post_id",
",",
"username",
",",
"password",
")",
":",
"user",
"=",
"authenticate",
"(",
"username",
",",
"password",
")",
"site",
"=",
"Site",
".",
"objects",
".",
"get_current",
"(",
")",
"return",
"post_structure",
"(",
"Entry",
... | metaWeblog.getPost(post_id, username, password)
=> post structure | [
"metaWeblog",
".",
"getPost",
"(",
"post_id",
"username",
"password",
")",
"=",
">",
"post",
"structure"
] | python | train |
numenta/htmresearch | htmresearch/frameworks/sp_paper/sp_metrics.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L888-L920 | def mutualInformation(sp, activeColumnsCurrentEpoch, column_1, column_2):
"""
Computes the mutual information of the binary variables that represent
the activation probabilities of two columns. The mutual information I(X,Y)
of two random variables is given by
\[
I (X,Y) = \sum_{x,y} p(x,y) log( p(x,y) / ( p(x) p(y) ) ).
\]
(https://en.wikipedia.org/wiki/Mutual_information)
"""
i, j = column_1, column_2
batchSize = activeColumnsCurrentEpoch.shape[0]
# Activity Counts
ci, cj, cij = 0., 0., dict([((0,0),0.), ((1,0),0.), ((0,1),0.), ((1,1),0.)])
for t in range(batchSize):
ai = activeColumnsCurrentEpoch[t, i]
aj = activeColumnsCurrentEpoch[t, j]
cij[(ai, aj)] += 1.
ci += ai
cj += aj
# Mutual information calculation
Iij = 0
for a,b in [(0,0), (1,0), (0,1), (1,1)]:
# Compute probabilities
pij = cij[(a,b)]/batchSize
pi = ci/batchSize if a == 1 else 1. - ci/batchSize
pj = cj/batchSize if b == 1 else 1. - cj/batchSize
# Add current term of mutual information
Iij += pij * np.log2(pij/(pi*pj)) if pij > 0 else 0
return Iij | [
"def",
"mutualInformation",
"(",
"sp",
",",
"activeColumnsCurrentEpoch",
",",
"column_1",
",",
"column_2",
")",
":",
"i",
",",
"j",
"=",
"column_1",
",",
"column_2",
"batchSize",
"=",
"activeColumnsCurrentEpoch",
".",
"shape",
"[",
"0",
"]",
"# Activity Counts",... | Computes the mutual information of the binary variables that represent
the activation probabilities of two columns. The mutual information I(X,Y)
of two random variables is given by
\[
I (X,Y) = \sum_{x,y} p(x,y) log( p(x,y) / ( p(x) p(y) ) ).
\]
(https://en.wikipedia.org/wiki/Mutual_information) | [
"Computes",
"the",
"mutual",
"information",
"of",
"the",
"binary",
"variables",
"that",
"represent",
"the",
"activation",
"probabilities",
"of",
"two",
"columns",
".",
"The",
"mutual",
"information",
"I",
"(",
"X",
"Y",
")",
"of",
"two",
"random",
"variables",... | python | train |
lablup/backend.ai-client-py | src/ai/backend/client/cli/admin/images.py | https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/cli/admin/images.py#L11-L34 | def images():
'''
Show the list of registered images in this cluster.
'''
fields = [
('Name', 'name'),
('Registry', 'registry'),
('Tag', 'tag'),
('Digest', 'digest'),
('Size', 'size_bytes'),
('Aliases', 'aliases'),
]
with Session() as session:
try:
items = session.Image.list(fields=(item[1] for item in fields))
except Exception as e:
print_error(e)
sys.exit(1)
if len(items) == 0:
print('There are no registered images.')
return
print(tabulate((item.values() for item in items),
headers=(item[0] for item in fields),
floatfmt=',.0f')) | [
"def",
"images",
"(",
")",
":",
"fields",
"=",
"[",
"(",
"'Name'",
",",
"'name'",
")",
",",
"(",
"'Registry'",
",",
"'registry'",
")",
",",
"(",
"'Tag'",
",",
"'tag'",
")",
",",
"(",
"'Digest'",
",",
"'digest'",
")",
",",
"(",
"'Size'",
",",
"'si... | Show the list of registered images in this cluster. | [
"Show",
"the",
"list",
"of",
"registered",
"images",
"in",
"this",
"cluster",
"."
] | python | train |
opencobra/cobrapy | cobra/core/reaction.py | https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/core/reaction.py#L490-L504 | def functional(self):
"""All required enzymes for reaction are functional.
Returns
-------
bool
True if the gene-protein-reaction (GPR) rule is fulfilled for
this reaction, or if reaction is not associated to a model,
otherwise False.
"""
if self._model:
tree, _ = parse_gpr(self.gene_reaction_rule)
return eval_gpr(tree, {gene.id for gene in self.genes if
not gene.functional})
return True | [
"def",
"functional",
"(",
"self",
")",
":",
"if",
"self",
".",
"_model",
":",
"tree",
",",
"_",
"=",
"parse_gpr",
"(",
"self",
".",
"gene_reaction_rule",
")",
"return",
"eval_gpr",
"(",
"tree",
",",
"{",
"gene",
".",
"id",
"for",
"gene",
"in",
"self"... | All required enzymes for reaction are functional.
Returns
-------
bool
True if the gene-protein-reaction (GPR) rule is fulfilled for
this reaction, or if reaction is not associated to a model,
otherwise False. | [
"All",
"required",
"enzymes",
"for",
"reaction",
"are",
"functional",
"."
] | python | valid |
lingthio/Flask-User | flask_user/user_mixin.py | https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/user_mixin.py#L59-L102 | def has_roles(self, *requirements):
""" Return True if the user has all of the specified roles. Return False otherwise.
has_roles() accepts a list of requirements:
has_role(requirement1, requirement2, requirement3).
Each requirement is either a role_name, or a tuple_of_role_names.
role_name example: 'manager'
tuple_of_role_names: ('funny', 'witty', 'hilarious')
A role_name-requirement is accepted when the user has this role.
A tuple_of_role_names-requirement is accepted when the user has ONE of these roles.
has_roles() returns true if ALL of the requirements have been accepted.
For example:
has_roles('a', ('b', 'c'), d)
Translates to:
User has role 'a' AND (role 'b' OR role 'c') AND role 'd'"""
# Translates a list of role objects to a list of role_names
user_manager = current_app.user_manager
role_names = user_manager.db_manager.get_user_roles(self)
# has_role() accepts a list of requirements
for requirement in requirements:
if isinstance(requirement, (list, tuple)):
# this is a tuple_of_role_names requirement
tuple_of_role_names = requirement
authorized = False
for role_name in tuple_of_role_names:
if role_name in role_names:
# tuple_of_role_names requirement was met: break out of loop
authorized = True
break
if not authorized:
return False # tuple_of_role_names requirement failed: return False
else:
# this is a role_name requirement
role_name = requirement
# the user must have this role
if not role_name in role_names:
return False # role_name requirement failed: return False
# All requirements have been met: return True
return True | [
"def",
"has_roles",
"(",
"self",
",",
"*",
"requirements",
")",
":",
"# Translates a list of role objects to a list of role_names",
"user_manager",
"=",
"current_app",
".",
"user_manager",
"role_names",
"=",
"user_manager",
".",
"db_manager",
".",
"get_user_roles",
"(",
... | Return True if the user has all of the specified roles. Return False otherwise.
has_roles() accepts a list of requirements:
has_role(requirement1, requirement2, requirement3).
Each requirement is either a role_name, or a tuple_of_role_names.
role_name example: 'manager'
tuple_of_role_names: ('funny', 'witty', 'hilarious')
A role_name-requirement is accepted when the user has this role.
A tuple_of_role_names-requirement is accepted when the user has ONE of these roles.
has_roles() returns true if ALL of the requirements have been accepted.
For example:
has_roles('a', ('b', 'c'), d)
Translates to:
User has role 'a' AND (role 'b' OR role 'c') AND role 'd | [
"Return",
"True",
"if",
"the",
"user",
"has",
"all",
"of",
"the",
"specified",
"roles",
".",
"Return",
"False",
"otherwise",
"."
] | python | train |
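Illustrative note on the `has_roles` record above (editorial sketch, not Flask-User's API): just the AND/OR requirement semantics from the docstring, evaluated against a plain set of role names instead of the user and role models.

```python
def meets_requirements(role_names, *requirements):
    """Return True if role_names satisfies every requirement.

    A requirement is either a single role name (must be held) or a
    tuple/list of role names (at least one must be held) -- the same
    semantics described in the has_roles docstring above.
    """
    for requirement in requirements:
        if isinstance(requirement, (list, tuple)):
            if not any(name in role_names for name in requirement):
                return False        # none of the alternative roles is held
        elif requirement not in role_names:
            return False            # a single required role is missing
    return True

roles = {'a', 'c', 'd'}
print(meets_requirements(roles, 'a', ('b', 'c'), 'd'))   # True: a AND (b OR c) AND d
print(meets_requirements(roles, 'a', ('b', 'e')))        # False: neither b nor e is held
```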
raff/dynash | dynash2/dynash2.py | https://github.com/raff/dynash/blob/a2b4fab67dd85ceaa9c1bb7604ebc1768a7fc28e/dynash2/dynash2.py#L463-L477 | def do_refresh(self, line):
"refresh {table_name}"
table = self.get_table(line)
while True:
desc = table.describe()
status = desc['Table']['TableStatus']
if status == 'ACTIVE':
break
else:
print status, "..."
time.sleep(5)
print ""
self.pprint(desc) | [
"def",
"do_refresh",
"(",
"self",
",",
"line",
")",
":",
"table",
"=",
"self",
".",
"get_table",
"(",
"line",
")",
"while",
"True",
":",
"desc",
"=",
"table",
".",
"describe",
"(",
")",
"status",
"=",
"desc",
"[",
"'Table'",
"]",
"[",
"'TableStatus'"... | refresh {table_name} | [
"refresh",
"{",
"table_name",
"}"
] | python | train |
CiscoUcs/UcsPythonSDK | src/UcsSdk/UcsBase.py | https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsBase.py#L68-L73 | def childWriteXml(self, w, option):
"""Method writes the xml representation for the object."""
ch = []
for c in self.child:
ch.append(c.WriteXml(w, option))
return ch | [
"def",
"childWriteXml",
"(",
"self",
",",
"w",
",",
"option",
")",
":",
"ch",
"=",
"[",
"]",
"for",
"c",
"in",
"self",
".",
"child",
":",
"ch",
".",
"append",
"(",
"c",
".",
"WriteXml",
"(",
"w",
",",
"option",
")",
")",
"return",
"ch"
] | Method writes the xml representation for the object. | [
"Method",
"writes",
"the",
"xml",
"representation",
"for",
"the",
"object",
"."
] | python | train |
apache/incubator-mxnet | python/mxnet/operator.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/operator.py#L506-L527 | def infer_type(self, in_type):
"""infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
"""
return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states()) | [
"def",
"infer_type",
"(",
"self",
",",
"in_type",
")",
":",
"return",
"in_type",
",",
"[",
"in_type",
"[",
"0",
"]",
"]",
"*",
"len",
"(",
"self",
".",
"list_outputs",
"(",
")",
")",
",",
"[",
"in_type",
"[",
"0",
"]",
"]",
"*",
"len",
"(",
"se... | infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states. | [
"infer_type",
"interface",
".",
"override",
"to",
"create",
"new",
"operators"
] | python | train |
mozilla/treeherder | treeherder/seta/job_priorities.py | https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/job_priorities.py#L27-L56 | def _process(self, project, build_system, job_priorities):
'''Return list of ref_data_name for job_priorities'''
jobs = []
# we cache the reference data names in order to reduce API calls
cache_key = '{}-{}-ref_data_names_cache'.format(project, build_system)
ref_data_names_map = cache.get(cache_key)
if not ref_data_names_map:
# cache expired so re-build the reference data names map; the map
# contains the ref_data_name of every treeherder *test* job for this project
ref_data_names_map = self._build_ref_data_names(project, build_system)
# update the cache
cache.set(cache_key, ref_data_names_map, SETA_REF_DATA_NAMES_CACHE_TIMEOUT)
# now check the JobPriority table against the list of valid runnable
for jp in job_priorities:
# if this JobPriority entry is no longer supported in SETA then ignore it
if not valid_platform(jp.platform):
continue
if is_job_blacklisted(jp.testtype):
continue
key = jp.unique_identifier()
if key in ref_data_names_map:
# e.g. desktop-test-linux64-pgo/opt-reftest-13 or builder name
jobs.append(ref_data_names_map[key])
else:
logger.warning('Job priority (%s) not found in accepted jobs list', jp)
return jobs | [
"def",
"_process",
"(",
"self",
",",
"project",
",",
"build_system",
",",
"job_priorities",
")",
":",
"jobs",
"=",
"[",
"]",
"# we cache the reference data names in order to reduce API calls",
"cache_key",
"=",
"'{}-{}-ref_data_names_cache'",
".",
"format",
"(",
"projec... | Return list of ref_data_name for job_priorities | [
"Return",
"list",
"of",
"ref_data_name",
"for",
"job_priorities"
] | python | train |
linode/linode_api4-python | linode_api4/objects/linode.py | https://github.com/linode/linode_api4-python/blob/1dd7318d2aed014c746d48c7957464c57af883ca/linode_api4/objects/linode.py#L643-L648 | def initiate_migration(self):
"""
Initiates a pending migration that is already scheduled for this Linode
Instance
"""
self._client.post('{}/migrate'.format(Instance.api_endpoint), model=self) | [
"def",
"initiate_migration",
"(",
"self",
")",
":",
"self",
".",
"_client",
".",
"post",
"(",
"'{}/migrate'",
".",
"format",
"(",
"Instance",
".",
"api_endpoint",
")",
",",
"model",
"=",
"self",
")"
] | Initiates a pending migration that is already scheduled for this Linode
Instance | [
"Initiates",
"a",
"pending",
"migration",
"that",
"is",
"already",
"scheduled",
"for",
"this",
"Linode",
"Instance"
] | python | train |
marcotcr/lime | lime/lime_text.py | https://github.com/marcotcr/lime/blob/08133d47df00ed918e22005e0c98f6eefd5a1d71/lime/lime_text.py#L356-L418 | def explain_instance(self,
text_instance,
classifier_fn,
labels=(1,),
top_labels=None,
num_features=10,
num_samples=5000,
distance_metric='cosine',
model_regressor=None):
"""Generates explanations for a prediction.
First, we generate neighborhood data by randomly hiding features from
the instance (see __data_labels_distance_mapping). We then learn
locally weighted linear models on this neighborhood data to explain
each of the classes in an interpretable way (see lime_base.py).
Args:
text_instance: raw text string to be explained.
classifier_fn: classifier prediction probability function, which
takes a list of d strings and outputs a (d, k) numpy array with
prediction probabilities, where k is the number of classes.
For ScikitClassifiers , this is classifier.predict_proba.
labels: iterable with labels to be explained.
top_labels: if not None, ignore labels and produce explanations for
the K labels with highest prediction probabilities, where K is
this parameter.
num_features: maximum number of features present in explanation
num_samples: size of the neighborhood to learn the linear model
distance_metric: the distance metric to use for sample weighting,
defaults to cosine similarity
model_regressor: sklearn regressor to use in explanation. Defaults
to Ridge regression in LimeBase. Must have model_regressor.coef_
and 'sample_weight' as a parameter to model_regressor.fit()
Returns:
An Explanation object (see explanation.py) with the corresponding
explanations.
"""
indexed_string = IndexedCharacters(
text_instance, bow=self.bow) if self.char_level else IndexedString(
text_instance, bow=self.bow, split_expression=self.split_expression)
domain_mapper = TextDomainMapper(indexed_string)
data, yss, distances = self.__data_labels_distances(
indexed_string, classifier_fn, num_samples,
distance_metric=distance_metric)
if self.class_names is None:
self.class_names = [str(x) for x in range(yss[0].shape[0])]
ret_exp = explanation.Explanation(domain_mapper=domain_mapper,
class_names=self.class_names,
random_state=self.random_state)
ret_exp.predict_proba = yss[0]
if top_labels:
labels = np.argsort(yss[0])[-top_labels:]
ret_exp.top_labels = list(labels)
ret_exp.top_labels.reverse()
for label in labels:
(ret_exp.intercept[label],
ret_exp.local_exp[label],
ret_exp.score, ret_exp.local_pred) = self.base.explain_instance_with_data(
data, yss, distances, label, num_features,
model_regressor=model_regressor,
feature_selection=self.feature_selection)
return ret_exp | [
"def",
"explain_instance",
"(",
"self",
",",
"text_instance",
",",
"classifier_fn",
",",
"labels",
"=",
"(",
"1",
",",
")",
",",
"top_labels",
"=",
"None",
",",
"num_features",
"=",
"10",
",",
"num_samples",
"=",
"5000",
",",
"distance_metric",
"=",
"'cosi... | Generates explanations for a prediction.
First, we generate neighborhood data by randomly hiding features from
the instance (see __data_labels_distance_mapping). We then learn
locally weighted linear models on this neighborhood data to explain
each of the classes in an interpretable way (see lime_base.py).
Args:
text_instance: raw text string to be explained.
classifier_fn: classifier prediction probability function, which
takes a list of d strings and outputs a (d, k) numpy array with
prediction probabilities, where k is the number of classes.
For ScikitClassifiers , this is classifier.predict_proba.
labels: iterable with labels to be explained.
top_labels: if not None, ignore labels and produce explanations for
the K labels with highest prediction probabilities, where K is
this parameter.
num_features: maximum number of features present in explanation
num_samples: size of the neighborhood to learn the linear model
distance_metric: the distance metric to use for sample weighting,
defaults to cosine similarity
model_regressor: sklearn regressor to use in explanation. Defaults
to Ridge regression in LimeBase. Must have model_regressor.coef_
and 'sample_weight' as a parameter to model_regressor.fit()
Returns:
An Explanation object (see explanation.py) with the corresponding
explanations. | [
"Generates",
"explanations",
"for",
"a",
"prediction",
"."
] | python | train |
raiden-network/raiden | raiden/network/proxies/utils.py | https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/proxies/utils.py#L16-L42 | def compare_contract_versions(
proxy: ContractProxy,
expected_version: str,
contract_name: str,
address: Address,
) -> None:
"""Compare version strings of a contract.
If not matching raise ContractVersionMismatch. Also may raise AddressWrongContract
if the contract contains no code."""
assert isinstance(expected_version, str)
try:
deployed_version = proxy.contract.functions.contract_version().call()
except BadFunctionCallOutput:
raise AddressWrongContract('')
deployed_version = deployed_version.replace('_', '0')
expected_version = expected_version.replace('_', '0')
deployed = [int(x) for x in deployed_version.split('.')]
expected = [int(x) for x in expected_version.split('.')]
if deployed != expected:
raise ContractVersionMismatch(
f'Provided {contract_name} contract ({to_normalized_address(address)}) '
f'version mismatch. Expected: {expected_version} Got: {deployed_version}',
) | [
"def",
"compare_contract_versions",
"(",
"proxy",
":",
"ContractProxy",
",",
"expected_version",
":",
"str",
",",
"contract_name",
":",
"str",
",",
"address",
":",
"Address",
",",
")",
"->",
"None",
":",
"assert",
"isinstance",
"(",
"expected_version",
",",
"s... | Compare version strings of a contract.
If not matching raise ContractVersionMismatch. Also may raise AddressWrongContract
if the contract contains no code. | [
"Compare",
"version",
"strings",
"of",
"a",
"contract",
"."
] | python | train |
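Illustrative note on the `compare_contract_versions` record above (editorial sketch): only the version-string comparison step, without the web3 proxy call. '_' placeholders become '0' and the dotted components are compared as integers; the version strings are made up, and a boolean is returned instead of raising ContractVersionMismatch.

```python
def versions_match(deployed_version: str, expected_version: str) -> bool:
    """Compare dotted contract versions the way the record does: '_' -> '0',
    then integer comparison per component, so '0.10.0' is not confused with '0.1.0'."""
    deployed = [int(x) for x in deployed_version.replace('_', '0').split('.')]
    expected = [int(x) for x in expected_version.replace('_', '0').split('.')]
    return deployed == expected

print(versions_match('0.4._', '0.4.0'))    # True: the '_' placeholder counts as 0
print(versions_match('0.10.0', '0.9.0'))   # False: compared numerically, not lexically
```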
wonambi-python/wonambi | wonambi/detect/spindle.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/detect/spindle.py#L1728-L1751 | def remove_straddlers(events, time, s_freq, toler=0.1):
"""Reject an event if it straddles a stitch, by comparing its
duration to its timespan.
Parameters
----------
events : ndarray (dtype='int')
N x M matrix with start, ..., end samples
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
toler : float, def=0.1
maximum tolerated difference between event duration and timespan
Returns
-------
ndarray (dtype='int')
N x M matrix with start , ..., end samples
"""
dur = (events[:, -1] - 1 - events[:, 0]) / s_freq
continuous = time[events[:, -1] - 1] - time[events[:, 0]] - dur < toler
return events[continuous, :] | [
"def",
"remove_straddlers",
"(",
"events",
",",
"time",
",",
"s_freq",
",",
"toler",
"=",
"0.1",
")",
":",
"dur",
"=",
"(",
"events",
"[",
":",
",",
"-",
"1",
"]",
"-",
"1",
"-",
"events",
"[",
":",
",",
"0",
"]",
")",
"/",
"s_freq",
"continuou... | Reject an event if it straddles a stitch, by comparing its
duration to its timespan.
Parameters
----------
events : ndarray (dtype='int')
N x M matrix with start, ..., end samples
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
toler : float, def=0.1
maximum tolerated difference between event duration and timespan
Returns
-------
ndarray (dtype='int')
N x M matrix with start , ..., end samples | [
"Reject",
"an",
"event",
"if",
"it",
"straddles",
"a",
"stitch",
"by",
"comparing",
"its",
"duration",
"to",
"its",
"timespan",
"."
] | python | train |
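Illustrative note on the `remove_straddlers` record above (editorial sketch): the same duration-versus-timespan test on a made-up time vector with a 10-second gap ("stitch") between two recorded segments. An event whose timestamps span the gap is rejected because its timespan exceeds its sample-count duration by more than `toler`.

```python
import numpy as np

s_freq = 10.0                                        # 10 Hz sampling
# Two recorded segments stitched together with a 10 s gap in between.
time = np.concatenate([np.arange(0, 5, 1 / s_freq),
                       np.arange(15, 20, 1 / s_freq)])
# Events as [start_sample, end_sample): one inside a segment, one across the stitch.
events = np.array([[10, 30],
                   [40, 60]])
toler = 0.1
dur = (events[:, -1] - 1 - events[:, 0]) / s_freq       # duration from sample counts
span = time[events[:, -1] - 1] - time[events[:, 0]]     # duration from timestamps
continuous = span - dur < toler
print(events[continuous, :])   # keeps [[10 30]]; the second event straddles the gap
```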
F5Networks/f5-common-python | f5/utils/iapp_parser.py | https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/utils/iapp_parser.py#L200-L230 | def _add_cli_scripts(self):
'''Add the found external sections to the templ_dict.'''
pattern = r"cli script\s+" \
r"(\/[\w\.\-]+\/)?" \
r"(?P<name>[\w\.\-]+)\s*\{"
sections = re.finditer(pattern, self.template_str)
for section in sections:
if 'scripts' not in self.templ_dict:
self.templ_dict['scripts'] = []
try:
sec_start = self._get_section_start_index(
section.group('name')
)
except NonextantSectionException:
continue
sec_end = self._get_section_end_index(
section.group('name'), sec_start
)
section_value = self.template_str[sec_start+1:sec_end].strip()
self.templ_dict['scripts'].append(dict(
name=section.group('name'),
script=section_value
))
self.template_str = self.template_str[:sec_start+1] + \
self.template_str[sec_end:] | [
"def",
"_add_cli_scripts",
"(",
"self",
")",
":",
"pattern",
"=",
"r\"cli script\\s+\"",
"r\"(\\/[\\w\\.\\-]+\\/)?\"",
"r\"(?P<name>[\\w\\.\\-]+)\\s*\\{\"",
"sections",
"=",
"re",
".",
"finditer",
"(",
"pattern",
",",
"self",
".",
"template_str",
")",
"for",
"section"... | Add the found external sections to the templ_dict. | [
"Add",
"the",
"found",
"external",
"sections",
"to",
"the",
"templ_dict",
"."
] | python | train |
ska-sa/katcp-python | katcp/resource_client.py | https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/resource_client.py#L549-L582 | def set_sensor_listener(self, sensor_name, listener):
"""Set a sensor listener for a sensor even if it is not yet known
The listener registration should persist across sensor disconnect/reconnect.
sensor_name : str
Name of the sensor
listener : callable
Listening callable that will be registered on the named sensor when it becomes
available. Callable as for :meth:`KATCPSensor.register_listener`
"""
sensor_name = resource.escape_name(sensor_name)
sensor_obj = dict.get(self._sensor, sensor_name)
self._sensor_listener_cache[sensor_name].append(listener)
sensor_dict = {}
self._logger.debug(
'Cached listener {} for sensor {}'
.format(listener, sensor_name))
if sensor_obj:
# The sensor exists, so register the listener and continue.
try:
sensor_obj.register_listener(listener, reading=True)
sensor_dict[sensor_name] = listener
self._logger.debug(
'Registered listener {} for sensor {}'
.format(listener, sensor_name))
except Exception as exc:
self._logger.exception(
'Unhandled exception trying to set sensor listener {} for sensor {} ({})'
.format(listener, sensor_name, exc))
sensor_dict[sensor_name] = str(exc)
# Otherwise, depend on self._add_sensors() to handle it from the cache when the sensor appears
raise tornado.gen.Return(sensor_dict) | [
"def",
"set_sensor_listener",
"(",
"self",
",",
"sensor_name",
",",
"listener",
")",
":",
"sensor_name",
"=",
"resource",
".",
"escape_name",
"(",
"sensor_name",
")",
"sensor_obj",
"=",
"dict",
".",
"get",
"(",
"self",
".",
"_sensor",
",",
"sensor_name",
")"... | Set a sensor listener for a sensor even if it is not yet known
The listener registration should persist across sensor disconnect/reconnect.
sensor_name : str
Name of the sensor
listener : callable
Listening callable that will be registered on the named sensor when it becomes
available. Callable as for :meth:`KATCPSensor.register_listener` | [
"Set",
"a",
"sensor",
"listener",
"for",
"a",
"sensor",
"even",
"if",
"it",
"is",
"not",
"yet",
"known",
"The",
"listener",
"registration",
"should",
"persist",
"across",
"sensor",
"disconnect",
"/",
"reconnect",
"."
] | python | train |
aiortc/pylibsrtp | pylibsrtp/__init__.py | https://github.com/aiortc/pylibsrtp/blob/31824d1f8430ff6dc217cfc101093b6ba2a307b2/pylibsrtp/__init__.py#L167-L174 | def add_stream(self, policy):
"""
Add a stream to the SRTP session, applying the given `policy`
to the stream.
:param policy: :class:`Policy`
"""
_srtp_assert(lib.srtp_add_stream(self._srtp[0], policy._policy)) | [
"def",
"add_stream",
"(",
"self",
",",
"policy",
")",
":",
"_srtp_assert",
"(",
"lib",
".",
"srtp_add_stream",
"(",
"self",
".",
"_srtp",
"[",
"0",
"]",
",",
"policy",
".",
"_policy",
")",
")"
] | Add a stream to the SRTP session, applying the given `policy`
to the stream.
:param policy: :class:`Policy` | [
"Add",
"a",
"stream",
"to",
"the",
"SRTP",
"session",
"applying",
"the",
"given",
"policy",
"to",
"the",
"stream",
"."
] | python | train |
fastai/fastai | fastai/data_block.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/data_block.py#L113-L118 | def from_folder(cls, path:PathOrStr, extensions:Collection[str]=None, recurse:bool=True,
include:Optional[Collection[str]]=None, processor:PreProcessors=None, **kwargs)->'ItemList':
"""Create an `ItemList` in `path` from the filenames that have a suffix in `extensions`.
`recurse` determines if we search subfolders."""
path = Path(path)
return cls(get_files(path, extensions, recurse=recurse, include=include), path=path, processor=processor, **kwargs) | [
"def",
"from_folder",
"(",
"cls",
",",
"path",
":",
"PathOrStr",
",",
"extensions",
":",
"Collection",
"[",
"str",
"]",
"=",
"None",
",",
"recurse",
":",
"bool",
"=",
"True",
",",
"include",
":",
"Optional",
"[",
"Collection",
"[",
"str",
"]",
"]",
"... | Create an `ItemList` in `path` from the filenames that have a suffix in `extensions`.
`recurse` determines if we search subfolders. | [
"Create",
"an",
"ItemList",
"in",
"path",
"from",
"the",
"filenames",
"that",
"have",
"a",
"suffix",
"in",
"extensions",
".",
"recurse",
"determines",
"if",
"we",
"search",
"subfolders",
"."
] | python | train |
hadrianl/huobi | huobitrade/utils.py | https://github.com/hadrianl/huobi/blob/bbfa2036703ee84a76d5d8e9f89c25fc8a55f2c7/huobitrade/utils.py#L211-L241 | def http_post_request(url, params, add_to_headers=None, _async=False):
"""
    from the Huobi (火币) demo, POST method
:param url:
:param params:
:param add_to_headers:
:return:
"""
headers = {
"Accept": "application/json",
'Content-Type': 'application/json'
}
if add_to_headers:
headers.update(add_to_headers)
postdata = json.dumps(params)
if _async:
response = async_session.post(url, postdata, headers=headers, timeout=10)
return response
else:
response = requests.post(url, postdata, headers=headers, timeout=10)
try:
if response.status_code == 200:
return response.json()
else:
logger.debug(f'<POST>error_code:{response.status_code} reason:{response.reason} detail:{response.text}')
return
except BaseException as e:
logger.exception(
f'<POST>httpPost failed, detail is:{response.text},{e}')
return | [
"def",
"http_post_request",
"(",
"url",
",",
"params",
",",
"add_to_headers",
"=",
"None",
",",
"_async",
"=",
"False",
")",
":",
"headers",
"=",
"{",
"\"Accept\"",
":",
"\"application/json\"",
",",
"'Content-Type'",
":",
"'application/json'",
"}",
"if",
"add_... | from 火币demo, post方法
:param url:
:param params:
:param add_to_headers:
:return: | [
"from",
"火币demo",
"post方法",
":",
"param",
"url",
":",
":",
"param",
"params",
":",
":",
"param",
"add_to_headers",
":",
":",
"return",
":"
] | python | train |
taizilongxu/douban.fm | doubanfm/API/api.py | https://github.com/taizilongxu/douban.fm/blob/d65126d3bd3e12d8a7109137caff8da0efc22b2f/doubanfm/API/api.py#L229-L268 | def get_lrc(self, playingsong):
"""
        Fetch the lyrics.
        If requests are made too frequently, the API returns the following message:
{'msg': 'You API access rate limit has been exceeded.
Contact api-master@douban.com if you want higher limit. ',
'code': 1998,
'request': 'GET /j/v2/lyric'}
"""
try:
url = "https://douban.fm/j/v2/lyric"
postdata = {
'sid': playingsong['sid'],
'ssid': playingsong['ssid'],
}
s = requests.session()
response = s.get(url, params=postdata, headers=HEADERS)
            # Parse the lyric payload into a dict
lyric = json.loads(response.text, object_hook=decode_dict)
logger.info(lyric)
if lyric.get('code', None) == 1998:
logger.info('lrc API access rate limit has been exceeded')
return {}
elif lyric.get('code', None) == 107:
logger.info('lrc API invalid_request_uri')
return {}
lrc_dic = lrc2dict(lyric['lyric'])
            # The original lyrics are unicode; handled this way for compatibility
for key, value in iteritems(lrc_dic):
# lrc_dic[key] = value.decode('utf-8')
lrc_dic[key] = value
if lrc_dic:
logger.debug('Get lyric success!')
return lrc_dic
except requests.exceptions.RequestException:
logger.error('Get lyric failed!')
return {} | [
"def",
"get_lrc",
"(",
"self",
",",
"playingsong",
")",
":",
"try",
":",
"url",
"=",
"\"https://douban.fm/j/v2/lyric\"",
"postdata",
"=",
"{",
"'sid'",
":",
"playingsong",
"[",
"'sid'",
"]",
",",
"'ssid'",
":",
"playingsong",
"[",
"'ssid'",
"]",
",",
"}",
            ... | Fetch the lyrics.
        If requests are made too frequently, the API returns the following message:
{'msg': 'You API access rate limit has been exceeded.
Contact api-master@douban.com if you want higher limit. ',
'code': 1998,
'request': 'GET /j/v2/lyric'} | [
"获取歌词"
] | python | train |
inasafe/inasafe | safe/report/expressions/infographic.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/expressions/infographic.py#L317-L323 | def additional_minimum_needs_section_header_element(feature, parent):
"""Retrieve additional minimum needs section header string
from definitions.
"""
_ = feature, parent # NOQA
header = additional_minimum_needs_section_header['string_format']
return header.capitalize() | [
"def",
"additional_minimum_needs_section_header_element",
"(",
"feature",
",",
"parent",
")",
":",
"_",
"=",
"feature",
",",
"parent",
"# NOQA",
"header",
"=",
"additional_minimum_needs_section_header",
"[",
"'string_format'",
"]",
"return",
"header",
".",
"capitalize",... | Retrieve additional minimum needs section header string
from definitions. | [
"Retrieve",
"additional",
"minimum",
"needs",
"section",
"header",
"string",
"from",
"definitions",
"."
] | python | train |
croscon/fleaker | fleaker/logging.py | https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/logging.py#L59-L70 | def format(self, record):
"""Format the log record."""
levelname = getattr(record, 'levelname', None)
record.levelcolor = ''
record.endlevelcolor = ''
if levelname:
level_color = getattr(self.TermColors, levelname, '')
record.levelcolor = level_color
record.endlevelcolor = self.TermColors.ENDC if level_color else ''
return super(FleakerLogFormatter, self).format(record) | [
"def",
"format",
"(",
"self",
",",
"record",
")",
":",
"levelname",
"=",
"getattr",
"(",
"record",
",",
"'levelname'",
",",
"None",
")",
"record",
".",
"levelcolor",
"=",
"''",
"record",
".",
"endlevelcolor",
"=",
"''",
"if",
"levelname",
":",
"level_col... | Format the log record. | [
"Format",
"the",
"log",
"record",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/distlib/util.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L1698-L1704 | def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result | [
"def",
"inc_convert",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"value",
")",
":",
"value",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"base",
",",
"value",
")",
"with",
"codecs",
".",
"... | Default converter for the inc:// protocol. | [
"Default",
"converter",
"for",
"the",
"inc",
":",
"//",
"protocol",
"."
] | python | train |
dnanexus/dx-toolkit | src/python/dxpy/bindings/search.py | https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/search.py#L719-L736 | def find_one_app(zero_ok=False, more_ok=True, **kwargs):
"""
:param zero_ok:
If False (default), :class:`~dxpy.exceptions.DXSearchError` is
raised if the search has 0 results; if True, returns None if the
search has 0 results
:type zero_ok: bool
:param more_ok:
If False, :class:`~dxpy.exceptions.DXSearchError` is raised if
the search has 2 or more results
:type more_ok: bool
Returns one app that satisfies the supplied constraints, or None if
none exist (provided *zero_ok* is True). Supports all search
constraint arguments supported by :meth:`find_apps()`.
"""
return _find_one(find_apps, zero_ok=zero_ok, more_ok=more_ok, **kwargs) | [
"def",
"find_one_app",
"(",
"zero_ok",
"=",
"False",
",",
"more_ok",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_find_one",
"(",
"find_apps",
",",
"zero_ok",
"=",
"zero_ok",
",",
"more_ok",
"=",
"more_ok",
",",
"*",
"*",
"kwargs",
")"
] | :param zero_ok:
If False (default), :class:`~dxpy.exceptions.DXSearchError` is
raised if the search has 0 results; if True, returns None if the
search has 0 results
:type zero_ok: bool
:param more_ok:
If False, :class:`~dxpy.exceptions.DXSearchError` is raised if
the search has 2 or more results
:type more_ok: bool
Returns one app that satisfies the supplied constraints, or None if
none exist (provided *zero_ok* is True). Supports all search
constraint arguments supported by :meth:`find_apps()`. | [
":",
"param",
"zero_ok",
":",
"If",
"False",
"(",
"default",
")",
":",
"class",
":",
"~dxpy",
".",
"exceptions",
".",
"DXSearchError",
"is",
"raised",
"if",
"the",
"search",
"has",
"0",
"results",
";",
"if",
"True",
"returns",
"None",
"if",
"the",
"sea... | python | train |
openstack/quark | quark/plugin_modules/floating_ips.py | https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L610-L635 | def get_scalingips(context, filters=None, fields=None, sorts=['id'],
limit=None, marker=None, page_reverse=False):
"""Retrieve a list of scaling ips.
:param context: neutron api request context.
:param filters: a dictionary with keys that are valid keys for
a scaling ip as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
:param fields: a list of strings that are valid keys in a
scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: List of scaling IPs that are accessible to the tenant who
submits the request (as indicated by the tenant id of the context)
as well as any filters.
"""
LOG.info('get_scalingips for tenant %s filters %s fields %s' %
(context.tenant_id, filters, fields))
scaling_ips = _get_ips_by_type(context, ip_types.SCALING,
filters=filters, fields=fields)
return [v._make_scaling_ip_dict(scip) for scip in scaling_ips] | [
"def",
"get_scalingips",
"(",
"context",
",",
"filters",
"=",
"None",
",",
"fields",
"=",
"None",
",",
"sorts",
"=",
"[",
"'id'",
"]",
",",
"limit",
"=",
"None",
",",
"marker",
"=",
"None",
",",
"page_reverse",
"=",
"False",
")",
":",
"LOG",
".",
"... | Retrieve a list of scaling ips.
:param context: neutron api request context.
:param filters: a dictionary with keys that are valid keys for
a scaling ip as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
:param fields: a list of strings that are valid keys in a
scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: List of scaling IPs that are accessible to the tenant who
submits the request (as indicated by the tenant id of the context)
as well as any filters. | [
"Retrieve",
"a",
"list",
"of",
"scaling",
"ips",
"."
] | python | valid |
mjirik/imtools | imtools/select_label_qt.py | https://github.com/mjirik/imtools/blob/eb29fa59df0e0684d8334eb3bc5ef36ea46d1d3a/imtools/select_label_qt.py#L47-L67 | def init_slab(self, slab=None, segmentation=None, voxelsize_mm=None, show_ok_button=False):
"""
Create widget with segmentation labels information used to select labels.
:param slab: dict with label name and its id
:param segmentation: 3D label ndarray
:param voxelsize_mm: size of voxel in mm
:return:
"""
self.segmentation = segmentation
self.voxelsize_mm = voxelsize_mm
from . import show_segmentation
self.slab = show_segmentation.create_slab_from_segmentation(
self.segmentation, slab=slab)
if show_ok_button:
ok_button = QPushButton("Ok")
ok_button.clicked.connect(self._action_ok_button)
self.superMainScrollLayout.addWidget(ok_button) | [
"def",
"init_slab",
"(",
"self",
",",
"slab",
"=",
"None",
",",
"segmentation",
"=",
"None",
",",
"voxelsize_mm",
"=",
"None",
",",
"show_ok_button",
"=",
"False",
")",
":",
"self",
".",
"segmentation",
"=",
"segmentation",
"self",
".",
"voxelsize_mm",
"="... | Create widget with segmentation labels information used to select labels.
:param slab: dict with label name and its id
:param segmentation: 3D label ndarray
:param voxelsize_mm: size of voxel in mm
:return: | [
"Create",
"widget",
"with",
"segmentation",
"labels",
"information",
"used",
"to",
"select",
"labels",
"."
] | python | train |
allenai/allennlp | allennlp/semparse/contexts/atis_tables.py | https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/atis_tables.py#L128-L170 | def get_numbers_from_utterance(utterance: str, tokenized_utterance: List[Token]) -> Dict[str, List[int]]:
"""
Given an utterance, this function finds all the numbers that are in the action space. Since we need to
keep track of linking scores, we represent the numbers as a dictionary, where the keys are the string
representation of the number and the values are lists of the token indices that triggers that number.
"""
# When we use a regex to find numbers or strings, we need a mapping from
# the character to which token triggered it.
char_offset_to_token_index = {token.idx : token_index
for token_index, token in enumerate(tokenized_utterance)}
# We want to look up later for each time whether it appears after a word
# such as "about" or "approximately".
indices_of_approximate_words = {index for index, token in enumerate(tokenized_utterance)
if token.text in APPROX_WORDS}
indices_of_words_preceding_time = {index for index, token in enumerate(tokenized_utterance)
if token.text in WORDS_PRECEDING_TIME}
indices_of_am_pm = {index for index, token in enumerate(tokenized_utterance)
if token.text in {'am', 'pm'}}
number_linking_dict: Dict[str, List[int]] = defaultdict(list)
for token_index, token in enumerate(tokenized_utterance):
if token.text.isdigit():
if token_index - 1 in indices_of_words_preceding_time and token_index + 1 not in indices_of_am_pm:
for time in digit_to_query_time(token.text):
number_linking_dict[str(time)].append(token_index)
times_linking_dict = get_times_from_utterance(utterance,
char_offset_to_token_index,
indices_of_approximate_words)
for key, value in times_linking_dict.items():
number_linking_dict[key].extend(value)
for index, token in enumerate(tokenized_utterance):
for number in NUMBER_TRIGGER_DICT.get(token.text, []):
if index - 1 in indices_of_approximate_words:
for approx_time in get_approximate_times([int(number)]):
number_linking_dict[str(approx_time)].append(index)
else:
number_linking_dict[number].append(index)
return number_linking_dict | [
"def",
"get_numbers_from_utterance",
"(",
"utterance",
":",
"str",
",",
"tokenized_utterance",
":",
"List",
"[",
"Token",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"List",
"[",
"int",
"]",
"]",
":",
"# When we use a regex to find numbers or strings, we need a mapping... | Given an utterance, this function finds all the numbers that are in the action space. Since we need to
keep track of linking scores, we represent the numbers as a dictionary, where the keys are the string
representation of the number and the values are lists of the token indices that triggers that number. | [
"Given",
"an",
"utterance",
"this",
"function",
"finds",
"all",
"the",
"numbers",
"that",
"are",
"in",
"the",
"action",
"space",
".",
"Since",
"we",
"need",
"to",
"keep",
"track",
"of",
"linking",
"scores",
"we",
"represent",
"the",
"numbers",
"as",
"a",
... | python | train |
bernardopires/django-tenant-schemas | tenant_schemas/postgresql_backend/base.py | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/postgresql_backend/base.py#L66-L72 | def set_tenant(self, tenant, include_public=True):
"""
Main API method to current database schema,
but it does not actually modify the db connection.
"""
self.set_schema(tenant.schema_name, include_public)
self.tenant = tenant | [
"def",
"set_tenant",
"(",
"self",
",",
"tenant",
",",
"include_public",
"=",
"True",
")",
":",
"self",
".",
"set_schema",
"(",
"tenant",
".",
"schema_name",
",",
"include_public",
")",
"self",
".",
"tenant",
"=",
"tenant"
] | Main API method to current database schema,
but it does not actually modify the db connection. | [
"Main",
"API",
"method",
"to",
"current",
"database",
"schema",
"but",
"it",
"does",
"not",
"actually",
"modify",
"the",
"db",
"connection",
"."
] | python | train |
tritemio/PyBroMo | pybromo/iter_chunks.py | https://github.com/tritemio/PyBroMo/blob/b75f82a4551ff37e7c7a7e6954c536451f3e6d06/pybromo/iter_chunks.py#L42-L51 | def iter_chunk_index(num_samples, chunksize):
"""Iterator used to iterate in chunks over an array of size `num_samples`.
At each iteration returns a start and stop index for a slice of size
`chunksize`. In the last iteration the slice may be smaller.
"""
i = 0
for c_size in iter_chunksize(num_samples, chunksize):
yield i, i + c_size
i += c_size | [
"def",
"iter_chunk_index",
"(",
"num_samples",
",",
"chunksize",
")",
":",
"i",
"=",
"0",
"for",
"c_size",
"in",
"iter_chunksize",
"(",
"num_samples",
",",
"chunksize",
")",
":",
"yield",
"i",
",",
"i",
"+",
"c_size",
"i",
"+=",
"c_size"
] | Iterator used to iterate in chunks over an array of size `num_samples`.
At each iteration returns a start and stop index for a slice of size
`chunksize`. In the last iteration the slice may be smaller. | [
"Iterator",
"used",
"to",
"iterate",
"in",
"chunks",
"over",
"an",
"array",
"of",
"size",
"num_samples",
"."
] | python | valid |
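Illustrative note on the `iter_chunk_index` record above (editorial sketch): the record relies on a companion `iter_chunksize` that is not shown in the row, so a straightforward implementation of it (full chunks plus a smaller remainder) is assumed here in order to demonstrate the start/stop bounds end to end.

```python
def iter_chunksize(num_samples, chunksize):
    """Yield chunk sizes covering num_samples (assumed implementation, not from the record)."""
    full, remainder = divmod(num_samples, chunksize)
    for _ in range(full):
        yield chunksize
    if remainder:
        yield remainder

def iter_chunk_index(num_samples, chunksize):
    """Yield (start, stop) slice bounds for each chunk (logic from the record above)."""
    i = 0
    for c_size in iter_chunksize(num_samples, chunksize):
        yield i, i + c_size
        i += c_size

print(list(iter_chunk_index(10, 4)))   # [(0, 4), (4, 8), (8, 10)]
```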
limpyd/redis-limpyd | limpyd/fields.py | https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/fields.py#L640-L647 | def _del(self, command, *args, **kwargs):
"""
Shortcut for commands that remove all values of the field.
All will be deindexed.
"""
if self.indexable:
self.deindex()
return self._traverse_command(command, *args, **kwargs) | [
"def",
"_del",
"(",
"self",
",",
"command",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"indexable",
":",
"self",
".",
"deindex",
"(",
")",
"return",
"self",
".",
"_traverse_command",
"(",
"command",
",",
"*",
"args",
",... | Shortcut for commands that remove all values of the field.
All will be deindexed. | [
"Shortcut",
"for",
"commands",
"that",
"remove",
"all",
"values",
"of",
"the",
"field",
".",
"All",
"will",
"be",
"deindexed",
"."
] | python | train |
FlaskGuys/Flask-Imagine | flask_imagine/filters/thumbnail.py | https://github.com/FlaskGuys/Flask-Imagine/blob/f79c6517ecb5480b63a2b3b8554edb6e2ac8be8c/flask_imagine/filters/thumbnail.py#L96-L118 | def outbound_sizes(cls, original_width, original_height, target_width, target_height):
"""
Calculate new image sizes for outbound mode
:param original_width: int
:param original_height: int
:param target_width: int
:param target_height: int
:return: tuple(int, int)
"""
if target_width <= original_width and target_height <= original_height:
k = original_width / float(original_height)
k_w = original_width / float(target_width)
k_h = original_height / float(target_height)
if k_w > k_h:
target_width = int(target_height * k)
else:
target_height = int(target_width / k)
else:
target_width = original_width
target_height = original_height
return target_width, target_height | [
"def",
"outbound_sizes",
"(",
"cls",
",",
"original_width",
",",
"original_height",
",",
"target_width",
",",
"target_height",
")",
":",
"if",
"target_width",
"<=",
"original_width",
"and",
"target_height",
"<=",
"original_height",
":",
"k",
"=",
"original_width",
... | Calculate new image sizes for outbound mode
:param original_width: int
:param original_height: int
:param target_width: int
:param target_height: int
:return: tuple(int, int) | [
"Calculate",
"new",
"image",
"sizes",
"for",
"outbound",
"mode",
":",
"param",
"original_width",
":",
"int",
":",
"param",
"original_height",
":",
"int",
":",
"param",
"target_width",
":",
"int",
":",
"param",
"target_height",
":",
"int",
":",
"return",
":",... | python | train |
SoftwareDefinedBuildings/XBOS | apps/data_analysis/XBOS_data_analytics/Import_Data.py | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/data_analysis/XBOS_data_analytics/Import_Data.py#L193-L318 | def get_weather_power_tstat(self, site, start, end, data_type=['weather', 'power']):
""" Get weather and power data.
Parameters
----------
site : str
Site name.
start : str
Start date.
end : str
End date.
data_type : str
Type of data needed (all, weather, power, temperature, hsp, csp)
"""
m = dataclient.MDALClient("corbusier.cs.berkeley.edu:8088")
request = {
"Variables": {
"greenbutton": {
"Definition": """SELECT ?meter ?meter_uuid FROM %s WHERE {
?meter rdf:type brick:Green_Button_Meter .
?meter bf:uuid ?meter_uuid
};""" % site,
},
"weather": {
"Definition": """SELECT ?t ?t_uuid FROM %s WHERE {
?t rdf:type/rdfs:subClassOf* brick:Weather_Temperature_Sensor .
?t bf:uuid ?t_uuid
};""" % site,
},
"tstat_state": {
"Definition": """SELECT ?t ?t_uuid ?tstat FROM %s WHERE {
?t rdf:type/rdfs:subClassOf* brick:Thermostat_Status .
?t bf:uuid ?t_uuid
?t bf:isPointOf ?tstat .
?tstat rdf:type brick:Thermostat
};""" % site,
},
"tstat_hsp": {
"Definition": """SELECT ?t ?t_uuid ?tstat FROM %s WHERE {
?t rdf:type/rdfs:subClassOf* brick:Supply_Air_Temperature_Heating_Setpoint .
?t bf:uuid ?t_uuid .
?t bf:isPointOf ?tstat .
?tstat rdf:type brick:Thermostat
};""" % site,
},
"tstat_csp": {
"Definition": """SELECT ?t ?t_uuid ?tstat FROM %s WHERE {
?t rdf:type/rdfs:subClassOf* brick:Supply_Air_Temperature_Cooling_Setpoint .
?t bf:uuid ?t_uuid .
?t bf:isPointOf ?tstat .
?tstat rdf:type brick:Thermostat
};""" % site,
},
"tstat_temp": {
"Definition": """SELECT ?t ?t_uuid ?tstat FROM %s WHERE {
?t rdf:type/rdfs:subClassOf* brick:Temperature_Sensor .
?t bf:uuid ?t_uuid .
?t bf:isPointOf ?tstat .
?tstat rdf:type brick:Thermostat
};""" % site,
},
},
}
# outside air temp
request['Composition'] = ['weather']
request['Aggregation'] = {'weather': ['MEAN']}
request['Time'] = {
'Start': start,
'End': end,
'Window': '15m',
'Aligned': True
}
resp_weather = m.query(request)
self.weather_data = resp_weather.df
# power
request['Composition'] = ['greenbutton']
request['Aggregation'] = {'greenbutton': ['MEAN']}
resp_power = m.query(request)
self.power_data = resp_power.df
# tstat temperature
request['Composition'] = ['tstat_temp', 'tstat_hsp', 'tstat_csp']
request['Aggregation'] = {'tstat_temp': ['MEAN']}
resp_temp = m.query(request)
self.temp_data = resp_temp
# tstat heat setpoint
request['Composition'] = ['tstat_hsp']
request['Aggregation'] = {'tstat_hsp': ['MAX']}
resp_hsp = m.query(request)
self.hsp_data = resp_hsp
# tstat cool setpoint
request['Composition'] = ['tstat_csp']
request['Aggregation'] = {'tstat_csp': ['MAX']}
resp_csp = m.query(request)
self.csp_data = resp_csp
mapping = {
'weather': resp_weather,
'power': resp_power,
'temperature': resp_temp,
'hsp': resp_hsp,
'csp': resp_csp
}
first = True
for dat in data_type:
if first:
try:
self.data = mapping[dat].df
first = False
except:
raise SystemError('Undefined data_type (Make sure all characters are lowercase)')
else:
try:
self.data = self.data.join(mapping[dat].df)
except:
raise SystemError('Undefined data_type (Make sure all characters are lowercase)')
return mapping | [
"def",
"get_weather_power_tstat",
"(",
"self",
",",
"site",
",",
"start",
",",
"end",
",",
"data_type",
"=",
"[",
"'weather'",
",",
"'power'",
"]",
")",
":",
"m",
"=",
"dataclient",
".",
"MDALClient",
"(",
"\"corbusier.cs.berkeley.edu:8088\"",
")",
"request",
... | Get weather and power data.
Parameters
----------
site : str
Site name.
start : str
Start date.
end : str
End date.
data_type : str
Type of data needed (all, weather, power, temperature, hsp, csp) | [
"Get",
"weather",
"and",
"power",
"data",
"."
] | python | train |
syndbg/demonoid-api | demonoid/urls.py | https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/urls.py#L112-L122 | def fetch(self):
"""
Makes a request to combined url with `self._params` as parameters.
If the server at combined url responds with Client or Server error, raises an exception.
:return: the response from combined url
:rtype: requests.models.Response
"""
response = self._session.get(self.url, params=self.params)
response.raise_for_status()
return response | [
"def",
"fetch",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"_session",
".",
"get",
"(",
"self",
".",
"url",
",",
"params",
"=",
"self",
".",
"params",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"response"
] | Makes a request to combined url with `self._params` as parameters.
If the server at combined url responds with Client or Server error, raises an exception.
:return: the response from combined url
:rtype: requests.models.Response | [
"Makes",
"a",
"request",
"to",
"combined",
"url",
"with",
"self",
".",
"_params",
"as",
"parameters",
".",
"If",
"the",
"server",
"at",
"combined",
"url",
"responds",
"with",
"Client",
"or",
"Server",
"error",
"raises",
"an",
"exception",
"."
] | python | train |
Esri/ArcREST | src/arcrest/manageorg/_community.py | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_community.py#L399-L408 | def group(self, groupId):
"""
        gets a group based on its ID
"""
url = "%s/%s" % (self.root, groupId)
return Group(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initalize=False) | [
"def",
"group",
"(",
"self",
",",
"groupId",
")",
":",
"url",
"=",
"\"%s/%s\"",
"%",
"(",
"self",
".",
"root",
",",
"groupId",
")",
"return",
"Group",
"(",
"url",
"=",
"url",
",",
"securityHandler",
"=",
"self",
".",
"_securityHandler",
",",
"proxy_url... | gets a group based on it's ID | [
"gets",
"a",
"group",
"based",
"on",
"it",
"s",
"ID"
] | python | train |
bodylabs/lace | lace/geometry.py | https://github.com/bodylabs/lace/blob/b68f4a60a4cac66c0607ffbae38ef9d07d37f459/lace/geometry.py#L206-L233 | def cut_across_axis(self, dim, minval=None, maxval=None):
'''
Cut the mesh by a plane, discarding vertices that lie behind that
plane. Or cut the mesh by two parallel planes, discarding vertices
that lie outside them.
The region to keep is defined by an axis of perpendicularity,
specified by `dim`: 0 means x, 1 means y, 2 means z. `minval`
and `maxval` indicate the portion of that axis to keep.
Return the original indices of the kept vertices.
'''
# vertex_mask keeps track of the vertices we want to keep.
vertex_mask = np.ones((len(self.v),), dtype=bool)
if minval is not None:
predicate = self.v[:, dim] >= minval
vertex_mask = np.logical_and(vertex_mask, predicate)
if maxval is not None:
predicate = self.v[:, dim] <= maxval
vertex_mask = np.logical_and(vertex_mask, predicate)
vertex_indices = np.flatnonzero(vertex_mask)
self.keep_vertices(vertex_indices)
return vertex_indices | [
"def",
"cut_across_axis",
"(",
"self",
",",
"dim",
",",
"minval",
"=",
"None",
",",
"maxval",
"=",
"None",
")",
":",
"# vertex_mask keeps track of the vertices we want to keep.",
"vertex_mask",
"=",
"np",
".",
"ones",
"(",
"(",
"len",
"(",
"self",
".",
"v",
... | Cut the mesh by a plane, discarding vertices that lie behind that
plane. Or cut the mesh by two parallel planes, discarding vertices
that lie outside them.
The region to keep is defined by an axis of perpendicularity,
specified by `dim`: 0 means x, 1 means y, 2 means z. `minval`
and `maxval` indicate the portion of that axis to keep.
Return the original indices of the kept vertices. | [
"Cut",
"the",
"mesh",
"by",
"a",
"plane",
"discarding",
"vertices",
"that",
"lie",
"behind",
"that",
"plane",
".",
"Or",
"cut",
"the",
"mesh",
"by",
"two",
"parallel",
"planes",
"discarding",
"vertices",
"that",
"lie",
"outside",
"them",
"."
] | python | train |
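Illustrative note on the `cut_across_axis` record above (editorial sketch): the vertex-masking step on a bare numpy array — keep vertices whose coordinate along axis `dim` lies between `minval` and `maxval`. The `keep_vertices` face bookkeeping from the record depends on the mesh class and is omitted.

```python
import numpy as np

def cut_across_axis(vertices, dim, minval=None, maxval=None):
    """Return (kept_vertices, kept_indices) for vertices with
    minval <= vertices[:, dim] <= maxval (either bound may be None)."""
    mask = np.ones(len(vertices), dtype=bool)
    if minval is not None:
        mask &= vertices[:, dim] >= minval
    if maxval is not None:
        mask &= vertices[:, dim] <= maxval
    idx = np.flatnonzero(mask)
    return vertices[idx], idx

verts = np.array([[0.0, -1.0, 0.0],
                  [0.0,  0.5, 0.0],
                  [0.0,  2.0, 0.0]])
kept, idx = cut_across_axis(verts, dim=1, minval=0.0, maxval=1.0)
print(idx)   # [1] -- only the vertex with y in [0, 1] survives
```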
PmagPy/PmagPy | dialogs/grid_frame3.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/grid_frame3.py#L884-L894 | def onCopySelection(self, event):
"""
Copies self.df_slice to the Clipboard if slice exists
"""
if self.df_slice is not None:
pd.DataFrame.to_clipboard(self.df_slice, header=False, index=False)
self.grid.ClearSelection()
self.df_slice = None
print('-I- You have copied the selected cells. You may paste them into a text document or spreadsheet using Command v.')
else:
            print('-W- No cells were copied! You must highlight a selection of cells before hitting the copy button. You can do this by clicking and dragging, or by using the Shift key and click.')
"def",
"onCopySelection",
"(",
"self",
",",
"event",
")",
":",
"if",
"self",
".",
"df_slice",
"is",
"not",
"None",
":",
"pd",
".",
"DataFrame",
".",
"to_clipboard",
"(",
"self",
".",
"df_slice",
",",
"header",
"=",
"False",
",",
"index",
"=",
"False",
... | Copies self.df_slice to the Clipboard if slice exists | [
"Copies",
"self",
".",
"df_slice",
"to",
"the",
"Clipboard",
"if",
"slice",
"exists"
] | python | train |
napalm-automation/napalm-logs | napalm_logs/listener_proc.py | https://github.com/napalm-automation/napalm-logs/blob/4b89100a6e4f994aa004f3ea42a06dc803a7ccb0/napalm_logs/listener_proc.py#L62-L76 | def _setup_ipc(self):
'''
        Setup the listener IPC pusher.
'''
log.debug('Setting up the listener IPC pusher')
self.ctx = zmq.Context()
self.pub = self.ctx.socket(zmq.PUSH)
self.pub.connect(LST_IPC_URL)
log.debug('Setting HWM for the listener: %d', self.opts['hwm'])
try:
self.pub.setsockopt(zmq.HWM, self.opts['hwm'])
# zmq 2
except AttributeError:
# zmq 3
self.pub.setsockopt(zmq.SNDHWM, self.opts['hwm']) | [
"def",
"_setup_ipc",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"'Setting up the listener IPC pusher'",
")",
"self",
".",
"ctx",
"=",
"zmq",
".",
"Context",
"(",
")",
"self",
".",
"pub",
"=",
"self",
".",
"ctx",
".",
"socket",
"(",
"zmq",
".",
... | Setup the listener IPC pusher. | [
"Setup",
"the",
"listener",
"ICP",
"pusher",
"."
] | python | train |
phaethon/kamene | kamene/contrib/gsm_um.py | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L2485-L2490 | def authenticationAndCipheringReject():
"""AUTHENTICATION AND CIPHERING REJECT Section 9.4.11"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x14) # 00010100
packet = a / b
return packet | [
"def",
"authenticationAndCipheringReject",
"(",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"0x3",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"0x14",
")",
"# 00010100",
"packet",
"=",
"a",
"/",
"b",
"return",
"packet"
] | AUTHENTICATION AND CIPHERING REJECT Section 9.4.11 | [
"AUTHENTICATION",
"AND",
"CIPHERING",
"REJECT",
"Section",
"9",
".",
"4",
".",
"11"
] | python | train |
xsleonard/pystmark | pystmark.py | https://github.com/xsleonard/pystmark/blob/329ccae1a7c8d57f28fa72cd8dbbee3e39413ed6/pystmark.py#L1108-L1124 | def send(self, messages=None, api_key=None, secure=None, test=None,
**request_args):
'''Send batch request to Postmark API.
Returns result of :func:`requests.post`.
:param messages: Batch messages to send to the Postmark API.
:type messages: A list of :class:`Message`
:param api_key: Your Postmark API key. Defaults to `self.api_key`.
:param test: Make a test request to the Postmark API.
Defaults to `self.test`.
:param secure: Use the https Postmark API. Defaults to `self.secure`.
:param \*\*request_args: Passed to :func:`requests.request`
:rtype: :class:`BatchSendResponse`
'''
return super(BatchSender, self).send(message=messages, test=test,
api_key=api_key, secure=secure,
**request_args) | [
"def",
"send",
"(",
"self",
",",
"messages",
"=",
"None",
",",
"api_key",
"=",
"None",
",",
"secure",
"=",
"None",
",",
"test",
"=",
"None",
",",
"*",
"*",
"request_args",
")",
":",
"return",
"super",
"(",
"BatchSender",
",",
"self",
")",
".",
"sen... | Send batch request to Postmark API.
Returns result of :func:`requests.post`.
:param messages: Batch messages to send to the Postmark API.
:type messages: A list of :class:`Message`
:param api_key: Your Postmark API key. Defaults to `self.api_key`.
:param test: Make a test request to the Postmark API.
Defaults to `self.test`.
:param secure: Use the https Postmark API. Defaults to `self.secure`.
:param \*\*request_args: Passed to :func:`requests.request`
:rtype: :class:`BatchSendResponse` | [
"Send",
"batch",
"request",
"to",
"Postmark",
"API",
".",
"Returns",
"result",
"of",
":",
"func",
":",
"requests",
".",
"post",
"."
] | python | train |
relekang/python-semantic-release | semantic_release/cli.py | https://github.com/relekang/python-semantic-release/blob/76123f410180599a19e7c48da413880185bbea20/semantic_release/cli.py#L132-L188 | def publish(**kwargs):
"""
Runs the version task before pushing to git and uploading to pypi.
"""
current_version = get_current_version()
click.echo('Current version: {0}'.format(current_version))
retry = kwargs.get("retry")
debug('publish: retry=', retry)
if retry:
# The "new" version will actually be the current version, and the
# "current" version will be the previous version.
new_version = current_version
current_version = get_previous_version(current_version)
else:
level_bump = evaluate_version_bump(current_version, kwargs['force_level'])
new_version = get_new_version(current_version, level_bump)
owner, name = get_repository_owner_and_name()
ci_checks.check('master')
checkout('master')
if version(**kwargs):
push_new_version(
gh_token=os.environ.get('GH_TOKEN'),
owner=owner,
name=name
)
if config.getboolean('semantic_release', 'upload_to_pypi'):
upload_to_pypi(
username=os.environ.get('PYPI_USERNAME'),
password=os.environ.get('PYPI_PASSWORD'),
# We are retrying, so we don't want errors for files that are already on PyPI.
skip_existing=retry,
)
if check_token():
click.echo('Updating changelog')
try:
log = generate_changelog(current_version, new_version)
post_changelog(
owner,
name,
new_version,
markdown_changelog(new_version, log, header=False)
)
except GitError:
click.echo(click.style('Posting changelog failed.', 'red'), err=True)
else:
click.echo(
click.style('Missing token: cannot post changelog', 'red'), err=True)
click.echo(click.style('New release published', 'green'))
else:
click.echo('Version failed, no release will be published.', err=True) | [
"def",
"publish",
"(",
"*",
"*",
"kwargs",
")",
":",
"current_version",
"=",
"get_current_version",
"(",
")",
"click",
".",
"echo",
"(",
"'Current version: {0}'",
".",
"format",
"(",
"current_version",
")",
")",
"retry",
"=",
"kwargs",
".",
"get",
"(",
"\"... | Runs the version task before pushing to git and uploading to pypi. | [
"Runs",
"the",
"version",
"task",
"before",
"pushing",
"to",
"git",
"and",
"uploading",
"to",
"pypi",
"."
] | python | train |
phoebe-project/phoebe2 | phoebe/parameters/parameters.py | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L211-L224 | def update_if_client(fctn):
"""Intercept and check updates from server if bundle is in client mode."""
@functools.wraps(fctn)
def _update_if_client(self, *args, **kwargs):
b = self._bundle
if b is None or not hasattr(b, 'is_client'):
return fctn(self, *args, **kwargs)
elif b.is_client and \
(b._last_client_update is None or
(datetime.now() - b._last_client_update).seconds > 1):
b.client_update()
return fctn(self, *args, **kwargs)
return _update_if_client | [
"def",
"update_if_client",
"(",
"fctn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fctn",
")",
"def",
"_update_if_client",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"b",
"=",
"self",
".",
"_bundle",
"if",
"b",
"is",
"No... | Intercept and check updates from server if bundle is in client mode. | [
"Intercept",
"and",
"check",
"updates",
"from",
"server",
"if",
"bundle",
"is",
"in",
"client",
"mode",
"."
] | python | train |
ResidentMario/missingno | missingno/missingno.py | https://github.com/ResidentMario/missingno/blob/1d67f91fbab0695a919c6bb72c796db57024e0ca/missingno/missingno.py#L195-L263 | def bar(df, figsize=(24, 10), fontsize=16, labels=None, log=False, color='dimgray', inline=False,
filter=None, n=0, p=0, sort=None):
"""
A bar chart visualization of the nullity of the given DataFrame.
:param df: The input DataFrame.
    :param log: Whether or not to display a logarithmic plot. Defaults to False (linear).
:param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
:param n: The cap on the number of columns to include in the filtered DataFrame.
:param p: The cap on the percentage fill of the columns in the filtered DataFrame.
:param sort: The sort to apply to the heatmap. Should be one of "ascending", "descending", or None (default).
:param figsize: The size of the figure to display.
    :param fontsize: The figure's font size. This defaults to 16.
:param labels: Whether or not to display the column names. Would need to be turned off on particularly large
displays. Defaults to True.
    :param color: The color of the filled columns. Defaults to the RGB multiple `(0.25, 0.25, 0.25)`.
:return: If `inline` is False, the underlying `matplotlib.figure` object. Else, nothing.
"""
nullity_counts = len(df) - df.isnull().sum()
df = nullity_filter(df, filter=filter, n=n, p=p)
df = nullity_sort(df, sort=sort)
plt.figure(figsize=figsize)
(nullity_counts / len(df)).plot(kind='bar', figsize=figsize, fontsize=fontsize, log=log, color=color)
ax1 = plt.gca()
axes = [ax1]
# Start appending elements, starting with a modified bottom x axis.
if labels or (labels is None and len(df.columns) <= 50):
ax1.set_xticklabels(ax1.get_xticklabels(), rotation=45, ha='right', fontsize=fontsize)
# Create the numerical ticks.
ax2 = ax1.twinx()
axes.append(ax2)
if not log:
ax1.set_ylim([0, 1])
ax2.set_yticks(ax1.get_yticks())
ax2.set_yticklabels([int(n*len(df)) for n in ax1.get_yticks()], fontsize=fontsize)
else:
# For some reason when a logarithmic plot is specified `ax1` always contains two more ticks than actually
# appears in the plot. The fix is to ignore the first and last entries. Also note that when a log scale
# is used, we have to make it match the `ax1` layout ourselves.
ax2.set_yscale('log')
ax2.set_ylim(ax1.get_ylim())
ax2.set_yticklabels([int(n*len(df)) for n in ax1.get_yticks()], fontsize=fontsize)
else:
ax1.set_xticks([])
# Create the third axis, which displays columnar totals above the rest of the plot.
ax3 = ax1.twiny()
axes.append(ax3)
ax3.set_xticks(ax1.get_xticks())
ax3.set_xlim(ax1.get_xlim())
ax3.set_xticklabels(nullity_counts.values, fontsize=fontsize, rotation=45, ha='left')
ax3.grid(False)
for ax in axes:
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
if inline:
plt.show()
else:
return ax1 | [
"def",
"bar",
"(",
"df",
",",
"figsize",
"=",
"(",
"24",
",",
"10",
")",
",",
"fontsize",
"=",
"16",
",",
"labels",
"=",
"None",
",",
"log",
"=",
"False",
",",
"color",
"=",
"'dimgray'",
",",
"inline",
"=",
"False",
",",
"filter",
"=",
"None",
... | A bar chart visualization of the nullity of the given DataFrame.
:param df: The input DataFrame.
:param log: Whether or not to display a logarithmic plot. Defaults to False (linear).
:param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
:param n: The cap on the number of columns to include in the filtered DataFrame.
:param p: The cap on the percentage fill of the columns in the filtered DataFrame.
:param sort: The sort to apply to the heatmap. Should be one of "ascending", "descending", or None (default).
:param figsize: The size of the figure to display.
:param fontsize: The figure's font size. This defaults to 16.
:param labels: Whether or not to display the column names. Would need to be turned off on particularly large
displays. Defaults to True.
:param color: The color of the filled columns. Defaults to the RGB multiple `(0.25, 0.25, 0.25)`.
:return: If `inline` is False, the underlying `matplotlib.figure` object. Else, nothing. | [
"A",
"bar",
"chart",
"visualization",
"of",
"the",
"nullity",
"of",
"the",
"given",
"DataFrame",
"."
] | python | train |
sorgerlab/indra | indra/literature/deft_tools.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/literature/deft_tools.py#L87-L114 | def universal_extract_paragraphs(xml):
"""Extract paragraphs from xml that could be from different sources
    First try to parse the xml as if it came from elsevier. If we do not
    have valid elsevier xml this will throw an exception. The text extraction
    function in the pmc client may not throw an exception when parsing elsevier
    xml, silently processing the xml incorrectly.
Parameters
----------
xml : str
Either an NLM xml, Elsevier xml or plaintext
Returns
-------
paragraphs : str
Extracted plaintext paragraphs from NLM or Elsevier XML
"""
try:
paragraphs = elsevier_client.extract_paragraphs(xml)
except Exception:
paragraphs = None
if paragraphs is None:
try:
paragraphs = pmc_client.extract_paragraphs(xml)
except Exception:
paragraphs = [xml]
return paragraphs | [
"def",
"universal_extract_paragraphs",
"(",
"xml",
")",
":",
"try",
":",
"paragraphs",
"=",
"elsevier_client",
".",
"extract_paragraphs",
"(",
"xml",
")",
"except",
"Exception",
":",
"paragraphs",
"=",
"None",
"if",
"paragraphs",
"is",
"None",
":",
"try",
":",... | Extract paragraphs from xml that could be from different sources
First try to parse the xml as if it came from elsevier. If we do not
have valid elsevier xml this will throw an exception. The text extraction
function in the pmc client may not throw an exception when parsing elsevier
xml, silently processing the xml incorrectly.
Parameters
----------
xml : str
Either an NLM xml, Elsevier xml or plaintext
Returns
-------
paragraphs : str
Extracted plaintext paragraphs from NLM or Elsevier XML | [
"Extract",
"paragraphs",
"from",
"xml",
"that",
"could",
"be",
"from",
"different",
"sources"
] | python | train |
enkore/i3pystatus | i3pystatus/core/util.py | https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/core/util.py#L673-L698 | def get_module(function):
"""Function decorator for retrieving the ``self`` argument from the stack.
    Intended for use with callbacks that need access to a module's variables, for example:
.. code:: python
from i3pystatus import Status, get_module
from i3pystatus.core.command import execute
status = Status(...)
# other modules etc.
@get_module
def display_ip_verbose(module):
execute('sh -c "ip addr show dev {dev} | xmessage -file -"'.format(dev=module.interface))
status.register("network", interface="wlan1", on_leftclick=display_ip_verbose)
"""
@functools.wraps(function)
def call_wrapper(*args, **kwargs):
stack = inspect.stack()
caller_frame_info = stack[1]
self = caller_frame_info[0].f_locals["self"]
# not completly sure whether this is necessary
# see note in Python docs about stack frames
del stack
function(self, *args, **kwargs)
return call_wrapper | [
"def",
"get_module",
"(",
"function",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"function",
")",
"def",
"call_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"stack",
"=",
"inspect",
".",
"stack",
"(",
")",
"caller_frame_info",
"=",... | Function decorator for retrieving the ``self`` argument from the stack.
Intended for use with callbacks that need access to a module's variables, for example:
.. code:: python
from i3pystatus import Status, get_module
from i3pystatus.core.command import execute
status = Status(...)
# other modules etc.
@get_module
def display_ip_verbose(module):
execute('sh -c "ip addr show dev {dev} | xmessage -file -"'.format(dev=module.interface))
status.register("network", interface="wlan1", on_leftclick=display_ip_verbose) | [
"Function",
"decorator",
"for",
"retrieving",
"the",
"self",
"argument",
"from",
"the",
"stack",
"."
] | python | train |
blockstack/blockstack-core | blockstack/lib/atlas.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L3213-L3231 | def run(self, peer_table=None):
"""
Loop forever, pinging someone every pass.
"""
self.running = True
while self.running:
local_inv = atlas_get_zonefile_inventory()
t1 = time_now()
self.step( peer_table=peer_table, local_inv=local_inv, path=self.atlasdb_path )
t2 = time_now()
# don't go too fast
if t2 - t1 < PEER_HEALTH_NEIGHBOR_WORK_INTERVAL:
deadline = time_now() + PEER_HEALTH_NEIGHBOR_WORK_INTERVAL - (t2 - t1)
while time_now() < deadline and self.running:
time_sleep( self.hostport, self.__class__.__name__, 1.0 )
if not self.running:
break | [
"def",
"run",
"(",
"self",
",",
"peer_table",
"=",
"None",
")",
":",
"self",
".",
"running",
"=",
"True",
"while",
"self",
".",
"running",
":",
"local_inv",
"=",
"atlas_get_zonefile_inventory",
"(",
")",
"t1",
"=",
"time_now",
"(",
")",
"self",
".",
"s... | Loop forever, pinging someone every pass. | [
"Loop",
"forever",
"pinging",
"someone",
"every",
"pass",
"."
] | python | train |
pantsbuild/pants | contrib/scrooge/src/python/pants/contrib/scrooge/tasks/thrift_util.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/contrib/scrooge/src/python/pants/contrib/scrooge/tasks/thrift_util.py#L15-L43 | def find_includes(basedirs, source, log=None):
"""Finds all thrift files included by the given thrift source.
:basedirs: A set of thrift source file base directories to look for includes in.
:source: The thrift source file to scan for includes.
:log: An optional logger
"""
all_basedirs = [os.path.dirname(source)]
all_basedirs.extend(basedirs)
includes = set()
with open(source, 'r') as thrift:
for line in thrift.readlines():
match = INCLUDE_PARSER.match(line)
if match:
capture = match.group(1)
added = False
for basedir in all_basedirs:
include = os.path.join(basedir, capture)
if os.path.exists(include):
if log:
log.debug('{} has include {}'.format(source, include))
includes.add(include)
added = True
if not added:
raise ValueError("{} included in {} not found in bases {}"
.format(include, source, all_basedirs))
return includes | [
"def",
"find_includes",
"(",
"basedirs",
",",
"source",
",",
"log",
"=",
"None",
")",
":",
"all_basedirs",
"=",
"[",
"os",
".",
"path",
".",
"dirname",
"(",
"source",
")",
"]",
"all_basedirs",
".",
"extend",
"(",
"basedirs",
")",
"includes",
"=",
"set"... | Finds all thrift files included by the given thrift source.
:basedirs: A set of thrift source file base directories to look for includes in.
:source: The thrift source file to scan for includes.
:log: An optional logger | [
"Finds",
"all",
"thrift",
"files",
"included",
"by",
"the",
"given",
"thrift",
"source",
"."
] | python | train |
sqlalchemy-redshift/sqlalchemy-redshift | sqlalchemy_redshift/dialect.py | https://github.com/sqlalchemy-redshift/sqlalchemy-redshift/blob/b1a24872da0c8151aa60da4524605b6243d8d765/sqlalchemy_redshift/dialect.py#L440-L459 | def get_pk_constraint(self, connection, table_name, schema=None, **kw):
"""
Return information about the primary key constraint on `table_name`.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_pk_constraint`.
"""
constraints = self._get_redshift_constraints(connection, table_name,
schema, **kw)
pk_constraints = [c for c in constraints if c.contype == 'p']
if not pk_constraints:
return {'constrained_columns': [], 'name': ''}
pk_constraint = pk_constraints[0]
m = PRIMARY_KEY_RE.match(pk_constraint.condef)
colstring = m.group('columns')
constrained_columns = SQL_IDENTIFIER_RE.findall(colstring)
return {
'constrained_columns': constrained_columns,
'name': pk_constraint.conname,
} | [
"def",
"get_pk_constraint",
"(",
"self",
",",
"connection",
",",
"table_name",
",",
"schema",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"constraints",
"=",
"self",
".",
"_get_redshift_constraints",
"(",
"connection",
",",
"table_name",
",",
"schema",
",",
... | Return information about the primary key constraint on `table_name`.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_pk_constraint`. | [
"Return",
"information",
"about",
"the",
"primary",
"key",
"constraint",
"on",
"table_name",
"."
] | python | train |
totalgood/pugnlp | src/pugnlp/plots.py | https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/plots.py#L296-L323 | def scatmat(df, category=None, colors='rgob',
num_plots=4, num_topics=100, num_columns=4,
show=False, block=False, data_path=DATA_PATH, save=False, verbose=1):
"""Scatter plot with colored markers depending on the discrete values in a "category" column
    FIXME: empty plots that don't go away, Plot and/or save scatter matrix in groups of num_columns topics"""
if category is None:
category = list(df.columns)[-1]
if isinstance(category, (str, bytes, int)) and category in df.columns:
category = df[category]
else:
category = pd.Series(category)
suffix = '{}x{}'.format(*list(df.shape))
# suffix = compose_suffix(len(df), num_topics, save)
# save = bool(save)
for i in range(min(num_plots * num_columns, num_topics) / num_plots):
scatter_matrix(df[df.columns[i * num_columns:(i + 1) * num_columns]],
marker='+', c=[colors[int(x) % len(colors)] for x in category.values],
figsize=(18, 12))
if save:
name = 'scatmat_topics_{}-{}.jpg'.format(i * num_columns, (i + 1) * num_columns) + suffix
plt.savefig(os.path.join(data_path, name + '.jpg'))
if show:
if block:
plt.show()
else:
plt.show(block=False) | [
"def",
"scatmat",
"(",
"df",
",",
"category",
"=",
"None",
",",
"colors",
"=",
"'rgob'",
",",
"num_plots",
"=",
"4",
",",
"num_topics",
"=",
"100",
",",
"num_columns",
"=",
"4",
",",
"show",
"=",
"False",
",",
"block",
"=",
"False",
",",
"data_path",... | Scatter plot with colored markers depending on the discrete values in a "category" column
FIXME: empty plots that don't go away, Plot and/or save scatter matrix in groups of num_columns topics | [
"Scatter",
"plot",
"with",
"colored",
"markers",
"depending",
"on",
"the",
"discrete",
"values",
"in",
"a",
"category",
"column"
] | python | train |
BlueBrain/NeuroM | neurom/view/view.py | https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L233-L255 | def plot_soma3d(ax, soma, color=None, alpha=_ALPHA):
'''Generates a 3d figure of the soma.
Args:
ax(matplotlib axes): on what to plot
soma(neurom.core.Soma): plotted soma
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values
'''
color = _get_color(color, tree_type=NeuriteType.soma)
if isinstance(soma, SomaCylinders):
for start, end in zip(soma.points, soma.points[1:]):
common.plot_cylinder(ax,
start=start[COLS.XYZ], end=end[COLS.XYZ],
start_radius=start[COLS.R], end_radius=end[COLS.R],
color=color, alpha=alpha)
else:
common.plot_sphere(ax, center=soma.center[COLS.XYZ], radius=soma.radius,
color=color, alpha=alpha)
# unlike w/ 2d Axes, the dataLim isn't set by collections, so it has to be updated manually
_update_3d_datalim(ax, soma) | [
"def",
"plot_soma3d",
"(",
"ax",
",",
"soma",
",",
"color",
"=",
"None",
",",
"alpha",
"=",
"_ALPHA",
")",
":",
"color",
"=",
"_get_color",
"(",
"color",
",",
"tree_type",
"=",
"NeuriteType",
".",
"soma",
")",
"if",
"isinstance",
"(",
"soma",
",",
"S... | Generates a 3d figure of the soma.
Args:
ax(matplotlib axes): on what to plot
soma(neurom.core.Soma): plotted soma
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values | [
"Generates",
"a",
"3d",
"figure",
"of",
"the",
"soma",
"."
] | python | train |
MisterY/gnucash-portfolio | gnucash_portfolio/scheduledtxaggregate.py | https://github.com/MisterY/gnucash-portfolio/blob/bfaad8345a5479d1cd111acee1939e25c2a638c2/gnucash_portfolio/scheduledtxaggregate.py#L262-L264 | def get_by_id(self, tx_id: str) -> ScheduledTransaction:
""" Fetches a tx by id """
return self.query.filter(ScheduledTransaction.guid == tx_id).first() | [
"def",
"get_by_id",
"(",
"self",
",",
"tx_id",
":",
"str",
")",
"->",
"ScheduledTransaction",
":",
"return",
"self",
".",
"query",
".",
"filter",
"(",
"ScheduledTransaction",
".",
"guid",
"==",
"tx_id",
")",
".",
"first",
"(",
")"
] | Fetches a tx by id | [
"Fetches",
"a",
"tx",
"by",
"id"
] | python | train |
mlperf/training | rnn_translator/pytorch/seq2seq/models/attention.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/models/attention.py#L63-L81 | def set_mask(self, context_len, context):
"""
sets self.mask which is applied before softmax
ones for inactive context fields, zeros for active context fields
:param context_len: b
:param context: if batch_first: (b x t_k x n) else: (t_k x b x n)
self.mask: (b x t_k)
"""
if self.batch_first:
max_len = context.size(1)
else:
max_len = context.size(0)
indices = torch.arange(0, max_len, dtype=torch.int64,
device=context.device)
self.mask = indices >= (context_len.unsqueeze(1)) | [
"def",
"set_mask",
"(",
"self",
",",
"context_len",
",",
"context",
")",
":",
"if",
"self",
".",
"batch_first",
":",
"max_len",
"=",
"context",
".",
"size",
"(",
"1",
")",
"else",
":",
"max_len",
"=",
"context",
".",
"size",
"(",
"0",
")",
"indices",... | sets self.mask which is applied before softmax
ones for inactive context fields, zeros for active context fields
:param context_len: b
:param context: if batch_first: (b x t_k x n) else: (t_k x b x n)
self.mask: (b x t_k) | [
"sets",
"self",
".",
"mask",
"which",
"is",
"applied",
"before",
"softmax",
"ones",
"for",
"inactive",
"context",
"fields",
"zeros",
"for",
"active",
"context",
"fields"
] | python | train |
onnx/onnx | onnx/backend/base.py | https://github.com/onnx/onnx/blob/2f7dc10f03a072526d94b6820cedbf2a1ec5a2c4/onnx/backend/base.py#L89-L111 | def run_node(cls,
node, # type: NodeProto
inputs, # type: Any
device='CPU', # type: Text
outputs_info=None, # type: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]]
**kwargs # type: Dict[Text, Any]
): # type: (...) -> Optional[Tuple[Any, ...]]
        '''Simply run one operator and return the results.
Args:
outputs_info: a list of tuples, which contains the element type and
shape of each output. First element of the tuple is the dtype, and
the second element is the shape. More use case can be found in
https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py
'''
# TODO Remove Optional from return type
if 'opset_version' in kwargs:
special_context = c_checker.CheckerContext()
special_context.ir_version = IR_VERSION
special_context.opset_imports = {'': kwargs['opset_version']} # type: ignore
onnx.checker.check_node(node, special_context)
else:
onnx.checker.check_node(node)
return None | [
"def",
"run_node",
"(",
"cls",
",",
"node",
",",
"# type: NodeProto",
"inputs",
",",
"# type: Any",
"device",
"=",
"'CPU'",
",",
"# type: Text",
"outputs_info",
"=",
"None",
",",
"# type: Optional[Sequence[Tuple[numpy.dtype, Tuple[int, ...]]]]",
"*",
"*",
"kwargs",
"#... | Simple run one operator and return the results.
Args:
outputs_info: a list of tuples, which contains the element type and
shape of each output. First element of the tuple is the dtype, and
the second element is the shape. More use case can be found in
https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py | [
"Simple",
"run",
"one",
"operator",
"and",
"return",
"the",
"results",
".",
"Args",
":",
"outputs_info",
":",
"a",
"list",
"of",
"tuples",
"which",
"contains",
"the",
"element",
"type",
"and",
"shape",
"of",
"each",
"output",
".",
"First",
"element",
"of",... | python | train |
Devoxin/Lavalink.py | lavalink/PlayerManager.py | https://github.com/Devoxin/Lavalink.py/blob/63f55c3d726d24c4cfd3674d3cd6aab6f5be110d/lavalink/PlayerManager.py#L226-L230 | def remove(self, guild_id):
""" Removes a player from the current players. """
if guild_id in self._players:
self._players[guild_id].cleanup()
del self._players[guild_id] | [
"def",
"remove",
"(",
"self",
",",
"guild_id",
")",
":",
"if",
"guild_id",
"in",
"self",
".",
"_players",
":",
"self",
".",
"_players",
"[",
"guild_id",
"]",
".",
"cleanup",
"(",
")",
"del",
"self",
".",
"_players",
"[",
"guild_id",
"]"
] | Removes a player from the current players. | [
"Removes",
"a",
"player",
"from",
"the",
"current",
"players",
"."
] | python | valid |
thisfred/val | val/_val.py | https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L339-L343 | def _validated(self, data):
"""Validate data if all subschemas validate it."""
for sub in self.schemas:
data = sub(data)
return data | [
"def",
"_validated",
"(",
"self",
",",
"data",
")",
":",
"for",
"sub",
"in",
"self",
".",
"schemas",
":",
"data",
"=",
"sub",
"(",
"data",
")",
"return",
"data"
] | Validate data if all subschemas validate it. | [
"Validate",
"data",
"if",
"all",
"subschemas",
"validate",
"it",
"."
] | python | train |
erdc/RAPIDpy | RAPIDpy/postprocess/generate_return_periods.py | https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/postprocess/generate_return_periods.py#L20-L153 | def generate_single_return_period(args):
"""
This function calculates a single return period for a single reach
"""
qout_file, return_period_file, rivid_index_list, step, num_years, \
method, mp_lock = args
skewvals = [-3.0, -2.8, -2.6, -2.4, -2.2, -2.0, -1.8, -1.6, -1.4, -1.2,
-1.0, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1.0,
1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0]
kfac2 = [0.396, 0.384, 0.368, 0.351, 0.33, 0.307, 0.282, 0.254, 0.225,
0.195, 0.164, 0.132, 0.099, 0.066, 0.033, 0, -0.033, -0.066,
-0.099, -0.132, -0.164, -0.195, -0.225, -0.254, -0.282, -0.307,
-0.33, -0.351, -0.368, -0.384, -0.396]
kfac10 = [0.66, 0.702, 0.747, 0.795, 0.844, 0.895, 0.945, 0.994, 1.041,
1.086, 1.128, 1.166, 1.2, 1.231, 1.258, 1.282, 1.301, 1.317,
1.328, 1.336, 1.34, 1.34, 1.337, 1.329, 1.318, 1.302, 1.284,
1.262, 1.238, 1.21, 1.18]
kfac25 = [.666, .712, .764, .823, .888, .959, 1.035, 1.116, 1.198, 1.282,
1.366, 1.448, 1.528, 1.606, 1.680, 1.751, 1.818, 1.880, 1.939,
1.993, 2.043, 2.087, 2.128, 2.163, 2.193, 2.219, 2.240, 2.256,
2.267, 2.275, 2.278]
kfac50 = [0.666, 0.714, 0.768, 0.83, 0.9, 0.98, 1.069, 1.166, 1.27, 1.379,
1.492, 1.606, 1.72, 1.834, 1.945, 2.054, 2.159, 2.261, 2.359,
2.453, 2.542, 2.626, 2.706, 2.78, 2.848, 2.912, 2.97, 3.023,
3.071, 3.114, 3.152]
kfac100 = [0.667, 0.714, 0.769, 0.832, 0.905, 0.99, 1.087, 1.197, 1.318,
1.499, 1.588, 1.733, 1.88, 2.029, 2.178, 2.326, 2.472, 2.615,
2.755, 2.891, 3.022, 3.149, 3.271, 3.388, 3.499, 3.605, 3.705,
3.8, 3.889, 3.973, 4.051]
with RAPIDDataset(qout_file) as qout_nc_file:
# get index of return period data
if method == 'weibull':
rp_index_20 = int((num_years + 1)/20.0)
rp_index_10 = int((num_years + 1)/10.0)
rp_index_2 = int((num_years + 1)/2.0)
if method == 'weibull':
return_20_array = np.zeros(len(rivid_index_list))
elif method == 'gumble':
return_100_array = np.zeros(len(rivid_index_list))
return_50_array = np.zeros(len(rivid_index_list))
return_20_array = np.zeros(len(rivid_index_list))
elif method == 'log_pearson':
return_100_array = np.zeros(len(rivid_index_list))
return_50_array = np.zeros(len(rivid_index_list))
return_25_array = np.zeros(len(rivid_index_list))
return_10_array = np.zeros(len(rivid_index_list))
return_2_array = np.zeros(len(rivid_index_list))
max_flow_array = np.zeros(len(rivid_index_list))
# iterate through rivids to generate return periods
for iter_idx, rivid_index in enumerate(rivid_index_list):
filtered_flow_data = qout_nc_file.get_qout_index(
rivid_index,
pd_filter="{0}D".format(step),
filter_mode="max")
sorted_flow_data = np.sort(filtered_flow_data)[:num_years:-1]
max_flow = sorted_flow_data[0]
if max_flow < 0.01:
log("Return period data < 0.01 generated for rivid {0}"
.format(qout_nc_file.qout_nc.variables[
qout_nc_file.river_id_dimension][rivid_index]),
"WARNING")
max_flow_array[iter_idx] = max_flow
if method == 'weibull':
return_20_array[iter_idx] = sorted_flow_data[rp_index_20]
return_10_array[iter_idx] = sorted_flow_data[rp_index_10]
return_2_array[iter_idx] = sorted_flow_data[rp_index_2]
elif method == 'gumble':
mean_flow = np.mean(filtered_flow_data)
stddev = np.std(filtered_flow_data)
return_100_array[iter_idx] = mean_flow + 3.14*stddev
return_50_array[iter_idx] = mean_flow + 2.59*stddev
return_20_array[iter_idx] = mean_flow + 1.87*stddev
return_10_array[iter_idx] = mean_flow + 1.3*stddev
return_2_array[iter_idx] = mean_flow - .164*stddev
elif method == 'log_pearson':
log_flow = np.log10(filtered_flow_data[filtered_flow_data > 0])
if len(log_flow) <= 0:
continue
mean_log_flow = np.mean(log_flow)
std_log_flow = np.std(log_flow)
log_flow_array = np.array(log_flow)
skew = (num_years * (np.sum(
np.power((log_flow_array - mean_log_flow), 3)))) / \
((num_years - 1) * (num_years - 2) * std_log_flow ** 3)
k2 = np.interp(skew, skewvals, kfac2)
k10 = np.interp(skew, skewvals, kfac10)
k25 = np.interp(skew, skewvals, kfac25)
k50 = np.interp(skew, skewvals, kfac50)
k100 = np.interp(skew, skewvals, kfac100)
return_100_array[iter_idx] = \
np.power(10, (mean_log_flow + k100*std_log_flow))
return_50_array[iter_idx] = \
np.power(10, (mean_log_flow + k50*std_log_flow))
return_25_array[iter_idx] = \
np.power(10, (mean_log_flow + k25*std_log_flow))
return_10_array[iter_idx] = \
np.power(10, (mean_log_flow + k10*std_log_flow))
return_2_array[iter_idx] = \
np.power(10, (mean_log_flow + k2*std_log_flow))
mp_lock.acquire()
return_period_nc = Dataset(return_period_file, 'a')
return_period_nc.variables['max_flow'][rivid_index_list] = \
max_flow_array
if method == 'weibull':
return_period_nc.variables['return_period_20'][
rivid_index_list] = return_20_array
elif method in 'gumble':
return_period_nc.variables['return_period_100'][
rivid_index_list] = return_100_array
return_period_nc.variables['return_period_50'][
rivid_index_list] = return_50_array
return_period_nc.variables['return_period_20'][
rivid_index_list] = return_20_array
elif method == 'log_pearson':
return_period_nc.variables['return_period_100'][
rivid_index_list] = return_100_array
return_period_nc.variables['return_period_50'][
rivid_index_list] = return_50_array
return_period_nc.variables['return_period_25'][
rivid_index_list] = return_25_array
return_period_nc.variables['return_period_10'][
rivid_index_list] = return_10_array
return_period_nc.variables['return_period_2'][
rivid_index_list] = return_2_array
return_period_nc.close()
mp_lock.release() | [
"def",
"generate_single_return_period",
"(",
"args",
")",
":",
"qout_file",
",",
"return_period_file",
",",
"rivid_index_list",
",",
"step",
",",
"num_years",
",",
"method",
",",
"mp_lock",
"=",
"args",
"skewvals",
"=",
"[",
"-",
"3.0",
",",
"-",
"2.8",
",",... | This function calculates a single return period for a single reach | [
"This",
"function",
"calculates",
"a",
"single",
"return",
"period",
"for",
"a",
"single",
"reach"
] | python | train |
twilio/twilio-python | twilio/rest/api/v2010/account/outgoing_caller_id.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/outgoing_caller_id.py#L327-L341 | def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: OutgoingCallerIdContext for this OutgoingCallerIdInstance
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext
"""
if self._context is None:
self._context = OutgoingCallerIdContext(
self._version,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
return self._context | [
"def",
"_proxy",
"(",
"self",
")",
":",
"if",
"self",
".",
"_context",
"is",
"None",
":",
"self",
".",
"_context",
"=",
"OutgoingCallerIdContext",
"(",
"self",
".",
"_version",
",",
"account_sid",
"=",
"self",
".",
"_solution",
"[",
"'account_sid'",
"]",
... | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: OutgoingCallerIdContext for this OutgoingCallerIdInstance
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdContext | [
"Generate",
"an",
"instance",
"context",
"for",
"the",
"instance",
"the",
"context",
"is",
"capable",
"of",
"performing",
"various",
"actions",
".",
"All",
"instance",
"actions",
"are",
"proxied",
"to",
"the",
"context"
] | python | train |
angr/angr | angr/misc/plugins.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/misc/plugins.py#L42-L50 | def register_preset(cls, name, preset):
"""
Register a preset instance with the class of the hub it corresponds to. This allows individual plugin objects to
automatically register themselves with a preset by using a classmethod of their own with only the name of the
preset to register with.
"""
if cls._presets is None:
cls._presets = {}
cls._presets[name] = preset | [
"def",
"register_preset",
"(",
"cls",
",",
"name",
",",
"preset",
")",
":",
"if",
"cls",
".",
"_presets",
"is",
"None",
":",
"cls",
".",
"_presets",
"=",
"{",
"}",
"cls",
".",
"_presets",
"[",
"name",
"]",
"=",
"preset"
] | Register a preset instance with the class of the hub it corresponds to. This allows individual plugin objects to
automatically register themselves with a preset by using a classmethod of their own with only the name of the
preset to register with. | [
"Register",
"a",
"preset",
"instance",
"with",
"the",
"class",
"of",
"the",
"hub",
"it",
"corresponds",
"to",
".",
"This",
"allows",
"individual",
"plugin",
"objects",
"to",
"automatically",
"register",
"themselves",
"with",
"a",
"preset",
"by",
"using",
"a",
... | python | train |
riordan/py-copyfile | copyfile/copyfile.py | https://github.com/riordan/py-copyfile/blob/ea7c45de8ac8e6f3a8a9dc0deee87f8f882a8e79/copyfile/copyfile.py#L19-L45 | def copyFile(src, dest):
"""Copies a source file to a destination whose path may not yet exist.
Keyword arguments:
src -- Source path to a file (string)
dest -- Path for destination file (also a string)
"""
#Src Exists?
try:
if os.path.isfile(src):
dpath, dfile = os.path.split(dest)
if not os.path.isdir(dpath):
os.makedirs(dpath)
if not os.path.exists(dest):
touch(dest)
try:
shutil.copy2(src, dest)
# eg. src and dest are the same file
except shutil.Error as e:
logging.exception('Error: %s' % e)
# eg. source or destination doesn't exist
except IOError as e:
logging.exception('Error: %s' % e.strerror)
except:
logging.exception('Error: src to copy does not exist.') | [
"def",
"copyFile",
"(",
"src",
",",
"dest",
")",
":",
"#Src Exists?",
"try",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"src",
")",
":",
"dpath",
",",
"dfile",
"=",
"os",
".",
"path",
".",
"split",
"(",
"dest",
")",
"if",
"not",
"os",
".... | Copies a source file to a destination whose path may not yet exist.
Keyword arguments:
src -- Source path to a file (string)
dest -- Path for destination file (also a string) | [
"Copies",
"a",
"source",
"file",
"to",
"a",
"destination",
"whose",
"path",
"may",
"not",
"yet",
"exist",
"."
] | python | train |
luckydonald/pytgbot | code_generation/output/pytgbot/bot.py | https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/bot.py#L2698-L2730 | def get_sticker_set(self, name):
"""
Use this method to get a sticker set. On success, a StickerSet object is returned.
https://core.telegram.org/bots/api#getstickerset
Parameters:
:param name: Name of the sticker set
:type name: str|unicode
Returns:
:return: On success, a StickerSet object is returned
:rtype: pytgbot.api_types.receivable.stickers.StickerSet
"""
assert_type_or_raise(name, unicode_type, parameter_name="name")
result = self.do("getStickerSet", name=name)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.stickers import StickerSet
try:
return StickerSet.from_array(result)
except TgApiParseException:
logger.debug("Failed parsing as api_type StickerSet", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result | [
"def",
"get_sticker_set",
"(",
"self",
",",
"name",
")",
":",
"assert_type_or_raise",
"(",
"name",
",",
"unicode_type",
",",
"parameter_name",
"=",
"\"name\"",
")",
"result",
"=",
"self",
".",
"do",
"(",
"\"getStickerSet\"",
",",
"name",
"=",
"name",
")",
... | Use this method to get a sticker set. On success, a StickerSet object is returned.
https://core.telegram.org/bots/api#getstickerset
Parameters:
:param name: Name of the sticker set
:type name: str|unicode
Returns:
:return: On success, a StickerSet object is returned
:rtype: pytgbot.api_types.receivable.stickers.StickerSet | [
"Use",
"this",
"method",
"to",
"get",
"a",
"sticker",
"set",
".",
"On",
"success",
"a",
"StickerSet",
"object",
"is",
"returned",
"."
] | python | train |
delfick/harpoon | harpoon/option_spec/image_objs.py | https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/option_spec/image_objs.py#L217-L232 | def cache_from_names(self):
"""Yield the image names to do --cache-from from"""
cache_from = self.cache_from()
if not cache_from or cache_from is NotSpecified:
return
if cache_from is True:
yield self.image_name
return
for thing in cache_from:
if not isinstance(thing, six.string_types):
yield thing.image_name
else:
yield thing | [
"def",
"cache_from_names",
"(",
"self",
")",
":",
"cache_from",
"=",
"self",
".",
"cache_from",
"(",
")",
"if",
"not",
"cache_from",
"or",
"cache_from",
"is",
"NotSpecified",
":",
"return",
"if",
"cache_from",
"is",
"True",
":",
"yield",
"self",
".",
"imag... | Yield the image names to do --cache-from from | [
"Yield",
"the",
"image",
"names",
"to",
"do",
"--",
"cache",
"-",
"from",
"from"
] | python | train |
photo/openphoto-python | trovebox/api/api_tag.py | https://github.com/photo/openphoto-python/blob/209a1da27c8d8c88dbcf4ea6c6f57031ea1bc44b/trovebox/api/api_tag.py#L31-L41 | def delete(self, tag, **kwds):
"""
Endpoint: /tag/<id>/delete.json
Deletes a tag.
Returns True if successful.
Raises a TroveboxError if not.
"""
return self._client.post("/tag/%s/delete.json" %
self._quote_url(self._extract_id(tag)),
**kwds)["result"] | [
"def",
"delete",
"(",
"self",
",",
"tag",
",",
"*",
"*",
"kwds",
")",
":",
"return",
"self",
".",
"_client",
".",
"post",
"(",
"\"/tag/%s/delete.json\"",
"%",
"self",
".",
"_quote_url",
"(",
"self",
".",
"_extract_id",
"(",
"tag",
")",
")",
",",
"*",... | Endpoint: /tag/<id>/delete.json
Deletes a tag.
Returns True if successful.
Raises a TroveboxError if not. | [
"Endpoint",
":",
"/",
"tag",
"/",
"<id",
">",
"/",
"delete",
".",
"json"
] | python | train |
linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py#L176-L194 | def _compute_anom_score_between_two_windows(self, i):
"""
Compute distance difference between two windows' chunk frequencies,
which is then marked as the anomaly score of the data point on the window boundary in the middle.
:param int i: index of the data point between two windows.
:return float: the anomaly score.
"""
lag_window_chunk_dict = self.lag_dicts[i]
future_window_chunk_dict = self.fut_dicts[i]
score = 0
for chunk in lag_window_chunk_dict:
if chunk in future_window_chunk_dict:
score += math.pow(future_window_chunk_dict[chunk] - lag_window_chunk_dict[chunk], 2)
else:
score += math.pow(lag_window_chunk_dict[chunk], 2)
for chunk in future_window_chunk_dict:
if chunk not in lag_window_chunk_dict:
score += math.pow(future_window_chunk_dict[chunk], 2)
return score | [
"def",
"_compute_anom_score_between_two_windows",
"(",
"self",
",",
"i",
")",
":",
"lag_window_chunk_dict",
"=",
"self",
".",
"lag_dicts",
"[",
"i",
"]",
"future_window_chunk_dict",
"=",
"self",
".",
"fut_dicts",
"[",
"i",
"]",
"score",
"=",
"0",
"for",
"chunk... | Compute distance difference between two windows' chunk frequencies,
which is then marked as the anomaly score of the data point on the window boundary in the middle.
:param int i: index of the data point between two windows.
:return float: the anomaly score. | [
"Compute",
"distance",
"difference",
"between",
"two",
"windows",
"chunk",
"frequencies",
"which",
"is",
"then",
"marked",
"as",
"the",
"anomaly",
"score",
"of",
"the",
"data",
"point",
"on",
"the",
"window",
"boundary",
"in",
"the",
"middle",
".",
":",
"par... | python | train |
3ll3d00d/vibe | backend/src/recorder/resources/measurements.py | https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/recorder/resources/measurements.py#L42-L49 | def get(self, deviceId, measurementId):
"""
details the specific measurement.
"""
record = self.measurements.get(deviceId)
if record is not None:
return record.get(measurementId)
return None | [
"def",
"get",
"(",
"self",
",",
"deviceId",
",",
"measurementId",
")",
":",
"record",
"=",
"self",
".",
"measurements",
".",
"get",
"(",
"deviceId",
")",
"if",
"record",
"is",
"not",
"None",
":",
"return",
"record",
".",
"get",
"(",
"measurementId",
")... | details the specific measurement. | [
"details",
"the",
"specific",
"measurement",
"."
] | python | train |
PmagPy/PmagPy | dialogs/magic_grid3.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/magic_grid3.py#L160-L191 | def add_items(self, dataframe, hide_cols=()):
"""
Add items and/or update existing items in grid
"""
# replace "None" values with ""
dataframe = dataframe.fillna("")
# remove any columns that shouldn't be shown
for col in hide_cols:
if col in dataframe.columns:
del dataframe[col]
# add more rows
self.AppendRows(len(dataframe))
columns = dataframe.columns
row_num = -1
# fill in all rows with appropriate values
for ind, row in dataframe.iterrows():
row_num += 1
for col_num, col in enumerate(columns):
value = row[col]
self.SetCellValue(row_num, col_num, str(value))
# set citation default value
if col == 'citations':
citation = row['citations']
if (citation is None) or (citation is np.nan):
self.SetCellValue(row_num, col_num, 'This study')
else:
if 'This study' not in citation:
if len(citation):
citation += ':'
citation += 'This study'
self.SetCellValue(row_num, col_num, citation)
self.row_labels.extend(dataframe.index) | [
"def",
"add_items",
"(",
"self",
",",
"dataframe",
",",
"hide_cols",
"=",
"(",
")",
")",
":",
"# replace \"None\" values with \"\"",
"dataframe",
"=",
"dataframe",
".",
"fillna",
"(",
"\"\"",
")",
"# remove any columns that shouldn't be shown",
"for",
"col",
"in",
... | Add items and/or update existing items in grid | [
"Add",
"items",
"and",
"/",
"or",
"update",
"existing",
"items",
"in",
"grid"
] | python | train |
amirziai/flatten | util.py | https://github.com/amirziai/flatten/blob/e8e2cbbdd6fe21177bfc0ce034562463ae555799/util.py#L1-L9 | def check_if_numbers_are_consecutive(list_):
"""
Returns True if numbers in the list are consecutive
:param list_: list of integers
:return: Boolean
"""
return all((True if second - first == 1 else False
for first, second in zip(list_[:-1], list_[1:]))) | [
"def",
"check_if_numbers_are_consecutive",
"(",
"list_",
")",
":",
"return",
"all",
"(",
"(",
"True",
"if",
"second",
"-",
"first",
"==",
"1",
"else",
"False",
"for",
"first",
",",
"second",
"in",
"zip",
"(",
"list_",
"[",
":",
"-",
"1",
"]",
",",
"l... | Returns True if numbers in the list are consecutive
:param list_: list of integers
:return: Boolean | [
"Returns",
"True",
"if",
"numbers",
"in",
"the",
"list",
"are",
"consecutive"
] | python | train |
django-parler/django-parler | parler/cache.py | https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/cache.py#L112-L144 | def _get_cached_values(instance, translated_model, language_code, use_fallback=False):
"""
    Fetch a cached field.
"""
if not appsettings.PARLER_ENABLE_CACHING or not instance.pk or instance._state.adding:
return None
key = get_translation_cache_key(translated_model, instance.pk, language_code)
values = cache.get(key)
if not values:
return None
# Check for a stored fallback marker
if values.get('__FALLBACK__', False):
# Internal trick, already set the fallback marker, so no query will be performed.
instance._translations_cache[translated_model][language_code] = MISSING
# Allow to return the fallback language instead.
if use_fallback:
lang_dict = get_language_settings(language_code)
# iterate over list of fallback languages, which should be already
# in proper order
for fallback_lang in lang_dict['fallbacks']:
if fallback_lang != language_code:
return _get_cached_values(
instance, translated_model, fallback_lang,
use_fallback=False
)
return None
values['master'] = instance
values['language_code'] = language_code
return values | [
"def",
"_get_cached_values",
"(",
"instance",
",",
"translated_model",
",",
"language_code",
",",
"use_fallback",
"=",
"False",
")",
":",
"if",
"not",
"appsettings",
".",
"PARLER_ENABLE_CACHING",
"or",
"not",
"instance",
".",
"pk",
"or",
"instance",
".",
"_state... | Fetch an cached field. | [
"Fetch",
"an",
"cached",
"field",
"."
] | python | train |
qualisys/qualisys_python_sdk | qtm/packet.py | https://github.com/qualisys/qualisys_python_sdk/blob/127d7eeebc2b38b5cafdfa5d1d0198437fedd274/qtm/packet.py#L397-L409 | def get_6d(self, component_info=None, data=None, component_position=None):
"""Get 6D data."""
components = []
append_components = components.append
for _ in range(component_info.body_count):
component_position, position = QRTPacket._get_exact(
RT6DBodyPosition, data, component_position
)
component_position, matrix = QRTPacket._get_tuple(
RT6DBodyRotation, data, component_position
)
append_components((position, matrix))
return components | [
"def",
"get_6d",
"(",
"self",
",",
"component_info",
"=",
"None",
",",
"data",
"=",
"None",
",",
"component_position",
"=",
"None",
")",
":",
"components",
"=",
"[",
"]",
"append_components",
"=",
"components",
".",
"append",
"for",
"_",
"in",
"range",
"... | Get 6D data. | [
"Get",
"6D",
"data",
"."
] | python | valid |
stratis-storage/into-dbus-python | src/into_dbus_python/_xformer.py | https://github.com/stratis-storage/into-dbus-python/blob/81366049671f79116bbb81c97bf621800a2f6315/src/into_dbus_python/_xformer.py#L78-L103 | def _handle_variant(self):
"""
Generate the correct function for a variant signature.
:returns: function that returns an appropriate value
:rtype: ((str * object) or list)-> object
"""
def the_func(a_tuple, variant=0):
"""
Function for generating a variant value from a tuple.
:param a_tuple: the parts of the variant
:type a_tuple: (str * object) or list
:param int variant: object's variant index
:returns: a value of the correct type with correct variant level
:rtype: object * int
"""
# pylint: disable=unused-argument
(signature, an_obj) = a_tuple
(func, sig) = self.COMPLETE.parseString(signature)[0]
assert sig == signature
(xformed, _) = func(an_obj, variant=variant + 1)
return (xformed, xformed.variant_level)
return (the_func, 'v') | [
"def",
"_handle_variant",
"(",
"self",
")",
":",
"def",
"the_func",
"(",
"a_tuple",
",",
"variant",
"=",
"0",
")",
":",
"\"\"\"\n Function for generating a variant value from a tuple.\n\n :param a_tuple: the parts of the variant\n :type a_tuple: (str ... | Generate the correct function for a variant signature.
:returns: function that returns an appropriate value
:rtype: ((str * object) or list)-> object | [
"Generate",
"the",
"correct",
"function",
"for",
"a",
"variant",
"signature",
"."
] | python | valid |
sprockets/sprockets-influxdb | sprockets_influxdb.py | https://github.com/sprockets/sprockets-influxdb/blob/cce73481b8f26b02e65e3f9914a9a22eceff3063/sprockets_influxdb.py#L658-L667 | def _trigger_batch_write():
"""Stop a timeout if it's running, and then write the measurements."""
global _batch_future
LOGGER.debug('Batch write triggered (%r/%r)',
_buffer_size, _trigger_size)
_maybe_stop_timeout()
_maybe_warn_about_buffer_size()
_batch_future = _write_measurements()
return _batch_future | [
"def",
"_trigger_batch_write",
"(",
")",
":",
"global",
"_batch_future",
"LOGGER",
".",
"debug",
"(",
"'Batch write triggered (%r/%r)'",
",",
"_buffer_size",
",",
"_trigger_size",
")",
"_maybe_stop_timeout",
"(",
")",
"_maybe_warn_about_buffer_size",
"(",
")",
"_batch_f... | Stop a timeout if it's running, and then write the measurements. | [
"Stop",
"a",
"timeout",
"if",
"it",
"s",
"running",
"and",
"then",
"write",
"the",
"measurements",
"."
] | python | train |
SmokinCaterpillar/pypet | pypet/storageservice.py | https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L4110-L4157 | def _prm_write_dict_as_table(self, key, data_to_store, group, fullname, **kwargs):
"""Stores a python dictionary as pytable
:param key:
Name of data item to store
:param data_to_store:
Dictionary to store
:param group:
Group node where to store data in hdf5 file
:param fullname:
Full name of the `data_to_store`s original container, only needed for throwing errors.
"""
if key in group:
raise ValueError(
'Dictionary `%s` already exists in `%s`. Appending is not supported (yet).')
if key in group:
raise ValueError('Dict `%s` already exists in `%s`. Appending is not supported (yet).')
temp_dict = {}
for innerkey in data_to_store:
val = data_to_store[innerkey]
temp_dict[innerkey] = [val]
# Convert dictionary to object table
objtable = ObjectTable(data=temp_dict)
# Then store the object table
self._prm_write_into_pytable(key, objtable, group, fullname, **kwargs)
new_table = group._f_get_child(key)
# Remember that the Object Table represents a dictionary
self._all_set_attributes_to_recall_natives(temp_dict, new_table,
HDF5StorageService.DATA_PREFIX)
setattr(new_table._v_attrs, HDF5StorageService.STORAGE_TYPE,
HDF5StorageService.DICT)
self._hdf5file.flush() | [
"def",
"_prm_write_dict_as_table",
"(",
"self",
",",
"key",
",",
"data_to_store",
",",
"group",
",",
"fullname",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"key",
"in",
"group",
":",
"raise",
"ValueError",
"(",
"'Dictionary `%s` already exists in `%s`. Appending is ... | Stores a python dictionary as pytable
:param key:
Name of data item to store
:param data_to_store:
Dictionary to store
:param group:
Group node where to store data in hdf5 file
:param fullname:
Full name of the `data_to_store`s original container, only needed for throwing errors. | [
"Stores",
"a",
"python",
"dictionary",
"as",
"pytable"
] | python | test |
bcbio/bcbio-nextgen | bcbio/bam/counts.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/bam/counts.py#L41-L45 | def coverage_pileup(self, space, start, end):
"""Retrieve pileup coverage across a specified region.
"""
return ((col.pos, self._normalize(col.n, self._total))
for col in self._bam.pileup(space, start, end)) | [
"def",
"coverage_pileup",
"(",
"self",
",",
"space",
",",
"start",
",",
"end",
")",
":",
"return",
"(",
"(",
"col",
".",
"pos",
",",
"self",
".",
"_normalize",
"(",
"col",
".",
"n",
",",
"self",
".",
"_total",
")",
")",
"for",
"col",
"in",
"self"... | Retrieve pileup coverage across a specified region. | [
"Retrieve",
"pileup",
"coverage",
"across",
"a",
"specified",
"region",
"."
] | python | train |
deep-compute/logagg | logagg/collector.py | https://github.com/deep-compute/logagg/blob/7863bc1b5ddf3e67c4d4b55746799304180589a0/logagg/collector.py#L356-L412 | def _scan_fpatterns(self, state):
'''
For a list of given fpatterns, this starts a thread
collecting log lines from file
>>> os.path.isfile = lambda path: path == '/path/to/log_file.log'
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> print(lc.fpaths)
file=/path/to/log_file.log:formatter=logagg.formatters.basescript
>>> print('formatters loaded:', lc.formatters)
{}
>>> print('log file reader threads started:', lc.log_reader_threads)
{}
>>> state = AttrDict(files_tracked=list())
        >>> print('files being tracked:', state.files_tracked)
[]
>>> if not state.files_tracked:
>>> lc._scan_fpatterns(state)
>>> print('formatters loaded:', lc.formatters)
>>> print('log file reader threads started:', lc.log_reader_threads)
        >>> print('files being tracked:', state.files_tracked)
'''
for f in self.fpaths:
fpattern, formatter =(a.split('=')[1] for a in f.split(':', 1))
self.log.debug('scan_fpatterns', fpattern=fpattern, formatter=formatter)
# TODO code for scanning fpatterns for the files not yet present goes here
fpaths = glob.glob(fpattern)
# Load formatter_fn if not in list
fpaths = list(set(fpaths) - set(state.files_tracked))
for fpath in fpaths:
try:
formatter_fn = self.formatters.get(formatter,
load_formatter_fn(formatter))
self.log.info('found_formatter_fn', fn=formatter)
self.formatters[formatter] = formatter_fn
except (SystemExit, KeyboardInterrupt): raise
except (ImportError, AttributeError):
self.log.exception('formatter_fn_not_found', fn=formatter)
sys.exit(-1)
# Start a thread for every file
self.log.info('found_log_file', log_file=fpath)
log_f = dict(fpath=fpath, fpattern=fpattern,
formatter=formatter, formatter_fn=formatter_fn)
log_key = (fpath, fpattern, formatter)
if log_key not in self.log_reader_threads:
self.log.info('starting_collect_log_lines_thread', log_key=log_key)
# There is no existing thread tracking this log file. Start one
log_reader_thread = util.start_daemon_thread(self.collect_log_lines, (log_f,))
self.log_reader_threads[log_key] = log_reader_thread
state.files_tracked.append(fpath)
time.sleep(self.SCAN_FPATTERNS_INTERVAL) | [
"def",
"_scan_fpatterns",
"(",
"self",
",",
"state",
")",
":",
"for",
"f",
"in",
"self",
".",
"fpaths",
":",
"fpattern",
",",
"formatter",
"=",
"(",
"a",
".",
"split",
"(",
"'='",
")",
"[",
"1",
"]",
"for",
"a",
"in",
"f",
".",
"split",
"(",
"'... | For a list of given fpatterns, this starts a thread
collecting log lines from file
>>> os.path.isfile = lambda path: path == '/path/to/log_file.log'
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> print(lc.fpaths)
file=/path/to/log_file.log:formatter=logagg.formatters.basescript
>>> print('formatters loaded:', lc.formatters)
{}
>>> print('log file reader threads started:', lc.log_reader_threads)
{}
>>> state = AttrDict(files_tracked=list())
>>> print('files being tracked:', state.files_tracked)
[]
>>> if not state.files_tracked:
>>> lc._scan_fpatterns(state)
>>> print('formatters loaded:', lc.formatters)
>>> print('log file reader threads started:', lc.log_reader_threads)
>>> print('files being tracked:', state.files_tracked)
"For",
"a",
"list",
"of",
"given",
"fpatterns",
"this",
"starts",
"a",
"thread",
"collecting",
"log",
"lines",
"from",
"file"
] | python | train |
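The `_scan_fpatterns` record above boils down to a glob-and-thread loop. A minimal standalone sketch of that idea follows; it is not logagg's implementation, and `tail_file`, `scan_patterns`, and the pattern list are illustrative placeholders.

```python
import glob
import threading
import time

def tail_file(path):
    """Placeholder worker: follow a single log file and print new lines."""
    with open(path) as handle:
        handle.seek(0, 2)              # start at the end of the file
        while True:
            line = handle.readline()
            if not line:
                time.sleep(0.5)
                continue
            print(path, line.rstrip())

def scan_patterns(patterns, tracked, interval=30):
    """Every `interval` seconds, start one daemon reader thread per newly matched file."""
    while True:
        for pattern in patterns:
            for path in set(glob.glob(pattern)) - tracked:
                worker = threading.Thread(target=tail_file, args=(path,), daemon=True)
                worker.start()
                tracked.add(path)
        time.sleep(interval)
```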
phaethon/kamene | kamene/arch/windows/__init__.py | https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/arch/windows/__init__.py#L193-L198 | def devname_from_index(self, if_index):
"""Return interface name from interface index"""
for devname, iface in self.items():
if iface.win_index == if_index:
return iface.name
raise ValueError("Unknown network interface index %r" % if_index) | [
"def",
"devname_from_index",
"(",
"self",
",",
"if_index",
")",
":",
"for",
"devname",
",",
"iface",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"iface",
".",
"win_index",
"==",
"if_index",
":",
"return",
"iface",
".",
"name",
"raise",
"ValueError",... | Return interface name from interface index | [
"Return",
"interface",
"name",
"from",
"interface",
"index"
] | python | train |
DataBiosphere/dsub | dsub/providers/google_base.py | https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/providers/google_base.py#L301-L357 | def parse_rfc3339_utc_string(rfc3339_utc_string):
"""Converts a datestamp from RFC3339 UTC to a datetime.
Args:
rfc3339_utc_string: a datetime string in RFC3339 UTC "Zulu" format
Returns:
A datetime.
"""
# The timestamp from the Google Operations are all in RFC3339 format, but
# they are sometimes formatted to milliseconds, microseconds, sometimes
# nanoseconds, and sometimes only seconds:
# * 2016-11-14T23:05:56Z
# * 2016-11-14T23:05:56.010Z
# * 2016-11-14T23:05:56.010429Z
# * 2016-11-14T23:05:56.010429380Z
m = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}).?(\d*)Z',
rfc3339_utc_string)
# It would be unexpected to get a different date format back from Google.
# If we raise an exception here, we can break people completely.
# Instead, let's just return None and people can report that some dates
# are not showing up.
# We might reconsider this approach in the future; it was originally
# established when dates were only used for display.
if not m:
return None
groups = m.groups()
if len(groups[6]) not in (0, 3, 6, 9):
return None
# Create a UTC datestamp from parsed components
# 1- Turn components 0-5 from strings to integers
# 2- If the last component does not exist, set it to 0.
# If it does exist, make sure to interpret it as milliseconds.
g = [int(val) for val in groups[:6]]
fraction = groups[6]
if not fraction:
micros = 0
elif len(fraction) == 3:
micros = int(fraction) * 1000
elif len(fraction) == 6:
micros = int(fraction)
elif len(fraction) == 9:
# When nanoseconds are provided, we round
micros = int(round(int(fraction) / 1000))
else:
assert False, 'Fraction length not 0, 3, 6, or 9: {}'.format(len(fraction))
try:
return datetime(g[0], g[1], g[2], g[3], g[4], g[5], micros, tzinfo=pytz.utc)
except ValueError as e:
assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(
rfc3339_utc_string, e) | [
"def",
"parse_rfc3339_utc_string",
"(",
"rfc3339_utc_string",
")",
":",
"# The timestamp from the Google Operations are all in RFC3339 format, but",
"# they are sometimes formatted to millisconds, microseconds, sometimes",
"# nanoseconds, and sometimes only seconds:",
"# * 2016-11-14T23:05:56Z",
... | Converts a datestamp from RFC3339 UTC to a datetime.
Args:
rfc3339_utc_string: a datetime string in RFC3339 UTC "Zulu" format
Returns:
A datetime. | [
"Converts",
"a",
"datestamp",
"from",
"RFC3339",
"UTC",
"to",
"a",
"datetime",
"."
] | python | valid |
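The variable-precision fraction handling in `parse_rfc3339_utc_string` can be reproduced with a short, hedged sketch. This is a simplified re-implementation for illustration only: it uses `datetime.timezone.utc` instead of pytz and collapses the milli/micro/nano branches into one scaling step.

```python
import re
from datetime import datetime, timezone

RFC3339 = re.compile(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})\.?(\d*)Z')

def parse_rfc3339(value):
    m = RFC3339.match(value)
    if not m or len(m.group(7)) not in (0, 3, 6, 9):
        return None
    y, mo, d, h, mi, s = (int(v) for v in m.groups()[:6])
    frac = m.group(7)
    # Normalise a milli/micro/nano fraction to microseconds (rounding nanoseconds).
    micros = 0 if not frac else int(round(int(frac) * 10 ** (6 - len(frac))))
    return datetime(y, mo, d, h, mi, s, micros, tzinfo=timezone.utc)

for ts in ('2016-11-14T23:05:56Z', '2016-11-14T23:05:56.010Z',
           '2016-11-14T23:05:56.010429Z', '2016-11-14T23:05:56.010429380Z'):
    print(parse_rfc3339(ts))
```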
mitsei/dlkit | dlkit/json_/utilities.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/utilities.py#L914-L938 | def convert_catalog_id_to_object_id_string(catalog_id):
"""When doing hierarchies, need to convert a catalogId into an
ObjectId, so convert to a string, then into a hex format.
i.e. Bank Assessment hierarchy should become
BANKASSESSME
'42414e4b4153534553534d45'
"""
if not isinstance(catalog_id, Id):
raise TypeError('input needs to be an Id')
seed_str = catalog_id.get_identifier() + catalog_id.get_authority() + '000000000000'
try:
seed_str = str.encode(seed_str[:12])
except TypeError:
# sometimes unicode is returned, in which case Python 2 can't handle it
seed_str = seed_str[:12]
seed_str = binascii.hexlify(seed_str)
try:
# python 3
seed_str = str(seed_str, 'utf8')
except TypeError:
# python 2
seed_str = str(seed_str)
return seed_str | [
"def",
"convert_catalog_id_to_object_id_string",
"(",
"catalog_id",
")",
":",
"if",
"not",
"isinstance",
"(",
"catalog_id",
",",
"Id",
")",
":",
"raise",
"TypeError",
"(",
"'input needs to be an Id'",
")",
"seed_str",
"=",
"catalog_id",
".",
"get_identifier",
"(",
... | When doing hierarchies, need to convert a catalogId into an
ObjectId, so convert to a string, then into a hex format.
i.e. Bank Assessment hierarchy should become
BANKASSESSME
'42414e4b4153534553534d45' | [
"When",
"doing",
"hierarchies",
"need",
"to",
"convert",
"a",
"catalogId",
"into",
"an",
"ObjectId",
"so",
"convert",
"to",
"a",
"string",
"then",
"into",
"a",
"hex",
"format",
"."
] | python | train |
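The hex-seed conversion in `convert_catalog_id_to_object_id_string` is easy to check in isolation. The sketch below assumes plain string inputs rather than an `Id` object, and `catalog_seed` is a made-up helper name.

```python
import binascii

def catalog_seed(identifier, authority):
    """First 12 characters of identifier+authority (zero-padded), hex-encoded."""
    seed = (identifier + authority + '0' * 12)[:12]
    return binascii.hexlify(seed.encode('utf8')).decode('utf8')

print(catalog_seed('BANK', 'ASSESSMENT'))   # -> '42414e4b4153534553534d45'
```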
OpenTreeOfLife/peyotl | scripts/nexson/prune_to_clean_mapped.py | https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/scripts/nexson/prune_to_clean_mapped.py#L147-L159 | def prune_clade(self, node_id):
"""Prune `node_id` and the edges and nodes that are tipward of it.
Caller must delete the edge to node_id."""
to_del_nodes = [node_id]
while bool(to_del_nodes):
node_id = to_del_nodes.pop(0)
self._flag_node_as_del_and_del_in_by_target(node_id)
ebsd = self._edge_by_source.get(node_id)
if ebsd is not None:
child_edges = list(ebsd.values())
to_del_nodes.extend([i['@target'] for i in child_edges])
del self._edge_by_source[
node_id] | [
"def",
"prune_clade",
"(",
"self",
",",
"node_id",
")",
":",
"to_del_nodes",
"=",
"[",
"node_id",
"]",
"while",
"bool",
"(",
"to_del_nodes",
")",
":",
"node_id",
"=",
"to_del_nodes",
".",
"pop",
"(",
"0",
")",
"self",
".",
"_flag_node_as_del_and_del_in_by_ta... | Prune `node_id` and the edges and nodes that are tipward of it.
Caller must delete the edge to node_id. | [
"Prune",
"node_id",
"and",
"the",
"edges",
"and",
"nodes",
"that",
"are",
"tipward",
"of",
"it",
".",
"Caller",
"must",
"delete",
"the",
"edge",
"to",
"node_id",
"."
] | python | train |
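The pruning loop in `prune_clade` is a breadth-first deletion over a parent-to-children map. A generic sketch with hypothetical data (not peyotl's NexSON structures) is shown below; as in the record's docstring, the caller still has to drop the edge pointing at the pruned node.

```python
def prune_clade(edges_by_source, node_id):
    """Remove node_id and everything tipward of it from a {parent: [children]} map."""
    pending = [node_id]
    removed = []
    while pending:
        current = pending.pop(0)
        removed.append(current)
        # Queue the children, then forget this node's outgoing edges.
        pending.extend(edges_by_source.pop(current, []))
    return removed

tree = {'root': ['a', 'b'], 'a': ['c', 'd'], 'b': [], 'c': [], 'd': []}
print(prune_clade(tree, 'a'))   # ['a', 'c', 'd']
print(tree)                     # {'root': ['a', 'b'], 'b': []}  (caller removes 'a' from root)
```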
EnergieID/smappy | smappy/smappy.py | https://github.com/EnergieID/smappy/blob/1ada3abc9a51c76205c072369258f6f4f4e8fd0f/smappy/smappy.py#L139-L151 | def get_service_locations(self):
"""
Request service locations
Returns
-------
dict
"""
url = URLS['servicelocation']
headers = {"Authorization": "Bearer {}".format(self.access_token)}
r = requests.get(url, headers=headers)
r.raise_for_status()
return r.json() | [
"def",
"get_service_locations",
"(",
"self",
")",
":",
"url",
"=",
"URLS",
"[",
"'servicelocation'",
"]",
"headers",
"=",
"{",
"\"Authorization\"",
":",
"\"Bearer {}\"",
".",
"format",
"(",
"self",
".",
"access_token",
")",
"}",
"r",
"=",
"requests",
".",
... | Request service locations
Returns
-------
dict | [
"Request",
"service",
"locations"
] | python | train |
sorgerlab/indra | indra/sources/sparser/processor.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/sparser/processor.py#L155-L172 | def set_statements_pmid(self, pmid):
"""Set the evidence PMID of Statements that have been extracted.
Parameters
----------
pmid : str or None
The PMID to be used in the Evidence objects of the Statements
that were extracted by the processor.
"""
# Replace PMID value in JSON dict first
for stmt in self.json_stmts:
evs = stmt.get('evidence', [])
for ev in evs:
ev['pmid'] = pmid
# Replace PMID value in extracted Statements next
for stmt in self.statements:
for ev in stmt.evidence:
ev.pmid = pmid | [
"def",
"set_statements_pmid",
"(",
"self",
",",
"pmid",
")",
":",
"# Replace PMID value in JSON dict first",
"for",
"stmt",
"in",
"self",
".",
"json_stmts",
":",
"evs",
"=",
"stmt",
".",
"get",
"(",
"'evidence'",
",",
"[",
"]",
")",
"for",
"ev",
"in",
"evs... | Set the evidence PMID of Statements that have been extracted.
Parameters
----------
pmid : str or None
The PMID to be used in the Evidence objects of the Statements
that were extracted by the processor. | [
"Set",
"the",
"evidence",
"PMID",
"of",
"Statements",
"that",
"have",
"been",
"extracted",
"."
] | python | train |
bcbio/bcbio-nextgen | bcbio/cwl/hpc.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/hpc.py#L8-L40 | def create_cromwell_config(args, work_dir, sample_file):
"""Prepare a cromwell configuration within the current working directory.
"""
docker_attrs = ["String? docker", "String? docker_user"]
cwl_attrs = ["Int? cpuMin", "Int? cpuMax", "Int? memoryMin", "Int? memoryMax", "String? outDirMin",
"String? outDirMax", "String? tmpDirMin", "String? tmpDirMax"]
out_file = os.path.join(work_dir, "bcbio-cromwell.conf")
run_config = _load_custom_config(args.runconfig) if args.runconfig else {}
# Avoid overscheduling jobs for local runs by limiting concurrent jobs
# Longer term would like to keep these within defined core window
joblimit = args.joblimit
if joblimit == 0 and not args.scheduler:
joblimit = 1
file_types = _get_filesystem_types(args, sample_file)
std_args = {"docker_attrs": "" if args.no_container else "\n ".join(docker_attrs),
"submit_docker": 'submit-docker: ""' if args.no_container else "",
"joblimit": "concurrent-job-limit = %s" % (joblimit) if joblimit > 0 else "",
"cwl_attrs": "\n ".join(cwl_attrs),
"filesystem": _get_filesystem_config(file_types),
"database": run_config.get("database", DATABASE_CONFIG % {"work_dir": work_dir})}
cl_args, conf_args, scheduler, cloud_type = _args_to_cromwell(args)
std_args["engine"] = _get_engine_filesystem_config(file_types, args, conf_args)
conf_args.update(std_args)
main_config = {"hpc": (HPC_CONFIGS[scheduler] % conf_args) if scheduler else "",
"cloud": (CLOUD_CONFIGS[cloud_type] % conf_args) if cloud_type else "",
"work_dir": work_dir}
main_config.update(std_args)
# Local run always seems to need docker set because of submit-docker in default configuration
# Can we unset submit-docker based on configuration so it doesn't inherit?
# main_config["docker_attrs"] = "\n ".join(docker_attrs)
with open(out_file, "w") as out_handle:
out_handle.write(CROMWELL_CONFIG % main_config)
return out_file | [
"def",
"create_cromwell_config",
"(",
"args",
",",
"work_dir",
",",
"sample_file",
")",
":",
"docker_attrs",
"=",
"[",
"\"String? docker\"",
",",
"\"String? docker_user\"",
"]",
"cwl_attrs",
"=",
"[",
"\"Int? cpuMin\"",
",",
"\"Int? cpuMax\"",
",",
"\"Int? memoryMin\"... | Prepare a cromwell configuration within the current working directory. | [
"Prepare",
"a",
"cromwell",
"configuration",
"within",
"the",
"current",
"working",
"directory",
"."
] | python | train |
ihgazni2/elist | elist/elist.py | https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L4328-L4365 | def filter(ol,test_func,*args,**kwargs):
'''
from elist.elist import *
def test_func(ele,x):
cond = (ele > x)
return(cond)
ol = [1,2,3,4]
id(ol)
new = filter(ol,test_func,3)
new
id(new)
#####
ol = [10,20,30,40]
id(ol)
rslt = filter(ol,test_func,3,mode="original")
rslt
id(rslt)
'''
if('mode' in kwargs):
mode = kwargs['mode']
else:
mode = "new"
length = ol.__len__()
new = []
cpol = copy.deepcopy(ol)
for i in range(0,length):
cond = test_func(cpol[i],*args)
if(cond):
new.append(cpol[i])
else:
pass
if(mode == "new"):
return(new)
else:
ol.clear()
ol.extend(new)
return(ol) | [
"def",
"filter",
"(",
"ol",
",",
"test_func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"'mode'",
"in",
"kwargs",
")",
":",
"mode",
"=",
"kwargs",
"[",
"'mode'",
"]",
"else",
":",
"mode",
"=",
"\"new\"",
"length",
"=",
"ol",... | from elist.elist import *
def test_func(ele,x):
cond = (ele > x)
return(cond)
ol = [1,2,3,4]
id(ol)
new = filter(ol,test_func,3)
new
id(new)
#####
ol = [10,20,30,40]
id(ol)
rslt = filter(ol,test_func,3,mode="original")
rslt
id(rslt) | [
"from",
"elist",
".",
"elist",
"import",
"*",
"def",
"test_func",
"(",
"ele",
"x",
")",
":",
"cond",
"=",
"(",
"ele",
">",
"x",
")",
"return",
"(",
"cond",
")",
"ol",
"=",
"[",
"1",
"2",
"3",
"4",
"]",
"id",
"(",
"ol",
")",
"new",
"=",
"fil... | python | valid |
kubernetes-client/python | kubernetes/client/apis/custom_objects_api.py | https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/custom_objects_api.py#L415-L442 | def delete_namespaced_custom_object(self, group, version, namespace, plural, name, body, **kwargs):
"""
Deletes the specified namespace scoped custom object
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_custom_object(group, version, namespace, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param V1DeleteOptions body: (required)
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, body, **kwargs)
else:
(data) = self.delete_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, body, **kwargs)
return data | [
"def",
"delete_namespaced_custom_object",
"(",
"self",
",",
"group",
",",
"version",
",",
"namespace",
",",
"plural",
",",
"name",
",",
"body",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
... | Deletes the specified namespace scoped custom object
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_custom_object(group, version, namespace, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param V1DeleteOptions body: (required)
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
:return: object
If the method is called asynchronously,
returns the request thread. | [
"Deletes",
"the",
"specified",
"namespace",
"scoped",
"custom",
"object",
"This",
"method",
"makes",
"a",
"synchronous",
"HTTP",
"request",
"by",
"default",
".",
"To",
"make",
"an",
"asynchronous",
"HTTP",
"request",
"please",
"pass",
"async_req",
"=",
"True",
... | python | train |
ssato/python-anyconfig | src/anyconfig/backend/base.py | https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/base.py#L362-L380 | def dump(self, cnf, ioi, **kwargs):
"""
Dump config 'cnf' to output object of which 'ioi' referring.
:param cnf: Configuration data to dump
:param ioi:
an 'anyconfig.globals.IOInfo' namedtuple object provides various
info of input object to load data from
:param kwargs: optional keyword parameters to be sanitized :: dict
:raises IOError, OSError, AttributeError: When dump failed.
"""
kwargs = anyconfig.utils.filter_options(self._dump_opts, kwargs)
if anyconfig.utils.is_stream_ioinfo(ioi):
self.dump_to_stream(cnf, ioi.src, **kwargs)
else:
ensure_outdir_exists(ioi.path)
self.dump_to_path(cnf, ioi.path, **kwargs) | [
"def",
"dump",
"(",
"self",
",",
"cnf",
",",
"ioi",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"anyconfig",
".",
"utils",
".",
"filter_options",
"(",
"self",
".",
"_dump_opts",
",",
"kwargs",
")",
"if",
"anyconfig",
".",
"utils",
".",
"is_stre... | Dump config 'cnf' to output object of which 'ioi' refering.
:param cnf: Configuration data to dump
:param ioi:
an 'anyconfig.globals.IOInfo' namedtuple object provides various
info of input object to load data from
:param kwargs: optional keyword parameters to be sanitized :: dict
:raises IOError, OSError, AttributeError: When dump failed. | [
"Dump",
"config",
"cnf",
"to",
"output",
"object",
"of",
"which",
"ioi",
"refering",
"."
] | python | train |
rameshg87/pyremotevbox | pyremotevbox/ZSI/parse.py | https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/parse.py#L328-L333 | def WhatMustIUnderstand(self):
'''Return a list of (uri,localname) tuples for all elements in the
header that have mustUnderstand set.
'''
return [ ( E.namespaceURI, E.localName )
for E in self.header_elements if _find_mu(E) == "1" ] | [
"def",
"WhatMustIUnderstand",
"(",
"self",
")",
":",
"return",
"[",
"(",
"E",
".",
"namespaceURI",
",",
"E",
".",
"localName",
")",
"for",
"E",
"in",
"self",
".",
"header_elements",
"if",
"_find_mu",
"(",
"E",
")",
"==",
"\"1\"",
"]"
] | Return a list of (uri,localname) tuples for all elements in the
header that have mustUnderstand set. | [
"Return",
"a",
"list",
"of",
"(",
"uri",
"localname",
")",
"tuples",
"for",
"all",
"elements",
"in",
"the",
"header",
"that",
"have",
"mustUnderstand",
"set",
"."
] | python | train |
learningequality/ricecooker | ricecooker/utils/linecook.py | https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/linecook.py#L117-L126 | def keep_folder(raw_path):
"""
Keep only folders that don't contain patterns in `DIR_EXCLUDE_PATTERNS`.
"""
keep = True
for pattern in DIR_EXCLUDE_PATTERNS:
if pattern in raw_path:
LOGGER.debug('rejecting', raw_path)
keep = False
return keep | [
"def",
"keep_folder",
"(",
"raw_path",
")",
":",
"keep",
"=",
"True",
"for",
"pattern",
"in",
"DIR_EXCLUDE_PATTERNS",
":",
"if",
"pattern",
"in",
"raw_path",
":",
"LOGGER",
".",
"debug",
"(",
"'rejecting'",
",",
"raw_path",
")",
"keep",
"=",
"False",
"retu... | Keep only folders that don't contain patterns in `DIR_EXCLUDE_PATTERNS`. | [
"Keep",
"only",
"folders",
"that",
"don",
"t",
"contain",
"patterns",
"in",
"DIR_EXCLUDE_PATTERNS",
"."
] | python | train |
nephila/djangocms-page-tags | djangocms_page_tags/utils.py | https://github.com/nephila/djangocms-page-tags/blob/602c9d74456d689f46ddb8d67cd64d1a42747359/djangocms_page_tags/utils.py#L5-L23 | def get_cache_key(request, page, lang, site_id, title):
"""
Create the cache key for the current page and tag type
"""
from cms.cache import _get_cache_key
from cms.templatetags.cms_tags import _get_page_by_untyped_arg
from cms.models import Page
if not isinstance(page, Page):
page = _get_page_by_untyped_arg(page, request, site_id)
if not site_id:
try:
site_id = page.node.site_id
except AttributeError: # CMS_3_4
site_id = page.site_id
if not title:
return _get_cache_key('page_tags', page, '', site_id) + '_type:tags_list'
else:
return _get_cache_key('title_tags', page, lang, site_id) + '_type:tags_list' | [
"def",
"get_cache_key",
"(",
"request",
",",
"page",
",",
"lang",
",",
"site_id",
",",
"title",
")",
":",
"from",
"cms",
".",
"cache",
"import",
"_get_cache_key",
"from",
"cms",
".",
"templatetags",
".",
"cms_tags",
"import",
"_get_page_by_untyped_arg",
"from"... | Create the cache key for the current page and tag type | [
"Create",
"the",
"cache",
"key",
"for",
"the",
"current",
"page",
"and",
"tag",
"type"
] | python | train |
LonamiWebs/Telethon | telethon/events/common.py | https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/events/common.py#L83-L94 | async def resolve(self, client):
"""Helper method to allow event builders to be resolved before usage"""
if self.resolved:
return
if not self._resolve_lock:
self._resolve_lock = asyncio.Lock(loop=client.loop)
async with self._resolve_lock:
if not self.resolved:
await self._resolve(client)
self.resolved = True | [
"async",
"def",
"resolve",
"(",
"self",
",",
"client",
")",
":",
"if",
"self",
".",
"resolved",
":",
"return",
"if",
"not",
"self",
".",
"_resolve_lock",
":",
"self",
".",
"_resolve_lock",
"=",
"asyncio",
".",
"Lock",
"(",
"loop",
"=",
"client",
".",
... | Helper method to allow event builders to be resolved before usage | [
"Helper",
"method",
"to",
"allow",
"event",
"builders",
"to",
"be",
"resolved",
"before",
"usage"
] | python | train |
explosion/thinc | thinc/extra/_vendorized/keras_generic_utils.py | https://github.com/explosion/thinc/blob/90129be5f0d6c665344245a7c37dbe1b8afceea2/thinc/extra/_vendorized/keras_generic_utils.py#L53-L63 | def func_load(code, defaults=None, closure=None, globs=None):
'''Deserialize user defined function.'''
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
code = marshal.loads(code.encode('raw_unicode_escape'))
if globs is None:
globs = globals()
return python_types.FunctionType(code, globs,
name=code.co_name,
argdefs=defaults,
closure=closure) | [
"def",
"func_load",
"(",
"code",
",",
"defaults",
"=",
"None",
",",
"closure",
"=",
"None",
",",
"globs",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"code",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"# unpack previous dump",
"code",
",",
"d... | Deserialize user defined function. | [
"Deserialize",
"user",
"defined",
"function",
"."
] | python | train |
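A self-contained round trip of the marshal-based approach used by `func_load` might look like the following sketch. It is illustrative only: the payload is tied to the running Python version, and `make_adder` is a throwaway example function.

```python
import marshal
import types

def make_adder(n):
    def add(x, offset=n):
        return x + offset
    return add

original = make_adder(5)

# Serialize: marshal the code object; keep the defaults alongside it.
payload = (marshal.dumps(original.__code__), original.__defaults__)

# Deserialize: rebuild a function from the code object and restore its defaults.
code = marshal.loads(payload[0])
restored = types.FunctionType(code, globals(), name=code.co_name, argdefs=payload[1])

print(original(2), restored(2))   # 7 7
```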
cmbruns/pyopenvr | src/openvr/__init__.py | https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L4631-L4639 | def getOverlayErrorNameFromEnum(self, error):
"""
returns a string that corresponds with the specified overlay error. The string will be the name
of the error enum value for all valid error codes
"""
fn = self.function_table.getOverlayErrorNameFromEnum
result = fn(error)
return result | [
"def",
"getOverlayErrorNameFromEnum",
"(",
"self",
",",
"error",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"getOverlayErrorNameFromEnum",
"result",
"=",
"fn",
"(",
"error",
")",
"return",
"result"
] | returns a string that corresponds with the specified overlay error. The string will be the name
of the error enum value for all valid error codes | [
"returns",
"a",
"string",
"that",
"corresponds",
"with",
"the",
"specified",
"overlay",
"error",
".",
"The",
"string",
"will",
"be",
"the",
"name",
"of",
"the",
"error",
"enum",
"value",
"for",
"all",
"valid",
"error",
"codes"
] | python | train |
aegirhall/console-menu | consolemenu/items/submenu_item.py | https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/items/submenu_item.py#L42-L48 | def clean_up(self):
"""
This class overrides this method
"""
self.submenu.join()
self.menu.clear_screen()
self.menu.resume() | [
"def",
"clean_up",
"(",
"self",
")",
":",
"self",
".",
"submenu",
".",
"join",
"(",
")",
"self",
".",
"menu",
".",
"clear_screen",
"(",
")",
"self",
".",
"menu",
".",
"resume",
"(",
")"
] | This class overrides this method | [
"This",
"class",
"overrides",
"this",
"method"
] | python | train |
saltstack/salt | salt/modules/virt.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L516-L532 | def _get_migrate_command():
'''
Returns the command shared by the different migration types
'''
tunnel = __salt__['config.option']('virt.tunnel')
if tunnel:
salt.utils.versions.warn_until(
'Sodium',
'\'virt.tunnel\' has been deprecated in favor of '
'\'virt:tunnel\'. \'virt.tunnel\' will stop '
'being used in {version}.')
else:
tunnel = __salt__['config.get']('virt:tunnel')
if tunnel:
return ('virsh migrate --p2p --tunnelled --live --persistent '
'--undefinesource ')
return 'virsh migrate --live --persistent --undefinesource ' | [
"def",
"_get_migrate_command",
"(",
")",
":",
"tunnel",
"=",
"__salt__",
"[",
"'config.option'",
"]",
"(",
"'virt.tunnel'",
")",
"if",
"tunnel",
":",
"salt",
".",
"utils",
".",
"versions",
".",
"warn_until",
"(",
"'Sodium'",
",",
"'\\'virt.tunnel\\' has been dep... | Returns the command shared by the different migration types | [
"Returns",
"the",
"command",
"shared",
"by",
"the",
"different",
"migration",
"types"
] | python | train |
ozak/georasters | georasters/georasters.py | https://github.com/ozak/georasters/blob/0612bd91bb2a2cb2f1d59ba89c1ff131dae27d70/georasters/georasters.py#L137-L155 | def create_geotiff(name, Array, driver, ndv, xsize, ysize, geot, projection, datatype, band=1):
'''
Creates new geotiff from array
'''
if isinstance(datatype, np.int) == False:
if datatype.startswith('gdal.GDT_') == False:
datatype = eval('gdal.GDT_'+datatype)
newfilename = name+'.tif'
# Set nans to the original No Data Value
Array[np.isnan(Array)] = ndv
# Set up the dataset
DataSet = driver.Create(newfilename, xsize, ysize, 1, datatype)
# the '1' is for band 1.
DataSet.SetGeoTransform(geot)
DataSet.SetProjection(projection.ExportToWkt())
# Write the array
DataSet.GetRasterBand(band).WriteArray(Array)
DataSet.GetRasterBand(band).SetNoDataValue(ndv)
return newfilename | [
"def",
"create_geotiff",
"(",
"name",
",",
"Array",
",",
"driver",
",",
"ndv",
",",
"xsize",
",",
"ysize",
",",
"geot",
",",
"projection",
",",
"datatype",
",",
"band",
"=",
"1",
")",
":",
"if",
"isinstance",
"(",
"datatype",
",",
"np",
".",
"int",
... | Creates new geotiff from array | [
"Creates",
"new",
"geotiff",
"from",
"array"
] | python | train |
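The `create_geotiff` record follows the usual GDAL sequence (get driver, `Create`, set geotransform and projection, write the band). A hedged standalone sketch is below; the array shape, geotransform, EPSG code, and output filename are arbitrary illustration values.

```python
import numpy as np
from osgeo import gdal, osr

arr = np.random.rand(100, 100).astype(np.float32)     # ysize x xsize
geot = (444720.0, 30.0, 0.0, 3751320.0, 0.0, -30.0)   # arbitrary geotransform

srs = osr.SpatialReference()
srs.ImportFromEPSG(32611)                              # arbitrary UTM zone

driver = gdal.GetDriverByName('GTiff')
ds = driver.Create('example.tif', arr.shape[1], arr.shape[0], 1, gdal.GDT_Float32)
ds.SetGeoTransform(geot)
ds.SetProjection(srs.ExportToWkt())
band = ds.GetRasterBand(1)
band.WriteArray(arr)
band.SetNoDataValue(-9999.0)
ds.FlushCache()
ds = None                                              # close the dataset
```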
django-json-api/django-rest-framework-json-api | rest_framework_json_api/parsers.py | https://github.com/django-json-api/django-rest-framework-json-api/blob/de7021f9e011615ce8b65d0cb38227c6c12721b6/rest_framework_json_api/parsers.py#L85-L153 | def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the incoming bytestream as JSON and returns the resulting data
"""
result = super(JSONParser, self).parse(
stream, media_type=media_type, parser_context=parser_context
)
if not isinstance(result, dict) or 'data' not in result:
raise ParseError('Received document does not contain primary data')
data = result.get('data')
view = parser_context['view']
from rest_framework_json_api.views import RelationshipView
if isinstance(view, RelationshipView):
# We skip parsing the object as JSONAPI Resource Identifier Object and not a regular
# Resource Object
if isinstance(data, list):
for resource_identifier_object in data:
if not (
resource_identifier_object.get('id') and
resource_identifier_object.get('type')
):
raise ParseError(
'Received data contains one or more malformed JSONAPI '
'Resource Identifier Object(s)'
)
elif not (data.get('id') and data.get('type')):
raise ParseError('Received data is not a valid JSONAPI Resource Identifier Object')
return data
request = parser_context.get('request')
# Check for inconsistencies
if request.method in ('PUT', 'POST', 'PATCH'):
resource_name = utils.get_resource_name(
parser_context, expand_polymorphic_types=True)
if isinstance(resource_name, six.string_types):
if data.get('type') != resource_name:
raise exceptions.Conflict(
"The resource object's type ({data_type}) is not the type that "
"constitute the collection represented by the endpoint "
"({resource_type}).".format(
data_type=data.get('type'),
resource_type=resource_name))
else:
if data.get('type') not in resource_name:
raise exceptions.Conflict(
"The resource object's type ({data_type}) is not the type that "
"constitute the collection represented by the endpoint "
"(one of [{resource_types}]).".format(
data_type=data.get('type'),
resource_types=", ".join(resource_name)))
if not data.get('id') and request.method in ('PATCH', 'PUT'):
raise ParseError("The resource identifier object must contain an 'id' member")
# Construct the return data
serializer_class = getattr(view, 'serializer_class', None)
parsed_data = {'id': data.get('id')} if 'id' in data else {}
# `type` field needs to be allowed in none polymorphic serializers
if serializer_class is not None:
if issubclass(serializer_class, serializers.PolymorphicModelSerializer):
parsed_data['type'] = data.get('type')
parsed_data.update(self.parse_attributes(data))
parsed_data.update(self.parse_relationships(data))
parsed_data.update(self.parse_metadata(result))
return parsed_data | [
"def",
"parse",
"(",
"self",
",",
"stream",
",",
"media_type",
"=",
"None",
",",
"parser_context",
"=",
"None",
")",
":",
"result",
"=",
"super",
"(",
"JSONParser",
",",
"self",
")",
".",
"parse",
"(",
"stream",
",",
"media_type",
"=",
"media_type",
",... | Parses the incoming bytestream as JSON and returns the resulting data | [
"Parses",
"the",
"incoming",
"bytestream",
"as",
"JSON",
"and",
"returns",
"the",
"resulting",
"data"
] | python | train |
jpadilla/django-dotenv | dotenv.py | https://github.com/jpadilla/django-dotenv/blob/16489ebda8716071bdff6b6365aa64b7fa420f17/dotenv.py#L37-L65 | def read_dotenv(dotenv=None, override=False):
"""
Read a .env file into os.environ.
If not given a path to a dotenv path, does filthy magic stack backtracking
to find manage.py and then find the dotenv.
If tests rely on .env files, setting the overwrite flag to True is a safe
way to ensure tests run consistently across all environments.
:param override: True if values in .env should override system variables.
"""
if dotenv is None:
frame_filename = sys._getframe().f_back.f_code.co_filename
dotenv = os.path.join(os.path.dirname(frame_filename), '.env')
if os.path.isdir(dotenv) and os.path.isfile(os.path.join(dotenv, '.env')):
dotenv = os.path.join(dotenv, '.env')
if os.path.exists(dotenv):
with open(dotenv) as f:
for k, v in parse_dotenv(f.read()).items():
if override:
os.environ[k] = v
else:
os.environ.setdefault(k, v)
else:
warnings.warn("Not reading {0} - it doesn't exist.".format(dotenv),
stacklevel=2) | [
"def",
"read_dotenv",
"(",
"dotenv",
"=",
"None",
",",
"override",
"=",
"False",
")",
":",
"if",
"dotenv",
"is",
"None",
":",
"frame_filename",
"=",
"sys",
".",
"_getframe",
"(",
")",
".",
"f_back",
".",
"f_code",
".",
"co_filename",
"dotenv",
"=",
"os... | Read a .env file into os.environ.
If not given a path to a dotenv path, does filthy magic stack backtracking
to find manage.py and then find the dotenv.
If tests rely on .env files, setting the overwrite flag to True is a safe
way to ensure tests run consistently across all environments.
:param override: True if values in .env should override system variables. | [
"Read",
"a",
".",
"env",
"file",
"into",
"os",
".",
"environ",
"."
] | python | train |
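The effect of `read_dotenv` can be approximated with a tiny sketch: parse KEY=VALUE lines and apply them with `setdefault` so existing environment variables win unless `override` is set. The parsing here is deliberately naive compared with the real `parse_dotenv` (no quoting or export handling).

```python
import os

def apply_dotenv(text, override=False):
    """Very small .env reader: ignores comments and blank lines, no quoting rules."""
    for line in text.splitlines():
        line = line.strip()
        if not line or line.startswith('#') or '=' not in line:
            continue
        key, _, value = line.partition('=')
        if override:
            os.environ[key.strip()] = value.strip()
        else:
            os.environ.setdefault(key.strip(), value.strip())

apply_dotenv("# local settings\nDEBUG=1\nDATABASE_URL=sqlite:///db.sqlite3\n")
print(os.environ['DEBUG'])
```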
DAI-Lab/Copulas | copulas/multivariate/vine.py | https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/multivariate/vine.py#L127-L137 | def get_likelihood(self, uni_matrix):
"""Compute likelihood of the vine."""
num_tree = len(self.trees)
values = np.empty([1, num_tree])
for i in range(num_tree):
value, new_uni_matrix = self.trees[i].get_likelihood(uni_matrix)
uni_matrix = new_uni_matrix
values[0, i] = value
return np.sum(values) | [
"def",
"get_likelihood",
"(",
"self",
",",
"uni_matrix",
")",
":",
"num_tree",
"=",
"len",
"(",
"self",
".",
"trees",
")",
"values",
"=",
"np",
".",
"empty",
"(",
"[",
"1",
",",
"num_tree",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"num_tree",
")",... | Compute likelihood of the vine. | [
"Compute",
"likelihood",
"of",
"the",
"vine",
"."
] | python | train |
lucaslamounier/USGSDownload | usgsdownload/usgs.py | https://github.com/lucaslamounier/USGSDownload/blob/0969483ea9f9648aa17b099f36d2e1010488b2a4/usgsdownload/usgs.py#L193-L215 | def connect_earthexplorer(self):
""" Connection to Earth explorer without proxy """
logger.info("Establishing connection to Earthexplorer")
print("\n Establishing connection to Earthexplorer")
try:
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor())
urllib.request.install_opener(opener)
params = urllib.parse.urlencode(dict(username=self.user, password=self.password))
params = params.encode('utf-8')
f = opener.open("https://ers.cr.usgs.gov/login", params)
data = f.read().decode('utf-8')
f.close()
if data.find(
'You must sign in as a registered user to download data or place orders for USGS EROS products') > 0:
print("\n Authentification failed")
logger.error("Authentification failed")
raise AutenticationUSGSFailed('Authentification USGS failed')
print('User %s connected with USGS' % self.user)
logger.debug('User %s connected with USGS' % self.user)
return
except Exception as e:
print('\nError when trying to connect USGS: %s' % e)
raise logger.error('Error when trying to connect USGS: %s' % e) | [
"def",
"connect_earthexplorer",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"Establishing connection to Earthexplorer\"",
")",
"print",
"(",
"\"\\n Establishing connection to Earthexplorer\"",
")",
"try",
":",
"opener",
"=",
"urllib",
".",
"request",
".",
"bu... | Connection to Earth explorer without proxy | [
"Connection",
"to",
"Earth",
"explorer",
"without",
"proxy"
] | python | test |
apache/airflow | airflow/contrib/hooks/bigquery_hook.py | https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1341-L1353 | def get_schema(self, dataset_id, table_id):
"""
Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute(num_retries=self.num_retries)
return tables_resource['schema'] | [
"def",
"get_schema",
"(",
"self",
",",
"dataset_id",
",",
"table_id",
")",
":",
"tables_resource",
"=",
"self",
".",
"service",
".",
"tables",
"(",
")",
".",
"get",
"(",
"projectId",
"=",
"self",
".",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",... | Get the schema for a given datset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema | [
"Get",
"the",
"schema",
"for",
"a",
"given",
"datset",
".",
"table",
".",
"see",
"https",
":",
"//",
"cloud",
".",
"google",
".",
"com",
"/",
"bigquery",
"/",
"docs",
"/",
"reference",
"/",
"v2",
"/",
"tables#resource"
] | python | test |
ctuning/ck | ck/kernel.py | https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L6100-L6202 | def update(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
(data_uid) - data UID (if uoa is an alias)
(data_name) - user friendly data name
(dict_from_cid) -
(dict_from_repo_uoa) -
(dict_from_module_uoa) -
(dict_from_data_uoa) - if present, pre-load dict
from this (module_uoa):data_uoa (analog of copy)
(dict) - meta description to record
(substitute) - if 'yes', substitute dictionaries, otherwise merge!
(tags) - list or comma separated list of tags to add to entry
(info) - entry info to record - normally, should not use it!
(updates) - entry updates info to record - normally, should not use it!
(ignore_update) - if 'yes', do not add info about update
(ask) - if 'yes', ask questions, otherwise silent
(unlock_uid) - unlock UID if was previously locked
(sort_keys) - if 'yes', sort keys
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output from the 'add' function (the last one in case of wildcards)
}
"""
# Check if global writing is allowed
r=check_writing({})
if r['return']>0: return r
# Try to load entry, if doesn't exist, add entry
dd={}
o=i.get('out','')
i['out']=''
# Check wildcards
lst=[]
a=i.get('repo_uoa','')
m=i.get('module_uoa','')
duoa=i.get('data_uoa','')
if duoa=='': duoa='*'
single_not_found=False # If no wild cards and entry not found, then add
if a.find('*')>=0 or a.find('?')>=0 or m.find('*')>=0 or m.find('?')>=0 or duoa.find('*')>=0 or duoa.find('?')>=0:
r=list_data({'repo_uoa':a, 'module_uoa':m, 'data_uoa':duoa})
if r['return']>0: return r
lst=r['lst']
else:
# Find path to data
r=find_path_to_data(i)
if r['return']>0:
single_not_found=True
else:
p=r['path']
ruoa=r.get('repo_uoa','')
ruid=r.get('repo_uid','')
muoa=r.get('module_uoa','')
muid=r.get('module_uid','')
duid=r.get('data_uid','')
duoa=r.get('data_alias','')
if duoa=='': duoa=duid
lst.append({'path':p, 'repo_uoa':ruoa, 'repo_uid':ruid,
'module_uoa':muoa, 'module_uid':muid,
'data_uoa':duoa, 'data_uid': duid})
# Update entries
i['out']=o
r={'return':0}
if single_not_found:
r=add(i)
else:
i['update']='yes'
for q in lst:
ii={}
ii.update(i)
ii.update(q)
r=add(ii)
if r['return']>0: return r
return r | [
"def",
"update",
"(",
"i",
")",
":",
"# Check if global writing is allowed",
"r",
"=",
"check_writing",
"(",
"{",
"}",
")",
"if",
"r",
"[",
"'return'",
"]",
">",
"0",
":",
"return",
"r",
"# Try to load entry, if doesn't exist, add entry",
"dd",
"=",
"{",
"}",
... | Input: {
(repo_uoa) - repo UOA
module_uoa - module UOA
data_uoa - data UOA
(data_uid) - data UID (if uoa is an alias)
(data_name) - user friendly data name
(dict_from_cid) -
(dict_from_repo_uoa) -
(dict_from_module_uoa) -
(dict_from_data_uoa) - if present, pre-load dict
from this (module_uoa):data_uoa (analog of copy)
(dict) - meta description to record
(substitute) - if 'yes', substitute dictionaries, otherwise merge!
(tags) - list or comma separated list of tags to add to entry
(info) - entry info to record - normally, should not use it!
(updates) - entry updates info to record - normally, should not use it!
(ignore_update) - if 'yes', do not add info about update
(ask) - if 'yes', ask questions, otherwise silent
(unlock_uid) - unlock UID if was previously locked
(sort_keys) - if 'yes', sort keys
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output from the 'add' function (the last one in case of wildcards)
} | [
"Input",
":",
"{",
"(",
"repo_uoa",
")",
"-",
"repo",
"UOA",
"module_uoa",
"-",
"module",
"UOA",
"data_uoa",
"-",
"data",
"UOA",
"(",
"data_uid",
")",
"-",
"data",
"UID",
"(",
"if",
"uoa",
"is",
"an",
"alias",
")",
"(",
"data_name",
")",
"-",
"user... | python | train |
planetlabs/es_fluent | es_fluent/filters/geometry.py | https://github.com/planetlabs/es_fluent/blob/74f8db3a1bf9aa1d54512cf2d5e0ec58ee2f4b1c/es_fluent/filters/geometry.py#L84-L99 | def to_query(self):
"""
Returns a json-serializable representation.
"""
return {
"geo_shape": {
self.name: {
"indexed_shape": {
"index": self.index_name,
"type": self.doc_type,
"id": self.shape_id,
"path": self.path
}
}
}
} | [
"def",
"to_query",
"(",
"self",
")",
":",
"return",
"{",
"\"geo_shape\"",
":",
"{",
"self",
".",
"name",
":",
"{",
"\"indexed_shape\"",
":",
"{",
"\"index\"",
":",
"self",
".",
"index_name",
",",
"\"type\"",
":",
"self",
".",
"doc_type",
",",
"\"id\"",
... | Returns a json-serializable representation. | [
"Returns",
"a",
"json",
"-",
"serializable",
"representation",
"."
] | python | train |
callowayproject/Calloway | calloway/apps/django_ext/views.py | https://github.com/callowayproject/Calloway/blob/d22e98d41fbd298ab6393ba7bd84a75528be9f81/calloway/apps/django_ext/views.py#L6-L31 | def custom_server_error(request, template_name='500.html', admin_template_name='500A.html'):
"""
500 error handler. Displays a full traceback for superusers and the first line of the
traceback for staff members.
Templates: `500.html` or `500A.html` (admin)
Context: trace
Holds the traceback information for debugging.
"""
trace = None
if request.user.is_authenticated() and (request.user.is_staff or request.user.is_superuser):
try:
import traceback, sys
trace = traceback.format_exception(*(sys.exc_info()))
if not request.user.is_superuser and trace:
trace = trace[-1:]
trace = '\n'.join(trace)
except:
pass
# if url is part of the admin site, use the 500A.html template
if request.path.startswith('/%s' % admin.site.name):
template_name = admin_template_name
t = loader.get_template(template_name) # You need to create a 500.html and 500A.html template.
return http.HttpResponseServerError(t.render(Context({'trace': trace}))) | [
"def",
"custom_server_error",
"(",
"request",
",",
"template_name",
"=",
"'500.html'",
",",
"admin_template_name",
"=",
"'500A.html'",
")",
":",
"trace",
"=",
"None",
"if",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
"and",
"(",
"request",
".",
... | 500 error handler. Displays a full traceback for superusers and the first line of the
traceback for staff members.
Templates: `500.html` or `500A.html` (admin)
Context: trace
Holds the traceback information for debugging. | [
"500",
"error",
"handler",
".",
"Displays",
"a",
"full",
"trackback",
"for",
"superusers",
"and",
"the",
"first",
"line",
"of",
"the",
"traceback",
"for",
"staff",
"members",
"."
] | python | train |