repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
rwl/godot | godot/ui/graph_view.py | https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/ui/graph_view.py#L58-L67 | def node_factory(**row_factory_kw):
""" Give new nodes a unique ID. """
if "__table_editor__" in row_factory_kw:
graph = row_factory_kw["__table_editor__"].object
ID = make_unique_name("n", [node.ID for node in graph.nodes])
del row_factory_kw["__table_editor__"]
return godot.node.Node(ID)
else:
return godot.node.Node(uuid.uuid4().hex[:6]) | [
"def",
"node_factory",
"(",
"*",
"*",
"row_factory_kw",
")",
":",
"if",
"\"__table_editor__\"",
"in",
"row_factory_kw",
":",
"graph",
"=",
"row_factory_kw",
"[",
"\"__table_editor__\"",
"]",
".",
"object",
"ID",
"=",
"make_unique_name",
"(",
"\"n\"",
",",
"[",
... | Give new nodes a unique ID. | [
"Give",
"new",
"nodes",
"a",
"unique",
"ID",
"."
] | python | test |
materials-data-facility/toolbox | mdf_toolbox/toolbox.py | https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/toolbox.py#L471-L539 | def format_gmeta(data, acl=None, identifier=None):
"""Format input into GMeta format, suitable for ingesting into Globus Search.
Formats a dictionary into a GMetaEntry.
Formats a list of GMetaEntry into a GMetaList inside a GMetaIngest.
**Example usage**::
glist = []
for document in all_my_documents:
gmeta_entry = format_gmeta(document, ["public"], document["id"])
glist.append(gmeta_entry)
ingest_ready_document = format_gmeta(glist)
Arguments:
data (dict or list): The data to be formatted.
If data is a dict, arguments ``acl`` and ``identifier`` are required.
If data is a list, it must consist of GMetaEntry documents.
acl (list of str): The list of Globus UUIDs allowed to view the document,
or the special value ``["public"]`` to allow anyone access.
Required if data is a dict. Ignored if data is a list.
Will be formatted into URNs if required.
identifier (str): A unique identifier for this document. If this value is not unique,
ingests into Globus Search may merge entries.
Required is data is a dict. Ignored if data is a list.
Returns:
dict (if ``data`` is ``dict``): The data as a GMetaEntry.
dict (if ``data`` is ``list``): The data as a GMetaIngest.
"""
if isinstance(data, dict):
if acl is None or identifier is None:
raise ValueError("acl and identifier are required when formatting a GMetaEntry.")
if isinstance(acl, str):
acl = [acl]
# "Correctly" format ACL entries into URNs
prefixed_acl = []
for uuid in acl:
# If entry is not special value "public" and is not a URN, make URN
# It is not known what the type of UUID is, so use both
# This solution is known to be hacky
if uuid != "public" and not uuid.lower().startswith("urn:"):
prefixed_acl.append("urn:globus:auth:identity:"+uuid.lower())
prefixed_acl.append("urn:globus:groups:id:"+uuid.lower())
# Otherwise, no modification
else:
prefixed_acl.append(uuid)
return {
"@datatype": "GMetaEntry",
"@version": "2016-11-09",
"subject": identifier,
"visible_to": prefixed_acl,
"content": data
}
elif isinstance(data, list):
return {
"@datatype": "GIngest",
"@version": "2016-11-09",
"ingest_type": "GMetaList",
"ingest_data": {
"@datatype": "GMetaList",
"@version": "2016-11-09",
"gmeta": data
}
}
else:
raise TypeError("Cannot format '" + str(type(data)) + "' into GMeta.") | [
"def",
"format_gmeta",
"(",
"data",
",",
"acl",
"=",
"None",
",",
"identifier",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"if",
"acl",
"is",
"None",
"or",
"identifier",
"is",
"None",
":",
"raise",
"ValueError",
"(... | Format input into GMeta format, suitable for ingesting into Globus Search.
Formats a dictionary into a GMetaEntry.
Formats a list of GMetaEntry into a GMetaList inside a GMetaIngest.
**Example usage**::
glist = []
for document in all_my_documents:
gmeta_entry = format_gmeta(document, ["public"], document["id"])
glist.append(gmeta_entry)
ingest_ready_document = format_gmeta(glist)
Arguments:
data (dict or list): The data to be formatted.
If data is a dict, arguments ``acl`` and ``identifier`` are required.
If data is a list, it must consist of GMetaEntry documents.
acl (list of str): The list of Globus UUIDs allowed to view the document,
or the special value ``["public"]`` to allow anyone access.
Required if data is a dict. Ignored if data is a list.
Will be formatted into URNs if required.
identifier (str): A unique identifier for this document. If this value is not unique,
ingests into Globus Search may merge entries.
Required is data is a dict. Ignored if data is a list.
Returns:
dict (if ``data`` is ``dict``): The data as a GMetaEntry.
dict (if ``data`` is ``list``): The data as a GMetaIngest. | [
"Format",
"input",
"into",
"GMeta",
"format",
"suitable",
"for",
"ingesting",
"into",
"Globus",
"Search",
".",
"Formats",
"a",
"dictionary",
"into",
"a",
"GMetaEntry",
".",
"Formats",
"a",
"list",
"of",
"GMetaEntry",
"into",
"a",
"GMetaList",
"inside",
"a",
... | python | train |
econ-ark/HARK | HARK/ConsumptionSaving/ConsIndShockModel.py | https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsIndShockModel.py#L912-L936 | def makeBasicSolution(self,EndOfPrdvP,aNrm,interpolator):
'''
Given end of period assets and end of period marginal value, construct
the basic solution for this period.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aNrm : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'''
cNrm,mNrm = self.getPointsForInterpolation(EndOfPrdvP,aNrm)
solution_now = self.usePointsForInterpolation(cNrm,mNrm,interpolator)
return solution_now | [
"def",
"makeBasicSolution",
"(",
"self",
",",
"EndOfPrdvP",
",",
"aNrm",
",",
"interpolator",
")",
":",
"cNrm",
",",
"mNrm",
"=",
"self",
".",
"getPointsForInterpolation",
"(",
"EndOfPrdvP",
",",
"aNrm",
")",
"solution_now",
"=",
"self",
".",
"usePointsForInte... | Given end of period assets and end of period marginal value, construct
the basic solution for this period.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aNrm : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m. | [
"Given",
"end",
"of",
"period",
"assets",
"and",
"end",
"of",
"period",
"marginal",
"value",
"construct",
"the",
"basic",
"solution",
"for",
"this",
"period",
"."
] | python | train |
dj-stripe/dj-stripe | djstripe/models/core.py | https://github.com/dj-stripe/dj-stripe/blob/a5308a3808cd6e2baba49482f7a699f3a8992518/djstripe/models/core.py#L323-L333 | def capture(self):
"""
Capture the payment of an existing, uncaptured, charge.
This is the second half of the two-step payment flow, where first you
created a charge with the capture option set to False.
See https://stripe.com/docs/api#capture_charge
"""
captured_charge = self.api_retrieve().capture()
return self.__class__.sync_from_stripe_data(captured_charge) | [
"def",
"capture",
"(",
"self",
")",
":",
"captured_charge",
"=",
"self",
".",
"api_retrieve",
"(",
")",
".",
"capture",
"(",
")",
"return",
"self",
".",
"__class__",
".",
"sync_from_stripe_data",
"(",
"captured_charge",
")"
] | Capture the payment of an existing, uncaptured, charge.
This is the second half of the two-step payment flow, where first you
created a charge with the capture option set to False.
See https://stripe.com/docs/api#capture_charge | [
"Capture",
"the",
"payment",
"of",
"an",
"existing",
"uncaptured",
"charge",
".",
"This",
"is",
"the",
"second",
"half",
"of",
"the",
"two",
"-",
"step",
"payment",
"flow",
"where",
"first",
"you",
"created",
"a",
"charge",
"with",
"the",
"capture",
"optio... | python | train |
alfred82santa/dirty-models | dirty_models/models.py | https://github.com/alfred82santa/dirty-models/blob/354becdb751b21f673515eae928c256c7e923c50/dirty_models/models.py#L433-L438 | def export_original_data(self):
"""
Get the original data
"""
return {key: self.get_original_field_value(key) for key in self.__original_data__.keys()} | [
"def",
"export_original_data",
"(",
"self",
")",
":",
"return",
"{",
"key",
":",
"self",
".",
"get_original_field_value",
"(",
"key",
")",
"for",
"key",
"in",
"self",
".",
"__original_data__",
".",
"keys",
"(",
")",
"}"
] | Get the original data | [
"Get",
"the",
"original",
"data"
] | python | train |
square/pylink | setup.py | https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/setup.py#L52-L68 | def finalize_options(self):
"""Populate the attributes.
Args:
self (CleanCommand): the ``CleanCommand`` instance
Returns:
``None``
"""
self.cwd = os.path.abspath(os.path.dirname(__file__))
self.build_dirs = [
os.path.join(self.cwd, 'build'),
os.path.join(self.cwd, 'htmlcov'),
os.path.join(self.cwd, 'dist'),
os.path.join(self.cwd, 'pylink_square.egg-info')
]
self.build_artifacts = ['.pyc', '.o', '.elf', '.bin'] | [
"def",
"finalize_options",
"(",
"self",
")",
":",
"self",
".",
"cwd",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"self",
".",
"build_dirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
... | Populate the attributes.
Args:
self (CleanCommand): the ``CleanCommand`` instance
Returns:
``None`` | [
"Populate",
"the",
"attributes",
"."
] | python | train |
peerplays-network/python-peerplays | peerplays/peerplays.py | https://github.com/peerplays-network/python-peerplays/blob/188f04238e7e21d5f73e9b01099eea44289ef6b7/peerplays/peerplays.py#L847-L872 | def sport_update(self, sport_id, names=[], account=None, **kwargs):
""" Update a sport. This needs to be **proposed**.
:param str sport_id: The id of the sport to update
:param list names: Internationalized names, e.g. ``[['de', 'Foo'],
['en', 'bar']]``
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
"""
assert isinstance(names, list)
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account)
sport = Sport(sport_id)
op = operations.Sport_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"sport_id": sport["id"],
"new_name": names,
"prefix": self.prefix,
}
)
return self.finalizeOp(op, account["name"], "active", **kwargs) | [
"def",
"sport_update",
"(",
"self",
",",
"sport_id",
",",
"names",
"=",
"[",
"]",
",",
"account",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"isinstance",
"(",
"names",
",",
"list",
")",
"if",
"not",
"account",
":",
"if",
"\"default_ac... | Update a sport. This needs to be **proposed**.
:param str sport_id: The id of the sport to update
:param list names: Internationalized names, e.g. ``[['de', 'Foo'],
['en', 'bar']]``
:param str account: (optional) the account to allow access
to (defaults to ``default_account``) | [
"Update",
"a",
"sport",
".",
"This",
"needs",
"to",
"be",
"**",
"proposed",
"**",
"."
] | python | train |
TUT-ARG/sed_eval | sed_eval/audio_tag.py | https://github.com/TUT-ARG/sed_eval/blob/0cb1b6d11ceec4fe500cc9b31079c9d8666ed6eb/sed_eval/audio_tag.py#L659-L672 | def result_report_parameters(self):
"""Report metric parameters
Returns
-------
str
result report in string format
"""
output = self.ui.data(field='Tags', value=len(self.tag_label_list)) + '\n'
output += self.ui.data(field='Evaluated units', value=int(self.overall['Nref'])) + '\n'
return output | [
"def",
"result_report_parameters",
"(",
"self",
")",
":",
"output",
"=",
"self",
".",
"ui",
".",
"data",
"(",
"field",
"=",
"'Tags'",
",",
"value",
"=",
"len",
"(",
"self",
".",
"tag_label_list",
")",
")",
"+",
"'\\n'",
"output",
"+=",
"self",
".",
"... | Report metric parameters
Returns
-------
str
result report in string format | [
"Report",
"metric",
"parameters"
] | python | train |
EVEprosper/ProsperCommon | prosper/common/prosper_logging.py | https://github.com/EVEprosper/ProsperCommon/blob/bcada3b25420099e1f204db8d55eb268e7b4dc27/prosper/common/prosper_logging.py#L472-L486 | def webhook(self, webhook_url):
"""Load object with webhook_url
Args:
webhook_url (str): full webhook url given by Discord 'create webhook' func
"""
if not webhook_url:
raise Exception('Url can not be None')
matcher = re.match(self.__webhook_url_format, webhook_url)
if not matcher:
raise Exception('Invalid url format, looking for: ' + self.__webhook_url_format)
self.api_keys(int(matcher.group(1)), matcher.group(2)) | [
"def",
"webhook",
"(",
"self",
",",
"webhook_url",
")",
":",
"if",
"not",
"webhook_url",
":",
"raise",
"Exception",
"(",
"'Url can not be None'",
")",
"matcher",
"=",
"re",
".",
"match",
"(",
"self",
".",
"__webhook_url_format",
",",
"webhook_url",
")",
"if"... | Load object with webhook_url
Args:
webhook_url (str): full webhook url given by Discord 'create webhook' func | [
"Load",
"object",
"with",
"webhook_url"
] | python | train |
saltstack/salt | salt/modules/pw_group.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pw_group.py#L145-L162 | def adduser(name, username):
'''
Add a user in the group.
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo bar
Verifies if a valid username 'bar' as a member of an existing group 'foo',
if not then adds it.
'''
# Note: pw exits with code 65 if group is unknown
retcode = __salt__['cmd.retcode']('pw groupmod {0} -m {1}'.format(
name, username), python_shell=False)
return not retcode | [
"def",
"adduser",
"(",
"name",
",",
"username",
")",
":",
"# Note: pw exits with code 65 if group is unknown",
"retcode",
"=",
"__salt__",
"[",
"'cmd.retcode'",
"]",
"(",
"'pw groupmod {0} -m {1}'",
".",
"format",
"(",
"name",
",",
"username",
")",
",",
"python_shel... | Add a user in the group.
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo bar
Verifies if a valid username 'bar' as a member of an existing group 'foo',
if not then adds it. | [
"Add",
"a",
"user",
"in",
"the",
"group",
"."
] | python | train |
Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/work/work_client.py | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work/work_client.py#L856-L884 | def delete_team_iteration(self, team_context, id):
"""DeleteTeamIteration.
Delete a team's iteration by iterationId
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str id: ID of the iteration
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
self._send(http_method='DELETE',
location_id='c9175577-28a1-4b06-9197-8636af9f64ad',
version='5.0',
route_values=route_values) | [
"def",
"delete_team_iteration",
"(",
"self",
",",
"team_context",
",",
"id",
")",
":",
"project",
"=",
"None",
"team",
"=",
"None",
"if",
"team_context",
"is",
"not",
"None",
":",
"if",
"team_context",
".",
"project_id",
":",
"project",
"=",
"team_context",
... | DeleteTeamIteration.
Delete a team's iteration by iterationId
:param :class:`<TeamContext> <azure.devops.v5_0.work.models.TeamContext>` team_context: The team context for the operation
:param str id: ID of the iteration | [
"DeleteTeamIteration",
".",
"Delete",
"a",
"team",
"s",
"iteration",
"by",
"iterationId",
":",
"param",
":",
"class",
":",
"<TeamContext",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"work",
".",
"models",
".",
"TeamContext",
">",
"team_context",
":",
... | python | train |
apache/incubator-heron | heron/tools/common/src/python/utils/config.py | https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/common/src/python/utils/config.py#L379-L387 | def defaults_cluster_role_env(cluster_role_env):
"""
if role is not provided, supply userid
if environ is not provided, supply 'default'
"""
if len(cluster_role_env[1]) == 0 and len(cluster_role_env[2]) == 0:
return (cluster_role_env[0], getpass.getuser(), ENVIRON)
return (cluster_role_env[0], cluster_role_env[1], cluster_role_env[2]) | [
"def",
"defaults_cluster_role_env",
"(",
"cluster_role_env",
")",
":",
"if",
"len",
"(",
"cluster_role_env",
"[",
"1",
"]",
")",
"==",
"0",
"and",
"len",
"(",
"cluster_role_env",
"[",
"2",
"]",
")",
"==",
"0",
":",
"return",
"(",
"cluster_role_env",
"[",
... | if role is not provided, supply userid
if environ is not provided, supply 'default' | [
"if",
"role",
"is",
"not",
"provided",
"supply",
"userid",
"if",
"environ",
"is",
"not",
"provided",
"supply",
"default"
] | python | valid |
spyder-ide/spyder-kernels | spyder_kernels/utils/nsview.py | https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L217-L231 | def get_color_name(value):
"""Return color name depending on value type"""
if not is_known_type(value):
return CUSTOM_TYPE_COLOR
for typ, name in list(COLORS.items()):
if isinstance(value, typ):
return name
else:
np_dtype = get_numpy_dtype(value)
if np_dtype is None or not hasattr(value, 'size'):
return UNSUPPORTED_COLOR
elif value.size == 1:
return SCALAR_COLOR
else:
return ARRAY_COLOR | [
"def",
"get_color_name",
"(",
"value",
")",
":",
"if",
"not",
"is_known_type",
"(",
"value",
")",
":",
"return",
"CUSTOM_TYPE_COLOR",
"for",
"typ",
",",
"name",
"in",
"list",
"(",
"COLORS",
".",
"items",
"(",
")",
")",
":",
"if",
"isinstance",
"(",
"va... | Return color name depending on value type | [
"Return",
"color",
"name",
"depending",
"on",
"value",
"type"
] | python | train |
antevens/listen | listen/signal_handler.py | https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L94-L117 | def pause(self, signum, seconds=0, callback_function=None):
"""
Pause execution, execution will resume in X seconds or when the
appropriate resume signal is received. Execution will jump to the
callback_function, the default callback function is the handler
method which will run all tasks registered with the reg_on_resume
methodi.
Returns True if timer expired, otherwise returns False
"""
if callback_function is None:
callback_function = self.default_handler
if seconds > 0:
self.log.info("Signal handler pausing for {0} seconds or until it receives SIGALRM or SIGCONT".format(seconds))
signal.signal(signal.SIGALRM, callback_function)
signal.alarm(seconds)
else:
self.log.info('Signal handler pausing until it receives SIGALRM or SIGCONT')
signal.signal(signal.SIGCONT, callback_function)
signal.pause()
self.log.info('Signal handler resuming from pause')
if signum == signal.SIGALRM:
return True
else:
return False | [
"def",
"pause",
"(",
"self",
",",
"signum",
",",
"seconds",
"=",
"0",
",",
"callback_function",
"=",
"None",
")",
":",
"if",
"callback_function",
"is",
"None",
":",
"callback_function",
"=",
"self",
".",
"default_handler",
"if",
"seconds",
">",
"0",
":",
... | Pause execution, execution will resume in X seconds or when the
appropriate resume signal is received. Execution will jump to the
callback_function, the default callback function is the handler
method which will run all tasks registered with the reg_on_resume
methodi.
Returns True if timer expired, otherwise returns False | [
"Pause",
"execution",
"execution",
"will",
"resume",
"in",
"X",
"seconds",
"or",
"when",
"the",
"appropriate",
"resume",
"signal",
"is",
"received",
".",
"Execution",
"will",
"jump",
"to",
"the",
"callback_function",
"the",
"default",
"callback",
"function",
"is... | python | test |
tylertreat/BigQuery-Python | bigquery/client.py | https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1472-L1502 | def _get_all_tables_for_dataset(self, dataset_id, project_id=None):
"""Retrieve a list of all tables for the dataset.
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
project_id: str
Unique ``str`` identifying the BigQuery project contains the dataset
Returns
-------
dict
A ``dict`` containing tables key with all tables
"""
project_id = self._get_project_id(project_id)
result = self.bigquery.tables().list(
projectId=project_id,
datasetId=dataset_id).execute(num_retries=self.num_retries)
page_token = result.get('nextPageToken')
while page_token:
res = self.bigquery.tables().list(
projectId=project_id,
datasetId=dataset_id,
pageToken=page_token
).execute(num_retries=self.num_retries)
page_token = res.get('nextPageToken')
result['tables'] += res.get('tables', [])
return result | [
"def",
"_get_all_tables_for_dataset",
"(",
"self",
",",
"dataset_id",
",",
"project_id",
"=",
"None",
")",
":",
"project_id",
"=",
"self",
".",
"_get_project_id",
"(",
"project_id",
")",
"result",
"=",
"self",
".",
"bigquery",
".",
"tables",
"(",
")",
".",
... | Retrieve a list of all tables for the dataset.
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
project_id: str
Unique ``str`` identifying the BigQuery project contains the dataset
Returns
-------
dict
A ``dict`` containing tables key with all tables | [
"Retrieve",
"a",
"list",
"of",
"all",
"tables",
"for",
"the",
"dataset",
"."
] | python | train |
dmwm/DBS | Server/Python/src/dbs/web/DBSReaderModel.py | https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSReaderModel.py#L1138-L1155 | def listDatasetParents(self, dataset=''):
"""
API to list A datasets parents in DBS.
:param dataset: dataset (Required)
:type dataset: str
:returns: List of dictionaries containing the following keys (this_dataset, parent_dataset_id, parent_dataset)
:rtype: list of dicts
"""
try:
return self.dbsDataset.listDatasetParents(dataset)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listDatasetParents. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | [
"def",
"listDatasetParents",
"(",
"self",
",",
"dataset",
"=",
"''",
")",
":",
"try",
":",
"return",
"self",
".",
"dbsDataset",
".",
"listDatasetParents",
"(",
"dataset",
")",
"except",
"dbsException",
"as",
"de",
":",
"dbsExceptionHandler",
"(",
"de",
".",
... | API to list A datasets parents in DBS.
:param dataset: dataset (Required)
:type dataset: str
:returns: List of dictionaries containing the following keys (this_dataset, parent_dataset_id, parent_dataset)
:rtype: list of dicts | [
"API",
"to",
"list",
"A",
"datasets",
"parents",
"in",
"DBS",
"."
] | python | train |
mitsei/dlkit | dlkit/json_/repository/managers.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/managers.py#L1574-L1591 | def get_composition_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the composition lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.CompositionLookupSession) - the new
``CompositionLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_composition_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_composition_lookup()`` is ``true``.*
"""
if not self.supports_composition_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.CompositionLookupSession(proxy=proxy, runtime=self._runtime) | [
"def",
"get_composition_lookup_session",
"(",
"self",
",",
"proxy",
")",
":",
"if",
"not",
"self",
".",
"supports_composition_lookup",
"(",
")",
":",
"raise",
"errors",
".",
"Unimplemented",
"(",
")",
"# pylint: disable=no-member",
"return",
"sessions",
".",
"Comp... | Gets the ``OsidSession`` associated with the composition lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.CompositionLookupSession) - the new
``CompositionLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_composition_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_composition_lookup()`` is ``true``.* | [
"Gets",
"the",
"OsidSession",
"associated",
"with",
"the",
"composition",
"lookup",
"service",
"."
] | python | train |
klen/adrest | adrest/mixin/auth.py | https://github.com/klen/adrest/blob/8b75c67123cffabe5ed98c222bb7ab43c904d89c/adrest/mixin/auth.py#L46-L73 | def authenticate(self, request):
""" Attempt to authenticate the request.
:param request: django.http.Request instance
:return bool: True if success else raises HTTP_401
"""
authenticators = self._meta.authenticators
if request.method == 'OPTIONS' and ADREST_ALLOW_OPTIONS:
self.auth = AnonimousAuthenticator(self)
return True
error_message = "Authorization required."
for authenticator in authenticators:
auth = authenticator(self)
try:
if not auth.authenticate(request):
raise AssertionError(error_message)
self.auth = auth
auth.configure(request)
return True
except AssertionError, e:
error_message = str(e)
raise HttpError(error_message, status=status.HTTP_401_UNAUTHORIZED) | [
"def",
"authenticate",
"(",
"self",
",",
"request",
")",
":",
"authenticators",
"=",
"self",
".",
"_meta",
".",
"authenticators",
"if",
"request",
".",
"method",
"==",
"'OPTIONS'",
"and",
"ADREST_ALLOW_OPTIONS",
":",
"self",
".",
"auth",
"=",
"AnonimousAuthent... | Attempt to authenticate the request.
:param request: django.http.Request instance
:return bool: True if success else raises HTTP_401 | [
"Attempt",
"to",
"authenticate",
"the",
"request",
"."
] | python | train |
Spinmob/spinmob | _functions.py | https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_functions.py#L547-L588 | def find_N_peaks(array, N=4, max_iterations=100, rec_max_iterations=3, recursion=1):
"""
This will run the find_peaks algorythm, adjusting the baseline until exactly N peaks are found.
"""
if recursion<0: return None
# get an initial guess as to the baseline
ymin = min(array)
ymax = max(array)
for n in range(max_iterations):
# bisect the range to estimate the baseline
y1 = (ymin+ymax)/2.0
# now see how many peaks this finds. p could have 40 for all we know
p, s, i = find_peaks(array, y1, True)
# now loop over the subarrays and make sure there aren't two peaks in any of them
for n in range(len(i)):
# search the subarray for two peaks, iterating 3 times (75% selectivity)
p2 = find_N_peaks(s[n], 2, rec_max_iterations, rec_max_iterations=rec_max_iterations, recursion=recursion-1)
# if we found a double-peak
if not p2 is None:
# push these non-duplicate values into the master array
for x in p2:
# if this point is not already in p, push it on
if not x in p: p.append(x+i[n]) # don't forget the offset, since subarrays start at 0
# if we nailed it, finish up
if len(p) == N: return p
# if we have too many peaks, we need to increase the baseline
if len(p) > N: ymin = y1
# too few? decrease the baseline
else: ymax = y1
return None | [
"def",
"find_N_peaks",
"(",
"array",
",",
"N",
"=",
"4",
",",
"max_iterations",
"=",
"100",
",",
"rec_max_iterations",
"=",
"3",
",",
"recursion",
"=",
"1",
")",
":",
"if",
"recursion",
"<",
"0",
":",
"return",
"None",
"# get an initial guess as to the basel... | This will run the find_peaks algorythm, adjusting the baseline until exactly N peaks are found. | [
"This",
"will",
"run",
"the",
"find_peaks",
"algorythm",
"adjusting",
"the",
"baseline",
"until",
"exactly",
"N",
"peaks",
"are",
"found",
"."
] | python | train |
valency/deeputils | deeputils/common.py | https://github.com/valency/deeputils/blob/27efd91668de0223ed8b07cfadf2151632521520/deeputils/common.py#L133-L158 | def dict_format_type(d, source, formatter, include_list=True):
"""
Replace the values of a dict with certain type to other values
:param d: the dictionary
:param source: the source type, e.g., int
:param formatter: the formatter method, e.g., return the string format of an int
:param include_list: whether list should be formatted, otherwise list will be considered as source type
:return: formatted dictionary
"""
if not isinstance(d, dict):
if isinstance(d, source):
return formatter(d)
else:
return d
else:
dd = dict()
for key, value in d.items():
if include_list and isinstance(value, list):
dd[key] = [dict_format_type(i, source, formatter) for i in value]
elif isinstance(value, dict):
dd[key] = dict_format_type(value, source, formatter)
elif isinstance(value, source):
dd[key] = formatter(value)
else:
dd[key] = value
return dd | [
"def",
"dict_format_type",
"(",
"d",
",",
"source",
",",
"formatter",
",",
"include_list",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"d",
",",
"dict",
")",
":",
"if",
"isinstance",
"(",
"d",
",",
"source",
")",
":",
"return",
"formatter",
... | Replace the values of a dict with certain type to other values
:param d: the dictionary
:param source: the source type, e.g., int
:param formatter: the formatter method, e.g., return the string format of an int
:param include_list: whether list should be formatted, otherwise list will be considered as source type
:return: formatted dictionary | [
"Replace",
"the",
"values",
"of",
"a",
"dict",
"with",
"certain",
"type",
"to",
"other",
"values",
":",
"param",
"d",
":",
"the",
"dictionary",
":",
"param",
"source",
":",
"the",
"source",
"type",
"e",
".",
"g",
".",
"int",
":",
"param",
"formatter",
... | python | valid |
pandas-dev/pandas | pandas/core/internals/blocks.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/blocks.py#L2677-L2690 | def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
if isinstance(other, ABCDatetimeIndex):
# May get a DatetimeIndex here. Unbox it.
other = other.array
if isinstance(other, DatetimeArray):
# hit in pandas/tests/indexing/test_coercion.py
# ::TestWhereCoercion::test_where_series_datetime64[datetime64tz]
# when falling back to ObjectBlock.where
other = other.astype(object)
return values, other | [
"def",
"_try_coerce_args",
"(",
"self",
",",
"values",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"ABCDatetimeIndex",
")",
":",
"# May get a DatetimeIndex here. Unbox it.",
"other",
"=",
"other",
".",
"array",
"if",
"isinstance",
"(",
"other"... | provide coercion to our input arguments | [
"provide",
"coercion",
"to",
"our",
"input",
"arguments"
] | python | train |
Parisson/TimeSide | timeside/core/processor.py | https://github.com/Parisson/TimeSide/blob/0618d75cd2f16021afcfd3d5b77f692adad76ea5/timeside/core/processor.py#L230-L267 | def process(self, frames, eod):
"""Returns an iterator over tuples of the form (buffer, eod)
where buffer is a fixed-sized block of data, and eod indicates whether
this is the last block.
In case padding is deactivated the last block may be smaller than
the buffer size.
"""
src_index = 0
remaining = len(frames)
while remaining:
space = self.buffer_size - self.len
copylen = remaining < space and remaining or space
src = frames[src_index:src_index + copylen]
if self.len == 0 and copylen == self.buffer_size:
# avoid unnecessary copy
buffer = src
else:
buffer = self.buffer
buffer[self.len:self.len + copylen] = src
remaining -= copylen
src_index += copylen
self.len += copylen
if self.len == self.buffer_size:
yield buffer, (eod and not remaining)
self.len = 0
if eod and self.len:
block = self.buffer
if self.pad:
self.buffer[self.len:self.buffer_size] = 0
else:
block = self.buffer[0:self.len]
yield block, True
self.len = 0 | [
"def",
"process",
"(",
"self",
",",
"frames",
",",
"eod",
")",
":",
"src_index",
"=",
"0",
"remaining",
"=",
"len",
"(",
"frames",
")",
"while",
"remaining",
":",
"space",
"=",
"self",
".",
"buffer_size",
"-",
"self",
".",
"len",
"copylen",
"=",
"rem... | Returns an iterator over tuples of the form (buffer, eod)
where buffer is a fixed-sized block of data, and eod indicates whether
this is the last block.
In case padding is deactivated the last block may be smaller than
the buffer size. | [
"Returns",
"an",
"iterator",
"over",
"tuples",
"of",
"the",
"form",
"(",
"buffer",
"eod",
")",
"where",
"buffer",
"is",
"a",
"fixed",
"-",
"sized",
"block",
"of",
"data",
"and",
"eod",
"indicates",
"whether",
"this",
"is",
"the",
"last",
"block",
".",
... | python | train |
freshbooks/statsdecor | statsdecor/decorators.py | https://github.com/freshbooks/statsdecor/blob/1c4a98e120799b430fd40c8fede9020a91162d31/statsdecor/decorators.py#L45-L61 | def timed(name, tags=None):
"""Function decorator for tracking timing information
on a function's invocation.
>>> from statsdecor.decorators import timed
>>> @timed('my.metric')
>>> def my_func():
>>> pass
"""
def wrap(f):
@wraps(f)
def decorator(*args, **kwargs):
stats = client()
with stats.timer(name, tags=tags):
return f(*args, **kwargs)
return decorator
return wrap | [
"def",
"timed",
"(",
"name",
",",
"tags",
"=",
"None",
")",
":",
"def",
"wrap",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"decorator",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"stats",
"=",
"client",
"(",
")",
"with"... | Function decorator for tracking timing information
on a function's invocation.
>>> from statsdecor.decorators import timed
>>> @timed('my.metric')
>>> def my_func():
>>> pass | [
"Function",
"decorator",
"for",
"tracking",
"timing",
"information",
"on",
"a",
"function",
"s",
"invocation",
"."
] | python | train |
Alignak-monitoring/alignak | alignak/scheduler.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/scheduler.py#L1224-L1232 | def reset_topology_change_flag(self):
"""Set topology_change attribute to False in all hosts and services
:return: None
"""
for i in self.hosts:
i.topology_change = False
for i in self.services:
i.topology_change = False | [
"def",
"reset_topology_change_flag",
"(",
"self",
")",
":",
"for",
"i",
"in",
"self",
".",
"hosts",
":",
"i",
".",
"topology_change",
"=",
"False",
"for",
"i",
"in",
"self",
".",
"services",
":",
"i",
".",
"topology_change",
"=",
"False"
] | Set topology_change attribute to False in all hosts and services
:return: None | [
"Set",
"topology_change",
"attribute",
"to",
"False",
"in",
"all",
"hosts",
"and",
"services"
] | python | train |
saltstack/salt | salt/modules/azurearm_compute.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_compute.py#L181-L211 | def availability_set_get(name, resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Get a dictionary representing an availability set's properties.
:param name: The availability set to get.
:param resource_group: The resource group name assigned to the
availability set.
CLI Example:
.. code-block:: bash
salt-call azurearm_compute.availability_set_get testset testgroup
'''
compconn = __utils__['azurearm.get_client']('compute', **kwargs)
try:
av_set = compconn.availability_sets.get(
resource_group_name=resource_group,
availability_set_name=name
)
result = av_set.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
result = {'error': str(exc)}
return result | [
"def",
"availability_set_get",
"(",
"name",
",",
"resource_group",
",",
"*",
"*",
"kwargs",
")",
":",
"compconn",
"=",
"__utils__",
"[",
"'azurearm.get_client'",
"]",
"(",
"'compute'",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"av_set",
"=",
"compconn",
".... | .. versionadded:: 2019.2.0
Get a dictionary representing an availability set's properties.
:param name: The availability set to get.
:param resource_group: The resource group name assigned to the
availability set.
CLI Example:
.. code-block:: bash
salt-call azurearm_compute.availability_set_get testset testgroup | [
"..",
"versionadded",
"::",
"2019",
".",
"2",
".",
"0"
] | python | train |
dakrauth/strutil | strutil.py | https://github.com/dakrauth/strutil/blob/c513645a919488d9b22ab612a539773bef866f10/strutil.py#L53-L60 | def replace_each(text, items, count=None, strip=False):
'''
Like ``replace``, where each occurrence in ``items`` is a 2-tuple of
``(old, new)`` pair.
'''
for a,b in items:
text = replace(text, a, b, count=count, strip=strip)
return text | [
"def",
"replace_each",
"(",
"text",
",",
"items",
",",
"count",
"=",
"None",
",",
"strip",
"=",
"False",
")",
":",
"for",
"a",
",",
"b",
"in",
"items",
":",
"text",
"=",
"replace",
"(",
"text",
",",
"a",
",",
"b",
",",
"count",
"=",
"count",
",... | Like ``replace``, where each occurrence in ``items`` is a 2-tuple of
``(old, new)`` pair. | [
"Like",
"replace",
"where",
"each",
"occurrence",
"in",
"items",
"is",
"a",
"2",
"-",
"tuple",
"of",
"(",
"old",
"new",
")",
"pair",
"."
] | python | train |
CalebBell/ht | ht/condensation.py | https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/condensation.py#L33-L90 | def Nusselt_laminar(Tsat, Tw, rhog, rhol, kl, mul, Hvap, L, angle=90.):
r'''Calculates heat transfer coefficient for laminar film condensation
of a pure chemical on a flat plate, as presented in [1]_ according to an
analysis performed by Nusselt in 1916.
.. math::
h=0.943\left[\frac{g\sin(\theta)\rho_{liq}(\rho_l-\rho_v)k_{l}^3
\Delta H_{vap}}{\mu_l(T_{sat}-T_w)L}\right]^{0.25}
Parameters
----------
Tsat : float
Saturation temperature at operating pressure [Pa]
Tw : float
Wall temperature, [K]
rhog : float
Density of the gas [kg/m^3]
rhol : float
Density of the liquid [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
L : float
Length of the plate [m]
angle : float, optional
Angle of inclination of the plate [degrees]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Optionally, the plate may be inclined.
The constant 0.943 is actually:
.. math::
2\sqrt{2}/3
Examples
--------
p. 578 in [1]_, matches exactly.
>>> Nusselt_laminar(Tsat=370, Tw=350, rhog=7.0, rhol=585., kl=0.091,
... mul=158.9E-6, Hvap=776900, L=0.1)
1482.206403453679
References
----------
.. [1] Hewitt, G. L. Shires T. Reg Bott G. F., George L. Shires, and
T. R. Bott. Process Heat Transfer. 1E. Boca Raton: CRC Press, 1994.
'''
return 2.*2.**0.5/3.*(kl**3*rhol*(rhol - rhog)*g*sin(angle/180.*pi)
*Hvap/(mul*(Tsat - Tw)*L))**0.25 | [
"def",
"Nusselt_laminar",
"(",
"Tsat",
",",
"Tw",
",",
"rhog",
",",
"rhol",
",",
"kl",
",",
"mul",
",",
"Hvap",
",",
"L",
",",
"angle",
"=",
"90.",
")",
":",
"return",
"2.",
"*",
"2.",
"**",
"0.5",
"/",
"3.",
"*",
"(",
"kl",
"**",
"3",
"*",
... | r'''Calculates heat transfer coefficient for laminar film condensation
of a pure chemical on a flat plate, as presented in [1]_ according to an
analysis performed by Nusselt in 1916.
.. math::
h=0.943\left[\frac{g\sin(\theta)\rho_{liq}(\rho_l-\rho_v)k_{l}^3
\Delta H_{vap}}{\mu_l(T_{sat}-T_w)L}\right]^{0.25}
Parameters
----------
Tsat : float
Saturation temperature at operating pressure [Pa]
Tw : float
Wall temperature, [K]
rhog : float
Density of the gas [kg/m^3]
rhol : float
Density of the liquid [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
mul : float
Viscosity of liquid [Pa*s]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
L : float
Length of the plate [m]
angle : float, optional
Angle of inclination of the plate [degrees]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Optionally, the plate may be inclined.
The constant 0.943 is actually:
.. math::
2\sqrt{2}/3
Examples
--------
p. 578 in [1]_, matches exactly.
>>> Nusselt_laminar(Tsat=370, Tw=350, rhog=7.0, rhol=585., kl=0.091,
... mul=158.9E-6, Hvap=776900, L=0.1)
1482.206403453679
References
----------
.. [1] Hewitt, G. L. Shires T. Reg Bott G. F., George L. Shires, and
T. R. Bott. Process Heat Transfer. 1E. Boca Raton: CRC Press, 1994. | [
"r",
"Calculates",
"heat",
"transfer",
"coefficient",
"for",
"laminar",
"film",
"condensation",
"of",
"a",
"pure",
"chemical",
"on",
"a",
"flat",
"plate",
"as",
"presented",
"in",
"[",
"1",
"]",
"_",
"according",
"to",
"an",
"analysis",
"performed",
"by",
... | python | train |
CenturyLinkCloud/clc-python-sdk | src/clc/APIv2/group.py | https://github.com/CenturyLinkCloud/clc-python-sdk/blob/f4dba40c627cb08dd4b7d0d277e8d67578010b05/src/clc/APIv2/group.py#L84-L98 | def Search(self,key):
"""Search group list by providing partial name, ID, description or other key.
>>> clc.v2.Datacenter().Groups().Search("Default Group")
[<clc.APIv2.group.Group object at 0x1065b0f50>, <clc.APIv2.group.Group object at 0x1065b0d10>]
"""
results = []
for group in self.groups:
if group.id.lower().find(key.lower()) != -1: results.append(group)
elif group.name.lower().find(key.lower()) != -1: results.append(group)
elif group.description.lower().find(key.lower()) != -1: results.append(group)
return(results) | [
"def",
"Search",
"(",
"self",
",",
"key",
")",
":",
"results",
"=",
"[",
"]",
"for",
"group",
"in",
"self",
".",
"groups",
":",
"if",
"group",
".",
"id",
".",
"lower",
"(",
")",
".",
"find",
"(",
"key",
".",
"lower",
"(",
")",
")",
"!=",
"-",... | Search group list by providing partial name, ID, description or other key.
>>> clc.v2.Datacenter().Groups().Search("Default Group")
[<clc.APIv2.group.Group object at 0x1065b0f50>, <clc.APIv2.group.Group object at 0x1065b0d10>] | [
"Search",
"group",
"list",
"by",
"providing",
"partial",
"name",
"ID",
"description",
"or",
"other",
"key",
"."
] | python | train |
IRC-SPHERE/HyperStream | hyperstream/time_interval.py | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/time_interval.py#L118-L135 | def split(self, points):
'''Splits the list of time intervals in the specified points
The function assumes that the time intervals do not overlap and ignores
points that are not inside of any interval.
Parameters
==========
points: list of datetime
'''
for p in points:
for i in range(len(self.intervals)):
if (self.intervals[i].start < p) and (self.intervals[i].end > p):
self.intervals = (self.intervals[:i]
+ [TimeInterval(self.intervals[i].start, p),
TimeInterval(p, self.intervals[i].end)]
+ self.intervals[(i + 1):])
break | [
"def",
"split",
"(",
"self",
",",
"points",
")",
":",
"for",
"p",
"in",
"points",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"intervals",
")",
")",
":",
"if",
"(",
"self",
".",
"intervals",
"[",
"i",
"]",
".",
"start",
"<",
... | Splits the list of time intervals in the specified points
The function assumes that the time intervals do not overlap and ignores
points that are not inside of any interval.
Parameters
==========
points: list of datetime | [
"Splits",
"the",
"list",
"of",
"time",
"intervals",
"in",
"the",
"specified",
"points"
] | python | train |
evandempsey/porter2-stemmer | porter2stemmer/porter2stemmer.py | https://github.com/evandempsey/porter2-stemmer/blob/949824b7767c25efb014ef738e682442fa70c10b/porter2stemmer/porter2stemmer.py#L315-L337 | def process_terminals(self, word):
"""
Deal with terminal Es and Ls and
convert any uppercase Ys back to lowercase.
"""
length = len(word)
if word[length - 1] == 'e':
if self.r2 <= (length - 1):
word = word[:-1]
elif self.r1 <= (length - 1):
if not self.is_short(word[:-1]):
word = word[:-1]
elif word[length - 1] == 'l':
if self.r2 <= (length - 1) and word[length - 2] == 'l':
word = word[:-1]
char_list = [x if x != 'Y' else 'y' for x in word]
word = ''.join(char_list)
return word | [
"def",
"process_terminals",
"(",
"self",
",",
"word",
")",
":",
"length",
"=",
"len",
"(",
"word",
")",
"if",
"word",
"[",
"length",
"-",
"1",
"]",
"==",
"'e'",
":",
"if",
"self",
".",
"r2",
"<=",
"(",
"length",
"-",
"1",
")",
":",
"word",
"=",... | Deal with terminal Es and Ls and
convert any uppercase Ys back to lowercase. | [
"Deal",
"with",
"terminal",
"Es",
"and",
"Ls",
"and",
"convert",
"any",
"uppercase",
"Ys",
"back",
"to",
"lowercase",
"."
] | python | train |
saltstack/salt | salt/pillar/nsot.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/nsot.py#L103-L120 | def _check_regex(minion_id, regex):
'''
check whether or not this minion should have this external pillar returned
:param minion_id: str
:param minion_regex: list
:return: bool
'''
get_pillar = False
for pattern in regex:
log.debug('nsot external pillar comparing %s with %s', minion_id, regex)
match = re.search(pattern, minion_id)
if match and match.string == minion_id:
log.debug('nsot external pillar found a match!')
get_pillar = True
break
log.debug('nsot external pillar unable to find a match!')
return get_pillar | [
"def",
"_check_regex",
"(",
"minion_id",
",",
"regex",
")",
":",
"get_pillar",
"=",
"False",
"for",
"pattern",
"in",
"regex",
":",
"log",
".",
"debug",
"(",
"'nsot external pillar comparing %s with %s'",
",",
"minion_id",
",",
"regex",
")",
"match",
"=",
"re",... | check whether or not this minion should have this external pillar returned
:param minion_id: str
:param minion_regex: list
:return: bool | [
"check",
"whether",
"or",
"not",
"this",
"minion",
"should",
"have",
"this",
"external",
"pillar",
"returned"
] | python | train |
h2non/paco | paco/apply.py | https://github.com/h2non/paco/blob/1e5ef4df317e7cbbcefdf67d8dee28ce90538f3d/paco/apply.py#L8-L54 | def apply(coro, *args, **kw):
"""
Creates a continuation coroutine function with some arguments
already applied.
Useful as a shorthand when combined with other control flow functions.
Any arguments passed to the returned function are added to the arguments
originally passed to apply.
This is similar to `paco.partial()`.
This function can be used as decorator.
arguments:
coro (coroutinefunction): coroutine function to wrap.
*args (mixed): mixed variadic arguments for partial application.
*kwargs (mixed): mixed variadic keyword arguments for partial
application.
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
coroutinefunction: wrapped coroutine function.
Usage::
async def hello(name, mark='!'):
print('Hello, {name}{mark}'.format(name=name, mark=mark))
hello_mike = paco.apply(hello, 'Mike')
await hello_mike()
# => Hello, Mike!
hello_mike = paco.apply(hello, 'Mike', mark='?')
await hello_mike()
# => Hello, Mike?
"""
assert_corofunction(coro=coro)
@asyncio.coroutine
def wrapper(*_args, **_kw):
# Explicitely ignore wrapper arguments
return (yield from coro(*args, **kw))
return wrapper | [
"def",
"apply",
"(",
"coro",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"assert_corofunction",
"(",
"coro",
"=",
"coro",
")",
"@",
"asyncio",
".",
"coroutine",
"def",
"wrapper",
"(",
"*",
"_args",
",",
"*",
"*",
"_kw",
")",
":",
"# Explicitel... | Creates a continuation coroutine function with some arguments
already applied.
Useful as a shorthand when combined with other control flow functions.
Any arguments passed to the returned function are added to the arguments
originally passed to apply.
This is similar to `paco.partial()`.
This function can be used as decorator.
arguments:
coro (coroutinefunction): coroutine function to wrap.
*args (mixed): mixed variadic arguments for partial application.
*kwargs (mixed): mixed variadic keyword arguments for partial
application.
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
coroutinefunction: wrapped coroutine function.
Usage::
async def hello(name, mark='!'):
print('Hello, {name}{mark}'.format(name=name, mark=mark))
hello_mike = paco.apply(hello, 'Mike')
await hello_mike()
# => Hello, Mike!
hello_mike = paco.apply(hello, 'Mike', mark='?')
await hello_mike()
# => Hello, Mike? | [
"Creates",
"a",
"continuation",
"coroutine",
"function",
"with",
"some",
"arguments",
"already",
"applied",
"."
] | python | train |
aloetesting/aloe_webdriver | aloe_webdriver/__init__.py | https://github.com/aloetesting/aloe_webdriver/blob/65d847da4bdc63f9c015cb19d4efdee87df8ffad/aloe_webdriver/__init__.py#L125-L132 | def click(self, name):
"""Click the link with the provided link text."""
try:
elem = world.browser.find_element_by_link_text(name)
except NoSuchElementException:
raise AssertionError(
"Cannot find the link with text '{}'.".format(name))
elem.click() | [
"def",
"click",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"elem",
"=",
"world",
".",
"browser",
".",
"find_element_by_link_text",
"(",
"name",
")",
"except",
"NoSuchElementException",
":",
"raise",
"AssertionError",
"(",
"\"Cannot find the link with text '{}... | Click the link with the provided link text. | [
"Click",
"the",
"link",
"with",
"the",
"provided",
"link",
"text",
"."
] | python | train |
RJT1990/pyflux | pyflux/families/poisson.py | https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/poisson.py#L342-L366 | def second_order_score(y, mean, scale, shape, skewness):
""" GAS Poisson Update term potentially using second-order information - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Poisson distribution
scale : float
scale parameter for the Poisson distribution
shape : float
tail thickness parameter for the Poisson distribution
skewness : float
skewness parameter for the Poisson distribution
Returns
----------
- Adjusted score of the Poisson family
"""
return (y-mean)/float(mean) | [
"def",
"second_order_score",
"(",
"y",
",",
"mean",
",",
"scale",
",",
"shape",
",",
"skewness",
")",
":",
"return",
"(",
"y",
"-",
"mean",
")",
"/",
"float",
"(",
"mean",
")"
] | GAS Poisson Update term potentially using second-order information - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Poisson distribution
scale : float
scale parameter for the Poisson distribution
shape : float
tail thickness parameter for the Poisson distribution
skewness : float
skewness parameter for the Poisson distribution
Returns
----------
- Adjusted score of the Poisson family | [
"GAS",
"Poisson",
"Update",
"term",
"potentially",
"using",
"second",
"-",
"order",
"information",
"-",
"native",
"Python",
"function"
] | python | train |
tensorflow/tensor2tensor | tensor2tensor/layers/common_video.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L585-L618 | def beta_schedule(schedule, global_step, final_beta, decay_start, decay_end):
"""Get KL multiplier (beta) based on the schedule."""
if decay_start > decay_end:
raise ValueError("decay_end is smaller than decay_end.")
# Since some of the TF schedules do not support incrementing a value,
# in all of the schedules, we anneal the beta from final_beta to zero
# and then reverse it at the bottom.
if schedule == "constant":
decayed_value = 0.0
elif schedule == "linear":
decayed_value = tf.train.polynomial_decay(
learning_rate=final_beta,
global_step=global_step - decay_start,
decay_steps=decay_end - decay_start,
end_learning_rate=0.0)
elif schedule == "noisy_linear_cosine_decay":
decayed_value = tf.train.noisy_linear_cosine_decay(
learning_rate=final_beta,
global_step=global_step - decay_start,
decay_steps=decay_end - decay_start)
# TODO(mechcoder): Add log_annealing schedule.
else:
raise ValueError("Unknown beta schedule.")
increased_value = final_beta - decayed_value
increased_value = tf.maximum(0.0, increased_value)
beta = tf.case(
pred_fn_pairs={
tf.less(global_step, decay_start): lambda: 0.0,
tf.greater(global_step, decay_end): lambda: final_beta},
default=lambda: increased_value)
return beta | [
"def",
"beta_schedule",
"(",
"schedule",
",",
"global_step",
",",
"final_beta",
",",
"decay_start",
",",
"decay_end",
")",
":",
"if",
"decay_start",
">",
"decay_end",
":",
"raise",
"ValueError",
"(",
"\"decay_end is smaller than decay_end.\"",
")",
"# Since some of th... | Get KL multiplier (beta) based on the schedule. | [
"Get",
"KL",
"multiplier",
"(",
"beta",
")",
"based",
"on",
"the",
"schedule",
"."
] | python | train |
adafruit/Adafruit_Python_GPIO | Adafruit_GPIO/GPIO.py | https://github.com/adafruit/Adafruit_Python_GPIO/blob/a92a23d6b5869663b2bc1ccf78bb11585076a9c4/Adafruit_GPIO/GPIO.py#L183-L188 | def setup(self, pin, mode, pull_up_down=PUD_OFF):
"""Set the input or output mode for a specified pin. Mode should be
either OUTPUT or INPUT.
"""
self.rpi_gpio.setup(pin, self._dir_mapping[mode],
pull_up_down=self._pud_mapping[pull_up_down]) | [
"def",
"setup",
"(",
"self",
",",
"pin",
",",
"mode",
",",
"pull_up_down",
"=",
"PUD_OFF",
")",
":",
"self",
".",
"rpi_gpio",
".",
"setup",
"(",
"pin",
",",
"self",
".",
"_dir_mapping",
"[",
"mode",
"]",
",",
"pull_up_down",
"=",
"self",
".",
"_pud_m... | Set the input or output mode for a specified pin. Mode should be
either OUTPUT or INPUT. | [
"Set",
"the",
"input",
"or",
"output",
"mode",
"for",
"a",
"specified",
"pin",
".",
"Mode",
"should",
"be",
"either",
"OUTPUT",
"or",
"INPUT",
"."
] | python | valid |
ResilienceTesting/gremlinsdk-python | python/pygremlin/failuregenerator.py | https://github.com/ResilienceTesting/gremlinsdk-python/blob/c5cc439ea1c0d6a98ff88f5604bf739f3c48d1e6/python/pygremlin/failuregenerator.py#L65-L124 | def add_rule(self, **args):
"""
@param args keyword argument list, consisting of:
source: <source service name>,
dest: <destination service name>,
messagetype: <request|response|publish|subscribe|stream>
headerpattern: <regex to match against the value of the X-Gremlin-ID trackingheader present in HTTP headers>
bodypattern: <regex to match against HTTP message body>
delayprobability: <float, 0.0 to 1.0>
delaydistribution: <uniform|exponential|normal> probability distribution function
mangleprobability: <float, 0.0 to 1.0>
mangledistribution: <uniform|exponential|normal> probability distribution function
abortprobability: <float, 0.0 to 1.0>
abortdistribution: <uniform|exponential|normal> probability distribution function
delaytime: <string> latency to inject into requests <string, e.g., "10ms", "1s", "5m", "3h", "1s500ms">
errorcode: <Number> HTTP error code or -1 to reset TCP connection
searchstring: <string> string to replace when Mangle is enabled
replacestring: <string> string to replace with for Mangle fault
"""
#The defaults are indicated below
myrule = {
"source": "",
"dest": "",
"messagetype": "request",
"headerpattern": "*",
"bodypattern": "*",
"delayprobability": 0.0,
"delaydistribution": "uniform",
"mangleprobability": 0.0,
"mangledistribution": "uniform",
"abortprobability": 0.0,
"abortdistribution": "uniform",
"delaytime": "0s",
"errorcode": -1,
"searchstring": "",
"replacestring": ""
}
rule = args.copy()
#copy
for i in rule.keys():
if i not in myrule:
continue
myrule[i] = rule[i]
#check defaults
services = self.app.get_services()
assert myrule["source"] != "" and myrule["dest"] != ""
assert myrule["source"] in services and myrule["dest"] in services
assert myrule['headerpattern'] != "" or myrule["bodypattern"] != ""
assert myrule['delayprobability'] >0.0 or myrule['abortprobability'] >0.0 or myrule['mangleprobability'] >0.0
if myrule["delayprobability"] > 0.0:
assert myrule["delaytime"] != ""
if myrule["abortprobability"] > 0.0:
assert myrule["errorcode"] >= -1
assert myrule["messagetype"] in ["request", "response", "publish", "subscribe"]
self._queue.append(myrule) | [
"def",
"add_rule",
"(",
"self",
",",
"*",
"*",
"args",
")",
":",
"#The defaults are indicated below",
"myrule",
"=",
"{",
"\"source\"",
":",
"\"\"",
",",
"\"dest\"",
":",
"\"\"",
",",
"\"messagetype\"",
":",
"\"request\"",
",",
"\"headerpattern\"",
":",
"\"*\"... | @param args keyword argument list, consisting of:
source: <source service name>,
dest: <destination service name>,
messagetype: <request|response|publish|subscribe|stream>
headerpattern: <regex to match against the value of the X-Gremlin-ID trackingheader present in HTTP headers>
bodypattern: <regex to match against HTTP message body>
delayprobability: <float, 0.0 to 1.0>
delaydistribution: <uniform|exponential|normal> probability distribution function
mangleprobability: <float, 0.0 to 1.0>
mangledistribution: <uniform|exponential|normal> probability distribution function
abortprobability: <float, 0.0 to 1.0>
abortdistribution: <uniform|exponential|normal> probability distribution function
delaytime: <string> latency to inject into requests <string, e.g., "10ms", "1s", "5m", "3h", "1s500ms">
errorcode: <Number> HTTP error code or -1 to reset TCP connection
searchstring: <string> string to replace when Mangle is enabled
replacestring: <string> string to replace with for Mangle fault | [
"@param",
"args",
"keyword",
"argument",
"list",
"consisting",
"of",
":"
] | python | train |
jonathf/chaospy | chaospy/distributions/sampler/sequences/grid.py | https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/sampler/sequences/grid.py#L48-L63 | def create_grid_samples(order, dim=1):
"""
Create samples from a regular grid.
Args:
order (int):
The order of the grid. Defines the number of samples.
dim (int):
The number of dimensions in the grid
Returns (numpy.ndarray):
Regular grid with ``shape == (dim, order)``.
"""
x_data = numpy.arange(1, order+1)/(order+1.)
x_data = chaospy.quad.combine([x_data]*dim)
return x_data.T | [
"def",
"create_grid_samples",
"(",
"order",
",",
"dim",
"=",
"1",
")",
":",
"x_data",
"=",
"numpy",
".",
"arange",
"(",
"1",
",",
"order",
"+",
"1",
")",
"/",
"(",
"order",
"+",
"1.",
")",
"x_data",
"=",
"chaospy",
".",
"quad",
".",
"combine",
"(... | Create samples from a regular grid.
Args:
order (int):
The order of the grid. Defines the number of samples.
dim (int):
The number of dimensions in the grid
Returns (numpy.ndarray):
Regular grid with ``shape == (dim, order)``. | [
"Create",
"samples",
"from",
"a",
"regular",
"grid",
"."
] | python | train |
Carbonara-Project/Guanciale | guanciale/idblib.py | https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L721-L742 | def dumpfree(self):
""" list all free pages """
fmt = "L" if self.version > 15 else "H"
hdrsize = 8 if self.version > 15 else 4
pn = self.firstfree
if pn == 0:
print("no free pages")
return
while pn:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
if len(data) == 0:
print("could not read FREE data at page %06x" % pn)
break
count, nextfree = struct.unpack_from("<" + (fmt * 2), data)
freepages = list(struct.unpack_from("<" + (fmt * count), data, hdrsize))
freepages.insert(0, pn)
for pn in freepages:
self.fh.seek(pn * self.pagesize)
data = self.fh.read(self.pagesize)
print("%06x: free: %s" % (pn, hexdump(data[:64])))
pn = nextfree | [
"def",
"dumpfree",
"(",
"self",
")",
":",
"fmt",
"=",
"\"L\"",
"if",
"self",
".",
"version",
">",
"15",
"else",
"\"H\"",
"hdrsize",
"=",
"8",
"if",
"self",
".",
"version",
">",
"15",
"else",
"4",
"pn",
"=",
"self",
".",
"firstfree",
"if",
"pn",
"... | list all free pages | [
"list",
"all",
"free",
"pages"
] | python | train |
neighbordog/deviantart | deviantart/api.py | https://github.com/neighbordog/deviantart/blob/5612f1d5e2139a48c9d793d7fd19cde7e162d7b1/deviantart/api.py#L1286-L1320 | def post_comment(self, target, body, comment_type="profile", commentid=""):
"""Post comment
:param target: The target you want to post the comment to (username/deviation UUID/status UUID)
:param body: The comment text
:param comment_type: The type of entry you want to post your comment to
:param commentid: The commentid you are replying to
"""
if self.standard_grant_type is not "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
if comment_type == "profile":
response = self._req('/comments/post/profile/{}'.format(target), post_data={
"body":body,
"commentid":commentid
})
elif comment_type == "deviation":
response = self._req('/comments/post/deviation/{}'.format(target), post_data={
"body":body,
"commentid":commentid
})
elif comment_type == "status":
response = self._req('/comments/post/status/{}'.format(target), post_data={
"body":body,
"commentid":commentid
})
else:
raise DeviantartError("Unknown comment type.")
comment = Comment()
comment.from_dict(response)
return comment | [
"def",
"post_comment",
"(",
"self",
",",
"target",
",",
"body",
",",
"comment_type",
"=",
"\"profile\"",
",",
"commentid",
"=",
"\"\"",
")",
":",
"if",
"self",
".",
"standard_grant_type",
"is",
"not",
"\"authorization_code\"",
":",
"raise",
"DeviantartError",
... | Post comment
:param target: The target you want to post the comment to (username/deviation UUID/status UUID)
:param body: The comment text
:param comment_type: The type of entry you want to post your comment to
:param commentid: The commentid you are replying to | [
"Post",
"comment"
] | python | train |
data-8/datascience | datascience/tables.py | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L1941-L2003 | def plot(self, column_for_xticks=None, select=None, overlay=True, width=6, height=4, **vargs):
"""Plot line charts for the table.
Args:
column_for_xticks (``str/array``): A column containing x-axis labels
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each plot will be displayed separately.
vargs: Additional arguments that get passed into `plt.plot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected column must be numerical.
Returns:
Returns a line plot (connected scatter). Each plot is labeled using
the values in `column_for_xticks` and one plot is produced for all
other columns in self (or for the columns designated by `select`).
>>> table = Table().with_columns(
... 'days', make_array(0, 1, 2, 3, 4, 5),
... 'price', make_array(90.5, 90.00, 83.00, 95.50, 82.00, 82.00),
... 'projection', make_array(90.75, 82.00, 82.50, 82.50, 83.00, 82.50))
>>> table
days | price | projection
0 | 90.5 | 90.75
1 | 90 | 82
2 | 83 | 82.5
3 | 95.5 | 82.5
4 | 82 | 83
5 | 82 | 82.5
>>> table.plot('days') # doctest: +SKIP
<line graph with days as x-axis and lines for price and projection>
>>> table.plot('days', overlay=False) # doctest: +SKIP
<line graph with days as x-axis and line for price>
<line graph with days as x-axis and line for projection>
>>> table.plot('days', 'price') # doctest: +SKIP
<line graph with days as x-axis and line for price>
"""
options = self.default_options.copy()
options.update(vargs)
if column_for_xticks is not None:
x_data, y_labels = self._split_column_and_labels(column_for_xticks)
x_label = self._as_label(column_for_xticks)
else:
x_data, y_labels = None, self.labels
x_label = None
if select is not None:
y_labels = self._as_labels(select)
if x_data is not None:
self = self.sort(x_data)
x_data = np.sort(x_data)
def draw(axis, label, color):
if x_data is None:
axis.plot(self[label], color=color, **options)
else:
axis.plot(x_data, self[label], color=color, **options)
self._visualize(x_label, y_labels, None, overlay, draw, _vertical_x, width=width, height=height) | [
"def",
"plot",
"(",
"self",
",",
"column_for_xticks",
"=",
"None",
",",
"select",
"=",
"None",
",",
"overlay",
"=",
"True",
",",
"width",
"=",
"6",
",",
"height",
"=",
"4",
",",
"*",
"*",
"vargs",
")",
":",
"options",
"=",
"self",
".",
"default_opt... | Plot line charts for the table.
Args:
column_for_xticks (``str/array``): A column containing x-axis labels
Kwargs:
overlay (bool): create a chart with one color per data column;
if False, each plot will be displayed separately.
vargs: Additional arguments that get passed into `plt.plot`.
See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
for additional arguments that can be passed into vargs.
Raises:
ValueError -- Every selected column must be numerical.
Returns:
Returns a line plot (connected scatter). Each plot is labeled using
the values in `column_for_xticks` and one plot is produced for all
other columns in self (or for the columns designated by `select`).
>>> table = Table().with_columns(
... 'days', make_array(0, 1, 2, 3, 4, 5),
... 'price', make_array(90.5, 90.00, 83.00, 95.50, 82.00, 82.00),
... 'projection', make_array(90.75, 82.00, 82.50, 82.50, 83.00, 82.50))
>>> table
days | price | projection
0 | 90.5 | 90.75
1 | 90 | 82
2 | 83 | 82.5
3 | 95.5 | 82.5
4 | 82 | 83
5 | 82 | 82.5
>>> table.plot('days') # doctest: +SKIP
<line graph with days as x-axis and lines for price and projection>
>>> table.plot('days', overlay=False) # doctest: +SKIP
<line graph with days as x-axis and line for price>
<line graph with days as x-axis and line for projection>
>>> table.plot('days', 'price') # doctest: +SKIP
<line graph with days as x-axis and line for price> | [
"Plot",
"line",
"charts",
"for",
"the",
"table",
"."
] | python | train |
RI-imaging/qpimage | qpimage/core.py | https://github.com/RI-imaging/qpimage/blob/863c0fce5735b4c0ae369f75c0df9a33411b2bb2/qpimage/core.py#L496-L507 | def copy(self, h5file=None):
"""Create a copy of the current instance
This is done by recursively copying the underlying hdf5 data.
Parameters
----------
h5file: str, h5py.File, h5py.Group, or None
see `QPImage.__init__`
"""
h5 = copyh5(self.h5, h5file)
return QPImage(h5file=h5, h5dtype=self.h5dtype) | [
"def",
"copy",
"(",
"self",
",",
"h5file",
"=",
"None",
")",
":",
"h5",
"=",
"copyh5",
"(",
"self",
".",
"h5",
",",
"h5file",
")",
"return",
"QPImage",
"(",
"h5file",
"=",
"h5",
",",
"h5dtype",
"=",
"self",
".",
"h5dtype",
")"
] | Create a copy of the current instance
This is done by recursively copying the underlying hdf5 data.
Parameters
----------
h5file: str, h5py.File, h5py.Group, or None
see `QPImage.__init__` | [
"Create",
"a",
"copy",
"of",
"the",
"current",
"instance"
] | python | train |
partofthething/ace | ace/smoother.py | https://github.com/partofthething/ace/blob/1593a49f3c2e845514323e9c36ee253fe77bac3c/ace/smoother.py#L256-L266 | def _add_observation_to_variances(self, xj, yj):
"""
Quickly update the variance and co-variance for the addition of one observation.
See Also
--------
_update_variance_in_window : compute variance considering full window
"""
term1 = (self.window_size + 1.0) / self.window_size * (xj - self._mean_x_in_window)
self._covariance_in_window += term1 * (yj - self._mean_y_in_window)
self._variance_in_window += term1 * (xj - self._mean_x_in_window) | [
"def",
"_add_observation_to_variances",
"(",
"self",
",",
"xj",
",",
"yj",
")",
":",
"term1",
"=",
"(",
"self",
".",
"window_size",
"+",
"1.0",
")",
"/",
"self",
".",
"window_size",
"*",
"(",
"xj",
"-",
"self",
".",
"_mean_x_in_window",
")",
"self",
".... | Quickly update the variance and co-variance for the addition of one observation.
See Also
--------
_update_variance_in_window : compute variance considering full window | [
"Quickly",
"update",
"the",
"variance",
"and",
"co",
"-",
"variance",
"for",
"the",
"addition",
"of",
"one",
"observation",
"."
] | python | train |
sixty-north/cosmic-ray | src/cosmic_ray/work_db.py | https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/work_db.py#L118-L125 | def clear(self):
"""Clear all work items from the session.
This removes any associated results as well.
"""
with self._conn:
self._conn.execute('DELETE FROM results')
self._conn.execute('DELETE FROM work_items') | [
"def",
"clear",
"(",
"self",
")",
":",
"with",
"self",
".",
"_conn",
":",
"self",
".",
"_conn",
".",
"execute",
"(",
"'DELETE FROM results'",
")",
"self",
".",
"_conn",
".",
"execute",
"(",
"'DELETE FROM work_items'",
")"
] | Clear all work items from the session.
This removes any associated results as well. | [
"Clear",
"all",
"work",
"items",
"from",
"the",
"session",
"."
] | python | train |
Capitains/flask-capitains-nemo | flask_nemo/query/resolve.py | https://github.com/Capitains/flask-capitains-nemo/blob/8d91f2c05b925a6c8ea8c997baf698c87257bc58/flask_nemo/query/resolve.py#L24-L34 | def resolve(self, uri):
""" Resolve a Resource identified by URI
:param uri: The URI of the resource to be resolved
:type uri: str
:return: the contents of the resource as a string
:rtype: str
"""
for r in self.__retrievers__:
if r.match(uri):
return r
raise UnresolvableURIError() | [
"def",
"resolve",
"(",
"self",
",",
"uri",
")",
":",
"for",
"r",
"in",
"self",
".",
"__retrievers__",
":",
"if",
"r",
".",
"match",
"(",
"uri",
")",
":",
"return",
"r",
"raise",
"UnresolvableURIError",
"(",
")"
] | Resolve a Resource identified by URI
:param uri: The URI of the resource to be resolved
:type uri: str
:return: the contents of the resource as a string
:rtype: str | [
"Resolve",
"a",
"Resource",
"identified",
"by",
"URI",
":",
"param",
"uri",
":",
"The",
"URI",
"of",
"the",
"resource",
"to",
"be",
"resolved",
":",
"type",
"uri",
":",
"str",
":",
"return",
":",
"the",
"contents",
"of",
"the",
"resource",
"as",
"a",
... | python | valid |
mjj4791/python-buienradar | buienradar/buienradar_json.py | https://github.com/mjj4791/python-buienradar/blob/a70436f54e007ce921d5210cb296cf3e4adf9d09/buienradar/buienradar_json.py#L575-L580 | def __getStationName(name, id):
"""Construct a staiion name."""
name = name.replace("Meetstation", "")
name = name.strip()
name += " (%s)" % id
return name | [
"def",
"__getStationName",
"(",
"name",
",",
"id",
")",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"\"Meetstation\"",
",",
"\"\"",
")",
"name",
"=",
"name",
".",
"strip",
"(",
")",
"name",
"+=",
"\" (%s)\"",
"%",
"id",
"return",
"name"
] | Construct a staiion name. | [
"Construct",
"a",
"staiion",
"name",
"."
] | python | train |
florianpaquet/mease | mease/registry.py | https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/registry.py#L65-L77 | def sender(self, func, routing=None, routing_re=None):
"""
Registers a sender function
"""
if routing and not isinstance(routing, list):
routing = [routing]
if routing_re:
if not isinstance(routing_re, list):
routing_re = [routing_re]
routing_re[:] = [re.compile(r) for r in routing_re]
self.senders.append((func, routing, routing_re)) | [
"def",
"sender",
"(",
"self",
",",
"func",
",",
"routing",
"=",
"None",
",",
"routing_re",
"=",
"None",
")",
":",
"if",
"routing",
"and",
"not",
"isinstance",
"(",
"routing",
",",
"list",
")",
":",
"routing",
"=",
"[",
"routing",
"]",
"if",
"routing_... | Registers a sender function | [
"Registers",
"a",
"sender",
"function"
] | python | train |
secdev/scapy | scapy/layers/netflow.py | https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/netflow.py#L1284-L1318 | def _GenNetflowRecordV9(cls, lengths_list):
"""Internal function used to generate the Records from
their template.
"""
_fields_desc = []
for j, k in lengths_list:
_f_data = NetflowV9TemplateFieldDecoders.get(k, None)
_f_type, _f_args = (
_f_data if isinstance(_f_data, tuple) else (_f_data, [])
)
_f_kwargs = {}
if _f_type:
if issubclass(_f_type, _AdjustableNetflowField):
_f_kwargs["length"] = j
_fields_desc.append(
_f_type(
NetflowV910TemplateFieldTypes.get(k, "unknown_data"),
0, *_f_args, **_f_kwargs
)
)
else:
_fields_desc.append(
_CustomStrFixedLenField(
NetflowV910TemplateFieldTypes.get(k, "unknown_data"),
b"", length=j
)
)
# This will act exactly like a NetflowRecordV9, but has custom fields
class NetflowRecordV9I(cls):
fields_desc = _fields_desc
match_subclass = True
NetflowRecordV9I.name = cls.name
NetflowRecordV9I.__name__ = cls.__name__
return NetflowRecordV9I | [
"def",
"_GenNetflowRecordV9",
"(",
"cls",
",",
"lengths_list",
")",
":",
"_fields_desc",
"=",
"[",
"]",
"for",
"j",
",",
"k",
"in",
"lengths_list",
":",
"_f_data",
"=",
"NetflowV9TemplateFieldDecoders",
".",
"get",
"(",
"k",
",",
"None",
")",
"_f_type",
",... | Internal function used to generate the Records from
their template. | [
"Internal",
"function",
"used",
"to",
"generate",
"the",
"Records",
"from",
"their",
"template",
"."
] | python | train |
barryp/py-amqplib | extras/generate_skeleton_0_8.py | https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/extras/generate_skeleton_0_8.py#L107-L160 | def generate_docstr(element, indent='', wrap=None):
"""
Generate a Python docstr for a given element in the AMQP
XML spec file. The element could be a class or method
The 'wrap' parameter is an optional chunk of text that's
added to the beginning and end of the resulting docstring.
"""
result = []
txt = element.text and element.text.rstrip()
if txt:
result.append(_reindent(txt, indent))
result.append(indent)
for d in element.findall('doc') + element.findall('rule'):
docval = ''.join(d.textlist()).rstrip()
if not docval:
continue
reformat = True
if 'name' in d.attrib:
result.append(indent + d.attrib['name'].upper() + ':')
result.append(indent)
extra_indent = ' '
if d.attrib['name'] == 'grammar':
reformat = False # Don't want re-indenting to mess this up
elif d.tag == 'rule':
result.append(indent + 'RULE:')
result.append(indent)
extra_indent = ' '
else:
extra_indent = ''
result.append(_reindent(docval, indent + extra_indent, reformat))
result.append(indent)
fields = element.findall('field')
if fields:
result.append(indent + 'PARAMETERS:')
for f in fields:
result.append(indent + ' ' + _fixup_field_name(f) + ': ' + _field_type(f))
field_docs = generate_docstr(f, indent + ' ')
if field_docs:
result.append(indent)
result.append(field_docs)
result.append(indent)
if not result:
return None
if wrap is not None:
result = [wrap] + result + [wrap]
return '\n'.join(x.rstrip() for x in result) + '\n' | [
"def",
"generate_docstr",
"(",
"element",
",",
"indent",
"=",
"''",
",",
"wrap",
"=",
"None",
")",
":",
"result",
"=",
"[",
"]",
"txt",
"=",
"element",
".",
"text",
"and",
"element",
".",
"text",
".",
"rstrip",
"(",
")",
"if",
"txt",
":",
"result",... | Generate a Python docstr for a given element in the AMQP
XML spec file. The element could be a class or method
The 'wrap' parameter is an optional chunk of text that's
added to the beginning and end of the resulting docstring. | [
"Generate",
"a",
"Python",
"docstr",
"for",
"a",
"given",
"element",
"in",
"the",
"AMQP",
"XML",
"spec",
"file",
".",
"The",
"element",
"could",
"be",
"a",
"class",
"or",
"method"
] | python | train |
wummel/linkchecker | linkcheck/logger/csvlog.py | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/csvlog.py#L82-L120 | def log_url (self, url_data):
"""Write csv formatted url check info."""
row = []
if self.has_part("urlname"):
row.append(url_data.base_url)
if self.has_part("parentname"):
row.append(url_data.parent_url)
if self.has_part("baseref"):
row.append(url_data.base_ref)
if self.has_part("result"):
row.append(url_data.result)
if self.has_part("warningstring"):
row.append(self.linesep.join(x[1] for x in url_data.warnings))
if self.has_part("infostring"):
row.append(self.linesep.join(url_data.info))
if self.has_part("valid"):
row.append(url_data.valid)
if self.has_part("url"):
row.append(url_data.url)
if self.has_part("line"):
row.append(url_data.line)
if self.has_part("column"):
row.append(url_data.column)
if self.has_part("name"):
row.append(url_data.name)
if self.has_part("dltime"):
row.append(url_data.dltime)
if self.has_part("dlsize"):
row.append(url_data.size)
if self.has_part("checktime"):
row.append(url_data.checktime)
if self.has_part("cached"):
row.append(0)
if self.has_part("level"):
row.append(url_data.level)
if self.has_part("modified"):
row.append(self.format_modified(url_data.modified))
self.writerow(map(strformat.unicode_safe, row))
self.flush() | [
"def",
"log_url",
"(",
"self",
",",
"url_data",
")",
":",
"row",
"=",
"[",
"]",
"if",
"self",
".",
"has_part",
"(",
"\"urlname\"",
")",
":",
"row",
".",
"append",
"(",
"url_data",
".",
"base_url",
")",
"if",
"self",
".",
"has_part",
"(",
"\"parentnam... | Write csv formatted url check info. | [
"Write",
"csv",
"formatted",
"url",
"check",
"info",
"."
] | python | train |
bradmontgomery/django-redis-metrics | redis_metrics/models.py | https://github.com/bradmontgomery/django-redis-metrics/blob/2c92332920113d28c39234b949aa496b39a091d1/redis_metrics/models.py#L278-L325 | def set_metric(self, slug, value, category=None, expire=None, date=None):
"""Assigns a specific value to the *current* metric. You can use this
to start a metric at a value greater than 0 or to reset a metric.
The given slug will be used to generate Redis keys at the following
granularities: Seconds, Minutes, Hours, Day, Week, Month, and Year.
Parameters:
* ``slug`` -- a unique value to identify the metric; used in
construction of redis keys (see below).
* ``value`` -- The value of the metric.
* ``category`` -- (optional) Assign the metric to a Category (a string)
* ``expire`` -- (optional) Specify the number of seconds in which the
metric will expire.
* ``date`` -- (optional) Specify the timestamp for the metric; default
used to build the keys will be the current date and time in UTC form.
Redis keys for each metric (slug) take the form:
m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute
m:<slug>:h:<yyyy-mm-dd-hh> # Hour
m:<slug>:<yyyy-mm-dd> # Day
m:<slug>:w:<yyyy-num> # Week (year - week number)
m:<slug>:m:<yyyy-mm> # Month
m:<slug>:y:<yyyy> # Year
"""
keys = self._build_keys(slug, date=date)
# Add the slug to the set of metric slugs
self.r.sadd(self._metric_slugs_key, slug)
# Construct a dictionary of key/values for use with mset
data = {}
for k in keys:
data[k] = value
self.r.mset(data)
# Add the category if applicable.
if category:
self._categorize(slug, category)
# Expire the Metric in ``expire`` seconds if applicable.
if expire:
for k in keys:
self.r.expire(k, expire) | [
"def",
"set_metric",
"(",
"self",
",",
"slug",
",",
"value",
",",
"category",
"=",
"None",
",",
"expire",
"=",
"None",
",",
"date",
"=",
"None",
")",
":",
"keys",
"=",
"self",
".",
"_build_keys",
"(",
"slug",
",",
"date",
"=",
"date",
")",
"# Add t... | Assigns a specific value to the *current* metric. You can use this
to start a metric at a value greater than 0 or to reset a metric.
The given slug will be used to generate Redis keys at the following
granularities: Seconds, Minutes, Hours, Day, Week, Month, and Year.
Parameters:
* ``slug`` -- a unique value to identify the metric; used in
construction of redis keys (see below).
* ``value`` -- The value of the metric.
* ``category`` -- (optional) Assign the metric to a Category (a string)
* ``expire`` -- (optional) Specify the number of seconds in which the
metric will expire.
* ``date`` -- (optional) Specify the timestamp for the metric; default
used to build the keys will be the current date and time in UTC form.
Redis keys for each metric (slug) take the form:
m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute
m:<slug>:h:<yyyy-mm-dd-hh> # Hour
m:<slug>:<yyyy-mm-dd> # Day
m:<slug>:w:<yyyy-num> # Week (year - week number)
m:<slug>:m:<yyyy-mm> # Month
m:<slug>:y:<yyyy> # Year | [
"Assigns",
"a",
"specific",
"value",
"to",
"the",
"*",
"current",
"*",
"metric",
".",
"You",
"can",
"use",
"this",
"to",
"start",
"a",
"metric",
"at",
"a",
"value",
"greater",
"than",
"0",
"or",
"to",
"reset",
"a",
"metric",
"."
] | python | train |
INM-6/hybridLFPy | examples/Hagen_et_al_2016_cercor/figure_11.py | https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/figure_11.py#L31-L223 | def fig_lfp_corr(params, savefolders, transient=200,
channels=[0,3,7,11,13], Df=None,
mlab=True, NFFT=256, noverlap=128,
window=plt.mlab.window_hanning,
letterslist=['AB', 'CD'], data_type = 'LFP'):
'''This figure compares power spectra for correlated and uncorrelated signals
'''
ana_params.set_PLOS_2column_fig_style(ratio=0.5)
fig = plt.figure()
fig.subplots_adjust(left=0.07, right=0.95, bottom=0.1, wspace=0.3, hspace=0.1)
gs = gridspec.GridSpec(5, 4)
for i, (savefolder, letters) in enumerate(zip(savefolders, letterslist)):
# path to simulation files
params.savefolder = os.path.join(os.path.split(params.savefolder)[0],
savefolder)
params.figures_path = os.path.join(params.savefolder, 'figures')
params.spike_output_path = os.path.join(params.savefolder,
'processed_nest_output')
params.networkSimParams['spike_output_path'] = params.spike_output_path
## Including correlations
f = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_psd),'r')
freqs = f['freqs'].value
LFP_PSD_corr = f['psd'].value
f.close()
## Excluding correlations
f = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_psd_uncorr),'r')
freqs = f['freqs'].value
LFP_PSD_uncorr = f['psd'].value
f.close()
##################################
### Single channel LFP PSDs ###
##################################
ax = fig.add_subplot(gs[0, (i % 2)*2])
phlp.remove_axis_junk(ax)
ax.loglog(freqs,LFP_PSD_corr[channels[0]], color='k', label='$P$')
ax.loglog(freqs,LFP_PSD_uncorr[channels[0]],
color='gray' if analysis_params.bw else analysis_params.colorP,
lw=1,
label='$\tilde{P}$')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.text(0.80,0.82,'ch. %i' %(channels[0]+1),horizontalalignment='left',
verticalalignment='center',
fontsize=6,
transform=ax.transAxes)
ax.yaxis.set_minor_locator(plt.NullLocator())
ax.set_ylabel('(mV$^2$/Hz)', labelpad=0.)
ax.set_xticks([])
ax.set_xticklabels([])
ax.tick_params(axis='y',which='minor',bottom='off')
ax.set_xlim(4E0,4E2)
ax.set_ylim(1E-8,1.5E-4)
ax.set_yticks([1E-8,1E-6,1E-4])
ax.set_title('power spectra')
phlp.annotate_subplot(ax, ncols=4, nrows=5, letter=letters[0],
linear_offset=0.065)
ax = fig.add_subplot(gs[1, (i % 2)*2])
phlp.remove_axis_junk(ax)
ax.loglog(freqs,LFP_PSD_corr[channels[1]], color='k', label='corr')
ax.loglog(freqs,LFP_PSD_uncorr[channels[1]],
color='gray' if analysis_params.bw else analysis_params.colorP,
lw=1,
label='uncorr')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.text(0.80,0.82,'ch. %i' %(channels[1]+1),horizontalalignment='left',
verticalalignment='center',
fontsize=6,
transform=ax.transAxes)
ax.yaxis.set_minor_locator(plt.NullLocator())
ax.set_xticks([])
ax.set_xticklabels([])
ax.tick_params(axis='y',which='minor',bottom='off')
ax.set_xlim(4E0,4E2)
ax.set_ylim(1E-8,1.5E-4)
ax.set_yticks([1E-8,1E-6,1E-4])
ax.set_yticklabels([])
ax = fig.add_subplot(gs[2, (i % 2)*2])
phlp.remove_axis_junk(ax)
ax.loglog(freqs,LFP_PSD_corr[channels[2]], color='k', label='corr')
ax.loglog(freqs,LFP_PSD_uncorr[channels[2]],
color='gray' if analysis_params.bw else analysis_params.colorP,
lw=1,
label='uncorr')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.text(0.80,0.82,'ch. %i' %(channels[2]+1),horizontalalignment='left',
verticalalignment='center',
fontsize=6,
transform=ax.transAxes)
ax.yaxis.set_minor_locator(plt.NullLocator())
ax.set_xticks([])
ax.set_xticklabels([])
ax.tick_params(axis='y',which='minor',bottom='off')
ax.set_xlim(4E0,4E2)
ax.set_ylim(1E-8,1.5E-4)
ax.set_yticks([1E-8,1E-6,1E-4])
ax.set_yticklabels([])
ax = fig.add_subplot(gs[3, (i % 2)*2])
phlp.remove_axis_junk(ax)
ax.loglog(freqs,LFP_PSD_corr[channels[3]], color='k', label='corr')
ax.loglog(freqs,LFP_PSD_uncorr[channels[3]],
color='gray' if analysis_params.bw else analysis_params.colorP,
lw=1,
label='uncorr')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.text(0.80,0.82,'ch. %i' %(channels[3]+1),horizontalalignment='left',
verticalalignment='center',
fontsize=6,
transform=ax.transAxes)
ax.yaxis.set_minor_locator(plt.NullLocator())
ax.set_xticks([])
ax.set_xticklabels([])
ax.tick_params(axis='y',which='minor',bottom='off')
ax.set_xlim(4E0,4E2)
ax.set_ylim(1E-8,1.5E-4)
ax.set_yticks([1E-8,1E-6,1E-4])
ax.set_yticklabels([])
ax = fig.add_subplot(gs[4, (i % 2)*2])
phlp.remove_axis_junk(ax)
ax.loglog(freqs,LFP_PSD_corr[channels[4]], color='k', label='corr')
ax.loglog(freqs,LFP_PSD_uncorr[channels[4]],
color='gray' if analysis_params.bw else analysis_params.colorP,
lw=1,
label='uncorr')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlabel(r'$f$ (Hz)', labelpad=0.2)
ax.text(0.80,0.82,'ch. %i' %(channels[4]+1),horizontalalignment='left',
verticalalignment='center',
fontsize=6,
transform=ax.transAxes)
ax.yaxis.set_minor_locator(plt.NullLocator())
ax.tick_params(axis='y',which='minor',bottom='off')
ax.set_xlim(4E0,4E2)
ax.set_ylim(1E-8,1.5E-4)
ax.set_yticks([1E-8,1E-6,1E-4])
ax.set_yticklabels([])
##################################
### LFP PSD ratios ###
##################################
ax = fig.add_subplot(gs[:, (i % 2)*2 + 1])
phlp.annotate_subplot(ax, ncols=4, nrows=1, letter=letters[1],
linear_offset=0.065)
phlp.remove_axis_junk(ax)
ax.set_title('power ratio')
PSD_ratio = LFP_PSD_corr/LFP_PSD_uncorr
zvec = np.r_[params.electrodeParams['z']]
zvec = np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]]
inds = freqs >= 1 # frequencies greater than 4 Hz
im = ax.pcolormesh(freqs[inds], zvec+40, PSD_ratio[:, inds],
rasterized=False,
cmap=plt.get_cmap('gray_r', 12) if analysis_params.bw else plt.get_cmap('Reds', 12),
vmin=10**-0.25,vmax=10**2.75,norm=LogNorm())
ax.set_xscale('log')
ax.set_yticks(zvec)
yticklabels = ['ch. %i' %i for i in np.arange(len(zvec))+1]
ax.set_yticklabels(yticklabels)
ax.set_xlabel(r'$f$ (Hz)',labelpad=0.2)
plt.axis('tight')
ax.set_xlim([4E0, 4E2])
cb = phlp.colorbar(fig, ax, im,
width=0.05, height=0.5,
hoffset=-0.05, voffset=0.0)
cb.set_label('(-)', labelpad=0.1)
return fig | [
"def",
"fig_lfp_corr",
"(",
"params",
",",
"savefolders",
",",
"transient",
"=",
"200",
",",
"channels",
"=",
"[",
"0",
",",
"3",
",",
"7",
",",
"11",
",",
"13",
"]",
",",
"Df",
"=",
"None",
",",
"mlab",
"=",
"True",
",",
"NFFT",
"=",
"256",
",... | This figure compares power spectra for correlated and uncorrelated signals | [
"This",
"figure",
"compares",
"power",
"spectra",
"for",
"correlated",
"and",
"uncorrelated",
"signals"
] | python | train |
iotile/typedargs | typedargs/metadata.py | https://github.com/iotile/typedargs/blob/0a5091a664b9b4d836e091e9ba583e944f438fd8/typedargs/metadata.py#L304-L360 | def check_spec(self, pos_args, kwargs=None):
"""Check if there are any missing or duplicate arguments.
Args:
pos_args (list): A list of arguments that will be passed as positional
arguments.
kwargs (dict): A dictionary of the keyword arguments that will be passed.
Returns:
dict: A dictionary of argument name to argument value, pulled from either
the value passed or the default value if no argument is passed.
Raises:
ArgumentError: If a positional or keyword argument does not fit in the spec.
ValidationError: If an argument is passed twice.
"""
if kwargs is None:
kwargs = {}
if self.varargs is not None or self.kwargs is not None:
raise InternalError("check_spec cannot be called on a function that takes *args or **kwargs")
missing = object()
arg_vals = [missing]*len(self.arg_names)
kw_indices = {name: i for i, name in enumerate(self.arg_names)}
for i, arg in enumerate(pos_args):
if i >= len(arg_vals):
raise ArgumentError("Too many positional arguments, first excessive argument=%s" % str(arg))
arg_vals[i] = arg
for arg, val in kwargs.items():
index = kw_indices.get(arg)
if index is None:
raise ArgumentError("Cannot find argument by name: %s" % arg)
if arg_vals[index] is not missing:
raise ValidationError("Argument %s passed twice" % arg)
arg_vals[index] = val
# Fill in any default variables if their args are missing
if len(self.arg_defaults) > 0:
for i in range(0, len(self.arg_defaults)):
neg_index = -len(self.arg_defaults) + i
if arg_vals[neg_index] is missing:
arg_vals[neg_index] = self.arg_defaults[i]
# Now make sure there isn't a missing gap
if missing in arg_vals:
index = arg_vals.index(missing)
raise ArgumentError("Missing a required argument (position: %d, name: %s)" % (index, self.arg_names[index]))
return {name: val for name, val in zip(self.arg_names, arg_vals)} | [
"def",
"check_spec",
"(",
"self",
",",
"pos_args",
",",
"kwargs",
"=",
"None",
")",
":",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"if",
"self",
".",
"varargs",
"is",
"not",
"None",
"or",
"self",
".",
"kwargs",
"is",
"not",
"None"... | Check if there are any missing or duplicate arguments.
Args:
pos_args (list): A list of arguments that will be passed as positional
arguments.
kwargs (dict): A dictionary of the keyword arguments that will be passed.
Returns:
dict: A dictionary of argument name to argument value, pulled from either
the value passed or the default value if no argument is passed.
Raises:
ArgumentError: If a positional or keyword argument does not fit in the spec.
ValidationError: If an argument is passed twice. | [
"Check",
"if",
"there",
"are",
"any",
"missing",
"or",
"duplicate",
"arguments",
"."
] | python | test |
barrust/mediawiki | mediawiki/mediawiki.py | https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L429-L443 | def suggest(self, query):
""" Gather suggestions based on the provided title or None if no
suggestions found
Args:
query (str): Page title
Returns:
String or None: Suggested page title or **None** if no \
suggestion found """
res, suggest = self.search(query, results=1, suggestion=True)
try:
title = suggest or res[0]
except IndexError: # page doesn't exist
title = None
return title | [
"def",
"suggest",
"(",
"self",
",",
"query",
")",
":",
"res",
",",
"suggest",
"=",
"self",
".",
"search",
"(",
"query",
",",
"results",
"=",
"1",
",",
"suggestion",
"=",
"True",
")",
"try",
":",
"title",
"=",
"suggest",
"or",
"res",
"[",
"0",
"]"... | Gather suggestions based on the provided title or None if no
suggestions found
Args:
query (str): Page title
Returns:
String or None: Suggested page title or **None** if no \
suggestion found | [
"Gather",
"suggestions",
"based",
"on",
"the",
"provided",
"title",
"or",
"None",
"if",
"no",
"suggestions",
"found"
] | python | train |
pymc-devs/pymc | pymc/distributions.py | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L968-L985 | def categorical_like(x, p):
R"""
Categorical log-likelihood. The most general discrete distribution.
.. math:: f(x=i \mid p) = p_i
for :math:`i \in 0 \ldots k-1`.
:Parameters:
- `x` : [int] :math:`x \in 0\ldots k-1`
- `p` : [float] :math:`p > 0`, :math:`\sum p = 1`
"""
p = np.atleast_2d(p)
if np.any(abs(np.sum(p, 1) - 1) > 0.0001):
print_("Probabilities in categorical_like sum to", np.sum(p, 1))
return flib.categorical(np.array(x).astype(int), p) | [
"def",
"categorical_like",
"(",
"x",
",",
"p",
")",
":",
"p",
"=",
"np",
".",
"atleast_2d",
"(",
"p",
")",
"if",
"np",
".",
"any",
"(",
"abs",
"(",
"np",
".",
"sum",
"(",
"p",
",",
"1",
")",
"-",
"1",
")",
">",
"0.0001",
")",
":",
"print_",... | R"""
Categorical log-likelihood. The most general discrete distribution.
.. math:: f(x=i \mid p) = p_i
for :math:`i \in 0 \ldots k-1`.
:Parameters:
- `x` : [int] :math:`x \in 0\ldots k-1`
- `p` : [float] :math:`p > 0`, :math:`\sum p = 1` | [
"R",
"Categorical",
"log",
"-",
"likelihood",
".",
"The",
"most",
"general",
"discrete",
"distribution",
"."
] | python | train |
StackStorm/pybind | pybind/slxos/v17s_1_02/bridge_domain_state/bridge_domain_list/outer_vlan_list/tagged_ports_list/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/bridge_domain_state/bridge_domain_list/outer_vlan_list/tagged_ports_list/__init__.py#L185-L208 | def _set_lif_main_intf_type(self, v, load=False):
"""
Setter method for lif_main_intf_type, mapped from YANG variable /bridge_domain_state/bridge_domain_list/outer_vlan_list/tagged_ports_list/lif_main_intf_type (nsm-dcm-lif-main-intf-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_lif_main_intf_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lif_main_intf_type() directly.
YANG Description: LIF Main interface type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'nsm-dcm-lif-main-intf-type-pw': {'value': 5}, u'nsm-dcm-lif-main-intf-type-lag': {'value': 2}, u'nsm-dcm-lif-main-intf-type-phy': {'value': 1}, u'nsm-dcm-lif-main-intf-type-tunnel-l2gre': {'value': 4}, u'nsm-dcm-lif-main-intf-type-unknown': {'value': 0}, u'nsm-dcm-lif-main-intf-type-tunnel-vxlan': {'value': 3}},), is_leaf=True, yang_name="lif-main-intf-type", rest_name="lif-main-intf-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='nsm-dcm-lif-main-intf-type', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lif_main_intf_type must be of a type compatible with nsm-dcm-lif-main-intf-type""",
'defined-type': "brocade-nsm-operational:nsm-dcm-lif-main-intf-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'nsm-dcm-lif-main-intf-type-pw': {'value': 5}, u'nsm-dcm-lif-main-intf-type-lag': {'value': 2}, u'nsm-dcm-lif-main-intf-type-phy': {'value': 1}, u'nsm-dcm-lif-main-intf-type-tunnel-l2gre': {'value': 4}, u'nsm-dcm-lif-main-intf-type-unknown': {'value': 0}, u'nsm-dcm-lif-main-intf-type-tunnel-vxlan': {'value': 3}},), is_leaf=True, yang_name="lif-main-intf-type", rest_name="lif-main-intf-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='nsm-dcm-lif-main-intf-type', is_config=False)""",
})
self.__lif_main_intf_type = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_lif_main_intf_type",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
","... | Setter method for lif_main_intf_type, mapped from YANG variable /bridge_domain_state/bridge_domain_list/outer_vlan_list/tagged_ports_list/lif_main_intf_type (nsm-dcm-lif-main-intf-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_lif_main_intf_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lif_main_intf_type() directly.
YANG Description: LIF Main interface type | [
"Setter",
"method",
"for",
"lif_main_intf_type",
"mapped",
"from",
"YANG",
"variable",
"/",
"bridge_domain_state",
"/",
"bridge_domain_list",
"/",
"outer_vlan_list",
"/",
"tagged_ports_list",
"/",
"lif_main_intf_type",
"(",
"nsm",
"-",
"dcm",
"-",
"lif",
"-",
"main"... | python | train |
def send_subscribe(self, dup, topics):
    """Build a SUBSCRIBE packet for ``topics`` and queue it for sending.

    ``topics`` is a sequence of ``(topic, qos)`` pairs; ``dup`` sets the
    duplicate-delivery flag in the fixed header. Returns an NC error code
    on allocation failure, otherwise the result of queueing the packet.
    """
    packet = MqttPkt()
    # Remaining length: 2 bytes for the message id, then per topic a
    # 2-byte length prefix, the topic string itself and 1 QoS byte.
    remaining = 2
    for topic, _qos in topics:
        remaining += 2 + len(topic) + 1
    packet.command = NC.CMD_SUBSCRIBE | (dup << 3) | (1 << 1)
    packet.remaining_length = remaining
    status = packet.alloc()
    if status != NC.ERR_SUCCESS:
        return status
    # Variable header: message identifier.
    packet.write_uint16(self.mid_generate())
    # Payload: each topic filter followed by its requested QoS.
    for topic, qos in topics:
        packet.write_string(topic)
        packet.write_byte(qos)
    return self.packet_queue(packet)
"def",
"send_subscribe",
"(",
"self",
",",
"dup",
",",
"topics",
")",
":",
"pkt",
"=",
"MqttPkt",
"(",
")",
"pktlen",
"=",
"2",
"+",
"sum",
"(",
"[",
"2",
"+",
"len",
"(",
"topic",
")",
"+",
"1",
"for",
"(",
"topic",
",",
"qos",
")",
"in",
"t... | Send subscribe COMMAND to server. | [
"Send",
"subscribe",
"COMMAND",
"to",
"server",
"."
] | python | train |
def get_guest_os_type(self, id_p):
    """Return an :class:`IGuestOSType` object describing guest OS type *id_p*.

    The guest OS type is identified by a mnemonic string such as
    ``"win31"`` or ``"ubuntu"`` (see :py:func:`IMachine.os_type_id` and
    the :py:func:`IVirtualBox.guest_os_types` collection).

    in id_p of type str
        Guest OS type ID string.

    return type_p of type :class:`IGuestOSType`
        Guest OS type object.

    raises :class:`OleErrorInvalidarg`
        @a id is not a valid Guest OS type.
    """
    if not isinstance(id_p, basestring):
        raise TypeError("id_p can only be an instance of type basestring")
    # Delegate to the COM/XPCOM bridge and wrap the raw result.
    raw = self._call("getGuestOSType", in_p=[id_p])
    return IGuestOSType(raw)
"def",
"get_guest_os_type",
"(",
"self",
",",
"id_p",
")",
":",
"if",
"not",
"isinstance",
"(",
"id_p",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"id_p can only be an instance of type basestring\"",
")",
"type_p",
"=",
"self",
".",
"_call",
"(",
... | Returns an object describing the specified guest OS type.
The requested guest OS type is specified using a string which is a
mnemonic identifier of the guest operating system, such as
"win31" or "ubuntu". The guest OS type ID of a
particular virtual machine can be read or set using the
:py:func:`IMachine.os_type_id` attribute.
The :py:func:`IVirtualBox.guest_os_types` collection contains all
available guest OS type objects. Each object has an
:py:func:`IGuestOSType.id_p` attribute which contains an identifier of
the guest OS this object describes.
While this function returns an error for unknown guest OS types, they
can be still used without serious problems (if one accepts the fact
that there is no default VM config information).
in id_p of type str
Guest OS type ID string.
return type_p of type :class:`IGuestOSType`
Guest OS type object.
raises :class:`OleErrorInvalidarg`
@a id is not a valid Guest OS type. | [
"Returns",
"an",
"object",
"describing",
"the",
"specified",
"guest",
"OS",
"type",
".",
"The",
"requested",
"guest",
"OS",
"type",
"is",
"specified",
"using",
"a",
"string",
"which",
"is",
"a",
"mnemonic",
"identifier",
"of",
"the",
"guest",
"operating",
"s... | python | train |
def build_byte_align_buff(bits):
    """Left-pad a bitarray with 0s so its length is a multiple of 8.

    Args:
        bits: A bitarray to be padded and aligned.

    Returns:
        A newly aligned bitarray.
    """
    # Number of leading zero bits required to reach the next byte
    # boundary (0 when already aligned).
    pad_len = -len(bits) % 8
    if pad_len:
        padding = bitarray(pad_len)
        padding.setall(False)
    else:
        padding = bitarray()
    return padding + bits
"def",
"build_byte_align_buff",
"(",
"bits",
")",
":",
"bitmod",
"=",
"len",
"(",
"bits",
")",
"%",
"8",
"if",
"bitmod",
"==",
"0",
":",
"rdiff",
"=",
"bitarray",
"(",
")",
"else",
":",
"#KEEP bitarray",
"rdiff",
"=",
"bitarray",
"(",
"8",
"-",
"bitm... | Pad the left side of a bitarray with 0s to align its length with byte boundaries.
Args:
bits: A bitarray to be padded and aligned.
Returns:
A newly aligned bitarray. | [
"Pad",
"the",
"left",
"side",
"of",
"a",
"bitarray",
"with",
"0s",
"to",
"align",
"its",
"length",
"with",
"byte",
"boundaries",
"."
] | python | train |
def hsv(h, s, v):
    """Convert HSV (hue, saturation, value) to an RGB triple.

    Args:
        h: hue in degrees, ``0 <= h <= 360``.
        s: saturation, ``0 <= s <= 1``.
        v: value (brightness), ``0 <= v <= 1``.

    Returns:
        A tuple ``(r, g, b)`` of ints in ``0..255``.

    Raises:
        ValueError: if any argument is outside its valid range.
    """
    # Fix: the original guards were written as ``360 < h < 0`` (etc.),
    # which is always False, so out-of-range input was silently accepted;
    # the s/v error messages also formatted the wrong variable.
    if not 0 <= h <= 360:
        raise ValueError("h out of range: {}".format(h))
    if not 0 <= s <= 1:
        raise ValueError("s out of range: {}".format(s))
    if not 0 <= v <= 1:
        raise ValueError("v out of range: {}".format(v))
    c = v * s  # chroma
    # Hue sector in [0, 6); the modulo maps h == 360 onto sector 0 so the
    # chain below always binds r1/g1/b1 (previously h == 360 matched no
    # branch and raised UnboundLocalError).
    h1 = (h / 60) % 6
    x = c * (1 - abs(h1 % 2 - 1))  # second-largest colour component
    if 0 <= h1 < 1:
        r1, g1, b1 = (c, x, 0)
    elif 1 <= h1 < 2:
        r1, g1, b1 = (x, c, 0)
    elif 2 <= h1 < 3:
        r1, g1, b1 = (0, c, x)
    elif 3 <= h1 < 4:
        r1, g1, b1 = (0, x, c)
    elif 4 <= h1 < 5:
        r1, g1, b1 = (x, 0, c)
    else:  # 5 <= h1 < 6
        r1, g1, b1 = (c, 0, x)
    # Shift all channels up by the same amount to match the value.
    m = v - c
    r, g, b = r1 + m, g1 + m, b1 + m
    return int(r * 255), int(g * 255), int(b * 255)
"def",
"hsv",
"(",
"h",
",",
"s",
",",
"v",
")",
":",
"if",
"360",
"<",
"h",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"h out of range: {}\"",
".",
"format",
"(",
"h",
")",
")",
"if",
"1",
"<",
"s",
"<",
"0",
":",
"raise",
"ValueError",
"(",... | Convert HSV (hue, saturation, value) to RGB. | [
"Convert",
"HSV",
"(",
"hue",
"saturation",
"value",
")",
"to",
"RGB",
"."
] | python | train |
def survey_loader(sur_dir=SUR_DIR, sur_file=SUR_FILE):
    """Load the survey file ``sur_file`` from ``sur_dir`` and return a Survey."""
    path = os.path.join(sur_dir, sur_file)
    with open(path) as handle:
        contents = handle.read()
    return Survey(contents)
"def",
"survey_loader",
"(",
"sur_dir",
"=",
"SUR_DIR",
",",
"sur_file",
"=",
"SUR_FILE",
")",
":",
"survey_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sur_dir",
",",
"sur_file",
")",
"survey",
"=",
"None",
"with",
"open",
"(",
"survey_path",
")",
... | Loads up the given survey in the given dir. | [
"Loads",
"up",
"the",
"given",
"survey",
"in",
"the",
"given",
"dir",
"."
] | python | train |
wonambi-python/wonambi | wonambi/ioeeg/blackrock.py | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/blackrock.py#L117-L142 | def return_dat(self, chan, begsam, endsam):
        """Return the data as 2D numpy.ndarray.
        Parameters
        ----------
        chan : int or list
            index (indices) of the channels to read
        begsam : int
            index of the first sample
        endsam : int
            index of the last sample
        Returns
        -------
        numpy.ndarray
            A 2d matrix, with dimension chan X samples
        """
        # File kind is inferred from the extension; .nev files hold event
        # records only, so there is no continuous data to return.
        ext = splitext(self.filename)[1]
        if ext == '.nev':
            raise TypeError('NEV contains only header info, not data')
        # Read the requested sample window from the .nsX file, using the
        # byte offset, session bounds and scaling factor captured from the
        # header at construction time.
        data = _read_nsx(self.filename, self.BOData, self.sess_begin,
                         self.sess_end, self.factor, begsam, endsam)
        return data[chan, :]
"def",
"return_dat",
"(",
"self",
",",
"chan",
",",
"begsam",
",",
"endsam",
")",
":",
"ext",
"=",
"splitext",
"(",
"self",
".",
"filename",
")",
"[",
"1",
"]",
"if",
"ext",
"==",
"'.nev'",
":",
"raise",
"TypeError",
"(",
"'NEV contains only header info,... | Return the data as 2D numpy.ndarray.
Parameters
----------
chan : int or list
index (indices) of the channels to read
begsam : int
index of the first sample
endsam : int
index of the last sample
Returns
-------
numpy.ndarray
A 2d matrix, with dimension chan X samples | [
"Return",
"the",
"data",
"as",
"2D",
"numpy",
".",
"ndarray",
"."
] | python | train |
rameshg87/pyremotevbox | pyremotevbox/ZSI/generate/containers.py | https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/generate/containers.py#L120-L231 | def _setAttributes(self, attributes):
        '''parameters
        attributes -- a flat list of all attributes,
           from this list all items in attribute_typecode_dict will
           be generated into attrComponents.
        returns a list of strings representing the attribute_typecode_dict.
        '''
        # Python 2 code: generates source lines that populate the
        # attribute_typecode dictionary for the emitted typecode class.
        atd = self.attribute_typecode
        # NOTE: atd_list and formatted_attribute_list are the SAME list;
        # appends below build the returned value.
        atd_list = formatted_attribute_list = []
        if not attributes:
            return formatted_attribute_list
        atd_list.append('# attribute handling code')
        # Index-based while loop (not a for) because attribute groups are
        # flattened by appending their members to `attributes` mid-iteration.
        idx = 0
        while(idx < len(attributes)):
            a = attributes[idx]
            idx += 1
            if a.isWildCard() and a.isDeclaration():
                # <anyAttribute>: accept any element content.
                atd_list.append(\
                    '%s[("%s","anyAttribute")] = ZSI.TC.AnyElement()'\
                    % (atd, SCHEMA.XSD3)
                    )
            elif a.isDeclaration():
                # Locally declared attribute: resolve its type definition,
                # falling back to a built-in (or String) typecode.
                tdef = a.getTypeDefinition('type')
                if tdef is not None:
                    tc = '%s.%s(None)' %(NAD.getAlias(tdef.getTargetNamespace()),
                             self.mangle(type_class_name(tdef.getAttributeName()))
                             )
                else:
                    # built-in
                    t = a.getAttribute('type')
                    try:
                        tc = BTI.get_typeclass(t[1], t[0])
                    except:
                        # hand back a string by default.
                        tc = ZSI.TC.String
                    if tc is not None:
                        tc = '%s()' %tc
                # Dictionary key: (namespace, name) tuple for qualified
                # attributes, bare name string for unqualified ones.
                key = None
                if a.getAttribute('form') == 'qualified':
                    key = '("%s","%s")' % ( a.getTargetNamespace(),
                                            a.getAttribute('name') )
                elif a.getAttribute('form') == 'unqualified':
                    key = '"%s"' % a.getAttribute('name')
                else:
                    raise ContainerError, \
                        'attribute form must be un/qualified %s' \
                        % a.getAttribute('form')
                atd_list.append(\
                    '%s[%s] = %s' % (atd, key, tc)
                    )
            elif a.isReference() and a.isAttributeGroup():
                # flatten 'em out....
                for ga in a.getAttributeGroup().getAttributeContent():
                    attributes += (ga,)
            elif a.isReference():
                # Reference to an attribute declared elsewhere.
                try:
                    ga = a.getAttributeDeclaration()
                except XMLSchema.SchemaError:
                    key = a.getAttribute('ref')
                    self.logger.debug('No schema item for attribute ref (%s, %s)' %key)
                    if key in self.built_in_refs: continue
                    raise
                tp = None
                if ga is not None:
                    tp = ga.getTypeDefinition('type')
                    key = '("%s","%s")' %(ga.getTargetNamespace(),
                              ga.getAttribute('name'))
                if ga is None:
                    # TODO: probably SOAPENC:arrayType
                    key = '("%s","%s")' %(
                              a.getAttribute('ref').getTargetNamespace(),
                              a.getAttribute('ref').getName())
                    atd_list.append(\
                        '%s[%s] = ZSI.TC.String()' %(atd, key)
                        )
                elif tp is None:
                    # built in simple type
                    try:
                        namespace,typeName = ga.getAttribute('type')
                    except TypeError, ex:
                        # TODO: attribute declaration could be anonymous type
                        # hack in something to work
                        atd_list.append(\
                            '%s[%s] = ZSI.TC.String()' %(atd, key)
                            )
                    else:
                        atd_list.append(\
                            '%s[%s] = %s()' %(atd, key,
                                BTI.get_typeclass(typeName, namespace))
                            )
                else:
                    # Referenced attribute with a named type: emit a call to
                    # the generated typecode class in the aliased module.
                    typeName = tp.getAttribute('name')
                    namespace = tp.getTargetNamespace()
                    alias = NAD.getAlias(namespace)
                    key = '("%s","%s")' \
                          % (ga.getTargetNamespace(),ga.getAttribute('name'))
                    atd_list.append(\
                        '%s[%s] = %s.%s(None)' \
                        % (atd, key, alias, type_class_name(typeName))
                        )
            else:
                raise TypeError, 'expecting an attribute: %s' %a.getItemTrace()
        return formatted_attribute_list
"def",
"_setAttributes",
"(",
"self",
",",
"attributes",
")",
":",
"atd",
"=",
"self",
".",
"attribute_typecode",
"atd_list",
"=",
"formatted_attribute_list",
"=",
"[",
"]",
"if",
"not",
"attributes",
":",
"return",
"formatted_attribute_list",
"atd_list",
".",
"... | parameters
attributes -- a flat list of all attributes,
from this list all items in attribute_typecode_dict will
be generated into attrComponents.
returns a list of strings representing the attribute_typecode_dict. | [
"parameters",
"attributes",
"--",
"a",
"flat",
"list",
"of",
"all",
"attributes",
"from",
"this",
"list",
"all",
"items",
"in",
"attribute_typecode_dict",
"will",
"be",
"generated",
"into",
"attrComponents",
".",
"returns",
"a",
"list",
"of",
"strings",
"represe... | python | train |
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/scene/widgets/widget.py | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/scene/widgets/widget.py#L214-L230 | def height_max(self, height_max):
"""Set the maximum height of the widget.
Parameters
----------
height_max: None | float
the maximum height of the widget. if None, maximum height
is unbounded
"""
if height_max is None:
self._height_limits[1] = None
return
height_max = float(height_max)
assert(0 <= self.height_min <= height_max)
self._height_limits[1] = height_max
self._update_layout() | [
"def",
"height_max",
"(",
"self",
",",
"height_max",
")",
":",
"if",
"height_max",
"is",
"None",
":",
"self",
".",
"_height_limits",
"[",
"1",
"]",
"=",
"None",
"return",
"height_max",
"=",
"float",
"(",
"height_max",
")",
"assert",
"(",
"0",
"<=",
"se... | Set the maximum height of the widget.
Parameters
----------
height_max: None | float
the maximum height of the widget. if None, maximum height
is unbounded | [
"Set",
"the",
"maximum",
"height",
"of",
"the",
"widget",
"."
] | python | train |
def _exists(self, path):
    """Report whether *path* exists.

    S3 has no real directory objects, so any trailing-slash path is
    treated as an existing "directory"; everything else is delegated to
    the storage backend.
    """
    if not path.endswith('/'):
        return self.storage.exists(path)
    return True
"def",
"_exists",
"(",
"self",
",",
"path",
")",
":",
"if",
"path",
".",
"endswith",
"(",
"'/'",
")",
":",
"return",
"True",
"return",
"self",
".",
"storage",
".",
"exists",
"(",
"path",
")"
] | S3 directory is not S3Ojbect. | [
"S3",
"directory",
"is",
"not",
"S3Ojbect",
"."
] | python | train |
spacetelescope/stsci.tools | lib/stsci/tools/iterfile.py | https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/iterfile.py#L62-L77 | def open(self):
        """ Opens the file for subsequent access. """
        # Lazily open the FITS file once and cache the handle for reuse.
        if self.handle is None:
            self.handle = fits.open(self.fname, mode='readonly')
        # Select the HDU named by self.extn, which may be (name,) or
        # (name, version); with no extension given, use the primary HDU.
        if self.extn:
            if len(self.extn) == 1:
                hdu = self.handle[self.extn[0]]
            else:
                hdu = self.handle[self.extn[0],self.extn[1]]
        else:
            hdu = self.handle[0]
        # Remember whether the selected HDU is tile-compressed so later
        # accesses can account for it.
        if isinstance(hdu,fits.hdu.compressed.CompImageHDU):
            self.compress = True
        return hdu
"def",
"open",
"(",
"self",
")",
":",
"if",
"self",
".",
"handle",
"is",
"None",
":",
"self",
".",
"handle",
"=",
"fits",
".",
"open",
"(",
"self",
".",
"fname",
",",
"mode",
"=",
"'readonly'",
")",
"if",
"self",
".",
"extn",
":",
"if",
"len",
... | Opens the file for subsequent access. | [
"Opens",
"the",
"file",
"for",
"subsequent",
"access",
"."
] | python | train |
def get_html_source_with_base_href(driver, page_source):
    ''' Prepends a base-href tag for the current domain to the html source.
        This is needed for the page html to render correctly.
        Returns an empty string when the current page has no usable URL. '''
    current_url = get_last_page(driver)
    if '://' not in current_url:
        return ''
    return '%s\n%s' % (get_base_href_html(current_url), page_source)
"def",
"get_html_source_with_base_href",
"(",
"driver",
",",
"page_source",
")",
":",
"last_page",
"=",
"get_last_page",
"(",
"driver",
")",
"if",
"'://'",
"in",
"last_page",
":",
"base_href_html",
"=",
"get_base_href_html",
"(",
"last_page",
")",
"return",
"'%s\\... | Combines the domain base href with the html source.
This is needed for the page html to render correctly. | [
"Combines",
"the",
"domain",
"base",
"href",
"with",
"the",
"html",
"source",
".",
"This",
"is",
"needed",
"for",
"the",
"page",
"html",
"to",
"render",
"correctly",
"."
] | python | train |
def make_app(config=None):
    """
    Factory function that creates a new `CoolmagicApplication`
    object. Optional WSGI middlewares should be applied here.
    """
    app = CoolMagicApplication(config or {})
    # Serve static files from ./public under the /public URL prefix.
    public_dir = path.join(path.dirname(__file__), "public")
    app = SharedDataMiddleware(app, {"/public": public_dir})
    # Ensure request-local storage is cleaned up after each request.
    return local_manager.make_middleware(app)
"def",
"make_app",
"(",
"config",
"=",
"None",
")",
":",
"config",
"=",
"config",
"or",
"{",
"}",
"app",
"=",
"CoolMagicApplication",
"(",
"config",
")",
"# static stuff",
"app",
"=",
"SharedDataMiddleware",
"(",
"app",
",",
"{",
"\"/public\"",
":",
"path"... | Factory function that creates a new `CoolmagicApplication`
object. Optional WSGI middlewares should be applied here. | [
"Factory",
"function",
"that",
"creates",
"a",
"new",
"CoolmagicApplication",
"object",
".",
"Optional",
"WSGI",
"middlewares",
"should",
"be",
"applied",
"here",
"."
] | python | train |
pybel/pybel | src/pybel/struct/graph.py | https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/graph.py#L786-L797 | def serialize(self, *, fmt: str = 'nodelink', file: Union[None, str, TextIO] = None, **kwargs):
"""Serialize the graph to an object or file if given.
For additional I/O, see the :mod:`pybel.io` module.
"""
if file is None:
return self._serialize_object(fmt=fmt, **kwargs)
elif isinstance(file, str):
with open(file, 'w') as file_obj:
self._serialize_file(fmt=fmt, file=file_obj, **kwargs)
else:
self._serialize_file(fmt=fmt, file=file, **kwargs) | [
"def",
"serialize",
"(",
"self",
",",
"*",
",",
"fmt",
":",
"str",
"=",
"'nodelink'",
",",
"file",
":",
"Union",
"[",
"None",
",",
"str",
",",
"TextIO",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"file",
"is",
"None",
":",
"retu... | Serialize the graph to an object or file if given.
For additional I/O, see the :mod:`pybel.io` module. | [
"Serialize",
"the",
"graph",
"to",
"an",
"object",
"or",
"file",
"if",
"given",
"."
] | python | train |
def normalize_dict(dictionary, **kwargs):
    """
    Recursively normalize every key of ``dictionary`` using the module's
    ``normalizer`` function; non-dict values are returned unchanged.
    """
    if not isinstance(dictionary, dict):
        return dictionary
    return {
        normalizer(key, **kwargs): normalize_dict(value, **kwargs)
        for key, value in dictionary.items()
    }
"def",
"normalize_dict",
"(",
"dictionary",
",",
"*",
"*",
"kwargs",
")",
":",
"result",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"dictionary",
",",
"dict",
")",
":",
"keys",
"=",
"list",
"(",
"dictionary",
".",
"keys",
"(",
")",
")",
"for",
"key",
... | Given an dict, normalize all of their keys using normalize function. | [
"Given",
"an",
"dict",
"normalize",
"all",
"of",
"their",
"keys",
"using",
"normalize",
"function",
"."
] | python | train |
oscarbranson/latools | latools/D_obj.py | https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/D_obj.py#L966-L1021 | def filter_correlation(self, x_analyte, y_analyte, window=15,
                           r_threshold=0.9, p_threshold=0.05, filt=True, recalc=False):
        """
        Calculate correlation filter.
        Parameters
        ----------
        x_analyte, y_analyte : str
            The names of the x and y analytes to correlate.
        window : int, None
            The rolling window used when calculating the correlation.
        r_threshold : float
            The correlation index above which to exclude data.
            Note: the absolute pearson R value is considered, so
            negative correlations below -`r_threshold` will also
            be excluded.
        p_threshold : float
            The significant level below which data are excluded.
        filt : bool
            Whether or not to apply existing filters to the data before
            calculating this filter.
        recalc : bool
            If True, the correlation is re-calculated, even if it is already present.
        Returns
        -------
        None
        """
        # make window odd
        if window % 2 != 1:
            window += 1
        # Snapshot the call arguments so they can be stored alongside the
        # filter for provenance; `self` is not a parameter of interest.
        params = locals()
        del(params['self'])
        setn = self.filt.maxset + 1
        # Key under which calc_correlation caches its (r, p) arrays.
        label = '{:}_{:}_{:.0f}'.format(x_analyte, y_analyte, window)
        self.calc_correlation(x_analyte, y_analyte, window, filt, recalc)
        r, p = self.correlations[label]
        # True where the correlation is strong AND significant; inverted so
        # that True means "keep this data point".
        cfilt = (abs(r) > r_threshold) & (p < p_threshold)
        cfilt = ~cfilt
        name = x_analyte + '_' + y_analyte + '_corr'
        self.filt.add(name=name,
                      filt=cfilt,
                      info=(x_analyte + ' vs. ' + y_analyte +
                            ' correlation filter.'),
                      params=params, setn=setn)
        # Activate the new filter for the y analyte only.
        self.filt.off(filt=name)
        self.filt.on(analyte=y_analyte, filt=name)
        return
"def",
"filter_correlation",
"(",
"self",
",",
"x_analyte",
",",
"y_analyte",
",",
"window",
"=",
"15",
",",
"r_threshold",
"=",
"0.9",
",",
"p_threshold",
"=",
"0.05",
",",
"filt",
"=",
"True",
",",
"recalc",
"=",
"False",
")",
":",
"# make window odd",
... | Calculate correlation filter.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
r_threshold : float
The correlation index above which to exclude data.
Note: the absolute pearson R value is considered, so
negative correlations below -`r_threshold` will also
be excluded.
p_threshold : float
The significant level below which data are excluded.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None | [
"Calculate",
"correlation",
"filter",
"."
] | python | test |
fuzeman/trakt.py | trakt/objects/list/custom.py | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/list/custom.py#L116-L129 | def remove(self, items, **kwargs):
"""Remove specified items from the list.
:param items: Items that should be removed from the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].remove(self.username, self.id, items, **kwargs) | [
"def",
"remove",
"(",
"self",
",",
"items",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_client",
"[",
"'users/*/lists/*'",
"]",
".",
"remove",
"(",
"self",
".",
"username",
",",
"self",
".",
"id",
",",
"items",
",",
"*",
"*",
"kwarg... | Remove specified items from the list.
:param items: Items that should be removed from the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict` | [
"Remove",
"specified",
"items",
"from",
"the",
"list",
"."
] | python | train |
NoneGG/aredis | aredis/pool.py | https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/pool.py#L422-L438 | def get_random_connection(self):
"""
Open new connection to random redis server.
"""
if self._available_connections:
node_name = random.choice(list(self._available_connections.keys()))
conn_list = self._available_connections[node_name]
# check it in case of empty connection list
if conn_list:
return conn_list.pop()
for node in self.nodes.random_startup_node_iter():
connection = self.get_connection_by_node(node)
if connection:
return connection
raise Exception("Cant reach a single startup node.") | [
"def",
"get_random_connection",
"(",
"self",
")",
":",
"if",
"self",
".",
"_available_connections",
":",
"node_name",
"=",
"random",
".",
"choice",
"(",
"list",
"(",
"self",
".",
"_available_connections",
".",
"keys",
"(",
")",
")",
")",
"conn_list",
"=",
... | Open new connection to random redis server. | [
"Open",
"new",
"connection",
"to",
"random",
"redis",
"server",
"."
] | python | train |
def most_frequent(self, k):
    """ Return a new vocabulary restricted to the most frequent `k` words.

    Args:
      k (integer): specifies the top k most frequent words to be returned.
    """
    top_words = self.words[:k]
    counts = {word: self.word_count[word] for word in top_words}
    return CountedVocabulary(word_count=counts)
"def",
"most_frequent",
"(",
"self",
",",
"k",
")",
":",
"word_count",
"=",
"{",
"w",
":",
"self",
".",
"word_count",
"[",
"w",
"]",
"for",
"w",
"in",
"self",
".",
"words",
"[",
":",
"k",
"]",
"}",
"return",
"CountedVocabulary",
"(",
"word_count",
... | Returns a vocabulary with the most frequent `k` words.
Args:
k (integer): specifies the top k most frequent words to be returned. | [
"Returns",
"a",
"vocabulary",
"with",
"the",
"most",
"frequent",
"k",
"words",
"."
] | python | train |
pantsbuild/pants | pants-plugins/src/python/internal_backend/sitegen/tasks/sitegen.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/pants-plugins/src/python/internal_backend/sitegen/tasks/sitegen.py#L200-L207 | def find_existing_anchors(soup):
"""Return existing ids (and names) from a soup."""
existing_anchors = set()
for tag in soup.find_all(True):
for attr in ['id', 'name']:
if tag.has_attr(attr):
existing_anchors.add(tag.get(attr))
return existing_anchors | [
"def",
"find_existing_anchors",
"(",
"soup",
")",
":",
"existing_anchors",
"=",
"set",
"(",
")",
"for",
"tag",
"in",
"soup",
".",
"find_all",
"(",
"True",
")",
":",
"for",
"attr",
"in",
"[",
"'id'",
",",
"'name'",
"]",
":",
"if",
"tag",
".",
"has_att... | Return existing ids (and names) from a soup. | [
"Return",
"existing",
"ids",
"(",
"and",
"names",
")",
"from",
"a",
"soup",
"."
] | python | train |
d0c-s4vage/pfp | pfp/interp.py | https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L2041-L2058 | def _handle_if(self, node, scope, ctxt, stream):
        """Handle If (and ternary) AST nodes.

        :node: the If/ternary AST node to evaluate
        :scope: the current scope, passed through to ``_handle_node``
        :ctxt: the current context, passed through to ``_handle_node``
        :stream: the input stream, passed through to ``_handle_node``
        :returns: the result of evaluating the taken branch, or ``None``
            when the condition is false and there is no else branch
        """
        self._dlog("handling if/ternary_op")
        # Evaluate the condition first; its truthiness selects the branch.
        cond = self._handle_node(node.cond, scope, ctxt, stream)
        if cond:
            # there should always be an iftrue
            return self._handle_node(node.iftrue, scope, ctxt, stream)
        else:
            # An else branch is optional; without one this implicitly
            # returns None.
            if node.iffalse is not None:
                return self._handle_node(node.iffalse, scope, ctxt, stream)
"def",
"_handle_if",
"(",
"self",
",",
"node",
",",
"scope",
",",
"ctxt",
",",
"stream",
")",
":",
"self",
".",
"_dlog",
"(",
"\"handling if/ternary_op\"",
")",
"cond",
"=",
"self",
".",
"_handle_node",
"(",
"node",
".",
"cond",
",",
"scope",
",",
"ctx... | Handle If nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO | [
"Handle",
"If",
"nodes"
] | python | train |
NYUCCL/psiTurk | psiturk/psiturk_shell.py | https://github.com/NYUCCL/psiTurk/blob/7170b992a0b5f56c165929cf87b3d3a1f3336c36/psiturk/psiturk_shell.py#L758-L768 | def do_quit(self, _):
        '''Override do_quit for network clean up.

        Returns True to tell the cmd loop to exit, False to keep the
        shell running (user declined the shutdown).
        '''
        # If the experiment server is (or might be) running, confirm with
        # the user before shutting it down; quitting kills the server.
        if (self.server.is_server_running() == 'yes' or
                self.server.is_server_running() == 'maybe'):
            # raw_input: this module targets Python 2.
            user_input = raw_input("Quitting shell will shut down experiment "
                                   "server. Really quit? y or n: ")
            if user_input == 'y':
                self.server_off()
            else:
                return False
        return True
"def",
"do_quit",
"(",
"self",
",",
"_",
")",
":",
"if",
"(",
"self",
".",
"server",
".",
"is_server_running",
"(",
")",
"==",
"'yes'",
"or",
"self",
".",
"server",
".",
"is_server_running",
"(",
")",
"==",
"'maybe'",
")",
":",
"user_input",
"=",
"ra... | Override do_quit for network clean up. | [
"Override",
"do_quit",
"for",
"network",
"clean",
"up",
"."
] | python | train |
def ensure_dir(dir_path):
    """
    If DIR_PATH does not exist, makes it. Failing that, raises Exception.
    Returns True if dir already existed; False if it had to be made.
    """
    existed = dir_exists(dir_path)
    if not existed:
        try:
            os.makedirs(dir_path)
        except (Exception, RuntimeError) as e:
            # Wrap any failure with the path for easier diagnosis.
            raise Exception("Unable to create directory %s. Cause %s" %
                            (dir_path, e))
    return existed
"def",
"ensure_dir",
"(",
"dir_path",
")",
":",
"exists",
"=",
"dir_exists",
"(",
"dir_path",
")",
"if",
"not",
"exists",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"dir_path",
")",
"except",
"(",
"Exception",
",",
"RuntimeError",
")",
",",
"e",
":",... | If DIR_PATH does not exist, makes it. Failing that, raises Exception.
Returns True if dir already existed; False if it had to be made. | [
"If",
"DIR_PATH",
"does",
"not",
"exist",
"makes",
"it",
".",
"Failing",
"that",
"raises",
"Exception",
".",
"Returns",
"True",
"if",
"dir",
"already",
"existed",
";",
"False",
"if",
"it",
"had",
"to",
"be",
"made",
"."
] | python | train |
user-cont/conu | conu/utils/filesystem.py | https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/utils/filesystem.py#L202-L214 | def _add_facl_rules(self):
        """
        Apply ACL rules on the directory using setfacl program. Raises CommandDoesNotExistException
        if the command is not present on the system.

        :return: None
        """
        # Fail fast (CommandDoesNotExistException) when setfacl is missing.
        setfacl_command_exists()
        # we are not using pylibacl b/c it's only for python 2
        if self.facl_rules:
            logger.debug("adding ACLs %s to %s", self.facl_rules, self.path)
            # setfacl takes multiple rules as one comma-separated -m value.
            r = ",".join(self.facl_rules)
            run_cmd(["setfacl", "-m", r, self.path])
"def",
"_add_facl_rules",
"(",
"self",
")",
":",
"setfacl_command_exists",
"(",
")",
"# we are not using pylibacl b/c it's only for python 2",
"if",
"self",
".",
"facl_rules",
":",
"logger",
".",
"debug",
"(",
"\"adding ACLs %s to %s\"",
",",
"self",
".",
"facl_rules",
... | Apply ACL rules on the directory using setfacl program. Raises CommandDoesNotExistException
if the command is not present on the system.
:return: None | [
"Apply",
"ACL",
"rules",
"on",
"the",
"directory",
"using",
"setfacl",
"program",
".",
"Raises",
"CommandDoesNotExistException",
"if",
"the",
"command",
"is",
"not",
"present",
"on",
"the",
"system",
"."
] | python | train |
def get_clone(rec):
    """
    >>> get_clone("Medicago truncatula chromosome 2 clone mth2-48e18")
    ('2', 'mth2-48e18')
    """
    # NOTE(review): the doctest above passes a plain string, but the code
    # reads ``rec.description`` -- presumably ``rec`` is a SeqRecord, so
    # the example as written would fail; confirm against callers.
    description = rec.description
    chr_match = re.search(chr_pat, description)
    clone_match = re.search(clone_pat, description)
    return (
        chr_match.group(1) if chr_match else "",
        clone_match.group(1) if clone_match else "",
    )
"def",
"get_clone",
"(",
"rec",
")",
":",
"s",
"=",
"rec",
".",
"description",
"chr",
"=",
"re",
".",
"search",
"(",
"chr_pat",
",",
"s",
")",
"clone",
"=",
"re",
".",
"search",
"(",
"clone_pat",
",",
"s",
")",
"chr",
"=",
"chr",
".",
"group",
... | >>> get_clone("Medicago truncatula chromosome 2 clone mth2-48e18")
('2', 'mth2-48e18') | [
">>>",
"get_clone",
"(",
"Medicago",
"truncatula",
"chromosome",
"2",
"clone",
"mth2",
"-",
"48e18",
")",
"(",
"2",
"mth2",
"-",
"48e18",
")"
] | python | train |
openstack/networking-cisco | networking_cisco/ml2_drivers/nexus/nexus_db_v2.py | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/nexus_db_v2.py#L496-L525 | def add_host_mapping(host_id, nexus_ip, interface, ch_grp, is_static):
    """Add Host to interface mapping entry into mapping data base.

    :param host_id: is the name of the host to add
    :param interface: is the interface for this host
    :param nexus_ip: is the ip addr of the nexus switch for this interface
    :param ch_grp: is the port channel this interface belongs to
    :param is_static: whether this is from conf file or learned from baremetal.
    """
    LOG.debug("add_nexusport_binding() called")
    session = bc.get_writer_session()
    mapping = nexus_models_v2.NexusHostMapping(host_id=host_id,
                                               if_id=interface,
                                               switch_ip=nexus_ip,
                                               ch_grp=ch_grp,
                                               is_static=is_static)
    try:
        session.add(mapping)
        session.flush()
    except db_exc.DBDuplicateEntry:
        # Duplicate rows from the static config file are expected on
        # restart and are tolerated (reraise suppressed); duplicates of
        # dynamically learned baremetal entries re-raise.
        with excutils.save_and_reraise_exception() as ctxt:
            if is_static:
                ctxt.reraise = False
                LOG.debug("Duplicate static entry encountered "
                          "host=%(host)s, if=%(if)s, ip=%(ip)s",
                          {'host': host_id, 'if': interface,
                           'ip': nexus_ip})
    return mapping
"def",
"add_host_mapping",
"(",
"host_id",
",",
"nexus_ip",
",",
"interface",
",",
"ch_grp",
",",
"is_static",
")",
":",
"LOG",
".",
"debug",
"(",
"\"add_nexusport_binding() called\"",
")",
"session",
"=",
"bc",
".",
"get_writer_session",
"(",
")",
"mapping",
... | Add Host to interface mapping entry into mapping data base.
:param host_id: is the name of the host to add
:param interface: is the interface for this host
:param nexus_ip: is the ip addr of the nexus switch for this interface
:param ch_grp: is the port channel this interface belos
:param is_static: whether this is from conf file or learned from baremetal. | [
"Add",
"Host",
"to",
"interface",
"mapping",
"entry",
"into",
"mapping",
"data",
"base",
"."
] | python | train |
def _run_cli_process(cmd, shell=True):
    """! Runs command as a process and return stdout, stderr and ret code
    @param cmd Command to execute
    @param shell Whether to run the command through the shell
    @return Tuple of (stdout, stderr, returncode)
    """
    from subprocess import Popen, PIPE

    process = Popen(cmd, shell=shell, stdout=PIPE, stderr=PIPE)
    out, err = process.communicate()
    return out, err, process.returncode
"def",
"_run_cli_process",
"(",
"cmd",
",",
"shell",
"=",
"True",
")",
":",
"from",
"subprocess",
"import",
"Popen",
",",
"PIPE",
"p",
"=",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"shell",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
... | ! Runs command as a process and return stdout, stderr and ret code
@param cmd Command to execute
@return Tuple of (stdout, stderr, returncode) | [
"!",
"Runs",
"command",
"as",
"a",
"process",
"and",
"return",
"stdout",
"stderr",
"and",
"ret",
"code"
] | python | train |
rm-hull/luma.core | luma/core/virtual.py | https://github.com/rm-hull/luma.core/blob/034b628fb304a01e77732a299c0b42e94d6443db/luma/core/virtual.py#L440-L446 | def savepoint(self):
"""
Copies the last displayed image.
"""
if self._last_image:
self._savepoints.append(self._last_image)
self._last_image = None | [
"def",
"savepoint",
"(",
"self",
")",
":",
"if",
"self",
".",
"_last_image",
":",
"self",
".",
"_savepoints",
".",
"append",
"(",
"self",
".",
"_last_image",
")",
"self",
".",
"_last_image",
"=",
"None"
] | Copies the last displayed image. | [
"Copies",
"the",
"last",
"displayed",
"image",
"."
] | python | train |
saltstack/salt | salt/modules/postgres.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/postgres.py#L610-L651 | def db_alter(name, user=None, host=None, port=None, maintenance_db=None,
password=None, tablespace=None, owner=None, owner_recurse=False,
runas=None):
'''
Change tablespace or/and owner of database.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_alter dbname owner=otheruser
'''
if not any((tablespace, owner)):
return True # Nothing todo?
if owner and owner_recurse:
ret = owner_to(name, owner,
user=user,
host=host,
port=port,
password=password,
runas=runas)
else:
queries = []
if owner:
queries.append('ALTER DATABASE "{0}" OWNER TO "{1}"'.format(
name, owner
))
if tablespace:
queries.append('ALTER DATABASE "{0}" SET TABLESPACE "{1}"'.format(
name, tablespace
))
for query in queries:
ret = _psql_prepare_and_run(['-c', query],
user=user, host=host, port=port,
maintenance_db=maintenance_db,
password=password, runas=runas)
if ret['retcode'] != 0:
return False
return True | [
"def",
"db_alter",
"(",
"name",
",",
"user",
"=",
"None",
",",
"host",
"=",
"None",
",",
"port",
"=",
"None",
",",
"maintenance_db",
"=",
"None",
",",
"password",
"=",
"None",
",",
"tablespace",
"=",
"None",
",",
"owner",
"=",
"None",
",",
"owner_rec... | Change tablespace or/and owner of database.
CLI Example:
.. code-block:: bash
salt '*' postgres.db_alter dbname owner=otheruser | [
"Change",
"tablespace",
"or",
"/",
"and",
"owner",
"of",
"database",
"."
] | python | train |
joyent/python-manta | tools/which.py | https://github.com/joyent/python-manta/blob/f68ef142bdbac058c981e3b28e18d77612f5b7c6/tools/which.py#L90-L106 | def _getRegisteredExecutable(exeName):
"""Windows allow application paths to be registered in the registry."""
registered = None
if sys.platform.startswith('win'):
if os.path.splitext(exeName)[1].lower() != '.exe':
exeName += '.exe'
import _winreg
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" +\
exeName
value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)
registered = (value, "from HKLM\\"+key)
except _winreg.error:
pass
if registered and not os.path.exists(registered[0]):
registered = None
return registered | [
"def",
"_getRegisteredExecutable",
"(",
"exeName",
")",
":",
"registered",
"=",
"None",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
":",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"exeName",
")",
"[",
"1",
"]",
".",
"lower",... | Windows allow application paths to be registered in the registry. | [
"Windows",
"allow",
"application",
"paths",
"to",
"be",
"registered",
"in",
"the",
"registry",
"."
] | python | train |
celiao/tmdbsimple | tmdbsimple/movies.py | https://github.com/celiao/tmdbsimple/blob/ff17893110c99771d6398a62c35d36dd9735f4b9/tmdbsimple/movies.py#L103-L118 | def external_ids(self, **kwargs):
"""
Get the external ids for a specific movie id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any movie method.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | [
"def",
"external_ids",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"self",
".",
"_get_id_path",
"(",
"'external_ids'",
")",
"response",
"=",
"self",
".",
"_GET",
"(",
"path",
",",
"kwargs",
")",
"self",
".",
"_set_attrs_to_values",
"(",
... | Get the external ids for a specific movie id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any movie method.
Returns:
A dict representation of the JSON returned from the API. | [
"Get",
"the",
"external",
"ids",
"for",
"a",
"specific",
"movie",
"id",
"."
] | python | test |
iblancasa/GitHubCity | src/githubcity/ghuser.py | https://github.com/iblancasa/GitHubCity/blob/c5299c6859dbefbd869e2ac6ff2faff2a39cf32f/src/githubcity/ghuser.py#L228-L251 | def __getBio(self, web):
"""Scrap the bio from a GitHub profile.
:param web: parsed web.
:type web: BeautifulSoup node.
"""
bio = web.find_all("div", {"class": "user-profile-bio"})
if bio:
try:
bio = bio[0].text
if bio and GitHubUser.isASCII(bio):
bioText = bio.replace("\n", "")
bioText = bioText.replace("\t", " ").replace("\"", "")
bioText = bioText.replace("\'", "").replace("\\", "")
self.bio = bioText
else:
self.bio = ""
except IndexError as error:
print("There was an error with the user " + self.name)
print(error)
except AttributeError as error:
print("There was an error with the user " + self.name)
print(error) | [
"def",
"__getBio",
"(",
"self",
",",
"web",
")",
":",
"bio",
"=",
"web",
".",
"find_all",
"(",
"\"div\"",
",",
"{",
"\"class\"",
":",
"\"user-profile-bio\"",
"}",
")",
"if",
"bio",
":",
"try",
":",
"bio",
"=",
"bio",
"[",
"0",
"]",
".",
"text",
"... | Scrap the bio from a GitHub profile.
:param web: parsed web.
:type web: BeautifulSoup node. | [
"Scrap",
"the",
"bio",
"from",
"a",
"GitHub",
"profile",
"."
] | python | train |
fusepy/fusepy | fusell.py | https://github.com/fusepy/fusepy/blob/5d997d6706cc0204e1b3ca679651485a7e7dda49/fusell.py#L789-L796 | def link(self, req, ino, newparent, newname):
"""Create a hard link
Valid replies:
reply_entry
reply_err
"""
self.reply_err(req, errno.EROFS) | [
"def",
"link",
"(",
"self",
",",
"req",
",",
"ino",
",",
"newparent",
",",
"newname",
")",
":",
"self",
".",
"reply_err",
"(",
"req",
",",
"errno",
".",
"EROFS",
")"
] | Create a hard link
Valid replies:
reply_entry
reply_err | [
"Create",
"a",
"hard",
"link"
] | python | train |
python-rope/rope | rope/refactor/restructure.py | https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/refactor/restructure.py#L210-L221 | def replace(code, pattern, goal):
"""used by other refactorings"""
finder = similarfinder.RawSimilarFinder(code)
matches = list(finder.get_matches(pattern))
ast = patchedast.get_patched_ast(code)
lines = codeanalyze.SourceLinesAdapter(code)
template = similarfinder.CodeTemplate(goal)
computer = _ChangeComputer(code, ast, lines, template, matches)
result = computer.get_changed()
if result is None:
return code
return result | [
"def",
"replace",
"(",
"code",
",",
"pattern",
",",
"goal",
")",
":",
"finder",
"=",
"similarfinder",
".",
"RawSimilarFinder",
"(",
"code",
")",
"matches",
"=",
"list",
"(",
"finder",
".",
"get_matches",
"(",
"pattern",
")",
")",
"ast",
"=",
"patchedast"... | used by other refactorings | [
"used",
"by",
"other",
"refactorings"
] | python | train |
wmayner/pyphi | pyphi/actual.py | https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/actual.py#L272-L277 | def purview_indices(self, direction):
"""The indices of nodes in the purview system."""
return {
Direction.CAUSE: self.cause_indices,
Direction.EFFECT: self.effect_indices
}[direction] | [
"def",
"purview_indices",
"(",
"self",
",",
"direction",
")",
":",
"return",
"{",
"Direction",
".",
"CAUSE",
":",
"self",
".",
"cause_indices",
",",
"Direction",
".",
"EFFECT",
":",
"self",
".",
"effect_indices",
"}",
"[",
"direction",
"]"
] | The indices of nodes in the purview system. | [
"The",
"indices",
"of",
"nodes",
"in",
"the",
"purview",
"system",
"."
] | python | train |
PyconUK/ConferenceScheduler | src/conference_scheduler/scheduler.py | https://github.com/PyconUK/ConferenceScheduler/blob/fb139f0ef2eab5ac8f4919aa4994d94d4e040030/src/conference_scheduler/scheduler.py#L161-L197 | def array(events, slots, objective_function=None, solver=None, **kwargs):
"""Compute a schedule in array form
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
objective_function : callable
from lp_problem.objective_functions
Returns
-------
np.array
An E by S array (X) where E is the number of events and S the
number of slots. Xij is 1 if event i is scheduled in slot j and
zero otherwise
Example
-------
For 3 events, 7 slots and a solution where
* event 0 is scheduled in slot 1
* event 1 is scheduled in slot 4
* event 2 is scheduled in slot 5
the resulting array would be::
[[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0]]
"""
return conv.solution_to_array(
solution(events, slots, objective_function, solver=solver, **kwargs),
events, slots
) | [
"def",
"array",
"(",
"events",
",",
"slots",
",",
"objective_function",
"=",
"None",
",",
"solver",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"conv",
".",
"solution_to_array",
"(",
"solution",
"(",
"events",
",",
"slots",
",",
"objective_... | Compute a schedule in array form
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
objective_function : callable
from lp_problem.objective_functions
Returns
-------
np.array
An E by S array (X) where E is the number of events and S the
number of slots. Xij is 1 if event i is scheduled in slot j and
zero otherwise
Example
-------
For 3 events, 7 slots and a solution where
* event 0 is scheduled in slot 1
* event 1 is scheduled in slot 4
* event 2 is scheduled in slot 5
the resulting array would be::
[[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0]] | [
"Compute",
"a",
"schedule",
"in",
"array",
"form"
] | python | train |
waqasbhatti/astrobase | astrobase/plotbase.py | https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/plotbase.py#L869-L1010 | def skyview_stamp(ra, decl,
survey='DSS2 Red',
scaling='Linear',
flip=True,
convolvewith=None,
forcefetch=False,
cachedir='~/.astrobase/stamp-cache',
timeout=10.0,
retry_failed=False,
savewcsheader=True,
verbose=False):
'''This downloads a DSS FITS stamp centered on the coordinates specified.
This wraps the function :py:func:`astrobase.services.skyview.get_stamp`,
which downloads Digitized Sky Survey stamps in FITS format from the NASA
SkyView service:
https://skyview.gsfc.nasa.gov/current/cgi/query.pl
Also adds some useful operations on top of the FITS file returned.
Parameters
----------
ra,decl : float
The center coordinates for the stamp in decimal degrees.
survey : str
The survey name to get the stamp from. This is one of the
values in the 'SkyView Surveys' option boxes on the SkyView
webpage. Currently, we've only tested using 'DSS2 Red' as the value for
this kwarg, but the other ones should work in principle.
scaling : str
This is the pixel value scaling function to use.
flip : bool
Will flip the downloaded image top to bottom. This should usually be
True because matplotlib and FITS have different image coord origin
conventions. Alternatively, set this to False and use the
`origin='lower'` in any call to `matplotlib.pyplot.imshow` when plotting
this image.
convolvewith : astropy.convolution Kernel object or None
If `convolvewith` is an astropy.convolution Kernel object from:
http://docs.astropy.org/en/stable/convolution/kernels.html
then, this function will return the stamp convolved with that
kernel. This can be useful to see effects of wide-field telescopes (like
the HATNet and HATSouth lenses) degrading the nominal 1 arcsec/px of
DSS, causing blending of targets and any variability.
forcefetch : bool
If True, will disregard any existing cached copies of the stamp already
downloaded corresponding to the requested center coordinates and
redownload the FITS from the SkyView service.
cachedir : str
This is the path to the astrobase cache directory. All downloaded FITS
stamps are stored here as .fits.gz files so we can immediately respond
with the cached copy when a request is made for a coordinate center
that's already been downloaded.
timeout : float
Sets the timeout in seconds to wait for a response from the NASA SkyView
service.
retry_failed : bool
If the initial request to SkyView fails, and this is True, will retry
until it succeeds.
savewcsheader : bool
If this is True, also returns the WCS header of the downloaded FITS
stamp in addition to the FITS image itself. Useful for projecting object
coordinates onto image xy coordinates for visualization.
verbose : bool
If True, indicates progress.
Returns
-------
tuple or array or None
This returns based on the value of `savewcsheader`:
- If `savewcsheader=True`, returns a tuple:
(FITS stamp image as a numpy array, FITS header)
- If `savewcsheader=False`, returns only the FITS stamp image as numpy
array.
- If the stamp retrieval fails, returns None.
'''
stampdict = get_stamp(ra, decl,
survey=survey,
scaling=scaling,
forcefetch=forcefetch,
cachedir=cachedir,
timeout=timeout,
retry_failed=retry_failed,
verbose=verbose)
#
# DONE WITH FETCHING STUFF
#
if stampdict:
# open the frame
stampfits = pyfits.open(stampdict['fitsfile'])
header = stampfits[0].header
frame = stampfits[0].data
stampfits.close()
# finally, we can process the frame
if flip:
frame = np.flipud(frame)
if verbose:
LOGINFO('fetched stamp successfully for (%.3f, %.3f)'
% (ra, decl))
if convolvewith:
convolved = aconv.convolve(frame, convolvewith)
if savewcsheader:
return convolved, header
else:
return convolved
else:
if savewcsheader:
return frame, header
else:
return frame
else:
LOGERROR('could not fetch the requested stamp for '
'coords: (%.3f, %.3f) from survey: %s and scaling: %s'
% (ra, decl, survey, scaling))
return None | [
"def",
"skyview_stamp",
"(",
"ra",
",",
"decl",
",",
"survey",
"=",
"'DSS2 Red'",
",",
"scaling",
"=",
"'Linear'",
",",
"flip",
"=",
"True",
",",
"convolvewith",
"=",
"None",
",",
"forcefetch",
"=",
"False",
",",
"cachedir",
"=",
"'~/.astrobase/stamp-cache'"... | This downloads a DSS FITS stamp centered on the coordinates specified.
This wraps the function :py:func:`astrobase.services.skyview.get_stamp`,
which downloads Digitized Sky Survey stamps in FITS format from the NASA
SkyView service:
https://skyview.gsfc.nasa.gov/current/cgi/query.pl
Also adds some useful operations on top of the FITS file returned.
Parameters
----------
ra,decl : float
The center coordinates for the stamp in decimal degrees.
survey : str
The survey name to get the stamp from. This is one of the
values in the 'SkyView Surveys' option boxes on the SkyView
webpage. Currently, we've only tested using 'DSS2 Red' as the value for
this kwarg, but the other ones should work in principle.
scaling : str
This is the pixel value scaling function to use.
flip : bool
Will flip the downloaded image top to bottom. This should usually be
True because matplotlib and FITS have different image coord origin
conventions. Alternatively, set this to False and use the
`origin='lower'` in any call to `matplotlib.pyplot.imshow` when plotting
this image.
convolvewith : astropy.convolution Kernel object or None
If `convolvewith` is an astropy.convolution Kernel object from:
http://docs.astropy.org/en/stable/convolution/kernels.html
then, this function will return the stamp convolved with that
kernel. This can be useful to see effects of wide-field telescopes (like
the HATNet and HATSouth lenses) degrading the nominal 1 arcsec/px of
DSS, causing blending of targets and any variability.
forcefetch : bool
If True, will disregard any existing cached copies of the stamp already
downloaded corresponding to the requested center coordinates and
redownload the FITS from the SkyView service.
cachedir : str
This is the path to the astrobase cache directory. All downloaded FITS
stamps are stored here as .fits.gz files so we can immediately respond
with the cached copy when a request is made for a coordinate center
that's already been downloaded.
timeout : float
Sets the timeout in seconds to wait for a response from the NASA SkyView
service.
retry_failed : bool
If the initial request to SkyView fails, and this is True, will retry
until it succeeds.
savewcsheader : bool
If this is True, also returns the WCS header of the downloaded FITS
stamp in addition to the FITS image itself. Useful for projecting object
coordinates onto image xy coordinates for visualization.
verbose : bool
If True, indicates progress.
Returns
-------
tuple or array or None
This returns based on the value of `savewcsheader`:
- If `savewcsheader=True`, returns a tuple:
(FITS stamp image as a numpy array, FITS header)
- If `savewcsheader=False`, returns only the FITS stamp image as numpy
array.
- If the stamp retrieval fails, returns None. | [
"This",
"downloads",
"a",
"DSS",
"FITS",
"stamp",
"centered",
"on",
"the",
"coordinates",
"specified",
"."
] | python | valid |
SecurityInnovation/PGPy | pgpy/types.py | https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/types.py#L602-L616 | def bad_signatures(self): # pragma: no cover
"""
A generator yielding namedtuples of all signatures that were not verified
in the operation that returned this instance. The namedtuple has the following attributes:
``sigsubj.verified`` - ``bool`` of whether the signature verified successfully or not.
``sigsubj.by`` - the :py:obj:`~pgpy.PGPKey` that was used in this verify operation.
``sigsubj.signature`` - the :py:obj:`~pgpy.PGPSignature` that was verified.
``sigsubj.subject`` - the subject that was verified using the signature.
"""
for s in [ i for i in self._subjects if not i.verified ]:
yield s | [
"def",
"bad_signatures",
"(",
"self",
")",
":",
"# pragma: no cover",
"for",
"s",
"in",
"[",
"i",
"for",
"i",
"in",
"self",
".",
"_subjects",
"if",
"not",
"i",
".",
"verified",
"]",
":",
"yield",
"s"
] | A generator yielding namedtuples of all signatures that were not verified
in the operation that returned this instance. The namedtuple has the following attributes:
``sigsubj.verified`` - ``bool`` of whether the signature verified successfully or not.
``sigsubj.by`` - the :py:obj:`~pgpy.PGPKey` that was used in this verify operation.
``sigsubj.signature`` - the :py:obj:`~pgpy.PGPSignature` that was verified.
``sigsubj.subject`` - the subject that was verified using the signature. | [
"A",
"generator",
"yielding",
"namedtuples",
"of",
"all",
"signatures",
"that",
"were",
"not",
"verified",
"in",
"the",
"operation",
"that",
"returned",
"this",
"instance",
".",
"The",
"namedtuple",
"has",
"the",
"following",
"attributes",
":"
] | python | train |
saltstack/salt | salt/output/highstate.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/output/highstate.py#L186-L511 | def _format_host(host, data, indent_level=1):
'''
Main highstate formatter. can be called recursively if a nested highstate
contains other highstates (ie in an orchestration)
'''
host = salt.utils.data.decode(host)
colors = salt.utils.color.get_colors(
__opts__.get('color'),
__opts__.get('color_theme'))
tabular = __opts__.get('state_tabular', False)
rcounts = {}
rdurations = []
hcolor = colors['GREEN']
hstrs = []
nchanges = 0
strip_colors = __opts__.get('strip_colors', True)
if isinstance(data, int) or isinstance(data, six.string_types):
# Data in this format is from saltmod.function,
# so it is always a 'change'
nchanges = 1
hstrs.append(('{0} {1}{2[ENDC]}'
.format(hcolor, data, colors)))
hcolor = colors['CYAN'] # Print the minion name in cyan
if isinstance(data, list):
# Errors have been detected, list them in RED!
hcolor = colors['LIGHT_RED']
hstrs.append((' {0}Data failed to compile:{1[ENDC]}'
.format(hcolor, colors)))
for err in data:
if strip_colors:
err = salt.output.strip_esc_sequence(
salt.utils.data.decode(err)
)
hstrs.append(('{0}----------\n {1}{2[ENDC]}'
.format(hcolor, err, colors)))
if isinstance(data, dict):
# Verify that the needed data is present
data_tmp = {}
for tname, info in six.iteritems(data):
if isinstance(info, dict) and tname is not 'changes' and info and '__run_num__' not in info:
err = ('The State execution failed to record the order '
'in which all states were executed. The state '
'return missing data is:')
hstrs.insert(0, pprint.pformat(info))
hstrs.insert(0, err)
if isinstance(info, dict) and 'result' in info:
data_tmp[tname] = info
data = data_tmp
# Everything rendered as it should display the output
for tname in sorted(
data,
key=lambda k: data[k].get('__run_num__', 0)):
ret = data[tname]
# Increment result counts
rcounts.setdefault(ret['result'], 0)
rcounts[ret['result']] += 1
rduration = ret.get('duration', 0)
try:
rdurations.append(float(rduration))
except ValueError:
rduration, _, _ = rduration.partition(' ms')
try:
rdurations.append(float(rduration))
except ValueError:
log.error('Cannot parse a float from duration %s', ret.get('duration', 0))
tcolor = colors['GREEN']
if ret.get('name') in ['state.orch', 'state.orchestrate', 'state.sls']:
nested = output(ret['changes']['return'], indent_level=indent_level+1)
ctext = re.sub('^', ' ' * 14 * indent_level, '\n'+nested, flags=re.MULTILINE)
schanged = True
nchanges += 1
else:
schanged, ctext = _format_changes(ret['changes'])
nchanges += 1 if schanged else 0
# Skip this state if it was successful & diff output was requested
if __opts__.get('state_output_diff', False) and \
ret['result'] and not schanged:
continue
# Skip this state if state_verbose is False, the result is True and
# there were no changes made
if not __opts__.get('state_verbose', False) and \
ret['result'] and not schanged:
continue
if schanged:
tcolor = colors['CYAN']
if ret['result'] is False:
hcolor = colors['RED']
tcolor = colors['RED']
if ret['result'] is None:
hcolor = colors['LIGHT_YELLOW']
tcolor = colors['LIGHT_YELLOW']
state_output = __opts__.get('state_output', 'full').lower()
comps = tname.split('_|-')
if state_output.endswith('_id'):
# Swap in the ID for the name. Refs #35137
comps[2] = comps[1]
if state_output.startswith('filter'):
# By default, full data is shown for all types. However, return
# data may be excluded by setting state_output_exclude to a
# comma-separated list of True, False or None, or including the
# same list with the exclude option on the command line. For
# now, this option must include a comma. For example:
# exclude=True,
# The same functionality is also available for making return
# data terse, instead of excluding it.
cliargs = __opts__.get('arg', [])
clikwargs = {}
for item in cliargs:
if isinstance(item, dict) and '__kwarg__' in item:
clikwargs = item.copy()
exclude = clikwargs.get(
'exclude', __opts__.get('state_output_exclude', [])
)
if isinstance(exclude, six.string_types):
exclude = six.text_type(exclude).split(',')
terse = clikwargs.get(
'terse', __opts__.get('state_output_terse', [])
)
if isinstance(terse, six.string_types):
terse = six.text_type(terse).split(',')
if six.text_type(ret['result']) in terse:
msg = _format_terse(tcolor, comps, ret, colors, tabular)
hstrs.append(msg)
continue
if six.text_type(ret['result']) in exclude:
continue
elif any((
state_output.startswith('terse'),
state_output.startswith('mixed') and ret['result'] is not False, # only non-error'd
state_output.startswith('changes') and ret['result'] and not schanged # non-error'd non-changed
)):
# Print this chunk in a terse way and continue in the loop
msg = _format_terse(tcolor, comps, ret, colors, tabular)
hstrs.append(msg)
continue
state_lines = [
'{tcolor}----------{colors[ENDC]}',
' {tcolor} ID: {comps[1]}{colors[ENDC]}',
' {tcolor}Function: {comps[0]}.{comps[3]}{colors[ENDC]}',
' {tcolor} Result: {ret[result]!s}{colors[ENDC]}',
' {tcolor} Comment: {comment}{colors[ENDC]}',
]
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
state_lines.extend([
' {tcolor} Started: {ret[start_time]!s}{colors[ENDC]}',
' {tcolor}Duration: {ret[duration]!s}{colors[ENDC]}',
])
# This isn't the prettiest way of doing this, but it's readable.
if comps[1] != comps[2]:
state_lines.insert(
3, ' {tcolor} Name: {comps[2]}{colors[ENDC]}')
# be sure that ret['comment'] is utf-8 friendly
try:
if not isinstance(ret['comment'], six.text_type):
ret['comment'] = six.text_type(ret['comment'])
except UnicodeDecodeError:
# If we got here, we're on Python 2 and ret['comment'] somehow
# contained a str type with unicode content.
ret['comment'] = salt.utils.stringutils.to_unicode(ret['comment'])
try:
comment = salt.utils.data.decode(ret['comment'])
comment = comment.strip().replace(
'\n',
'\n' + ' ' * 14)
except AttributeError: # Assume comment is a list
try:
comment = ret['comment'].join(' ').replace(
'\n',
'\n' + ' ' * 13)
except AttributeError:
# Comment isn't a list either, just convert to string
comment = six.text_type(ret['comment'])
comment = comment.strip().replace(
'\n',
'\n' + ' ' * 14)
# If there is a data attribute, append it to the comment
if 'data' in ret:
if isinstance(ret['data'], list):
for item in ret['data']:
comment = '{0} {1}'.format(comment, item)
elif isinstance(ret['data'], dict):
for key, value in ret['data'].items():
comment = '{0}\n\t\t{1}: {2}'.format(comment, key, value)
else:
comment = '{0} {1}'.format(comment, ret['data'])
for detail in ['start_time', 'duration']:
ret.setdefault(detail, '')
if ret['duration'] != '':
ret['duration'] = '{0} ms'.format(ret['duration'])
svars = {
'tcolor': tcolor,
'comps': comps,
'ret': ret,
'comment': salt.utils.data.decode(comment),
# This nukes any trailing \n and indents the others.
'colors': colors
}
hstrs.extend([sline.format(**svars) for sline in state_lines])
changes = ' Changes: ' + ctext
hstrs.append(('{0}{1}{2[ENDC]}'
.format(tcolor, changes, colors)))
if 'warnings' in ret:
rcounts.setdefault('warnings', 0)
rcounts['warnings'] += 1
wrapper = textwrap.TextWrapper(
width=80,
initial_indent=' ' * 14,
subsequent_indent=' ' * 14
)
hstrs.append(
' {colors[LIGHT_RED]} Warnings: {0}{colors[ENDC]}'.format(
wrapper.fill('\n'.join(ret['warnings'])).lstrip(),
colors=colors
)
)
# Append result counts to end of output
colorfmt = '{0}{1}{2[ENDC]}'
rlabel = {True: 'Succeeded', False: 'Failed', None: 'Not Run', 'warnings': 'Warnings'}
count_max_len = max([len(six.text_type(x)) for x in six.itervalues(rcounts)] or [0])
label_max_len = max([len(x) for x in six.itervalues(rlabel)] or [0])
line_max_len = label_max_len + count_max_len + 2 # +2 for ': '
hstrs.append(
colorfmt.format(
colors['CYAN'],
'\nSummary for {0}\n{1}'.format(host, '-' * line_max_len),
colors
)
)
def _counts(label, count):
return '{0}: {1:>{2}}'.format(
label,
count,
line_max_len - (len(label) + 2)
)
# Successful states
changestats = []
if None in rcounts and rcounts.get(None, 0) > 0:
# test=True states
changestats.append(
colorfmt.format(
colors['LIGHT_YELLOW'],
'unchanged={0}'.format(rcounts.get(None, 0)),
colors
)
)
if nchanges > 0:
changestats.append(
colorfmt.format(
colors['GREEN'],
'changed={0}'.format(nchanges),
colors
)
)
if changestats:
changestats = ' ({0})'.format(', '.join(changestats))
else:
changestats = ''
hstrs.append(
colorfmt.format(
colors['GREEN'],
_counts(
rlabel[True],
rcounts.get(True, 0) + rcounts.get(None, 0)
),
colors
) + changestats
)
# Failed states
num_failed = rcounts.get(False, 0)
hstrs.append(
colorfmt.format(
colors['RED'] if num_failed else colors['CYAN'],
_counts(rlabel[False], num_failed),
colors
)
)
num_warnings = rcounts.get('warnings', 0)
if num_warnings:
hstrs.append(
colorfmt.format(
colors['LIGHT_RED'],
_counts(rlabel['warnings'], num_warnings),
colors
)
)
totals = '{0}\nTotal states run: {1:>{2}}'.format('-' * line_max_len,
sum(six.itervalues(rcounts)) - rcounts.get('warnings', 0),
line_max_len - 7)
hstrs.append(colorfmt.format(colors['CYAN'], totals, colors))
if __opts__.get('state_output_profile', True):
sum_duration = sum(rdurations)
duration_unit = 'ms'
# convert to seconds if duration is 1000ms or more
if sum_duration > 999:
sum_duration /= 1000
duration_unit = 's'
total_duration = 'Total run time: {0} {1}'.format(
'{0:.3f}'.format(sum_duration).rjust(line_max_len - 5),
duration_unit)
hstrs.append(colorfmt.format(colors['CYAN'], total_duration, colors))
if strip_colors:
host = salt.output.strip_esc_sequence(host)
hstrs.insert(0, ('{0}{1}:{2[ENDC]}'.format(hcolor, host, colors)))
return '\n'.join(hstrs), nchanges > 0 | [
"def",
"_format_host",
"(",
"host",
",",
"data",
",",
"indent_level",
"=",
"1",
")",
":",
"host",
"=",
"salt",
".",
"utils",
".",
"data",
".",
"decode",
"(",
"host",
")",
"colors",
"=",
"salt",
".",
"utils",
".",
"color",
".",
"get_colors",
"(",
"_... | Main highstate formatter. can be called recursively if a nested highstate
contains other highstates (ie in an orchestration) | [
"Main",
"highstate",
"formatter",
".",
"can",
"be",
"called",
"recursively",
"if",
"a",
"nested",
"highstate",
"contains",
"other",
"highstates",
"(",
"ie",
"in",
"an",
"orchestration",
")"
] | python | train |
MartinHjelmare/leicacam | leicacam/cam.py | https://github.com/MartinHjelmare/leicacam/blob/1df37bccd34884737d3b5e169fae71dd2f21f1e2/leicacam/cam.py#L231-L261 | def wait_for(self, cmd, value=None, timeout=60):
"""Hang until command is received.
If value is supplied, it will hang until ``cmd:value`` is received.
Parameters
----------
cmd : string
Command to wait for in bytestring from microscope CAM interface. If
``value`` is falsey, value of received command does not matter.
value : string
Wait until ``cmd:value`` is received.
timeout : int
Minutes to wait for command. If timeout is reached, an empty
OrderedDict will be returned.
Returns
-------
collections.OrderedDict
Last received messsage or empty message if timeout is reached.
"""
wait = time() + timeout * 60
while True:
if time() > wait:
return OrderedDict()
msgs = self.receive()
msg = check_messages(msgs, cmd, value=value)
if msg:
return msg
sleep(self.delay) | [
"def",
"wait_for",
"(",
"self",
",",
"cmd",
",",
"value",
"=",
"None",
",",
"timeout",
"=",
"60",
")",
":",
"wait",
"=",
"time",
"(",
")",
"+",
"timeout",
"*",
"60",
"while",
"True",
":",
"if",
"time",
"(",
")",
">",
"wait",
":",
"return",
"Ord... | Hang until command is received.
If value is supplied, it will hang until ``cmd:value`` is received.
Parameters
----------
cmd : string
Command to wait for in bytestring from microscope CAM interface. If
``value`` is falsey, value of received command does not matter.
value : string
Wait until ``cmd:value`` is received.
timeout : int
Minutes to wait for command. If timeout is reached, an empty
OrderedDict will be returned.
Returns
-------
collections.OrderedDict
Last received messsage or empty message if timeout is reached. | [
"Hang",
"until",
"command",
"is",
"received",
"."
] | python | test |
pyslackers/slack-sansio | slack/events.py | https://github.com/pyslackers/slack-sansio/blob/068ddd6480c6d2f9bf14fa4db498c9fe1017f4ab/slack/events.py#L247-L276 | def register(
self,
pattern: str,
handler: Any,
flags: int = 0,
channel: str = "*",
subtype: Optional[str] = None,
) -> None:
"""
Register a new handler for a specific :class:`slack.events.Message`.
The routing is based on regex pattern matching the message text and the incoming slack channel.
Args:
pattern: Regex pattern matching the message text.
handler: Callback
flags: Regex flags.
channel: Slack channel ID. Use * for any.
subtype: Message subtype
"""
LOG.debug('Registering message endpoint "%s: %s"', pattern, handler)
match = re.compile(pattern, flags)
if subtype not in self._routes[channel]:
self._routes[channel][subtype] = dict()
if match in self._routes[channel][subtype]:
self._routes[channel][subtype][match].append(handler)
else:
self._routes[channel][subtype][match] = [handler] | [
"def",
"register",
"(",
"self",
",",
"pattern",
":",
"str",
",",
"handler",
":",
"Any",
",",
"flags",
":",
"int",
"=",
"0",
",",
"channel",
":",
"str",
"=",
"\"*\"",
",",
"subtype",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
")",
"->",
... | Register a new handler for a specific :class:`slack.events.Message`.
The routing is based on regex pattern matching the message text and the incoming slack channel.
Args:
pattern: Regex pattern matching the message text.
handler: Callback
flags: Regex flags.
channel: Slack channel ID. Use * for any.
subtype: Message subtype | [
"Register",
"a",
"new",
"handler",
"for",
"a",
"specific",
":",
"class",
":",
"slack",
".",
"events",
".",
"Message",
"."
] | python | train |
blockstack/blockstack-core | blockstack/blockstackd.py | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L783-L802 | def get_subdomain_DID_record(self, did):
"""
Given a DID for subdomain, get the subdomain record
Return {'record': ...} on success
Return {'error': ...} on error
"""
try:
did_info = parse_DID(did)
assert did_info['name_type'] == 'subdomain'
except Exception as e:
if BLOCKSTACK_DEBUG:
log.exception(e)
return {'error': 'Invalid DID', 'http_status': 400}
subrec = get_DID_subdomain(did, check_pending=True)
if subrec is None:
return {'error': 'Failed to load subdomain from {}'.format(did), 'http_status': 404}
return {'record': subrec.to_json()} | [
"def",
"get_subdomain_DID_record",
"(",
"self",
",",
"did",
")",
":",
"try",
":",
"did_info",
"=",
"parse_DID",
"(",
"did",
")",
"assert",
"did_info",
"[",
"'name_type'",
"]",
"==",
"'subdomain'",
"except",
"Exception",
"as",
"e",
":",
"if",
"BLOCKSTACK_DEBU... | Given a DID for subdomain, get the subdomain record
Return {'record': ...} on success
Return {'error': ...} on error | [
"Given",
"a",
"DID",
"for",
"subdomain",
"get",
"the",
"subdomain",
"record",
"Return",
"{",
"record",
":",
"...",
"}",
"on",
"success",
"Return",
"{",
"error",
":",
"...",
"}",
"on",
"error"
] | python | train |
MKLab-ITI/reveal-graph-embedding | reveal_graph_embedding/learning/holdout.py | https://github.com/MKLab-ITI/reveal-graph-embedding/blob/eda862687aa5a64b79c6b12de1b4dca6ce986dc8/reveal_graph_embedding/learning/holdout.py#L11-L77 | def get_folds_generator(node_label_matrix,
labelled_node_indices,
number_of_categories,
dataset_memory_folder,
percentage,
number_of_folds=10):
"""
Read or form and store the seed nodes for training and testing.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- labelled_node_indices: A NumPy array containing the labelled node indices.
- number_of_categories: The number of categories/classes in the learning.
- memory_path: The folder where the results are stored.
- percentage: The percentage of labelled samples that will be used for training.
Output: - folds: A generator containing train/test set folds.
"""
number_of_labeled_nodes = labelled_node_indices.size
training_set_size = int(np.ceil(percentage*number_of_labeled_nodes/100))
####################################################################################################################
# Read or generate folds
####################################################################################################################
fold_file_path = dataset_memory_folder + "/folds/" + str(percentage) + "_folds.txt"
train_list = list()
test_list = list()
if not os.path.exists(fold_file_path):
with open(fold_file_path, "w") as fp:
for trial in np.arange(number_of_folds):
train, test = valid_train_test(node_label_matrix[labelled_node_indices, :],
training_set_size,
number_of_categories,
trial)
train = labelled_node_indices[train]
test = labelled_node_indices[test]
# Write test nodes
row = [str(node) for node in test]
row = "\t".join(row) + "\n"
fp.write(row)
# Write train nodes
row = [str(node) for node in train]
row = "\t".join(row) + "\n"
fp.write(row)
train_list.append(train)
test_list.append(test)
else:
file_row_gen = get_file_row_generator(fold_file_path, "\t")
for trial in np.arange(number_of_folds):
# Read test nodes
test = next(file_row_gen)
test = [int(node) for node in test]
test = np.array(test)
# Read train nodes
train = next(file_row_gen)
train = [int(node) for node in train]
train = np.array(train)
train_list.append(train)
test_list.append(test)
folds = ((train, test) for train, test in zip(train_list, test_list))
return folds | [
"def",
"get_folds_generator",
"(",
"node_label_matrix",
",",
"labelled_node_indices",
",",
"number_of_categories",
",",
"dataset_memory_folder",
",",
"percentage",
",",
"number_of_folds",
"=",
"10",
")",
":",
"number_of_labeled_nodes",
"=",
"labelled_node_indices",
".",
"... | Read or form and store the seed nodes for training and testing.
Inputs: - node_label_matrix: The node-label ground truth in a SciPy sparse matrix format.
- labelled_node_indices: A NumPy array containing the labelled node indices.
- number_of_categories: The number of categories/classes in the learning.
- memory_path: The folder where the results are stored.
- percentage: The percentage of labelled samples that will be used for training.
Output: - folds: A generator containing train/test set folds. | [
"Read",
"or",
"form",
"and",
"store",
"the",
"seed",
"nodes",
"for",
"training",
"and",
"testing",
"."
] | python | train |
pyca/pyopenssl | leakcheck/crypto.py | https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/leakcheck/crypto.py#L120-L129 | def check_get_revoked(self):
"""
Create a CRL object with 100 Revoked objects, then call the
get_revoked method repeatedly.
"""
crl = CRL()
for i in xrange(100):
crl.add_revoked(Revoked())
for i in xrange(self.iterations):
crl.get_revoked() | [
"def",
"check_get_revoked",
"(",
"self",
")",
":",
"crl",
"=",
"CRL",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"100",
")",
":",
"crl",
".",
"add_revoked",
"(",
"Revoked",
"(",
")",
")",
"for",
"i",
"in",
"xrange",
"(",
"self",
".",
"iterations",
... | Create a CRL object with 100 Revoked objects, then call the
get_revoked method repeatedly. | [
"Create",
"a",
"CRL",
"object",
"with",
"100",
"Revoked",
"objects",
"then",
"call",
"the",
"get_revoked",
"method",
"repeatedly",
"."
] | python | test |
anjos/rrbob | rr/preprocessor.py | https://github.com/anjos/rrbob/blob/d32d35bab2aa2698d3caa923fd02afb6d67f3235/rr/preprocessor.py#L37-L58 | def normalize(X, norm):
'''Applies the given norm to the input data set
Parameters:
X (numpy.ndarray): A 3D numpy ndarray in which the rows represent examples
while the columns, features of the data set you want to normalize. Every
depth corresponds to data for a particular class
norm (tuple): A tuple containing two 1D numpy ndarrays corresponding to the
normalization parameters extracted with :py:func:`estimated_norm` above.
Returns:
numpy.ndarray: A 3D numpy ndarray with the same dimensions as the input
array ``X``, but with its values normalized according to the norm input.
'''
return numpy.array([(k - norm[0]) / norm[1] for k in X]) | [
"def",
"normalize",
"(",
"X",
",",
"norm",
")",
":",
"return",
"numpy",
".",
"array",
"(",
"[",
"(",
"k",
"-",
"norm",
"[",
"0",
"]",
")",
"/",
"norm",
"[",
"1",
"]",
"for",
"k",
"in",
"X",
"]",
")"
] | Applies the given norm to the input data set
Parameters:
X (numpy.ndarray): A 3D numpy ndarray in which the rows represent examples
while the columns, features of the data set you want to normalize. Every
depth corresponds to data for a particular class
norm (tuple): A tuple containing two 1D numpy ndarrays corresponding to the
normalization parameters extracted with :py:func:`estimated_norm` above.
Returns:
numpy.ndarray: A 3D numpy ndarray with the same dimensions as the input
array ``X``, but with its values normalized according to the norm input. | [
"Applies",
"the",
"given",
"norm",
"to",
"the",
"input",
"data",
"set"
] | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.