repo stringlengths 7 54 | path stringlengths 4 192 | url stringlengths 87 284 | code stringlengths 78 104k | code_tokens list | docstring stringlengths 1 46.9k | docstring_tokens list | language stringclasses 1
value | partition stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|
Genida/dependenpy | src/dependenpy/finder.py | https://github.com/Genida/dependenpy/blob/df099c17cbe735c990eca9197e39cfc5eb8a4c8e/src/dependenpy/finder.py#L45-L61 | def combine(specs):
"""
Combine package specifications' limitations.
Args:
specs (list of PackageSpec): the package specifications.
Returns:
list of PackageSpec: the new, merged list of PackageSpec.
"""
new_specs = {}
for spec in specs:
if new_specs.get(spec, None) is None:
new_specs[spec] = spec
else:
new_specs[spec].add(spec)
return list(new_specs.values()) | [
"def",
"combine",
"(",
"specs",
")",
":",
"new_specs",
"=",
"{",
"}",
"for",
"spec",
"in",
"specs",
":",
"if",
"new_specs",
".",
"get",
"(",
"spec",
",",
"None",
")",
"is",
"None",
":",
"new_specs",
"[",
"spec",
"]",
"=",
"spec",
"else",
":",
"ne... | Combine package specifications' limitations.
Args:
specs (list of PackageSpec): the package specifications.
Returns:
list of PackageSpec: the new, merged list of PackageSpec. | [
"Combine",
"package",
"specifications",
"limitations",
"."
] | python | train |
saltstack/salt | salt/modules/win_lgpo.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_lgpo.py#L4462-L4476 | def _dasd_conversion(cls, val, **kwargs):
'''
converts 0/1/2 for dasd reg key
'''
if val is not None:
if val == '0' or val == 0 or val == '':
return 'Administrators'
elif val == '1' or val == 1:
return 'Administrators and Power Users'
elif val == '2' or val == 2:
return 'Administrators and Interactive Users'
else:
return 'Not Defined'
else:
return 'Not Defined' | [
"def",
"_dasd_conversion",
"(",
"cls",
",",
"val",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"val",
"is",
"not",
"None",
":",
"if",
"val",
"==",
"'0'",
"or",
"val",
"==",
"0",
"or",
"val",
"==",
"''",
":",
"return",
"'Administrators'",
"elif",
"val"... | converts 0/1/2 for dasd reg key | [
"converts",
"0",
"/",
"1",
"/",
"2",
"for",
"dasd",
"reg",
"key"
] | python | train |
thomasdelaet/python-velbus | velbus/controller.py | https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/controller.py#L123-L130 | def send_binary(self, binary_message, callback=None):
"""
:return: None
"""
assert isinstance(binary_message, str)
message = self.parser.parse(binary_message)
if isinstance(message, velbus.Message):
self.send(message, callback) | [
"def",
"send_binary",
"(",
"self",
",",
"binary_message",
",",
"callback",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"binary_message",
",",
"str",
")",
"message",
"=",
"self",
".",
"parser",
".",
"parse",
"(",
"binary_message",
")",
"if",
"isinst... | :return: None | [
":",
"return",
":",
"None"
] | python | train |
saltstack/salt | salt/states/cmd.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/cmd.py#L561-L710 | def wait_script(name,
source=None,
template=None,
onlyif=None,
unless=None,
cwd=None,
runas=None,
shell=None,
env=None,
stateful=False,
umask=None,
use_vt=False,
output_loglevel='debug',
hide_output=False,
success_retcodes=None,
success_stdout=None,
success_stderr=None,
**kwargs):
'''
Download a script from a remote source and execute it only if a watch
statement calls it.
source
The source script being downloaded to the minion, this source script is
hosted on the salt master server. If the file is located on the master
in the directory named spam, and is called eggs, the source string is
salt://spam/eggs
template
If this setting is applied then the named templating engine will be
used to render the downloaded file, currently jinja, mako, and wempy
are supported
name
The command to execute, remember that the command will execute with the
path and permissions of the salt-minion.
onlyif
A command to run as a check, run the named command only if the command
passed to the ``onlyif`` option returns true
unless
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
cwd
The current working directory to execute the command in, defaults to
/root
runas
The user name to run the command as
shell
The shell to use for execution, defaults to the shell grain
env
A list of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.wait_script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.wait_script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: jinja
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
umask
The umask (in octal) to use when running the command.
stateful
The command being executed is expected to return data about executing
a state. For more information, see the :ref:`stateful-argument` section.
use_vt
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
success_retcodes: This parameter will be allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
success_stdout: This parameter will be allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon
success_stderr: This parameter will be allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon
'''
# Ignoring our arguments is intentional.
return {'name': name,
'changes': {},
'result': True,
'comment': ''} | [
"def",
"wait_script",
"(",
"name",
",",
"source",
"=",
"None",
",",
"template",
"=",
"None",
",",
"onlyif",
"=",
"None",
",",
"unless",
"=",
"None",
",",
"cwd",
"=",
"None",
",",
"runas",
"=",
"None",
",",
"shell",
"=",
"None",
",",
"env",
"=",
"... | Download a script from a remote source and execute it only if a watch
statement calls it.
source
The source script being downloaded to the minion, this source script is
hosted on the salt master server. If the file is located on the master
in the directory named spam, and is called eggs, the source string is
salt://spam/eggs
template
If this setting is applied then the named templating engine will be
used to render the downloaded file, currently jinja, mako, and wempy
are supported
name
The command to execute, remember that the command will execute with the
path and permissions of the salt-minion.
onlyif
A command to run as a check, run the named command only if the command
passed to the ``onlyif`` option returns true
unless
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
cwd
The current working directory to execute the command in, defaults to
/root
runas
The user name to run the command as
shell
The shell to use for execution, defaults to the shell grain
env
A list of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
salt://scripts/foo.sh:
cmd.wait_script:
- env:
- BATCH: 'yes'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
Variables as values are not evaluated. So $PATH in the following
example is a literal '$PATH':
.. code-block:: yaml
salt://scripts/bar.sh:
cmd.wait_script:
- env: "PATH=/some/path:$PATH"
One can still use the existing $PATH by using a bit of Jinja:
.. code-block:: jinja
{% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
mycommand:
cmd.run:
- name: ls -l /
- env:
- PATH: {{ [current_path, '/my/special/bin']|join(':') }}
umask
The umask (in octal) to use when running the command.
stateful
The command being executed is expected to return data about executing
a state. For more information, see the :ref:`stateful-argument` section.
use_vt
Use VT utils (saltstack) to stream the command output more
interactively to the console and the logs.
This is experimental.
output_loglevel : debug
Control the loglevel at which the output from the command is logged to
the minion log.
.. note::
The command being run will still be logged at the ``debug``
loglevel regardless, unless ``quiet`` is used for this value.
hide_output : False
Suppress stdout and stderr in the state's results.
.. note::
This is separate from ``output_loglevel``, which only handles how
Salt logs to the minion log.
.. versionadded:: 2018.3.0
success_retcodes: This parameter will be allow a list of
non-zero return codes that should be considered a success. If the
return code returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: 2019.2.0
success_stdout: This parameter will be allow a list of
strings that when found in standard out should be considered a success.
If stdout returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon
success_stderr: This parameter will be allow a list of
strings that when found in standard error should be considered a success.
If stderr returned from the run matches any in the provided list,
the return code will be overridden with zero.
.. versionadded:: Neon | [
"Download",
"a",
"script",
"from",
"a",
"remote",
"source",
"and",
"execute",
"it",
"only",
"if",
"a",
"watch",
"statement",
"calls",
"it",
"."
] | python | train |
widdowquinn/pyani | pyani/anib.py | https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L341-L360 | def construct_blastn_cmdline(
fname1, fname2, outdir, blastn_exe=pyani_config.BLASTN_DEFAULT
):
"""Returns a single blastn command.
- filename - input filename
- blastn_exe - path to BLASTN executable
"""
fstem1 = os.path.splitext(os.path.split(fname1)[-1])[0]
fstem2 = os.path.splitext(os.path.split(fname2)[-1])[0]
fstem1 = fstem1.replace("-fragments", "")
prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2))
cmd = (
"{0} -out {1}.blast_tab -query {2} -db {3} "
+ "-xdrop_gap_final 150 -dust no -evalue 1e-15 "
+ "-max_target_seqs 1 -outfmt '6 qseqid sseqid length mismatch "
+ "pident nident qlen slen qstart qend sstart send positive "
+ "ppos gaps' -task blastn"
)
return cmd.format(blastn_exe, prefix, fname1, fname2) | [
"def",
"construct_blastn_cmdline",
"(",
"fname1",
",",
"fname2",
",",
"outdir",
",",
"blastn_exe",
"=",
"pyani_config",
".",
"BLASTN_DEFAULT",
")",
":",
"fstem1",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"fname1",... | Returns a single blastn command.
- filename - input filename
- blastn_exe - path to BLASTN executable | [
"Returns",
"a",
"single",
"blastn",
"command",
"."
] | python | train |
PaulHancock/Aegean | AegeanTools/angle_tools.py | https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/angle_tools.py#L240-L265 | def bear_rhumb(ra1, dec1, ra2, dec2):
"""
Calculate the bearing of point 2 from point 1 along a Rhumb line.
The bearing is East of North and is in [0, 360), whereas position angle is also East of North but (-180,180]
Parameters
----------
ra1, dec1, ra2, dec2 : float
The sky coordinates (degrees) of the two points.
Returns
-------
dist : float
The bearing of point 2 from point 1 along a Rhumb line (degrees).
"""
# verified against website to give correct results
phi1 = np.radians(dec1)
phi2 = np.radians(dec2)
lambda1 = np.radians(ra1)
lambda2 = np.radians(ra2)
dlambda = lambda2 - lambda1
dpsi = np.log(np.tan(np.pi / 4 + phi2 / 2) / np.tan(np.pi / 4 + phi1 / 2))
theta = np.arctan2(dlambda, dpsi)
return np.degrees(theta) | [
"def",
"bear_rhumb",
"(",
"ra1",
",",
"dec1",
",",
"ra2",
",",
"dec2",
")",
":",
"# verified against website to give correct results",
"phi1",
"=",
"np",
".",
"radians",
"(",
"dec1",
")",
"phi2",
"=",
"np",
".",
"radians",
"(",
"dec2",
")",
"lambda1",
"=",... | Calculate the bearing of point 2 from point 1 along a Rhumb line.
The bearing is East of North and is in [0, 360), whereas position angle is also East of North but (-180,180]
Parameters
----------
ra1, dec1, ra2, dec2 : float
The sky coordinates (degrees) of the two points.
Returns
-------
dist : float
The bearing of point 2 from point 1 along a Rhumb line (degrees). | [
"Calculate",
"the",
"bearing",
"of",
"point",
"2",
"from",
"point",
"1",
"along",
"a",
"Rhumb",
"line",
".",
"The",
"bearing",
"is",
"East",
"of",
"North",
"and",
"is",
"in",
"[",
"0",
"360",
")",
"whereas",
"position",
"angle",
"is",
"also",
"East",
... | python | train |
NerdWalletOSS/savage | src/savage/__init__.py | https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/__init__.py#L55-L71 | def _after_flush_handler(session, _flush_context):
"""Archive all new/updated/deleted data"""
dialect = get_dialect(session)
handlers = [
(_versioned_delete, session.deleted),
(_versioned_insert, session.new),
(_versioned_update, session.dirty),
]
for handler, rows in handlers:
# TODO: Bulk archive insert statements
for row in rows:
if not isinstance(row, SavageModelMixin):
continue
if not hasattr(row, 'ArchiveTable'):
raise LogTableCreationError('Need to register Savage tables!!')
user_id = getattr(row, '_updated_by', None)
handler(row, session, user_id, dialect) | [
"def",
"_after_flush_handler",
"(",
"session",
",",
"_flush_context",
")",
":",
"dialect",
"=",
"get_dialect",
"(",
"session",
")",
"handlers",
"=",
"[",
"(",
"_versioned_delete",
",",
"session",
".",
"deleted",
")",
",",
"(",
"_versioned_insert",
",",
"sessio... | Archive all new/updated/deleted data | [
"Archive",
"all",
"new",
"/",
"updated",
"/",
"deleted",
"data"
] | python | train |
saltstack/salt | salt/modules/boto_ec2.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_ec2.py#L2079-L2107 | def attach_volume(volume_id, instance_id, device,
region=None, key=None, keyid=None, profile=None):
'''
Attach an EBS volume to an EC2 instance.
..
volume_id
(string) – The ID of the EBS volume to be attached.
instance_id
(string) – The ID of the EC2 instance to attach the volume to.
device
(string) – The device on the instance through which the volume is exposed (e.g. /dev/sdh)
returns
(bool) - True on success, False on failure.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.attach_volume vol-12345678 i-87654321 /dev/sdh
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.attach_volume(volume_id, instance_id, device)
except boto.exception.BotoServerError as error:
log.error(error)
return False | [
"def",
"attach_volume",
"(",
"volume_id",
",",
"instance_id",
",",
"device",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
... | Attach an EBS volume to an EC2 instance.
..
volume_id
(string) – The ID of the EBS volume to be attached.
instance_id
(string) – The ID of the EC2 instance to attach the volume to.
device
(string) – The device on the instance through which the volume is exposed (e.g. /dev/sdh)
returns
(bool) - True on success, False on failure.
CLI Example:
.. code-block:: bash
salt-call boto_ec2.attach_volume vol-12345678 i-87654321 /dev/sdh | [
"Attach",
"an",
"EBS",
"volume",
"to",
"an",
"EC2",
"instance",
".",
".."
] | python | train |
taskcluster/taskcluster-client.py | taskcluster/queue.py | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L822-L835 | def quarantineWorker(self, *args, **kwargs):
"""
Quarantine a worker
Quarantine a worker
This method takes input: ``v1/quarantine-worker-request.json#``
This method gives output: ``v1/worker-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["quarantineWorker"], *args, **kwargs) | [
"def",
"quarantineWorker",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"quarantineWorker\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Quarantine a worker
Quarantine a worker
This method takes input: ``v1/quarantine-worker-request.json#``
This method gives output: ``v1/worker-response.json#``
This method is ``experimental`` | [
"Quarantine",
"a",
"worker"
] | python | train |
ManiacalLabs/BiblioPixel | bibliopixel/colors/tables.py | https://github.com/ManiacalLabs/BiblioPixel/blob/fd97e6c651a4bbcade64733847f4eec8f7704b7c/bibliopixel/colors/tables.py#L48-L55 | def contains(x):
"""Return true if this string or integer tuple appears in tables"""
if isinstance(x, str):
x = canonical_name(x)
return x in _TO_COLOR_USER or x in _TO_COLOR
else:
x = tuple(x)
return x in _TO_NAME_USER or x in _TO_NAME | [
"def",
"contains",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"x",
"=",
"canonical_name",
"(",
"x",
")",
"return",
"x",
"in",
"_TO_COLOR_USER",
"or",
"x",
"in",
"_TO_COLOR",
"else",
":",
"x",
"=",
"tuple",
"(",
"x",
"... | Return true if this string or integer tuple appears in tables | [
"Return",
"true",
"if",
"this",
"string",
"or",
"integer",
"tuple",
"appears",
"in",
"tables"
] | python | valid |
prompt-toolkit/pyvim | pyvim/editor.py | https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/editor.py#L262-L269 | def enter_command_mode(self):
"""
Go into command mode.
"""
self.application.layout.focus(self.command_buffer)
self.application.vi_state.input_mode = InputMode.INSERT
self.previewer.save() | [
"def",
"enter_command_mode",
"(",
"self",
")",
":",
"self",
".",
"application",
".",
"layout",
".",
"focus",
"(",
"self",
".",
"command_buffer",
")",
"self",
".",
"application",
".",
"vi_state",
".",
"input_mode",
"=",
"InputMode",
".",
"INSERT",
"self",
"... | Go into command mode. | [
"Go",
"into",
"command",
"mode",
"."
] | python | train |
nerox8664/pytorch2keras | pytorch2keras/reshape_layers.py | https://github.com/nerox8664/pytorch2keras/blob/750eaf747323580e6732d0c5ba9f2f39cb096764/pytorch2keras/reshape_layers.py#L124-L151 | def convert_unsqueeze(params, w_name, scope_name, inputs, layers, weights, names):
"""
Convert unsqueeze operation.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
"""
print('Converting unsqueeze ...')
if names == 'short':
tf_name = 'UNSQ' + random_string(4)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
def target_layer(x):
import keras
return keras.backend.expand_dims(x)
lambda_layer = keras.layers.Lambda(target_layer, name=tf_name + 'E')
layers[scope_name] = lambda_layer(layers[inputs[0]]) | [
"def",
"convert_unsqueeze",
"(",
"params",
",",
"w_name",
",",
"scope_name",
",",
"inputs",
",",
"layers",
",",
"weights",
",",
"names",
")",
":",
"print",
"(",
"'Converting unsqueeze ...'",
")",
"if",
"names",
"==",
"'short'",
":",
"tf_name",
"=",
"'UNSQ'",... | Convert unsqueeze operation.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers | [
"Convert",
"unsqueeze",
"operation",
"."
] | python | valid |
JasonKessler/scattertext | scattertext/termscoring/MannWhitneyU.py | https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/termscoring/MannWhitneyU.py#L35-L87 | def get_score_df(self, correction_method=None):
'''
Computes Mann Whitney corrected p, z-values. Falls back to normal approximation when numerical limits are reached.
:param correction_method: str or None, correction method from statsmodels.stats.multitest.multipletests
'fdr_bh' is recommended.
:return: pd.DataFrame
'''
X = self._get_X().astype(np.float64)
X = X / X.sum(axis=1)
cat_X, ncat_X = self._get_cat_and_ncat(X)
def normal_apx(u, x, y):
# from https://stats.stackexchange.com/questions/116315/problem-with-mann-whitney-u-test-in-scipy
m_u = len(x) * len(y) / 2
sigma_u = np.sqrt(len(x) * len(y) * (len(x) + len(y) + 1) / 12)
z = (u - m_u) / sigma_u
return 2*norm.cdf(z)
scores = []
for i in range(cat_X.shape[1]):
cat_list = cat_X.T[i].A1
ncat_list = ncat_X.T[i].A1
try:
if cat_list.mean() > ncat_list.mean():
mw = mannwhitneyu(cat_list, ncat_list, alternative='greater')
if mw.pvalue in (0, 1):
mw.pvalue = normal_apx(mw.staistic, cat_list, ncat_list)
scores.append({'mwu': mw.statistic, 'mwu_p': mw.pvalue, 'mwu_z': norm.isf(float(mw.pvalue)), 'valid':True})
else:
mw = mannwhitneyu(ncat_list, cat_list, alternative='greater')
if mw.pvalue in (0, 1):
mw.pvalue = normal_apx(mw.staistic, ncat_list, cat_list)
scores.append({'mwu': -mw.statistic, 'mwu_p': 1 - mw.pvalue, 'mwu_z': 1. - norm.isf(float(mw.pvalue)), 'valid':True})
except:
scores.append({'mwu': 0, 'mwu_p': 0, 'mwu_z': 0, 'valid':False})
score_df = pd.DataFrame(scores, index=self.corpus_.get_terms()).fillna(0)
if correction_method is not None:
from statsmodels.stats.multitest import multipletests
for method in ['mwu']:
valid_pvals = score_df[score_df.valid].mwu_p
valid_pvals_abs = np.min([valid_pvals, 1-valid_pvals], axis=0)
valid_pvals_abs_corr = multipletests(valid_pvals_abs, method=correction_method)[1]
score_df[method + '_p_corr'] = 0.5
valid_pvals_abs_corr[valid_pvals > 0.5] = 1. - valid_pvals_abs_corr[valid_pvals > 0.5]
valid_pvals_abs_corr[valid_pvals < 0.5] = valid_pvals_abs_corr[valid_pvals < 0.5]
score_df.loc[score_df.valid, method + '_p_corr'] = valid_pvals_abs_corr
score_df[method + '_z'] = -norm.ppf(score_df[method + '_p_corr'])
return score_df | [
"def",
"get_score_df",
"(",
"self",
",",
"correction_method",
"=",
"None",
")",
":",
"X",
"=",
"self",
".",
"_get_X",
"(",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"X",
"=",
"X",
"/",
"X",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"cat... | Computes Mann Whitney corrected p, z-values. Falls back to normal approximation when numerical limits are reached.
:param correction_method: str or None, correction method from statsmodels.stats.multitest.multipletests
'fdr_bh' is recommended.
:return: pd.DataFrame | [
"Computes",
"Mann",
"Whitney",
"corrected",
"p",
"z",
"-",
"values",
".",
"Falls",
"back",
"to",
"normal",
"approximation",
"when",
"numerical",
"limits",
"are",
"reached",
"."
] | python | train |
LionelAuroux/pyrser | pyrser/type_system/fun.py | https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/type_system/fun.py#L57-L66 | def internal_name(self):
"""
Return the unique internal name
"""
unq = 'f_' + super().internal_name()
if self.tparams is not None:
unq += "_" + "_".join(self.tparams)
if self.tret is not None:
unq += "_" + self.tret
return unq | [
"def",
"internal_name",
"(",
"self",
")",
":",
"unq",
"=",
"'f_'",
"+",
"super",
"(",
")",
".",
"internal_name",
"(",
")",
"if",
"self",
".",
"tparams",
"is",
"not",
"None",
":",
"unq",
"+=",
"\"_\"",
"+",
"\"_\"",
".",
"join",
"(",
"self",
".",
... | Return the unique internal name | [
"Return",
"the",
"unique",
"internal",
"name"
] | python | test |
pyblish/pyblish-nuke | pyblish_nuke/vendor/Qt.py | https://github.com/pyblish/pyblish-nuke/blob/5fbd766774e999e5e3015201094a07a92d800c4f/pyblish_nuke/vendor/Qt.py#L299-L365 | def init():
"""Try loading each binding in turn
Please note: the entire Qt module is replaced with this code:
sys.modules["Qt"] = binding()
This means no functions or variables can be called after
this has executed.
For debugging and testing, this module may be accessed
through `Qt.__shim__`.
"""
preferred = os.getenv("QT_PREFERRED_BINDING")
verbose = os.getenv("QT_VERBOSE") is not None
bindings = (_pyside2, _pyqt5, _pyside, _pyqt4)
if preferred:
# Internal flag (used in installer)
if preferred == "None":
self.__wrapper_version__ = self.__version__
return
preferred = preferred.split(os.pathsep)
available = {
"PySide2": _pyside2,
"PyQt5": _pyqt5,
"PySide": _pyside,
"PyQt4": _pyqt4
}
try:
bindings = [available[binding] for binding in preferred]
except KeyError:
raise ImportError(
"Available preferred Qt bindings: "
"\n".join(preferred)
)
for binding in bindings:
_log("Trying %s" % binding.__name__, verbose)
try:
binding = binding()
except ImportError as e:
_log(" - ImportError(\"%s\")" % e, verbose)
continue
else:
# Reference to this module
binding.__shim__ = self
binding.QtCompat = self
sys.modules.update({
__name__: binding,
# Fix #133, `from Qt.QtWidgets import QPushButton`
__name__ + ".QtWidgets": binding.QtWidgets
})
return
# If not binding were found, throw this error
raise ImportError("No Qt binding were found.") | [
"def",
"init",
"(",
")",
":",
"preferred",
"=",
"os",
".",
"getenv",
"(",
"\"QT_PREFERRED_BINDING\"",
")",
"verbose",
"=",
"os",
".",
"getenv",
"(",
"\"QT_VERBOSE\"",
")",
"is",
"not",
"None",
"bindings",
"=",
"(",
"_pyside2",
",",
"_pyqt5",
",",
"_pysid... | Try loading each binding in turn
Please note: the entire Qt module is replaced with this code:
sys.modules["Qt"] = binding()
This means no functions or variables can be called after
this has executed.
For debugging and testing, this module may be accessed
through `Qt.__shim__`. | [
"Try",
"loading",
"each",
"binding",
"in",
"turn"
] | python | train |
exa-analytics/exa | exa/util/utility.py | https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/util/utility.py#L55-L66 | def convert_bytes(value):
"""
Reduces bytes to more convenient units (i.e. KiB, GiB, TiB, etc.).
Args:
values (int): Value in Bytes
Returns:
tup (tuple): Tuple of value, unit (e.g. (10, 'MiB'))
"""
n = np.rint(len(str(value))/4).astype(int)
return value/(1024**n), sizes[n] | [
"def",
"convert_bytes",
"(",
"value",
")",
":",
"n",
"=",
"np",
".",
"rint",
"(",
"len",
"(",
"str",
"(",
"value",
")",
")",
"/",
"4",
")",
".",
"astype",
"(",
"int",
")",
"return",
"value",
"/",
"(",
"1024",
"**",
"n",
")",
",",
"sizes",
"["... | Reduces bytes to more convenient units (i.e. KiB, GiB, TiB, etc.).
Args:
values (int): Value in Bytes
Returns:
tup (tuple): Tuple of value, unit (e.g. (10, 'MiB')) | [
"Reduces",
"bytes",
"to",
"more",
"convenient",
"units",
"(",
"i",
".",
"e",
".",
"KiB",
"GiB",
"TiB",
"etc",
".",
")",
"."
] | python | train |
flo-compbio/genometools | genometools/expression/filter.py | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/expression/filter.py#L74-L104 | def filter_mean(matrix, top):
"""Filter genes in an expression matrix by mean expression.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
Returns
-------
ExpMatrix
The filtered expression matrix.
"""
assert isinstance(matrix, ExpMatrix)
assert isinstance(top, int)
if top >= matrix.p:
logger.warning('Gene expression filter with `top` parameter that is '
'>= the number of genes!')
top = matrix.p
a = np.argsort(np.mean(matrix.X, axis=1))
a = a[::-1]
sel = np.zeros(matrix.p, dtype=np.bool_)
sel[a[:top]] = True
matrix = matrix.loc[sel]
return matrix | [
"def",
"filter_mean",
"(",
"matrix",
",",
"top",
")",
":",
"assert",
"isinstance",
"(",
"matrix",
",",
"ExpMatrix",
")",
"assert",
"isinstance",
"(",
"top",
",",
"int",
")",
"if",
"top",
">=",
"matrix",
".",
"p",
":",
"logger",
".",
"warning",
"(",
"... | Filter genes in an expression matrix by mean expression.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
Returns
-------
ExpMatrix
The filtered expression matrix. | [
"Filter",
"genes",
"in",
"an",
"expression",
"matrix",
"by",
"mean",
"expression",
"."
] | python | train |
radjkarl/imgProcessor | imgProcessor/interpolate/videoWrite.py | https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/interpolate/videoWrite.py#L14-L81 | def videoWrite(path, imgs, levels=None, shape=None, frames=15,
annotate_names=None,
lut=None, updateFn=None):
'''
TODO
'''
frames = int(frames)
if annotate_names is not None:
assert len(annotate_names) == len(imgs)
if levels is None:
if imgs[0].dtype == np.uint8:
levels = 0, 255
elif imgs[0].dtype == np.uint16:
levels = 0, 2**16 - 1
else:
levels = np.min(imgs), np.max(imgs)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
h, w = imgs.shape[1:3]
if shape and shape != (h, w):
h, w = shape
imgs = [cv2.resize(i, (w, h)) for i in imgs]
assert path[-3:] in ('avi',
'png'), 'video export only supports *.avi or *.png'
isVideo = path[-3:] == 'avi'
if isVideo:
cap = cv2.VideoCapture(0)
# im.ndim==4)
out = cv2.VideoWriter(path, fourcc, frames, (w, h), isColor=1)
times = np.linspace(0, len(imgs) - 1, len(imgs) * frames)
interpolator = LinearInterpolateImageStack(imgs)
if lut is not None:
lut = lut(imgs[0])
for n, time in enumerate(times):
if updateFn:
# update progress:
updateFn.emit(100 * n / len(times))
image = interpolator(time)
cimg = makeRGBA(image, lut=lut,
levels=levels)[0]
cimg = cv2.cvtColor(cimg, cv2.COLOR_RGBA2BGR)
if annotate_names:
text = annotate_names[n // frames]
alpha = 0.5
org = (0, cimg.shape[0])
fontFace = cv2.FONT_HERSHEY_PLAIN
fontScale = 2
thickness = 3
putTextAlpha(cimg, text, alpha, org, fontFace, fontScale,
(0, 255, 0), thickness
)
if isVideo:
out.write(cimg)
else:
cv2.imwrite('%s_%i_%.3f.png' % (path[:-4], n, time), cimg)
if isVideo:
cap.release()
out.release() | [
"def",
"videoWrite",
"(",
"path",
",",
"imgs",
",",
"levels",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"frames",
"=",
"15",
",",
"annotate_names",
"=",
"None",
",",
"lut",
"=",
"None",
",",
"updateFn",
"=",
"None",
")",
":",
"frames",
"=",
"int... | TODO | [
"TODO"
] | python | train |
ToFuProject/tofu | tofu/geom/utils.py | https://github.com/ToFuProject/tofu/blob/39d6b2e7ced9e13666572dfd37e19403f1d6ff8d/tofu/geom/utils.py#L244-L347 | def _compute_VesPoly(R=2.4, r=1., elong=0., Dshape=0.,
divlow=True, divup=True, nP=200):
""" Utility to compute three 2D (R,Z) polygons
One represents a vacuum vessel, one an outer bumper, one a baffle
The vessel polygon is centered on (R,0.), with minor radius r
It can have a vertical (>0) or horizontal(<0) elongation in [-1;1]
It can be D-shaped (Dshape in [0.,1.], typically 0.2)
It can be non-convex, with:
* a lower divertor-like shape
* a upper divertor-like shape
The elongation also affects the outer bumper and baffle
Parameters
----------
R: int / float
Major radius used as a center of the vessel
r : int / float
Minor radius of the vessel
elong: int / float
Dimensionless elongation parameter in [-1;1]
Dshape: int / float
Dimensionless parameter for the D-shape (in-out asymmetry) in [0;1]
divlow: bool
Flag indicating whether to incude a lower divertor-like shape
divup: bool
Flag indicating whether to incude an upper divertor-like shape
nP : int
Parameter specifying approximately the number of points of the vessel
Return
------
poly: np.ndarray
Closed (2,nP) polygon of the vacuum vessel, optionnally with divertors
pbump: np.ndarray
Closed (2,N) polygon defining the outer bumper
pbaffle: np.ndarray
Closed (2,N) polygon defining the lower baffle
"""
# Basics (center, theta, unit vectors)
cent = np.r_[R,0.]
theta = np.linspace(-np.pi,np.pi,nP)
poly = np.array([np.cos(theta), np.sin(theta)])
# Divertors
pdivR = np.r_[-0.1,0.,0.1]
pdivZ = np.r_[-0.1,0.,-0.1]
if divlow:
ind = (np.sin(theta)<-0.85).nonzero()[0]
pinsert = np.array([pdivR, -1.+pdivZ])
poly = np.concatenate((poly[:,:ind[0]], pinsert, poly[:,ind[-1]+1:]),
axis=1)
if divup:
theta = np.arctan2(poly[1,:], poly[0,:])
ind = (np.sin(theta)>0.85).nonzero()[0]
pinsert = np.array([pdivR[::-1], 1.-pdivZ])
poly = np.concatenate((poly[:,:ind[0]], pinsert, poly[:,ind[-1]+1:]),
axis=1)
# Modified radius (by elongation and Dshape)
rbis = r*np.hypot(poly[0,:],poly[1,:])
theta = np.arctan2(poly[1,:],poly[0,:])
rbis = rbis*(1+elong*0.15*np.sin(2.*theta-np.pi/2.))
if Dshape>0.:
ind = np.cos(theta)<0.
coef = 1 + Dshape*(np.sin(theta[ind])**2-1.)
rbis[ind] = rbis[ind]*coef
er = np.array([np.cos(theta), np.sin(theta)])
poly = cent[:,np.newaxis] + rbis[np.newaxis,:]*er
# Outer bumper
Dbeta = 2.*np.pi/6.
beta = np.linspace(-Dbeta/2.,Dbeta/2., 20)
pbRin = 0.85*np.array([np.cos(beta), np.sin(beta)])
pbRout = 0.95*np.array([np.cos(beta), np.sin(beta)])[:,::-1]
pinsert = np.array([[0.95,1.05,1.05,0.95],
[0.05,0.05,-0.05,-0.05]])
ind = (np.abs(pbRout[1,:])<0.05).nonzero()[0]
pbump = (pbRin, pbRout[:,:ind[0]], pinsert,
pbRout[:,ind[-1]+1:], pbRin[:,0:1])
pbump = np.concatenate(pbump, axis=1)
theta = np.arctan2(pbump[1,:],pbump[0,:])
er = np.array([np.cos(theta), np.sin(theta)])
rbis = r*(np.hypot(pbump[0,:],pbump[1,:])
*(1.+elong*0.15*np.sin(2.*theta-np.pi/2.)))
pbump = cent[:,np.newaxis] + rbis[np.newaxis,:]*er
# Baffle
offR, offZ = 0.1, -0.85
wR, wZ = 0.2, 0.05
pbaffle = np.array([offR + wR*np.r_[-1,1,1,-1,-1],
offZ + wZ*np.r_[1,1,-1,-1,1]])
theta = np.arctan2(pbaffle[1,:],pbaffle[0,:])
er = np.array([np.cos(theta), np.sin(theta)])
rbis = r*(np.hypot(pbaffle[0,:],pbaffle[1,:])
*(1.+elong*0.15*np.sin(2.*theta-np.pi/2.)))
pbaffle = cent[:,np.newaxis] + rbis[np.newaxis,:]*er
return poly, pbump, pbaffle | [
"def",
"_compute_VesPoly",
"(",
"R",
"=",
"2.4",
",",
"r",
"=",
"1.",
",",
"elong",
"=",
"0.",
",",
"Dshape",
"=",
"0.",
",",
"divlow",
"=",
"True",
",",
"divup",
"=",
"True",
",",
"nP",
"=",
"200",
")",
":",
"# Basics (center, theta, unit vectors)",
... | Utility to compute three 2D (R,Z) polygons
One represents a vacuum vessel, one an outer bumper, one a baffle
The vessel polygon is centered on (R,0.), with minor radius r
It can have a vertical (>0) or horizontal(<0) elongation in [-1;1]
It can be D-shaped (Dshape in [0.,1.], typically 0.2)
It can be non-convex, with:
* a lower divertor-like shape
* a upper divertor-like shape
The elongation also affects the outer bumper and baffle
Parameters
----------
R: int / float
Major radius used as a center of the vessel
r : int / float
Minor radius of the vessel
elong: int / float
Dimensionless elongation parameter in [-1;1]
Dshape: int / float
Dimensionless parameter for the D-shape (in-out asymmetry) in [0;1]
divlow: bool
Flag indicating whether to incude a lower divertor-like shape
divup: bool
Flag indicating whether to incude an upper divertor-like shape
nP : int
Parameter specifying approximately the number of points of the vessel
Return
------
poly: np.ndarray
Closed (2,nP) polygon of the vacuum vessel, optionnally with divertors
pbump: np.ndarray
Closed (2,N) polygon defining the outer bumper
pbaffle: np.ndarray
Closed (2,N) polygon defining the lower baffle | [
"Utility",
"to",
"compute",
"three",
"2D",
"(",
"R",
"Z",
")",
"polygons"
] | python | train |
rigetti/quantumflow | quantumflow/backend/numpybk.py | https://github.com/rigetti/quantumflow/blob/13a66cabbe8aabf6e023cc675f4a4ebe6ccda8fb/quantumflow/backend/numpybk.py#L150-L156 | def productdiag(tensor: BKTensor) -> BKTensor:
"""Returns the matrix diagonal of the product tensor""" # DOCME: Explain
N = rank(tensor)
tensor = reshape(tensor, [2**(N//2), 2**(N//2)])
tensor = np.diag(tensor)
tensor = reshape(tensor, [2]*(N//2))
return tensor | [
"def",
"productdiag",
"(",
"tensor",
":",
"BKTensor",
")",
"->",
"BKTensor",
":",
"# DOCME: Explain",
"N",
"=",
"rank",
"(",
"tensor",
")",
"tensor",
"=",
"reshape",
"(",
"tensor",
",",
"[",
"2",
"**",
"(",
"N",
"//",
"2",
")",
",",
"2",
"**",
"(",... | Returns the matrix diagonal of the product tensor | [
"Returns",
"the",
"matrix",
"diagonal",
"of",
"the",
"product",
"tensor"
] | python | train |
twilio/twilio-python | twilio/twiml/voice_response.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/twiml/voice_response.py#L474-L484 | def ssml_break(self, strength=None, time=None, **kwargs):
"""
Create a <Break> element
:param strength: Set a pause based on strength
:param time: Set a pause to a specific length of time in seconds or milliseconds, available values: [number]s, [number]ms
:param kwargs: additional attributes
:returns: <Break> element
"""
return self.nest(SsmlBreak(strength=strength, time=time, **kwargs)) | [
"def",
"ssml_break",
"(",
"self",
",",
"strength",
"=",
"None",
",",
"time",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"nest",
"(",
"SsmlBreak",
"(",
"strength",
"=",
"strength",
",",
"time",
"=",
"time",
",",
"*",
"*",... | Create a <Break> element
:param strength: Set a pause based on strength
:param time: Set a pause to a specific length of time in seconds or milliseconds, available values: [number]s, [number]ms
:param kwargs: additional attributes
:returns: <Break> element | [
"Create",
"a",
"<Break",
">",
"element"
] | python | train |
cirruscluster/cirruscluster | cirruscluster/ext/ansible/inventory/__init__.py | https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/inventory/__init__.py#L145-L155 | def __get_hosts(self, pattern):
"""
finds hosts that postively match a particular pattern. Does not
take into account negative matches.
"""
(name, enumeration_details) = self._enumeration_info(pattern)
hpat = self._hosts_in_unenumerated_pattern(name)
hpat = sorted(hpat, key=lambda x: x.name)
return set(self._apply_ranges(pattern, hpat)) | [
"def",
"__get_hosts",
"(",
"self",
",",
"pattern",
")",
":",
"(",
"name",
",",
"enumeration_details",
")",
"=",
"self",
".",
"_enumeration_info",
"(",
"pattern",
")",
"hpat",
"=",
"self",
".",
"_hosts_in_unenumerated_pattern",
"(",
"name",
")",
"hpat",
"=",
... | finds hosts that postively match a particular pattern. Does not
take into account negative matches. | [
"finds",
"hosts",
"that",
"postively",
"match",
"a",
"particular",
"pattern",
".",
"Does",
"not",
"take",
"into",
"account",
"negative",
"matches",
"."
] | python | train |
synw/dataswim | dataswim/data/clean.py | https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/clean.py#L140-L157 | def to_int(self, *cols, **kwargs):
"""
Convert some column values to integers
:param \*cols: names of the colums
:type \*cols: str, at least one
:param \*\*kwargs: keyword arguments for ``pd.to_numeric``
:type \*\*kwargs: optional
:example: ``ds.to_int("mycol1", "mycol2", errors="coerce")``
"""
try:
for col in cols:
self.df[col] = pd.to_numeric(self.df[col], **kwargs)
except Exception as e:
self.err(e, "Can not convert column values to integer")
return
self.ok("Converted column values to integers") | [
"def",
"to_int",
"(",
"self",
",",
"*",
"cols",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"for",
"col",
"in",
"cols",
":",
"self",
".",
"df",
"[",
"col",
"]",
"=",
"pd",
".",
"to_numeric",
"(",
"self",
".",
"df",
"[",
"col",
"]",
",",
... | Convert some column values to integers
:param \*cols: names of the colums
:type \*cols: str, at least one
:param \*\*kwargs: keyword arguments for ``pd.to_numeric``
:type \*\*kwargs: optional
:example: ``ds.to_int("mycol1", "mycol2", errors="coerce")`` | [
"Convert",
"some",
"column",
"values",
"to",
"integers"
] | python | train |
joelcolucci/flask-registerblueprints | flask_registerblueprints/register.py | https://github.com/joelcolucci/flask-registerblueprints/blob/c117404691b66594f2cc84bff103ce893c633ecc/flask_registerblueprints/register.py#L24-L37 | def get_child_directories(path):
"""Return names of immediate child directories"""
if not _is_valid_directory(path):
raise exceptions.InvalidDirectory
entries = os.listdir(path)
directory_names = []
for entry in entries:
abs_entry_path = os.path.join(path, entry)
if _is_valid_directory(abs_entry_path):
directory_names.append(entry)
return directory_names | [
"def",
"get_child_directories",
"(",
"path",
")",
":",
"if",
"not",
"_is_valid_directory",
"(",
"path",
")",
":",
"raise",
"exceptions",
".",
"InvalidDirectory",
"entries",
"=",
"os",
".",
"listdir",
"(",
"path",
")",
"directory_names",
"=",
"[",
"]",
"for",... | Return names of immediate child directories | [
"Return",
"names",
"of",
"immediate",
"child",
"directories"
] | python | train |
saltstack/salt | salt/cloud/clouds/xen.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L697-L718 | def _wait_for_ip(name, session):
'''
Wait for IP to be available during create()
'''
start_time = datetime.now()
status = None
while status is None:
status = get_vm_ip(name, session)
if status is not None:
# ignore APIPA address
if status.startswith('169'):
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug(
'Waited %s seconds for %s to report ip address...',
delta.seconds, name
)
if delta.seconds > 180:
log.warning('Timeout getting IP address')
break
time.sleep(5) | [
"def",
"_wait_for_ip",
"(",
"name",
",",
"session",
")",
":",
"start_time",
"=",
"datetime",
".",
"now",
"(",
")",
"status",
"=",
"None",
"while",
"status",
"is",
"None",
":",
"status",
"=",
"get_vm_ip",
"(",
"name",
",",
"session",
")",
"if",
"status"... | Wait for IP to be available during create() | [
"Wait",
"for",
"IP",
"to",
"be",
"available",
"during",
"create",
"()"
] | python | train |
fastai/fastai | docs_src/nbval/cover.py | https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/docs_src/nbval/cover.py#L33-L73 | def setup_coverage(config, kernel, floc, output_loc=None):
"""Start coverage reporting in kernel.
Currently supported kernel languages are:
- Python
"""
language = kernel.language
if language.startswith('python'):
# Get the pytest-cov coverage object
cov = get_cov(config)
if cov:
# If present, copy the data file location used by pytest-cov
data_file = os.path.abspath(cov.config.data_file)
else:
# Fall back on output_loc and current dir if not
data_file = os.path.abspath(os.path.join(output_loc or os.getcwd(), '.coverage'))
# Get options from pytest-cov's command line arguments:
source = config.option.cov_source
config_file = config.option.cov_config
if isinstance(config_file, str) and os.path.isfile(config_file):
config_file = os.path.abspath(config_file)
# Copy the suffix of plugin if available
suffix = _make_suffix(cov)
if suffix is True:
# Cannot merge data with autogen suffix, so turn off warning
# for missing data in pytest-cov collector
cov._warn_no_data = False
# Build setup command and execute in kernel:
cmd = _python_setup % (data_file, source, config_file, suffix)
msg_id = kernel.kc.execute(cmd, stop_on_error=False)
kernel.await_idle(msg_id, 60) # A minute should be plenty to enable coverage
else:
config.warn(
'C1',
'Coverage currently not supported for language "%s".' % language,
floc)
return | [
"def",
"setup_coverage",
"(",
"config",
",",
"kernel",
",",
"floc",
",",
"output_loc",
"=",
"None",
")",
":",
"language",
"=",
"kernel",
".",
"language",
"if",
"language",
".",
"startswith",
"(",
"'python'",
")",
":",
"# Get the pytest-cov coverage object",
"c... | Start coverage reporting in kernel.
Currently supported kernel languages are:
- Python | [
"Start",
"coverage",
"reporting",
"in",
"kernel",
"."
] | python | train |
data61/clkhash | clkhash/validate_data.py | https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/validate_data.py#L30-L47 | def validate_row_lengths(fields, # type: Sequence[FieldSpec]
data # type: Sequence[Sequence[str]]
):
# type: (...) -> None
""" Validate the `data` row lengths according to the specification
in `fields`.
:param fields: The `FieldSpec` objects forming the
specification.
:param data: The rows to check.
:raises FormatError: When the number of entries in a row does
not match expectation.
"""
for i, row in enumerate(data):
if len(fields) != len(row):
msg = 'Row {} has {} entries when {} are expected.'.format(
i, len(row), len(fields))
raise FormatError(msg) | [
"def",
"validate_row_lengths",
"(",
"fields",
",",
"# type: Sequence[FieldSpec]",
"data",
"# type: Sequence[Sequence[str]]",
")",
":",
"# type: (...) -> None",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"data",
")",
":",
"if",
"len",
"(",
"fields",
")",
"!=",... | Validate the `data` row lengths according to the specification
in `fields`.
:param fields: The `FieldSpec` objects forming the
specification.
:param data: The rows to check.
:raises FormatError: When the number of entries in a row does
not match expectation. | [
"Validate",
"the",
"data",
"row",
"lengths",
"according",
"to",
"the",
"specification",
"in",
"fields",
"."
] | python | train |
noxdafox/clipspy | clips/functions.py | https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/functions.py#L341-L352 | def undefine(self):
"""Undefine the Method.
Python equivalent of the CLIPS undefmethod command.
The object becomes unusable after this method has been called.
"""
if lib.EnvUndefmethod(self._env, self._gnc, self._idx) != 1:
raise CLIPSError(self._env)
self._env = None | [
"def",
"undefine",
"(",
"self",
")",
":",
"if",
"lib",
".",
"EnvUndefmethod",
"(",
"self",
".",
"_env",
",",
"self",
".",
"_gnc",
",",
"self",
".",
"_idx",
")",
"!=",
"1",
":",
"raise",
"CLIPSError",
"(",
"self",
".",
"_env",
")",
"self",
".",
"_... | Undefine the Method.
Python equivalent of the CLIPS undefmethod command.
The object becomes unusable after this method has been called. | [
"Undefine",
"the",
"Method",
"."
] | python | train |
ccarocean/python-contours | contours/core.py | https://github.com/ccarocean/python-contours/blob/d154a679a2ea6a324c3308c1d087d88d0eb79622/contours/core.py#L108-L148 | def matlab_formatter(level, vertices, codes=None):
"""`MATLAB`_ style contour formatter.
Contours are returned as a single Nx2, `MATLAB`_ style, contour array.
There are two types of rows in this format:
* Header: The first element of a header row is the level of the contour
(the lower level for filled contours) and the second element is the
number of vertices (to follow) belonging to this contour line.
* Vertex: x,y coordinate pairs of the vertex.
A header row is always followed by the coresponding number of vertices.
Another header row may follow if there are more contour lines.
For filled contours the direction of vertices matters:
* CCW (ACW): The vertices give the exterior of a contour polygon.
* CW: The vertices give a hole of a contour polygon. This hole will
always be inside the exterior of the last contour exterior.
For further explanation of this format see the `Mathworks documentation
<https://www.mathworks.com/help/matlab/ref/contour-properties.html#prop_ContourMatrix>`_
noting that the MATLAB format used in the `contours` package is the
transpose of that used by `MATLAB`_ (since `MATLAB`_ is column-major
and `NumPy`_ is row-major by default).
.. _NumPy: http://www.numpy.org
.. _MATLAB: https://www.mathworks.com/products/matlab.html
"""
vertices = numpy_formatter(level, vertices, codes)
if codes is not None:
level = level[0]
headers = np.vstack((
[v.shape[0] for v in vertices],
[level]*len(vertices))).T
vertices = np.vstack(
list(it.__next__() for it in
itertools.cycle((iter(headers), iter(vertices)))))
return vertices | [
"def",
"matlab_formatter",
"(",
"level",
",",
"vertices",
",",
"codes",
"=",
"None",
")",
":",
"vertices",
"=",
"numpy_formatter",
"(",
"level",
",",
"vertices",
",",
"codes",
")",
"if",
"codes",
"is",
"not",
"None",
":",
"level",
"=",
"level",
"[",
"0... | `MATLAB`_ style contour formatter.
Contours are returned as a single Nx2, `MATLAB`_ style, contour array.
There are two types of rows in this format:
* Header: The first element of a header row is the level of the contour
(the lower level for filled contours) and the second element is the
number of vertices (to follow) belonging to this contour line.
* Vertex: x,y coordinate pairs of the vertex.
A header row is always followed by the coresponding number of vertices.
Another header row may follow if there are more contour lines.
For filled contours the direction of vertices matters:
* CCW (ACW): The vertices give the exterior of a contour polygon.
* CW: The vertices give a hole of a contour polygon. This hole will
always be inside the exterior of the last contour exterior.
For further explanation of this format see the `Mathworks documentation
<https://www.mathworks.com/help/matlab/ref/contour-properties.html#prop_ContourMatrix>`_
noting that the MATLAB format used in the `contours` package is the
transpose of that used by `MATLAB`_ (since `MATLAB`_ is column-major
and `NumPy`_ is row-major by default).
.. _NumPy: http://www.numpy.org
.. _MATLAB: https://www.mathworks.com/products/matlab.html | [
"MATLAB",
"_",
"style",
"contour",
"formatter",
"."
] | python | valid |
thomasdelaet/python-velbus | velbus/messages/sensor_temperature.py | https://github.com/thomasdelaet/python-velbus/blob/af2f8af43f1a24bf854eff9f3126fd7b5c41b3dd/velbus/messages/sensor_temperature.py#L25-L39 | def populate(self, priority, address, rtr, data):
"""
data bytes (high + low)
1 + 2 = current temp
3 + 4 = min temp
5 + 6 = max temp
:return: None
"""
assert isinstance(data, bytes)
self.needs_no_rtr(rtr)
self.needs_data(data, 6)
self.set_attributes(priority, address, rtr)
self.cur = (((data[0] << 8)| data[1]) / 32 ) * 0.0625
self.min = (((data[2] << 8) | data[3]) / 32 ) * 0.0625
self.max = (((data[4] << 8) | data[5]) / 32 ) * 0.0625 | [
"def",
"populate",
"(",
"self",
",",
"priority",
",",
"address",
",",
"rtr",
",",
"data",
")",
":",
"assert",
"isinstance",
"(",
"data",
",",
"bytes",
")",
"self",
".",
"needs_no_rtr",
"(",
"rtr",
")",
"self",
".",
"needs_data",
"(",
"data",
",",
"6"... | data bytes (high + low)
1 + 2 = current temp
3 + 4 = min temp
5 + 6 = max temp
:return: None | [
"data",
"bytes",
"(",
"high",
"+",
"low",
")",
"1",
"+",
"2",
"=",
"current",
"temp",
"3",
"+",
"4",
"=",
"min",
"temp",
"5",
"+",
"6",
"=",
"max",
"temp",
":",
"return",
":",
"None"
] | python | train |
fake-name/ChromeController | ChromeController/Generator/Generated.py | https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L1621-L1641 | def Emulation_setScriptExecutionDisabled(self, value):
"""
Function path: Emulation.setScriptExecutionDisabled
Domain: Emulation
Method name: setScriptExecutionDisabled
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'value' (type: boolean) -> Whether script execution should be disabled in the page.
No return value.
Description: Switches script execution in the page.
"""
assert isinstance(value, (bool,)
), "Argument 'value' must be of type '['bool']'. Received type: '%s'" % type(
value)
subdom_funcs = self.synchronous_command(
'Emulation.setScriptExecutionDisabled', value=value)
return subdom_funcs | [
"def",
"Emulation_setScriptExecutionDisabled",
"(",
"self",
",",
"value",
")",
":",
"assert",
"isinstance",
"(",
"value",
",",
"(",
"bool",
",",
")",
")",
",",
"\"Argument 'value' must be of type '['bool']'. Received type: '%s'\"",
"%",
"type",
"(",
"value",
")",
"s... | Function path: Emulation.setScriptExecutionDisabled
Domain: Emulation
Method name: setScriptExecutionDisabled
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'value' (type: boolean) -> Whether script execution should be disabled in the page.
No return value.
Description: Switches script execution in the page. | [
"Function",
"path",
":",
"Emulation",
".",
"setScriptExecutionDisabled",
"Domain",
":",
"Emulation",
"Method",
"name",
":",
"setScriptExecutionDisabled",
"WARNING",
":",
"This",
"function",
"is",
"marked",
"Experimental",
"!",
"Parameters",
":",
"Required",
"arguments... | python | train |
aleju/imgaug | imgaug/multicore.py | https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/multicore.py#L233-L238 | def terminate(self):
"""Terminate the pool immediately."""
if self._pool is not None:
self._pool.terminate()
self._pool.join()
self._pool = None | [
"def",
"terminate",
"(",
"self",
")",
":",
"if",
"self",
".",
"_pool",
"is",
"not",
"None",
":",
"self",
".",
"_pool",
".",
"terminate",
"(",
")",
"self",
".",
"_pool",
".",
"join",
"(",
")",
"self",
".",
"_pool",
"=",
"None"
] | Terminate the pool immediately. | [
"Terminate",
"the",
"pool",
"immediately",
"."
] | python | valid |
frc1418/tbapy | tbapy/main.py | https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L251-L261 | def event(self, event, simple=False):
"""
Get basic information about an event.
More specific data (typically obtained with the detail_type URL parameter) can be obtained with event_alliances(), event_district_points(), event_insights(), event_oprs(), event_predictions(), and event_rankings().
:param event: Key of event for which you desire data.
:param simple: Get only vital data.
:return: A single Event object.
"""
return Event(self._get('event/%s%s' % (event, '/simple' if simple else ''))) | [
"def",
"event",
"(",
"self",
",",
"event",
",",
"simple",
"=",
"False",
")",
":",
"return",
"Event",
"(",
"self",
".",
"_get",
"(",
"'event/%s%s'",
"%",
"(",
"event",
",",
"'/simple'",
"if",
"simple",
"else",
"''",
")",
")",
")"
] | Get basic information about an event.
More specific data (typically obtained with the detail_type URL parameter) can be obtained with event_alliances(), event_district_points(), event_insights(), event_oprs(), event_predictions(), and event_rankings().
:param event: Key of event for which you desire data.
:param simple: Get only vital data.
:return: A single Event object. | [
"Get",
"basic",
"information",
"about",
"an",
"event",
"."
] | python | train |
pymupdf/PyMuPDF | examples/PDFLinkMaint.py | https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/examples/PDFLinkMaint.py#L908-L911 | def wxRect_to_Rect(self, wr):
""" Return a shrunk fitz.Rect for given wx.Rect."""
r = fitz.Rect(wr.x, wr.y, wr.x + wr.width, wr.y + wr.height)
return r * self.shrink | [
"def",
"wxRect_to_Rect",
"(",
"self",
",",
"wr",
")",
":",
"r",
"=",
"fitz",
".",
"Rect",
"(",
"wr",
".",
"x",
",",
"wr",
".",
"y",
",",
"wr",
".",
"x",
"+",
"wr",
".",
"width",
",",
"wr",
".",
"y",
"+",
"wr",
".",
"height",
")",
"return",
... | Return a shrunk fitz.Rect for given wx.Rect. | [
"Return",
"a",
"shrunk",
"fitz",
".",
"Rect",
"for",
"given",
"wx",
".",
"Rect",
"."
] | python | train |
proycon/clam | clam/clamservice.py | https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/clamservice.py#L2099-L2120 | def uploader(project, credentials=None):
"""The Uploader is intended for the Fine Uploader used in the web application (or similar frontend), it is not intended for proper RESTful communication. Will return JSON compatible with Fine Uploader rather than CLAM Upload XML. Unfortunately, normal digest authentication does not work well with the uploader, so we implement a simple key check based on hashed username, projectname and a secret key that is communicated as a JS variable in the interface ."""
postdata = flask.request.values
if 'user' in postdata:
user = postdata['user']
else:
user = 'anonymous'
if 'filename' in postdata:
filename = postdata['filename']
else:
printdebug('No filename passed')
return "{success: false, error: 'No filename passed'}"
if 'accesstoken' in postdata:
accesstoken = postdata['accesstoken']
else:
return withheaders(flask.make_response("{success: false, error: 'No accesstoken given'}"),'application/json', {'allow_origin': settings.ALLOW_ORIGIN})
if accesstoken != Project.getaccesstoken(user,project):
return withheaders(flask.make_response("{success: false, error: 'Invalid accesstoken given'}"),'application/json', {'allow_origin': settings.ALLOW_ORIGIN})
if not os.path.exists(Project.path(project, user)):
return withheaders(flask.make_response("{success: false, error: 'Destination does not exist'}"),'application/json', {'allow_origin': settings.ALLOW_ORIGIN})
else:
return addfile(project,filename,user, postdata,None, 'json' ) | [
"def",
"uploader",
"(",
"project",
",",
"credentials",
"=",
"None",
")",
":",
"postdata",
"=",
"flask",
".",
"request",
".",
"values",
"if",
"'user'",
"in",
"postdata",
":",
"user",
"=",
"postdata",
"[",
"'user'",
"]",
"else",
":",
"user",
"=",
"'anony... | The Uploader is intended for the Fine Uploader used in the web application (or similar frontend), it is not intended for proper RESTful communication. Will return JSON compatible with Fine Uploader rather than CLAM Upload XML. Unfortunately, normal digest authentication does not work well with the uploader, so we implement a simple key check based on hashed username, projectname and a secret key that is communicated as a JS variable in the interface . | [
"The",
"Uploader",
"is",
"intended",
"for",
"the",
"Fine",
"Uploader",
"used",
"in",
"the",
"web",
"application",
"(",
"or",
"similar",
"frontend",
")",
"it",
"is",
"not",
"intended",
"for",
"proper",
"RESTful",
"communication",
".",
"Will",
"return",
"JSON"... | python | train |
theonion/djes | djes/models.py | https://github.com/theonion/djes/blob/8f7347382c74172e82e959e3dfbc12b18fbb523f/djes/models.py#L24-L38 | def mapping(self):
"""Get a mapping class for this model
This method will return a Mapping class for your model, generating it using settings from a
`Mapping` class on your model (if one exists). The generated class is cached on the manager.
"""
if not hasattr(self, "_mapping"):
if hasattr(self.model, "Mapping"):
mapping_klass = type("Mapping", (DjangoMapping, self.model.Mapping), {})
else:
mapping_klass = get_first_mapping(self.model)
if mapping_klass is None:
mapping_klass = DjangoMapping
self._mapping = mapping_klass(self.model)
return self._mapping | [
"def",
"mapping",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"\"_mapping\"",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"model",
",",
"\"Mapping\"",
")",
":",
"mapping_klass",
"=",
"type",
"(",
"\"Mapping\"",
",",
"(",
"DjangoMa... | Get a mapping class for this model
This method will return a Mapping class for your model, generating it using settings from a
`Mapping` class on your model (if one exists). The generated class is cached on the manager. | [
"Get",
"a",
"mapping",
"class",
"for",
"this",
"model"
] | python | train |
Clinical-Genomics/scout | scout/parse/cytoband.py | https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/parse/cytoband.py#L5-L35 | def parse_cytoband(lines):
"""Parse iterable with cytoband coordinates
Args:
lines(iterable): Strings on format "chr1\t2300000\t5400000\tp36.32\tgpos25"
Returns:
cytobands(dict): Dictionary with chromosome names as keys and
interval trees as values
"""
cytobands = {}
for line in lines:
line = line.rstrip()
splitted_line = line.split('\t')
chrom = splitted_line[0].lstrip('chr')
start = int(splitted_line[1])
stop = int(splitted_line[2])
name = splitted_line[3]
if chrom in cytobands:
# Add interval to existing tree
cytobands[chrom][start:stop] = name
else:
# Create a new interval tree
new_tree = intervaltree.IntervalTree()
# create the interval
new_tree[start:stop] = name
# Add the interval tree
cytobands[chrom] = new_tree
return cytobands | [
"def",
"parse_cytoband",
"(",
"lines",
")",
":",
"cytobands",
"=",
"{",
"}",
"for",
"line",
"in",
"lines",
":",
"line",
"=",
"line",
".",
"rstrip",
"(",
")",
"splitted_line",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",
"chrom",
"=",
"splitted_line",
... | Parse iterable with cytoband coordinates
Args:
lines(iterable): Strings on format "chr1\t2300000\t5400000\tp36.32\tgpos25"
Returns:
cytobands(dict): Dictionary with chromosome names as keys and
interval trees as values | [
"Parse",
"iterable",
"with",
"cytoband",
"coordinates",
"Args",
":",
"lines",
"(",
"iterable",
")",
":",
"Strings",
"on",
"format",
"chr1",
"\\",
"t2300000",
"\\",
"t5400000",
"\\",
"tp36",
".",
"32",
"\\",
"tgpos25",
"Returns",
":",
"cytobands",
"(",
"dic... | python | test |
StellarCN/py-stellar-base | stellar_base/builder.py | https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/builder.py#L804-L814 | def get_sequence(self):
"""Get the sequence number for a given account via Horizon.
:return: The current sequence number for a given account
:rtype: int
"""
if not self.address:
raise StellarAddressInvalidError('No address provided.')
address = self.horizon.account(self.address)
return int(address.get('sequence')) | [
"def",
"get_sequence",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"address",
":",
"raise",
"StellarAddressInvalidError",
"(",
"'No address provided.'",
")",
"address",
"=",
"self",
".",
"horizon",
".",
"account",
"(",
"self",
".",
"address",
")",
"retu... | Get the sequence number for a given account via Horizon.
:return: The current sequence number for a given account
:rtype: int | [
"Get",
"the",
"sequence",
"number",
"for",
"a",
"given",
"account",
"via",
"Horizon",
"."
] | python | train |
numenta/nupic | src/nupic/data/generators/data_generator.py | https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/generators/data_generator.py#L166-L171 | def generateRecords(self, records):
"""Generate multiple records. Refer to definition for generateRecord"""
if self.verbosity>0: print 'Generating', len(records), 'records...'
for record in records:
self.generateRecord(record) | [
"def",
"generateRecords",
"(",
"self",
",",
"records",
")",
":",
"if",
"self",
".",
"verbosity",
">",
"0",
":",
"print",
"'Generating'",
",",
"len",
"(",
"records",
")",
",",
"'records...'",
"for",
"record",
"in",
"records",
":",
"self",
".",
"generateRe... | Generate multiple records. Refer to definition for generateRecord | [
"Generate",
"multiple",
"records",
".",
"Refer",
"to",
"definition",
"for",
"generateRecord"
] | python | valid |
awslabs/aws-sam-cli | samcli/commands/local/lib/sam_api_provider.py | https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/sam_api_provider.py#L246-L270 | def _extract_apis_from_events(function_logical_id, serverless_function_events, collector):
"""
Given an AWS::Serverless::Function Event Dictionary, extract out all 'Api' events and store within the
collector
Parameters
----------
function_logical_id : str
LogicalId of the AWS::Serverless::Function
serverless_function_events : dict
Event Dictionary of a AWS::Serverless::Function
collector : ApiCollector
Instance of the API collector that where we will save the API information
"""
count = 0
for _, event in serverless_function_events.items():
if SamApiProvider._FUNCTION_EVENT_TYPE_API == event.get(SamApiProvider._TYPE):
api_resource_id, api = SamApiProvider._convert_event_api(function_logical_id, event.get("Properties"))
collector.add_apis(api_resource_id, [api])
count += 1
LOG.debug("Found '%d' API Events in Serverless function with name '%s'", count, function_logical_id) | [
"def",
"_extract_apis_from_events",
"(",
"function_logical_id",
",",
"serverless_function_events",
",",
"collector",
")",
":",
"count",
"=",
"0",
"for",
"_",
",",
"event",
"in",
"serverless_function_events",
".",
"items",
"(",
")",
":",
"if",
"SamApiProvider",
"."... | Given an AWS::Serverless::Function Event Dictionary, extract out all 'Api' events and store within the
collector
Parameters
----------
function_logical_id : str
LogicalId of the AWS::Serverless::Function
serverless_function_events : dict
Event Dictionary of a AWS::Serverless::Function
collector : ApiCollector
Instance of the API collector that where we will save the API information | [
"Given",
"an",
"AWS",
"::",
"Serverless",
"::",
"Function",
"Event",
"Dictionary",
"extract",
"out",
"all",
"Api",
"events",
"and",
"store",
"within",
"the",
"collector"
] | python | train |
xeroc/python-graphenelib | graphenecommon/memo.py | https://github.com/xeroc/python-graphenelib/blob/8bb5396bc79998ee424cf3813af478304173f3a6/graphenecommon/memo.py#L114-L150 | def decrypt(self, message):
""" Decrypt a message
:param dict message: encrypted memo message
:returns: decrypted message
:rtype: str
"""
if not message:
return None
# We first try to decode assuming we received the memo
try:
memo_wif = self.blockchain.wallet.getPrivateKeyForPublicKey(message["to"])
pubkey = message["from"]
except KeyNotFound:
try:
# if that failed, we assume that we have sent the memo
memo_wif = self.blockchain.wallet.getPrivateKeyForPublicKey(
message["from"]
)
pubkey = message["to"]
except KeyNotFound:
# if all fails, raise exception
raise MissingKeyError(
"None of the required memo keys are installed!"
"Need any of {}".format([message["to"], message["from"]])
)
if not hasattr(self, "chain_prefix"):
self.chain_prefix = self.blockchain.prefix
return memo.decode_memo(
self.privatekey_class(memo_wif),
self.publickey_class(pubkey, prefix=self.chain_prefix),
message.get("nonce"),
message.get("message"),
) | [
"def",
"decrypt",
"(",
"self",
",",
"message",
")",
":",
"if",
"not",
"message",
":",
"return",
"None",
"# We first try to decode assuming we received the memo",
"try",
":",
"memo_wif",
"=",
"self",
".",
"blockchain",
".",
"wallet",
".",
"getPrivateKeyForPublicKey",... | Decrypt a message
:param dict message: encrypted memo message
:returns: decrypted message
:rtype: str | [
"Decrypt",
"a",
"message"
] | python | valid |
Chilipp/sphinx-nbexamples | sphinx_nbexamples/__init__.py | https://github.com/Chilipp/sphinx-nbexamples/blob/08e0319ff3c70f8a931dfa8890caf48add4d0470/sphinx_nbexamples/__init__.py#L317-L379 | def process_notebook(self, disable_warnings=True):
"""Process the notebook and create all the pictures and files
This method runs the notebook using the :mod:`nbconvert` and
:mod:`nbformat` modules. It creates the :attr:`outfile` notebook,
a python and a rst file"""
infile = self.infile
outfile = self.outfile
in_dir = os.path.dirname(infile) + os.path.sep
odir = os.path.dirname(outfile) + os.path.sep
create_dirs(os.path.join(odir, 'images'))
ep = nbconvert.preprocessors.ExecutePreprocessor(
timeout=300)
cp = nbconvert.preprocessors.ClearOutputPreprocessor(
timeout=300)
self.nb = nb = nbformat.read(infile, nbformat.current_nbformat)
# disable warnings in the rst file
if disable_warnings:
for i, cell in enumerate(nb.cells):
if cell['cell_type'] == 'code':
cell = cell.copy()
break
cell = cell.copy()
cell.source = """
import logging
logging.captureWarnings(True)
logging.getLogger('py.warnings').setLevel(logging.ERROR)
"""
nb.cells.insert(i, cell)
# write and process rst_file
if self.preprocess:
t = dt.datetime.now()
logger.info('Processing %s', self.infile)
try:
ep.preprocess(nb, {'metadata': {'path': in_dir}})
except nbconvert.preprocessors.execute.CellExecutionError:
logger.critical(
'Error while processing %s!', self.infile, exc_info=True)
else:
logger.info('Done. Seconds needed: %i',
(dt.datetime.now() - t).seconds)
if disable_warnings:
nb.cells.pop(i)
self.py_file = self.get_out_file('py')
if self.remove_tags:
tp = nbconvert.preprocessors.TagRemovePreprocessor(timeout=300)
for key, val in self.tag_options.items():
setattr(tp, key, set(val))
nb4rst = deepcopy(nb)
tp.preprocess(nb4rst, {'metadata': {'path': in_dir}})
else:
nb4rst = nb
self.create_rst(nb4rst, in_dir, odir)
if self.clear:
cp.preprocess(nb, {'metadata': {'path': in_dir}})
# write notebook file
nbformat.write(nb, outfile)
self.create_py(nb) | [
"def",
"process_notebook",
"(",
"self",
",",
"disable_warnings",
"=",
"True",
")",
":",
"infile",
"=",
"self",
".",
"infile",
"outfile",
"=",
"self",
".",
"outfile",
"in_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"infile",
")",
"+",
"os",
".",
... | Process the notebook and create all the pictures and files
This method runs the notebook using the :mod:`nbconvert` and
:mod:`nbformat` modules. It creates the :attr:`outfile` notebook,
a python and a rst file | [
"Process",
"the",
"notebook",
"and",
"create",
"all",
"the",
"pictures",
"and",
"files"
] | python | test |
bitprophet/ssh | ssh/transport.py | https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/transport.py#L1094-L1119 | def auth_none(self, username):
"""
Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
L{BadAuthenticationType} exception raised.
@param username: the username to authenticate as
@type username: string
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if "none" authentication isn't allowed
by the server for this user
@raise SSHException: if the authentication failed due to a network
error
@since: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_none(username, my_event)
return self.auth_handler.wait_for_response(my_event) | [
"def",
"auth_none",
"(",
"self",
",",
"username",
")",
":",
"if",
"(",
"not",
"self",
".",
"active",
")",
"or",
"(",
"not",
"self",
".",
"initial_kex_done",
")",
":",
"raise",
"SSHException",
"(",
"'No existing session'",
")",
"my_event",
"=",
"threading",... | Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
L{BadAuthenticationType} exception raised.
@param username: the username to authenticate as
@type username: string
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if "none" authentication isn't allowed
by the server for this user
@raise SSHException: if the authentication failed due to a network
error
@since: 1.5 | [
"Try",
"to",
"authenticate",
"to",
"the",
"server",
"using",
"no",
"authentication",
"at",
"all",
".",
"This",
"will",
"almost",
"always",
"fail",
".",
"It",
"may",
"be",
"useful",
"for",
"determining",
"the",
"list",
"of",
"authentication",
"types",
"suppor... | python | train |
NiklasRosenstein/myo-python | myo/math.py | https://github.com/NiklasRosenstein/myo-python/blob/89a7480f8058061da7a3dd98ccec57a6b134ddf3/myo/math.py#L102-L108 | def normalized(self):
"""
Returns a normalized copy of this vector.
"""
norm = self.magnitude()
return Vector(self.x / norm, self.y / norm, self.z / norm) | [
"def",
"normalized",
"(",
"self",
")",
":",
"norm",
"=",
"self",
".",
"magnitude",
"(",
")",
"return",
"Vector",
"(",
"self",
".",
"x",
"/",
"norm",
",",
"self",
".",
"y",
"/",
"norm",
",",
"self",
".",
"z",
"/",
"norm",
")"
] | Returns a normalized copy of this vector. | [
"Returns",
"a",
"normalized",
"copy",
"of",
"this",
"vector",
"."
] | python | train |
mhe/pynrrd | nrrd/reader.py | https://github.com/mhe/pynrrd/blob/96dd875b302031ea27e2d3aaa611dc6f2dfc7979/nrrd/reader.py#L469-L506 | def read(filename, custom_field_map=None, index_order='F'):
"""Read a NRRD file and return the header and data
See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files.
.. note::
Users should be aware that the `index_order` argument needs to be consistent between `nrrd.read` and `nrrd.write`. I.e., reading an array with `index_order='F'` will result in a transposed version of the original data and hence the writer needs to be aware of this.
Parameters
----------
filename : :class:`str`
Filename of the NRRD file
custom_field_map : :class:`dict` (:class:`str`, :class:`str`), optional
Dictionary used for parsing custom field types where the key is the custom field name and the value is a
string identifying datatype for the custom field.
index_order : {'C', 'F'}, optional
Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered from
slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
from fastest-varying to slowest-varying (e.g. (x, y, z)).
Returns
-------
data : :class:`numpy.ndarray`
Data read from NRRD file
header : :class:`dict` (:class:`str`, :obj:`Object`)
Dictionary containing the header fields and their corresponding parsed value
See Also
--------
:meth:`write`, :meth:`read_header`, :meth:`read_data`
"""
"""Read a NRRD file and return a tuple (data, header)."""
with open(filename, 'rb') as fh:
header = read_header(fh, custom_field_map)
data = read_data(header, fh, filename, index_order)
return data, header | [
"def",
"read",
"(",
"filename",
",",
"custom_field_map",
"=",
"None",
",",
"index_order",
"=",
"'F'",
")",
":",
"\"\"\"Read a NRRD file and return a tuple (data, header).\"\"\"",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"fh",
":",
"header",
"=",
... | Read a NRRD file and return the header and data
See :ref:`user-guide:Reading NRRD files` for more information on reading NRRD files.
.. note::
Users should be aware that the `index_order` argument needs to be consistent between `nrrd.read` and `nrrd.write`. I.e., reading an array with `index_order='F'` will result in a transposed version of the original data and hence the writer needs to be aware of this.
Parameters
----------
filename : :class:`str`
Filename of the NRRD file
custom_field_map : :class:`dict` (:class:`str`, :class:`str`), optional
Dictionary used for parsing custom field types where the key is the custom field name and the value is a
string identifying datatype for the custom field.
index_order : {'C', 'F'}, optional
Specifies the index order of the resulting data array. Either 'C' (C-order) where the dimensions are ordered from
slowest-varying to fastest-varying (e.g. (z, y, x)), or 'F' (Fortran-order) where the dimensions are ordered
from fastest-varying to slowest-varying (e.g. (x, y, z)).
Returns
-------
data : :class:`numpy.ndarray`
Data read from NRRD file
header : :class:`dict` (:class:`str`, :obj:`Object`)
Dictionary containing the header fields and their corresponding parsed value
See Also
--------
:meth:`write`, :meth:`read_header`, :meth:`read_data` | [
"Read",
"a",
"NRRD",
"file",
"and",
"return",
"the",
"header",
"and",
"data"
] | python | train |
tomprince/txgithub | txgithub/api.py | https://github.com/tomprince/txgithub/blob/3bd5eebb25db013e2193e6a102a91049f356710d/txgithub/api.py#L378-L396 | def createComment(self, repo_user, repo_name, pull_number,
body, commit_id, path, position):
"""
POST /repos/:owner/:repo/pulls/:number/comments
:param pull_number: The pull request's ID.
:param body: The text of the comment.
:param commit_id: The SHA of the commit to comment on.
:param path: The relative path of the file to comment on.
:param position: The line index in the diff to comment on.
"""
return self.api.makeRequest(
["repos", repo_user, repo_name,
"pulls", str(pull_number), "comments"],
method="POST",
data=dict(body=body,
commit_id=commit_id,
path=path,
position=position)) | [
"def",
"createComment",
"(",
"self",
",",
"repo_user",
",",
"repo_name",
",",
"pull_number",
",",
"body",
",",
"commit_id",
",",
"path",
",",
"position",
")",
":",
"return",
"self",
".",
"api",
".",
"makeRequest",
"(",
"[",
"\"repos\"",
",",
"repo_user",
... | POST /repos/:owner/:repo/pulls/:number/comments
:param pull_number: The pull request's ID.
:param body: The text of the comment.
:param commit_id: The SHA of the commit to comment on.
:param path: The relative path of the file to comment on.
:param position: The line index in the diff to comment on. | [
"POST",
"/",
"repos",
"/",
":",
"owner",
"/",
":",
"repo",
"/",
"pulls",
"/",
":",
"number",
"/",
"comments"
] | python | train |
PolyJIT/benchbuild | benchbuild/projects/gentoo/gentoo.py | https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/projects/gentoo/gentoo.py#L229-L250 | def write_wgetrc(_path):
"""
Write a valid gentoo wgetrc file to :path:.
Args:
path - The output path of the wgetrc
"""
http_proxy = str(CFG["gentoo"]["http_proxy"])
ftp_proxy = str(CFG["gentoo"]["ftp_proxy"])
path.mkfile_uchroot("/etc/wgetrc")
with open(_path, 'w') as wgetrc:
if http_proxy is not None:
http_s = "http_proxy = {0}".format(http_proxy)
https_s = "https_proxy = {0}".format(http_proxy)
wgetrc.write("use_proxy = on\n")
wgetrc.write(http_s + "\n")
wgetrc.write(https_s + "\n")
if ftp_proxy is not None:
fp_s = "ftp_proxy={0}".format(ftp_proxy)
wgetrc.write(fp_s + "\n") | [
"def",
"write_wgetrc",
"(",
"_path",
")",
":",
"http_proxy",
"=",
"str",
"(",
"CFG",
"[",
"\"gentoo\"",
"]",
"[",
"\"http_proxy\"",
"]",
")",
"ftp_proxy",
"=",
"str",
"(",
"CFG",
"[",
"\"gentoo\"",
"]",
"[",
"\"ftp_proxy\"",
"]",
")",
"path",
".",
"mkf... | Write a valid gentoo wgetrc file to :path:.
Args:
path - The output path of the wgetrc | [
"Write",
"a",
"valid",
"gentoo",
"wgetrc",
"file",
"to",
":",
"path",
":",
"."
] | python | train |
spyder-ide/spyder | spyder/utils/qthelpers.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/qthelpers.py#L65-L94 | def qapplication(translate=True, test_time=3):
"""
Return QApplication instance
Creates it if it doesn't already exist
test_time: Time to maintain open the application when testing. It's given
in seconds
"""
if running_in_mac_app():
SpyderApplication = MacApplication
else:
SpyderApplication = QApplication
app = SpyderApplication.instance()
if app is None:
# Set Application name for Gnome 3
# https://groups.google.com/forum/#!topic/pyside/24qxvwfrRDs
app = SpyderApplication(['Spyder'])
# Set application name for KDE (See issue 2207)
app.setApplicationName('Spyder')
if translate:
install_translator(app)
test_ci = os.environ.get('TEST_CI_WIDGETS', None)
if test_ci is not None:
timer_shutdown = QTimer(app)
timer_shutdown.timeout.connect(app.quit)
timer_shutdown.start(test_time*1000)
return app | [
"def",
"qapplication",
"(",
"translate",
"=",
"True",
",",
"test_time",
"=",
"3",
")",
":",
"if",
"running_in_mac_app",
"(",
")",
":",
"SpyderApplication",
"=",
"MacApplication",
"else",
":",
"SpyderApplication",
"=",
"QApplication",
"app",
"=",
"SpyderApplicati... | Return QApplication instance
Creates it if it doesn't already exist
test_time: Time to maintain open the application when testing. It's given
in seconds | [
"Return",
"QApplication",
"instance",
"Creates",
"it",
"if",
"it",
"doesn",
"t",
"already",
"exist",
"test_time",
":",
"Time",
"to",
"maintain",
"open",
"the",
"application",
"when",
"testing",
".",
"It",
"s",
"given",
"in",
"seconds"
] | python | train |
facebook/watchman | python/pywatchman/__init__.py | https://github.com/facebook/watchman/blob/d416c249dd8f463dc69fc2691d0f890598c045a9/python/pywatchman/__init__.py#L1145-L1158 | def capabilityCheck(self, optional=None, required=None):
""" Perform a server capability check """
res = self.query(
"version", {"optional": optional or [], "required": required or []}
)
if not self._hasprop(res, "capabilities"):
# Server doesn't support capabilities, so we need to
# synthesize the results based on the version
capabilities.synthesize(res, optional)
if "error" in res:
raise CommandError(res["error"])
return res | [
"def",
"capabilityCheck",
"(",
"self",
",",
"optional",
"=",
"None",
",",
"required",
"=",
"None",
")",
":",
"res",
"=",
"self",
".",
"query",
"(",
"\"version\"",
",",
"{",
"\"optional\"",
":",
"optional",
"or",
"[",
"]",
",",
"\"required\"",
":",
"req... | Perform a server capability check | [
"Perform",
"a",
"server",
"capability",
"check"
] | python | train |
lepture/flask-oauthlib | flask_oauthlib/client.py | https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/client.py#L621-L650 | def handle_oauth1_response(self, args):
"""Handles an oauth1 authorization response."""
client = self.make_client()
client.verifier = args.get('oauth_verifier')
tup = session.get('%s_oauthtok' % self.name)
if not tup:
raise OAuthException(
'Token not found, maybe you disabled cookie',
type='token_not_found'
)
client.resource_owner_key = tup[0]
client.resource_owner_secret = tup[1]
uri, headers, data = client.sign(
self.expand_url(self.access_token_url),
_encode(self.access_token_method)
)
headers.update(self._access_token_headers)
resp, content = self.http_request(
uri, headers, to_bytes(data, self.encoding),
method=self.access_token_method
)
data = parse_response(resp, content)
if resp.code not in (200, 201):
raise OAuthException(
'Invalid response from %s' % self.name,
type='invalid_response', data=data
)
return data | [
"def",
"handle_oauth1_response",
"(",
"self",
",",
"args",
")",
":",
"client",
"=",
"self",
".",
"make_client",
"(",
")",
"client",
".",
"verifier",
"=",
"args",
".",
"get",
"(",
"'oauth_verifier'",
")",
"tup",
"=",
"session",
".",
"get",
"(",
"'%s_oauth... | Handles an oauth1 authorization response. | [
"Handles",
"an",
"oauth1",
"authorization",
"response",
"."
] | python | test |
btimby/fulltext | fulltext/__init__.py | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L329-L362 | def is_binary(f):
"""Return True if binary mode."""
# NOTE: order matters here. We don't bail on Python 2 just yet. Both
# codecs.open() and io.open() can open in text mode, both set the encoding
# attribute. We must do that check first.
# If it has a decoding attribute with a value, it is text mode.
if getattr(f, "encoding", None):
return False
# Python 2 makes no further distinction.
if not PY3:
return True
# If the file has a mode, and it contains b, it is binary.
try:
if 'b' in getattr(f, 'mode', ''):
return True
except TypeError:
import gzip
if isinstance(f, gzip.GzipFile):
return True # in gzip mode is an integer
raise
# Can we sniff?
try:
f.seek(0, os.SEEK_CUR)
except (AttributeError, IOError):
return False
# Finally, let's sniff by reading a byte.
byte = f.read(1)
f.seek(-1, os.SEEK_CUR)
return hasattr(byte, 'decode') | [
"def",
"is_binary",
"(",
"f",
")",
":",
"# NOTE: order matters here. We don't bail on Python 2 just yet. Both",
"# codecs.open() and io.open() can open in text mode, both set the encoding",
"# attribute. We must do that check first.",
"# If it has a decoding attribute with a value, it is text mode... | Return True if binary mode. | [
"Return",
"True",
"if",
"binary",
"mode",
"."
] | python | train |
albert12132/templar | templar/markdown.py | https://github.com/albert12132/templar/blob/39851c89730ab69e5c73d0a46adca2a44ecc4165/templar/markdown.py#L673-L685 | def unhash(text, hashes):
"""Unhashes all hashed entites in the hashes dictionary.
The pattern for hashes is defined by re_hash. After everything is
unhashed, <pre> blocks are "pulled out" of whatever indentation
level in which they used to be (e.g. in a list).
"""
def retrieve_match(match):
return hashes[match.group(0)]
while re_hash.search(text):
text = re_hash.sub(retrieve_match, text)
text = re_pre_tag.sub(lambda m: re.sub('^' + m.group(1), '', m.group(0), flags=re.M), text)
return text | [
"def",
"unhash",
"(",
"text",
",",
"hashes",
")",
":",
"def",
"retrieve_match",
"(",
"match",
")",
":",
"return",
"hashes",
"[",
"match",
".",
"group",
"(",
"0",
")",
"]",
"while",
"re_hash",
".",
"search",
"(",
"text",
")",
":",
"text",
"=",
"re_h... | Unhashes all hashed entites in the hashes dictionary.
The pattern for hashes is defined by re_hash. After everything is
unhashed, <pre> blocks are "pulled out" of whatever indentation
level in which they used to be (e.g. in a list). | [
"Unhashes",
"all",
"hashed",
"entites",
"in",
"the",
"hashes",
"dictionary",
"."
] | python | train |
MartinThoma/hwrt | hwrt/create_ffiles.py | https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/create_ffiles.py#L38-L59 | def _create_index_formula_lookup(formula_id2index,
feature_folder,
index2latex):
"""
Create a lookup file where the index is mapped to the formula id and the
LaTeX command.
Parameters
----------
formula_id2index : dict
feature_folder : str
Path to a folder in which a feature file as well as an
index2formula_id.csv is.
index2latex : dict
Maps an integer index to a LaTeX command
"""
index2formula_id = sorted(formula_id2index.items(), key=lambda n: n[1])
index2formula_file = os.path.join(feature_folder, "index2formula_id.csv")
with open(index2formula_file, "w") as f:
f.write("index,formula_id,latex\n")
for formula_id, index in index2formula_id:
f.write("%i,%i,%s\n" % (index, formula_id, index2latex[index])) | [
"def",
"_create_index_formula_lookup",
"(",
"formula_id2index",
",",
"feature_folder",
",",
"index2latex",
")",
":",
"index2formula_id",
"=",
"sorted",
"(",
"formula_id2index",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"n",
":",
"n",
"[",
"1",
"]",
... | Create a lookup file where the index is mapped to the formula id and the
LaTeX command.
Parameters
----------
formula_id2index : dict
feature_folder : str
Path to a folder in which a feature file as well as an
index2formula_id.csv is.
index2latex : dict
Maps an integer index to a LaTeX command | [
"Create",
"a",
"lookup",
"file",
"where",
"the",
"index",
"is",
"mapped",
"to",
"the",
"formula",
"id",
"and",
"the",
"LaTeX",
"command",
"."
] | python | train |
goose3/goose3 | goose3/extractors/images.py | https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/images.py#L303-L314 | def check_link_tag(self):
"""\
checks to see if we were able to
find open link_src on this page
"""
node = self.article.raw_doc
meta = self.parser.getElementsByTag(node, tag='link', attr='rel', value='image_src')
for item in meta:
src = self.parser.getAttribute(item, attr='href')
if src:
return self.get_image(src, extraction_type='linktag')
return None | [
"def",
"check_link_tag",
"(",
"self",
")",
":",
"node",
"=",
"self",
".",
"article",
".",
"raw_doc",
"meta",
"=",
"self",
".",
"parser",
".",
"getElementsByTag",
"(",
"node",
",",
"tag",
"=",
"'link'",
",",
"attr",
"=",
"'rel'",
",",
"value",
"=",
"'... | \
checks to see if we were able to
find open link_src on this page | [
"\\",
"checks",
"to",
"see",
"if",
"we",
"were",
"able",
"to",
"find",
"open",
"link_src",
"on",
"this",
"page"
] | python | valid |
Apitax/Apitax | apitax/api/controllers/migrations/scriptax_controller.py | https://github.com/Apitax/Apitax/blob/3883e45f17e01eba4edac9d1bba42f0e7a748682/apitax/api/controllers/migrations/scriptax_controller.py#L20-L44 | def create_driver_script(name, create=None): # noqa: E501
"""Create a new script
Create a new script # noqa: E501
:param name: Get status of a driver with this name
:type name: str
:param create: The data needed to create this script
:type create: dict | bytes
:rtype: Response
"""
if connexion.request.is_json:
create = Create1.from_dict(connexion.request.get_json()) # noqa: E501
response = errorIfUnauthorized(role='developer')
if response:
return response
else:
response = ApitaxResponse()
driver: Driver = LoadedDrivers.getDriver(name)
driver.saveDriverScript(create.script.name, create.script.content)
return Response(status=200, body=response.getResponseBody()) | [
"def",
"create_driver_script",
"(",
"name",
",",
"create",
"=",
"None",
")",
":",
"# noqa: E501",
"if",
"connexion",
".",
"request",
".",
"is_json",
":",
"create",
"=",
"Create1",
".",
"from_dict",
"(",
"connexion",
".",
"request",
".",
"get_json",
"(",
")... | Create a new script
Create a new script # noqa: E501
:param name: Get status of a driver with this name
:type name: str
:param create: The data needed to create this script
:type create: dict | bytes
:rtype: Response | [
"Create",
"a",
"new",
"script"
] | python | train |
pingali/dgit | dgitcore/helper.py | https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/helper.py#L112-L128 | def compute_sha256(filename):
"""
Try the library. If it doesnt work, use the command line..
"""
try:
h = sha256()
fd = open(filename, 'rb')
while True:
buf = fd.read(0x1000000)
if buf in [None, ""]:
break
h.update(buf.encode('utf-8'))
fd.close()
return h.hexdigest()
except:
output = run(["sha256sum", "-b", filename])
return output.split(" ")[0] | [
"def",
"compute_sha256",
"(",
"filename",
")",
":",
"try",
":",
"h",
"=",
"sha256",
"(",
")",
"fd",
"=",
"open",
"(",
"filename",
",",
"'rb'",
")",
"while",
"True",
":",
"buf",
"=",
"fd",
".",
"read",
"(",
"0x1000000",
")",
"if",
"buf",
"in",
"["... | Try the library. If it doesnt work, use the command line.. | [
"Try",
"the",
"library",
".",
"If",
"it",
"doesnt",
"work",
"use",
"the",
"command",
"line",
".."
] | python | valid |
benhoff/pluginmanager | pluginmanager/file_filters/matching_regex.py | https://github.com/benhoff/pluginmanager/blob/a8a184f9ebfbb521703492cb88c1dbda4cd04c06/pluginmanager/file_filters/matching_regex.py#L35-L43 | def plugin_valid(self, filename):
"""
Checks if the given filename is a valid plugin for this Strategy
"""
filename = os.path.basename(filename)
for regex in self.regex_expressions:
if regex.match(filename):
return True
return False | [
"def",
"plugin_valid",
"(",
"self",
",",
"filename",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"for",
"regex",
"in",
"self",
".",
"regex_expressions",
":",
"if",
"regex",
".",
"match",
"(",
"filename",
")",
":... | Checks if the given filename is a valid plugin for this Strategy | [
"Checks",
"if",
"the",
"given",
"filename",
"is",
"a",
"valid",
"plugin",
"for",
"this",
"Strategy"
] | python | train |
sixty-north/cosmic-ray | src/cosmic_ray/plugins.py | https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/plugins.py#L27-L40 | def get_operator(name):
"""Get an operator class from a provider plugin.
Attrs:
name: The name of the operator class.
Returns: The operator *class object* (i.e. not an instance).
"""
sep = name.index('/')
provider_name = name[:sep]
operator_name = name[sep + 1:]
provider = OPERATOR_PROVIDERS[provider_name]
return provider[operator_name] | [
"def",
"get_operator",
"(",
"name",
")",
":",
"sep",
"=",
"name",
".",
"index",
"(",
"'/'",
")",
"provider_name",
"=",
"name",
"[",
":",
"sep",
"]",
"operator_name",
"=",
"name",
"[",
"sep",
"+",
"1",
":",
"]",
"provider",
"=",
"OPERATOR_PROVIDERS",
... | Get an operator class from a provider plugin.
Attrs:
name: The name of the operator class.
Returns: The operator *class object* (i.e. not an instance). | [
"Get",
"an",
"operator",
"class",
"from",
"a",
"provider",
"plugin",
"."
] | python | train |
TheHive-Project/Cortex-Analyzers | analyzers/MaxMind/ipaddr.py | https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/MaxMind/ipaddr.py#L315-L366 | def collapse_address_list(addresses):
"""Collapse a list of IP objects.
Example:
collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
[IPv4('1.1.0.0/23')]
Args:
addresses: A list of IPv4Network or IPv6Network objects.
Returns:
A list of IPv4Network or IPv6Network objects depending on what we
were passed.
Raises:
TypeError: If passed a list of mixed version objects.
"""
i = 0
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseIP):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
str(ip), str(ips[-1])))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
str(ip), str(ips[-1])))
ips.append(ip.ip)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
str(ip), str(nets[-1])))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
nets = sorted(set(nets))
while i < len(ips):
(first, last) = _find_address_range(ips[i:])
i = ips.index(last) + 1
addrs.extend(summarize_address_range(first, last))
return _collapse_address_list_recursive(sorted(
addrs + nets, key=_BaseNet._get_networks_key)) | [
"def",
"collapse_address_list",
"(",
"addresses",
")",
":",
"i",
"=",
"0",
"addrs",
"=",
"[",
"]",
"ips",
"=",
"[",
"]",
"nets",
"=",
"[",
"]",
"# split IP addresses and networks",
"for",
"ip",
"in",
"addresses",
":",
"if",
"isinstance",
"(",
"ip",
",",
... | Collapse a list of IP objects.
Example:
collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
[IPv4('1.1.0.0/23')]
Args:
addresses: A list of IPv4Network or IPv6Network objects.
Returns:
A list of IPv4Network or IPv6Network objects depending on what we
were passed.
Raises:
TypeError: If passed a list of mixed version objects. | [
"Collapse",
"a",
"list",
"of",
"IP",
"objects",
"."
] | python | train |
BerkeleyAutomation/autolab_core | autolab_core/transformations.py | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/transformations.py#L437-L496 | def projection_matrix(point, normal, direction=None,
perspective=None, pseudo=False):
"""Return matrix to project onto plane defined by point and normal.
Using either perspective point, projection direction, or none of both.
If pseudo is True, perspective projections will preserve relative depth
such that Perspective = dot(Orthogonal, PseudoPerspective).
>>> P = projection_matrix((0, 0, 0), (1, 0, 0))
>>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
True
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> P1 = projection_matrix(point, normal, direction=direct)
>>> P2 = projection_matrix(point, normal, perspective=persp)
>>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> is_same_transform(P2, numpy.dot(P0, P3))
True
>>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0))
>>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0
>>> v0[3] = 1.0
>>> v1 = numpy.dot(P, v0)
>>> numpy.allclose(v1[1], v0[1])
True
>>> numpy.allclose(v1[0], 3.0-v1[1])
True
"""
M = numpy.identity(4)
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
normal = unit_vector(normal[:3])
if perspective is not None:
# perspective projection
perspective = numpy.array(perspective[:3], dtype=numpy.float64,
copy=False)
M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
M[:3, :3] -= numpy.outer(perspective, normal)
if pseudo:
# preserve relative depth
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
else:
M[:3, 3] = numpy.dot(point, normal) * perspective
M[3, :3] = -normal
M[3, 3] = numpy.dot(perspective, normal)
elif direction is not None:
# parallel projection
direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
scale = numpy.dot(direction, normal)
M[:3, :3] -= numpy.outer(direction, normal) / scale
M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
else:
# orthogonal projection
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * normal
return M | [
"def",
"projection_matrix",
"(",
"point",
",",
"normal",
",",
"direction",
"=",
"None",
",",
"perspective",
"=",
"None",
",",
"pseudo",
"=",
"False",
")",
":",
"M",
"=",
"numpy",
".",
"identity",
"(",
"4",
")",
"point",
"=",
"numpy",
".",
"array",
"(... | Return matrix to project onto plane defined by point and normal.
Using either perspective point, projection direction, or none of both.
If pseudo is True, perspective projections will preserve relative depth
such that Perspective = dot(Orthogonal, PseudoPerspective).
>>> P = projection_matrix((0, 0, 0), (1, 0, 0))
>>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
True
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> P1 = projection_matrix(point, normal, direction=direct)
>>> P2 = projection_matrix(point, normal, perspective=persp)
>>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> is_same_transform(P2, numpy.dot(P0, P3))
True
>>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0))
>>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0
>>> v0[3] = 1.0
>>> v1 = numpy.dot(P, v0)
>>> numpy.allclose(v1[1], v0[1])
True
>>> numpy.allclose(v1[0], 3.0-v1[1])
True | [
"Return",
"matrix",
"to",
"project",
"onto",
"plane",
"defined",
"by",
"point",
"and",
"normal",
"."
] | python | train |
sethmlarson/virtualbox-python | virtualbox/library.py | https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L17808-L17822 | def get_max_port_count_for_storage_bus(self, bus):
"""Returns the maximum number of ports the given storage bus supports.
in bus of type :class:`StorageBus`
The storage bus type to get the value for.
return max_port_count of type int
The maximum number of ports for the given storage bus.
"""
if not isinstance(bus, StorageBus):
raise TypeError("bus can only be an instance of type StorageBus")
max_port_count = self._call("getMaxPortCountForStorageBus",
in_p=[bus])
return max_port_count | [
"def",
"get_max_port_count_for_storage_bus",
"(",
"self",
",",
"bus",
")",
":",
"if",
"not",
"isinstance",
"(",
"bus",
",",
"StorageBus",
")",
":",
"raise",
"TypeError",
"(",
"\"bus can only be an instance of type StorageBus\"",
")",
"max_port_count",
"=",
"self",
"... | Returns the maximum number of ports the given storage bus supports.
in bus of type :class:`StorageBus`
The storage bus type to get the value for.
return max_port_count of type int
The maximum number of ports for the given storage bus. | [
"Returns",
"the",
"maximum",
"number",
"of",
"ports",
"the",
"given",
"storage",
"bus",
"supports",
"."
] | python | train |
mardix/Mocha | mocha/utils.py | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/utils.py#L443-L450 | def how_old(dob):
"""
Calculate the age
:param dob: datetime object
:return: int
"""
today = datetime.date.today()
return today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day)) | [
"def",
"how_old",
"(",
"dob",
")",
":",
"today",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"return",
"today",
".",
"year",
"-",
"dob",
".",
"year",
"-",
"(",
"(",
"today",
".",
"month",
",",
"today",
".",
"day",
")",
"<",
"(",
"dob"... | Calculate the age
:param dob: datetime object
:return: int | [
"Calculate",
"the",
"age",
":",
"param",
"dob",
":",
"datetime",
"object",
":",
"return",
":",
"int"
] | python | train |
ergoithz/browsepy | browsepy/transform/__init__.py | https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/transform/__init__.py#L20-L55 | def nearest(self):
'''
Get the next state jump.
The next jump is calculated looking at :attr:`current` state
and its possible :attr:`jumps` to find the nearest and bigger
option in :attr:`pending` data.
If none is found, the returned next state label will be None.
:returns: tuple with index, substring and next state label
:rtype: tuple
'''
try:
options = self.jumps[self.current]
except KeyError:
raise KeyError(
'Current state %r not defined in %s.jumps.'
% (self.current, self.__class__)
)
offset = len(self.start)
index = len(self.pending)
if self.streaming:
index -= max(map(len, options))
key = (index, 1)
result = (index, '', None)
for amark, anext in options.items():
asize = len(amark)
aindex = self.pending.find(amark, offset, index + asize)
if aindex > -1:
index = aindex
akey = (aindex, -asize)
if akey < key:
key = akey
result = (aindex, amark, anext)
return result | [
"def",
"nearest",
"(",
"self",
")",
":",
"try",
":",
"options",
"=",
"self",
".",
"jumps",
"[",
"self",
".",
"current",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"'Current state %r not defined in %s.jumps.'",
"%",
"(",
"self",
".",
"current",... | Get the next state jump.
The next jump is calculated looking at :attr:`current` state
and its possible :attr:`jumps` to find the nearest and bigger
option in :attr:`pending` data.
If none is found, the returned next state label will be None.
:returns: tuple with index, substring and next state label
:rtype: tuple | [
"Get",
"the",
"next",
"state",
"jump",
"."
] | python | train |
tensorforce/tensorforce | tensorforce/models/memory_model.py | https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/models/memory_model.py#L465-L491 | def tf_optimization(self, states, internals, actions, terminal, reward, next_states=None, next_internals=None):
"""
Creates the TensorFlow operations for performing an optimization update step based
on the given input states and actions batch.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
Returns:
The optimization operation.
"""
arguments = self.optimizer_arguments(
states=states,
internals=internals,
actions=actions,
terminal=terminal,
reward=reward,
next_states=next_states,
next_internals=next_internals
)
return self.optimizer.minimize(**arguments) | [
"def",
"tf_optimization",
"(",
"self",
",",
"states",
",",
"internals",
",",
"actions",
",",
"terminal",
",",
"reward",
",",
"next_states",
"=",
"None",
",",
"next_internals",
"=",
"None",
")",
":",
"arguments",
"=",
"self",
".",
"optimizer_arguments",
"(",
... | Creates the TensorFlow operations for performing an optimization update step based
on the given input states and actions batch.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
actions: Dict of action tensors.
terminal: Terminal boolean tensor.
reward: Reward tensor.
next_states: Dict of successor state tensors.
next_internals: List of posterior internal state tensors.
Returns:
The optimization operation. | [
"Creates",
"the",
"TensorFlow",
"operations",
"for",
"performing",
"an",
"optimization",
"update",
"step",
"based",
"on",
"the",
"given",
"input",
"states",
"and",
"actions",
"batch",
"."
] | python | valid |
QualiSystems/vCenterShell | package/cloudshell/cp/vcenter/common/vcenter/vmomi_service.py | https://github.com/QualiSystems/vCenterShell/blob/e2e24cd938a92a68f4a8e6a860810d3ef72aae6d/package/cloudshell/cp/vcenter/common/vcenter/vmomi_service.py#L315-L338 | def get_obj(self, content, vimtype, name):
"""
Return an object by name for a specific type, if name is None the
first found object is returned
:param content: pyvmomi content object
:param vimtype: the type of object too search
:param name: the object name to return
"""
obj = None
container = self._get_all_objects_by_type(content, vimtype)
# If no name was given will return the first object from list of a objects matching the given vimtype type
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
return obj | [
"def",
"get_obj",
"(",
"self",
",",
"content",
",",
"vimtype",
",",
"name",
")",
":",
"obj",
"=",
"None",
"container",
"=",
"self",
".",
"_get_all_objects_by_type",
"(",
"content",
",",
"vimtype",
")",
"# If no name was given will return the first object from list o... | Return an object by name for a specific type, if name is None the
first found object is returned
:param content: pyvmomi content object
:param vimtype: the type of object too search
:param name: the object name to return | [
"Return",
"an",
"object",
"by",
"name",
"for",
"a",
"specific",
"type",
"if",
"name",
"is",
"None",
"the",
"first",
"found",
"object",
"is",
"returned"
] | python | train |
dnanhkhoa/logone | logone/logone.py | https://github.com/dnanhkhoa/logone/blob/7345a59e54ae59418a2c35ae7e7af5b2784fa1b5/logone/logone.py#L147-L199 | def use_file(self, enabled=True,
file_name=None,
level=logging.WARNING,
when='d',
interval=1,
backup_count=30,
delay=False,
utc=False,
at_time=None,
log_format=None,
date_format=None):
"""
Handler for logging to a file, rotating the log file at certain timed intervals.
"""
if enabled:
if not self.__file_handler:
assert file_name, 'File name is missing!'
# Create new TimedRotatingFileHandler instance
kwargs = {
'filename': file_name,
'when': when,
'interval': interval,
'backupCount': backup_count,
'encoding': 'UTF-8',
'delay': delay,
'utc': utc,
}
if sys.version_info[0] >= 3:
kwargs['atTime'] = at_time
self.__file_handler = TimedRotatingFileHandler(**kwargs)
# Use this format for default case
if not log_format:
log_format = '%(asctime)s %(name)s[%(process)d] ' \
'%(programname)s/%(module)s/%(funcName)s[%(lineno)d] ' \
'%(levelname)s %(message)s'
# Set formatter
formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
self.__file_handler.setFormatter(fmt=formatter)
# Set level for this handler
self.__file_handler.setLevel(level=level)
# Add this handler to logger
self.add_handler(hdlr=self.__file_handler)
elif self.__file_handler:
# Remove handler from logger
self.remove_handler(hdlr=self.__file_handler)
self.__file_handler = None | [
"def",
"use_file",
"(",
"self",
",",
"enabled",
"=",
"True",
",",
"file_name",
"=",
"None",
",",
"level",
"=",
"logging",
".",
"WARNING",
",",
"when",
"=",
"'d'",
",",
"interval",
"=",
"1",
",",
"backup_count",
"=",
"30",
",",
"delay",
"=",
"False",
... | Handler for logging to a file, rotating the log file at certain timed intervals. | [
"Handler",
"for",
"logging",
"to",
"a",
"file",
"rotating",
"the",
"log",
"file",
"at",
"certain",
"timed",
"intervals",
"."
] | python | train |
Rackspace-DOT/flask_keystone | flask_keystone/__init__.py | https://github.com/Rackspace-DOT/flask_keystone/blob/6f6d630e9e66a3beca6607b0b786510ec2a79747/flask_keystone/__init__.py#L86-L123 | def init_app(self, app, config_group="flask_keystone"):
"""
Iniitialize the Flask_Keystone module in an application factory.
:param app: `flask.Flask` application to which to connect.
:type app: `flask.Flask`
:param str config_group: :class:`oslo_config.cfg.OptGroup` to which
to attach.
When initialized, the extension will apply the
:mod:`keystonemiddleware` WSGI middleware to the flask Application,
attach it's own error handler, and generate a User model based on
its :mod:`oslo_config` configuration.
"""
cfg.CONF.register_opts(RAX_OPTS, group=config_group)
self.logger = logging.getLogger(__name__)
try:
logging.register_options(cfg.CONF)
except cfg.ArgsAlreadyParsedError: # pragma: no cover
pass
logging.setup(cfg.CONF, "flask_keystone")
self.config = cfg.CONF[config_group]
self.roles = self._parse_roles()
self.User = self._make_user_model()
self.Anonymous = self._make_anonymous_model()
self.logger.debug("Initialized keystone with roles: %s and "
"allow_anonymous: %s" % (
self.roles,
self.config.allow_anonymous_access
))
app.wsgi_app = auth_token.AuthProtocol(app.wsgi_app, {})
self.logger.debug("Adding before_request request handler.")
app.before_request(self._make_before_request())
self.logger.debug("Registering Custom Error Handler.")
app.register_error_handler(FlaskKeystoneException, handle_exception) | [
"def",
"init_app",
"(",
"self",
",",
"app",
",",
"config_group",
"=",
"\"flask_keystone\"",
")",
":",
"cfg",
".",
"CONF",
".",
"register_opts",
"(",
"RAX_OPTS",
",",
"group",
"=",
"config_group",
")",
"self",
".",
"logger",
"=",
"logging",
".",
"getLogger"... | Iniitialize the Flask_Keystone module in an application factory.
:param app: `flask.Flask` application to which to connect.
:type app: `flask.Flask`
:param str config_group: :class:`oslo_config.cfg.OptGroup` to which
to attach.
When initialized, the extension will apply the
:mod:`keystonemiddleware` WSGI middleware to the flask Application,
attach it's own error handler, and generate a User model based on
its :mod:`oslo_config` configuration. | [
"Iniitialize",
"the",
"Flask_Keystone",
"module",
"in",
"an",
"application",
"factory",
"."
] | python | train |
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L12904-L12945 | def spkw18(handle, subtyp, body, center, inframe, first, last, segid, degree, packts, epochs):
"""
Write a type 18 segment to an SPK file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkw18_c.html
:param handle: Handle of an SPK file open for writing.
:type handle: int
:param subtyp: SPK type 18 subtype code.
:type subtyp: int
:param body: Body code for ephemeris object.
:type body: int
:param center: Body code for the center of motion of the body.
:type center: int
:param inframe: The reference frame of the states.
:type inframe: str
:param first: First valid time for which states can be computed.
:type first: float
:param last: Last valid time for which states can be computed.
:type last: float
:param segid: Segment identifier.
:type segid: str
:param degree: Degree of interpolating polynomials.
:type degree: int
:param packts: data packets
:type packts: 2D Array of floats
:param epochs: Array of epochs corresponding to states.
:type epochs: N-Element Array of floats
"""
handle = ctypes.c_int(handle)
subtyp = ctypes.c_int(subtyp)
body = ctypes.c_int(body)
center = ctypes.c_int(center)
inframe = stypes.stringToCharP(inframe)
first = ctypes.c_double(first)
last = ctypes.c_double(last)
segid = stypes.stringToCharP(segid)
degree = ctypes.c_int(degree)
n = ctypes.c_int(len(packts))
packts = stypes.toDoubleMatrix(packts)
epochs = stypes.toDoubleVector(epochs)
libspice.spkw18_c(handle, subtyp, body, center, inframe, first, last, segid, degree, n, packts, epochs) | [
"def",
"spkw18",
"(",
"handle",
",",
"subtyp",
",",
"body",
",",
"center",
",",
"inframe",
",",
"first",
",",
"last",
",",
"segid",
",",
"degree",
",",
"packts",
",",
"epochs",
")",
":",
"handle",
"=",
"ctypes",
".",
"c_int",
"(",
"handle",
")",
"s... | Write a type 18 segment to an SPK file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkw18_c.html
:param handle: Handle of an SPK file open for writing.
:type handle: int
:param subtyp: SPK type 18 subtype code.
:type subtyp: int
:param body: Body code for ephemeris object.
:type body: int
:param center: Body code for the center of motion of the body.
:type center: int
:param inframe: The reference frame of the states.
:type inframe: str
:param first: First valid time for which states can be computed.
:type first: float
:param last: Last valid time for which states can be computed.
:type last: float
:param segid: Segment identifier.
:type segid: str
:param degree: Degree of interpolating polynomials.
:type degree: int
:param packts: data packets
:type packts: 2D Array of floats
:param epochs: Array of epochs corresponding to states.
:type epochs: N-Element Array of floats | [
"Write",
"a",
"type",
"18",
"segment",
"to",
"an",
"SPK",
"file",
"."
] | python | train |
ARMmbed/icetea | icetea_lib/tools/tools.py | https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/tools.py#L538-L549 | def _create_combined_words(words, startindex):
"""
Helper for create_match_bool, used to combine words inside single quotes from a list into a
single string.
:param words: List of words.
:param startindex: Index where search is started.
:return: (str, int) or (None, 0) if no closing quote is found.
"""
for i, word in enumerate(words[startindex+1:]):
if "'" in word:
return " ".join(words[startindex:startindex+i+2]), i+1
return None, 0 | [
"def",
"_create_combined_words",
"(",
"words",
",",
"startindex",
")",
":",
"for",
"i",
",",
"word",
"in",
"enumerate",
"(",
"words",
"[",
"startindex",
"+",
"1",
":",
"]",
")",
":",
"if",
"\"'\"",
"in",
"word",
":",
"return",
"\" \"",
".",
"join",
"... | Helper for create_match_bool, used to combine words inside single quotes from a list into a
single string.
:param words: List of words.
:param startindex: Index where search is started.
:return: (str, int) or (None, 0) if no closing quote is found. | [
"Helper",
"for",
"create_match_bool",
"used",
"to",
"combine",
"words",
"inside",
"single",
"quotes",
"from",
"a",
"list",
"into",
"a",
"single",
"string",
".",
":",
"param",
"words",
":",
"List",
"of",
"words",
".",
":",
"param",
"startindex",
":",
"Index... | python | train |
mieubrisse/wunderpy2 | wunderpy2/tasks_endpoint.py | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/tasks_endpoint.py#L58-L84 | def update_task(client, task_id, revision, title=None, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None, remove=None):
'''
Updates the task with the given ID
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information
'''
if title is not None:
_check_title_length(title, client.api)
if (recurrence_type is None and recurrence_count is not None) or (recurrence_type is not None and recurrence_count is None):
raise ValueError("recurrence_type and recurrence_count are required are required together")
if due_date is not None:
_check_date_format(due_date, client.api)
data = {
'revision' : int(revision),
'title' : title,
'assignee_id' : int(assignee_id) if assignee_id else None,
'completed' : completed,
'recurrence_type' : recurrence_type,
'recurrence_count' : int(recurrence_count) if recurrence_count else None,
'due_date' : due_date,
'starred' : starred,
'remove' : remove,
}
data = { key: value for key, value in data.items() if value is not None }
endpoint = '/'.join([client.api.Endpoints.TASKS, str(task_id)])
response = client.authenticated_request(endpoint, 'PATCH', data=data)
return response.json() | [
"def",
"update_task",
"(",
"client",
",",
"task_id",
",",
"revision",
",",
"title",
"=",
"None",
",",
"assignee_id",
"=",
"None",
",",
"completed",
"=",
"None",
",",
"recurrence_type",
"=",
"None",
",",
"recurrence_count",
"=",
"None",
",",
"due_date",
"="... | Updates the task with the given ID
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information | [
"Updates",
"the",
"task",
"with",
"the",
"given",
"ID"
] | python | train |
saltstack/salt | salt/modules/virt.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L3437-L3448 | def _is_kvm_hyper():
'''
Returns a bool whether or not this node is a KVM hypervisor
'''
try:
with salt.utils.files.fopen('/proc/modules') as fp_:
if 'kvm_' not in salt.utils.stringutils.to_unicode(fp_.read()):
return False
except IOError:
# No /proc/modules? Are we on Windows? Or Solaris?
return False
return 'libvirtd' in __salt__['cmd.run'](__grains__['ps']) | [
"def",
"_is_kvm_hyper",
"(",
")",
":",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"'/proc/modules'",
")",
"as",
"fp_",
":",
"if",
"'kvm_'",
"not",
"in",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
... | Returns a bool whether or not this node is a KVM hypervisor | [
"Returns",
"a",
"bool",
"whether",
"or",
"not",
"this",
"node",
"is",
"a",
"KVM",
"hypervisor"
] | python | train |
aws/aws-iot-device-sdk-python | AWSIoTPythonSDK/core/shadow/deviceShadow.py | https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/core/shadow/deviceShadow.py#L367-L399 | def shadowRegisterDeltaCallback(self, srcCallback):
"""
**Description**
Listen on delta topics for this device shadow by subscribing to delta topics. Whenever there
is a difference between the desired and reported state, the registered callback will be called
and the delta payload will be available in the callback.
**Syntax**
.. code:: python
# Listen on delta topics for BotShadow
BotShadow.shadowRegisterDeltaCallback(customCallback)
**Parameters**
*srcCallback* - Function to be called when the response for this shadow request comes back. Should
be in form :code:`customCallback(payload, responseStatus, token)`, where :code:`payload` is the
JSON document returned, :code:`responseStatus` indicates whether the request has been accepted,
rejected or is a delta message, :code:`token` is the token used for tracing in this request.
**Returns**
None
"""
with self._dataStructureLock:
# Update callback data structure
self._shadowSubscribeCallbackTable["delta"] = srcCallback
# One subscription
self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "delta", self.generalCallback)
self._logger.info("Subscribed to delta topic for deviceShadow: " + self._shadowName) | [
"def",
"shadowRegisterDeltaCallback",
"(",
"self",
",",
"srcCallback",
")",
":",
"with",
"self",
".",
"_dataStructureLock",
":",
"# Update callback data structure",
"self",
".",
"_shadowSubscribeCallbackTable",
"[",
"\"delta\"",
"]",
"=",
"srcCallback",
"# One subscriptio... | **Description**
Listen on delta topics for this device shadow by subscribing to delta topics. Whenever there
is a difference between the desired and reported state, the registered callback will be called
and the delta payload will be available in the callback.
**Syntax**
.. code:: python
# Listen on delta topics for BotShadow
BotShadow.shadowRegisterDeltaCallback(customCallback)
**Parameters**
*srcCallback* - Function to be called when the response for this shadow request comes back. Should
be in form :code:`customCallback(payload, responseStatus, token)`, where :code:`payload` is the
JSON document returned, :code:`responseStatus` indicates whether the request has been accepted,
rejected or is a delta message, :code:`token` is the token used for tracing in this request.
**Returns**
None | [
"**",
"Description",
"**"
] | python | train |
tensorpack/tensorpack | examples/FasterRCNN/model_fpn.py | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/FasterRCNN/model_fpn.py#L70-L100 | def fpn_map_rois_to_levels(boxes):
"""
Assign boxes to level 2~5.
Args:
boxes (nx4):
Returns:
[tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.
[tf.Tensor]: 4 tensors, the gathered boxes in each level.
Be careful that the returned tensor could be empty.
"""
sqrtarea = tf.sqrt(tf_area(boxes))
level = tf.cast(tf.floor(
4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32)
# RoI levels range from 2~5 (not 6)
level_ids = [
tf.where(level <= 2),
tf.where(tf.equal(level, 3)), # == is not supported
tf.where(tf.equal(level, 4)),
tf.where(level >= 5)]
level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
for i, x in enumerate(level_ids)]
num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))
for i, x in enumerate(level_ids)]
add_moving_summary(*num_in_levels)
level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
return level_ids, level_boxes | [
"def",
"fpn_map_rois_to_levels",
"(",
"boxes",
")",
":",
"sqrtarea",
"=",
"tf",
".",
"sqrt",
"(",
"tf_area",
"(",
"boxes",
")",
")",
"level",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"floor",
"(",
"4",
"+",
"tf",
".",
"log",
"(",
"sqrtarea",
"*",
... | Assign boxes to level 2~5.
Args:
boxes (nx4):
Returns:
[tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.
[tf.Tensor]: 4 tensors, the gathered boxes in each level.
Be careful that the returned tensor could be empty. | [
"Assign",
"boxes",
"to",
"level",
"2~5",
"."
] | python | train |
mryellow/maze_explorer | mazeexp/engine/world_rewards.py | https://github.com/mryellow/maze_explorer/blob/ab8a25ccd05105d2fe57e0213d690cfc07e45827/mazeexp/engine/world_rewards.py#L71-L84 | def reward_goal(self):
"""
Add an end goal reward
"""
if not 'goal' in self.mode:
return
mode = self.mode['goal']
if mode and mode['reward'] and self.__test_cond(mode):
if mode['reward'] > 0:
self.logger.info("Escaped!!")
self.player.stats['reward'] += mode['reward']
self.player.stats['score'] += mode['reward']
self.player.game_over = self.player.game_over or mode['terminal'] | [
"def",
"reward_goal",
"(",
"self",
")",
":",
"if",
"not",
"'goal'",
"in",
"self",
".",
"mode",
":",
"return",
"mode",
"=",
"self",
".",
"mode",
"[",
"'goal'",
"]",
"if",
"mode",
"and",
"mode",
"[",
"'reward'",
"]",
"and",
"self",
".",
"__test_cond",
... | Add an end goal reward | [
"Add",
"an",
"end",
"goal",
"reward"
] | python | train |
gabstopper/smc-python | smc/administration/scheduled_tasks.py | https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/administration/scheduled_tasks.py#L592-L627 | def create(cls, name, servers, backup_log_data=False,
encrypt_password=None, comment=None):
"""
Create a new server backup task. This task provides the ability
to backup individual or all management and log servers under
SMC management.
:param str name: name of task
:param servers: servers to back up. Servers must be instances of
management servers or log servers. If no value is provided all
servers are backed up.
:type servers: list(ManagementServer or LogServer)
:param bool backup_log_data: Should the log files be backed up. This
field is only relevant if a Log Server is backed up.
:param str encrypt_password: Provide an encrypt password if you want
this backup to be encrypted.
:param str comment: optional comment
:raises ElementNotFound: specified servers were not found
:raises CreateElementFailed: failure to create the task
:return: the task
:rtype: ServerBackupTask
"""
if not servers:
servers = [svr.href for svr in ManagementServer.objects.all()]
servers.extend([svr.href for svr in LogServer.objects.all()])
else:
servers = [svr.href for svr in servers]
json = {
'resources': servers,
'name': name,
'password': encrypt_password if encrypt_password else None,
'log_data_must_be_saved': backup_log_data,
'comment': comment}
return ElementCreator(cls, json) | [
"def",
"create",
"(",
"cls",
",",
"name",
",",
"servers",
",",
"backup_log_data",
"=",
"False",
",",
"encrypt_password",
"=",
"None",
",",
"comment",
"=",
"None",
")",
":",
"if",
"not",
"servers",
":",
"servers",
"=",
"[",
"svr",
".",
"href",
"for",
... | Create a new server backup task. This task provides the ability
to backup individual or all management and log servers under
SMC management.
:param str name: name of task
:param servers: servers to back up. Servers must be instances of
management servers or log servers. If no value is provided all
servers are backed up.
:type servers: list(ManagementServer or LogServer)
:param bool backup_log_data: Should the log files be backed up. This
field is only relevant if a Log Server is backed up.
:param str encrypt_password: Provide an encrypt password if you want
this backup to be encrypted.
:param str comment: optional comment
:raises ElementNotFound: specified servers were not found
:raises CreateElementFailed: failure to create the task
:return: the task
:rtype: ServerBackupTask | [
"Create",
"a",
"new",
"server",
"backup",
"task",
".",
"This",
"task",
"provides",
"the",
"ability",
"to",
"backup",
"individual",
"or",
"all",
"management",
"and",
"log",
"servers",
"under",
"SMC",
"management",
".",
":",
"param",
"str",
"name",
":",
"nam... | python | train |
materialsproject/pymatgen | pymatgen/analysis/molecule_matcher.py | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/molecule_matcher.py#L237-L259 | def _group_centroid(mol, ilabels, group_atoms):
"""
Calculate the centroids of a group atoms indexed by the labels of inchi
Args:
mol: The molecule. OpenBabel OBMol object
ilabel: inchi label map
Returns:
Centroid. Tuple (x, y, z)
"""
c1x, c1y, c1z = 0.0, 0.0, 0.0
for i in group_atoms:
orig_idx = ilabels[i-1]
oa1 = mol.GetAtom(orig_idx)
c1x += float(oa1.x())
c1y += float(oa1.y())
c1z += float(oa1.z())
num_atoms = len(group_atoms)
c1x /= num_atoms
c1y /= num_atoms
c1z /= num_atoms
return c1x, c1y, c1z | [
"def",
"_group_centroid",
"(",
"mol",
",",
"ilabels",
",",
"group_atoms",
")",
":",
"c1x",
",",
"c1y",
",",
"c1z",
"=",
"0.0",
",",
"0.0",
",",
"0.0",
"for",
"i",
"in",
"group_atoms",
":",
"orig_idx",
"=",
"ilabels",
"[",
"i",
"-",
"1",
"]",
"oa1",... | Calculate the centroids of a group atoms indexed by the labels of inchi
Args:
mol: The molecule. OpenBabel OBMol object
ilabel: inchi label map
Returns:
Centroid. Tuple (x, y, z) | [
"Calculate",
"the",
"centroids",
"of",
"a",
"group",
"atoms",
"indexed",
"by",
"the",
"labels",
"of",
"inchi"
] | python | train |
programa-stic/barf-project | barf/barf.py | https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/barf.py#L216-L232 | def load_architecture(self, name, arch_info, disassembler, translator):
"""Translate to REIL instructions.
Args:
name (str): Architecture's name.
arch_info (ArchitectureInformation): Architecture information object.
disassembler (Disassembler): Disassembler for the architecture.
translator (Translator): Translator for the architecture.
"""
# Set up architecture information.
self.name = name
self.arch_info = arch_info
self.disassembler = disassembler
self.ir_translator = translator
# Setup analysis modules.
self._setup_analysis_modules() | [
"def",
"load_architecture",
"(",
"self",
",",
"name",
",",
"arch_info",
",",
"disassembler",
",",
"translator",
")",
":",
"# Set up architecture information.",
"self",
".",
"name",
"=",
"name",
"self",
".",
"arch_info",
"=",
"arch_info",
"self",
".",
"disassembl... | Translate to REIL instructions.
Args:
name (str): Architecture's name.
arch_info (ArchitectureInformation): Architecture information object.
disassembler (Disassembler): Disassembler for the architecture.
translator (Translator): Translator for the architecture. | [
"Translate",
"to",
"REIL",
"instructions",
"."
] | python | train |
CalebBell/thermo | thermo/viscosity.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/viscosity.py#L764-L831 | def Lucas(T, P, Tc, Pc, omega, P_sat, mu_l):
r'''Adjustes for pressure the viscosity of a liquid using an emperical
formula developed in [1]_, but as discussed in [2]_ as the original source
is in German.
.. math::
\frac{\mu}{\mu_{sat}}=\frac{1+D(\Delta P_r/2.118)^A}{1+C\omega \Delta P_r}
\Delta P_r = \frac{P-P^{sat}}{P_c}
A=0.9991-\frac{4.674\times 10^{-4}}{1.0523T_r^{-0.03877}-1.0513}
D = \frac{0.3257}{(1.0039-T_r^{2.573})^{0.2906}}-0.2086
C = -0.07921+2.1616T_r-13.4040T_r^2+44.1706T_r^3-84.8291T_r^4+
96.1209T_r^5-59.8127T_r^6+15.6719T_r^7
Parameters
----------
T : float
Temperature of fluid [K]
P : float
Pressure of fluid [Pa]
Tc: float
Critical point of fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor of compound
P_sat : float
Saturation pressure of the fluid [Pa]
mu_l : float
Viscosity of liquid at 1 atm or saturation, [Pa*S]
Returns
-------
mu_l_dense : float
Viscosity of liquid, [Pa*s]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
The example is from Reid (1987); all results agree.
Above several thousand bar, this equation does not represent true behavior.
If Psat is larger than P, the fluid may not be liquid; dPr is set to 0.
Examples
--------
>>> Lucas(300., 500E5, 572.2, 34.7E5, 0.236, 0, 0.00068) # methylcyclohexane
0.0010683738499316518
References
----------
.. [1] Lucas, Klaus. "Ein Einfaches Verfahren Zur Berechnung Der
Viskositat von Gasen Und Gasgemischen." Chemie Ingenieur Technik 46, no. 4
(February 1, 1974): 157-157. doi:10.1002/cite.330460413.
.. [2] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.
Properties of Gases and Liquids. McGraw-Hill Companies, 1987.
'''
Tr = T/Tc
C = -0.07921+2.1616*Tr - 13.4040*Tr**2 + 44.1706*Tr**3 - 84.8291*Tr**4 \
+ 96.1209*Tr**5-59.8127*Tr**6+15.6719*Tr**7
D = 0.3257/((1.0039-Tr**2.573)**0.2906) - 0.2086
A = 0.9991 - 4.674E-4/(1.0523*Tr**-0.03877 - 1.0513)
dPr = (P-P_sat)/Pc
if dPr < 0:
dPr = 0
return (1. + D*(dPr/2.118)**A)/(1. + C*omega*dPr)*mu_l | [
"def",
"Lucas",
"(",
"T",
",",
"P",
",",
"Tc",
",",
"Pc",
",",
"omega",
",",
"P_sat",
",",
"mu_l",
")",
":",
"Tr",
"=",
"T",
"/",
"Tc",
"C",
"=",
"-",
"0.07921",
"+",
"2.1616",
"*",
"Tr",
"-",
"13.4040",
"*",
"Tr",
"**",
"2",
"+",
"44.1706"... | r'''Adjustes for pressure the viscosity of a liquid using an emperical
formula developed in [1]_, but as discussed in [2]_ as the original source
is in German.
.. math::
\frac{\mu}{\mu_{sat}}=\frac{1+D(\Delta P_r/2.118)^A}{1+C\omega \Delta P_r}
\Delta P_r = \frac{P-P^{sat}}{P_c}
A=0.9991-\frac{4.674\times 10^{-4}}{1.0523T_r^{-0.03877}-1.0513}
D = \frac{0.3257}{(1.0039-T_r^{2.573})^{0.2906}}-0.2086
C = -0.07921+2.1616T_r-13.4040T_r^2+44.1706T_r^3-84.8291T_r^4+
96.1209T_r^5-59.8127T_r^6+15.6719T_r^7
Parameters
----------
T : float
Temperature of fluid [K]
P : float
Pressure of fluid [Pa]
Tc: float
Critical point of fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor of compound
P_sat : float
Saturation pressure of the fluid [Pa]
mu_l : float
Viscosity of liquid at 1 atm or saturation, [Pa*S]
Returns
-------
mu_l_dense : float
Viscosity of liquid, [Pa*s]
Notes
-----
This equation is entirely dimensionless; all dimensions cancel.
The example is from Reid (1987); all results agree.
Above several thousand bar, this equation does not represent true behavior.
If Psat is larger than P, the fluid may not be liquid; dPr is set to 0.
Examples
--------
>>> Lucas(300., 500E5, 572.2, 34.7E5, 0.236, 0, 0.00068) # methylcyclohexane
0.0010683738499316518
References
----------
.. [1] Lucas, Klaus. "Ein Einfaches Verfahren Zur Berechnung Der
Viskositat von Gasen Und Gasgemischen." Chemie Ingenieur Technik 46, no. 4
(February 1, 1974): 157-157. doi:10.1002/cite.330460413.
.. [2] Reid, Robert C.; Prausnitz, John M.; Poling, Bruce E.
Properties of Gases and Liquids. McGraw-Hill Companies, 1987. | [
"r",
"Adjustes",
"for",
"pressure",
"the",
"viscosity",
"of",
"a",
"liquid",
"using",
"an",
"emperical",
"formula",
"developed",
"in",
"[",
"1",
"]",
"_",
"but",
"as",
"discussed",
"in",
"[",
"2",
"]",
"_",
"as",
"the",
"original",
"source",
"is",
"in"... | python | valid |
sdispater/orator | orator/orm/builder.py | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/builder.py#L532-L543 | def _is_nested(self, name, relation):
"""
Determine if the relationship is nested.
:type name: str
:type relation: str
:rtype: bool
"""
dots = name.find(".")
return dots and name.startswith(relation + ".") | [
"def",
"_is_nested",
"(",
"self",
",",
"name",
",",
"relation",
")",
":",
"dots",
"=",
"name",
".",
"find",
"(",
"\".\"",
")",
"return",
"dots",
"and",
"name",
".",
"startswith",
"(",
"relation",
"+",
"\".\"",
")"
] | Determine if the relationship is nested.
:type name: str
:type relation: str
:rtype: bool | [
"Determine",
"if",
"the",
"relationship",
"is",
"nested",
"."
] | python | train |
Unity-Technologies/ml-agents | ml-agents/mlagents/trainers/ppo/trainer.py | https://github.com/Unity-Technologies/ml-agents/blob/37d139af636e4a2351751fbf0f2fca5a9ed7457f/ml-agents/mlagents/trainers/ppo/trainer.py#L293-L305 | def end_episode(self):
"""
A signal that the Episode has ended. The buffer must be reset.
Get only called when the academy resets.
"""
self.training_buffer.reset_local_buffers()
for agent_id in self.cumulative_rewards:
self.cumulative_rewards[agent_id] = 0
for agent_id in self.episode_steps:
self.episode_steps[agent_id] = 0
if self.use_curiosity:
for agent_id in self.intrinsic_rewards:
self.intrinsic_rewards[agent_id] = 0 | [
"def",
"end_episode",
"(",
"self",
")",
":",
"self",
".",
"training_buffer",
".",
"reset_local_buffers",
"(",
")",
"for",
"agent_id",
"in",
"self",
".",
"cumulative_rewards",
":",
"self",
".",
"cumulative_rewards",
"[",
"agent_id",
"]",
"=",
"0",
"for",
"age... | A signal that the Episode has ended. The buffer must be reset.
Get only called when the academy resets. | [
"A",
"signal",
"that",
"the",
"Episode",
"has",
"ended",
".",
"The",
"buffer",
"must",
"be",
"reset",
".",
"Get",
"only",
"called",
"when",
"the",
"academy",
"resets",
"."
] | python | train |
maxpumperla/elephas | elephas/utils/functional_utils.py | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/functional_utils.py#L33-L43 | def get_neutral(array_list):
"""Get list of zero-valued numpy arrays for
specified list of numpy arrays
:param array_list: list of numpy arrays
:return: list of zeros of same shape as input
"""
res = []
for x in array_list:
res.append(np.zeros_like(x))
return res | [
"def",
"get_neutral",
"(",
"array_list",
")",
":",
"res",
"=",
"[",
"]",
"for",
"x",
"in",
"array_list",
":",
"res",
".",
"append",
"(",
"np",
".",
"zeros_like",
"(",
"x",
")",
")",
"return",
"res"
] | Get list of zero-valued numpy arrays for
specified list of numpy arrays
:param array_list: list of numpy arrays
:return: list of zeros of same shape as input | [
"Get",
"list",
"of",
"zero",
"-",
"valued",
"numpy",
"arrays",
"for",
"specified",
"list",
"of",
"numpy",
"arrays"
] | python | train |
StanfordVL/robosuite | robosuite/environments/base.py | https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/base.py#L115-L128 | def initialize_time(self, control_freq):
"""
Initializes the time constants used for simulation.
"""
self.cur_time = 0
self.model_timestep = self.sim.model.opt.timestep
if self.model_timestep <= 0:
raise XMLError("xml model defined non-positive time step")
self.control_freq = control_freq
if control_freq <= 0:
raise SimulationError(
"control frequency {} is invalid".format(control_freq)
)
self.control_timestep = 1. / control_freq | [
"def",
"initialize_time",
"(",
"self",
",",
"control_freq",
")",
":",
"self",
".",
"cur_time",
"=",
"0",
"self",
".",
"model_timestep",
"=",
"self",
".",
"sim",
".",
"model",
".",
"opt",
".",
"timestep",
"if",
"self",
".",
"model_timestep",
"<=",
"0",
... | Initializes the time constants used for simulation. | [
"Initializes",
"the",
"time",
"constants",
"used",
"for",
"simulation",
"."
] | python | train |
rabitt/pysox | sox/transform.py | https://github.com/rabitt/pysox/blob/eae89bde74567136ec3f723c3e6b369916d9b837/sox/transform.py#L136-L253 | def set_input_format(self, file_type=None, rate=None, bits=None,
channels=None, encoding=None, ignore_length=False):
'''Sets input file format arguments. This is primarily useful when
dealing with audio files without a file extension. Overwrites any
previously set input file arguments.
If this function is not explicity called the input format is inferred
from the file extension or the file's header.
Parameters
----------
file_type : str or None, default=None
The file type of the input audio file. Should be the same as what
the file extension would be, for ex. 'mp3' or 'wav'.
rate : float or None, default=None
The sample rate of the input audio file. If None the sample rate
is inferred.
bits : int or None, default=None
The number of bits per sample. If None, the number of bits per
sample is inferred.
channels : int or None, default=None
The number of channels in the audio file. If None the number of
channels is inferred.
encoding : str or None, default=None
The audio encoding type. Sometimes needed with file-types that
support more than one encoding type. One of:
* signed-integer : PCM data stored as signed (‘two’s
complement’) integers. Commonly used with a 16 or 24−bit
encoding size. A value of 0 represents minimum signal
power.
* unsigned-integer : PCM data stored as unsigned integers.
Commonly used with an 8-bit encoding size. A value of 0
represents maximum signal power.
* floating-point : PCM data stored as IEEE 753 single precision
(32-bit) or double precision (64-bit) floating-point
(‘real’) numbers. A value of 0 represents minimum signal
power.
* a-law : International telephony standard for logarithmic
encoding to 8 bits per sample. It has a precision
equivalent to roughly 13-bit PCM and is sometimes encoded
with reversed bit-ordering.
* u-law : North American telephony standard for logarithmic
encoding to 8 bits per sample. A.k.a. μ-law. It has a
precision equivalent to roughly 14-bit PCM and is sometimes
encoded with reversed bit-ordering.
* oki-adpcm : OKI (a.k.a. VOX, Dialogic, or Intel) 4-bit ADPCM;
it has a precision equivalent to roughly 12-bit PCM. ADPCM
is a form of audio compression that has a good compromise
between audio quality and encoding/decoding speed.
* ima-adpcm : IMA (a.k.a. DVI) 4-bit ADPCM; it has a precision
equivalent to roughly 13-bit PCM.
* ms-adpcm : Microsoft 4-bit ADPCM; it has a precision
equivalent to roughly 14-bit PCM.
* gsm-full-rate : GSM is currently used for the vast majority
of the world’s digital wireless telephone calls. It
utilises several audio formats with different bit-rates and
associated speech quality. SoX has support for GSM’s
original 13kbps ‘Full Rate’ audio format. It is usually
CPU-intensive to work with GSM audio.
ignore_length : bool, default=False
If True, overrides an (incorrect) audio length given in an audio
file’s header. If this option is given then SoX will keep reading
audio until it reaches the end of the input file.
'''
if file_type not in VALID_FORMATS + [None]:
raise ValueError(
'Invalid file_type. Must be one of {}'.format(VALID_FORMATS)
)
if not is_number(rate) and rate is not None:
raise ValueError('rate must be a float or None')
if rate is not None and rate <= 0:
raise ValueError('rate must be a positive number')
if not isinstance(bits, int) and bits is not None:
raise ValueError('bits must be an int or None')
if bits is not None and bits <= 0:
raise ValueError('bits must be a positive number')
if not isinstance(channels, int) and channels is not None:
raise ValueError('channels must be an int or None')
if channels is not None and channels <= 0:
raise ValueError('channels must be a positive number')
if encoding not in ENCODING_VALS + [None]:
raise ValueError(
'Invalid encoding. Must be one of {}'.format(ENCODING_VALS)
)
if not isinstance(ignore_length, bool):
raise ValueError('ignore_length must be a boolean')
input_format = []
if file_type is not None:
input_format.extend(['-t', '{}'.format(file_type)])
if rate is not None:
input_format.extend(['-r', '{:f}'.format(rate)])
if bits is not None:
input_format.extend(['-b', '{}'.format(bits)])
if channels is not None:
input_format.extend(['-c', '{}'.format(channels)])
if encoding is not None:
input_format.extend(['-e', '{}'.format(encoding)])
if ignore_length:
input_format.append('--ignore-length')
self.input_format = input_format
return self | [
"def",
"set_input_format",
"(",
"self",
",",
"file_type",
"=",
"None",
",",
"rate",
"=",
"None",
",",
"bits",
"=",
"None",
",",
"channels",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"ignore_length",
"=",
"False",
")",
":",
"if",
"file_type",
"not... | Sets input file format arguments. This is primarily useful when
dealing with audio files without a file extension. Overwrites any
previously set input file arguments.
If this function is not explicity called the input format is inferred
from the file extension or the file's header.
Parameters
----------
file_type : str or None, default=None
The file type of the input audio file. Should be the same as what
the file extension would be, for ex. 'mp3' or 'wav'.
rate : float or None, default=None
The sample rate of the input audio file. If None the sample rate
is inferred.
bits : int or None, default=None
The number of bits per sample. If None, the number of bits per
sample is inferred.
channels : int or None, default=None
The number of channels in the audio file. If None the number of
channels is inferred.
encoding : str or None, default=None
The audio encoding type. Sometimes needed with file-types that
support more than one encoding type. One of:
* signed-integer : PCM data stored as signed (‘two’s
complement’) integers. Commonly used with a 16 or 24−bit
encoding size. A value of 0 represents minimum signal
power.
* unsigned-integer : PCM data stored as unsigned integers.
Commonly used with an 8-bit encoding size. A value of 0
represents maximum signal power.
* floating-point : PCM data stored as IEEE 753 single precision
(32-bit) or double precision (64-bit) floating-point
(‘real’) numbers. A value of 0 represents minimum signal
power.
* a-law : International telephony standard for logarithmic
encoding to 8 bits per sample. It has a precision
equivalent to roughly 13-bit PCM and is sometimes encoded
with reversed bit-ordering.
* u-law : North American telephony standard for logarithmic
encoding to 8 bits per sample. A.k.a. μ-law. It has a
precision equivalent to roughly 14-bit PCM and is sometimes
encoded with reversed bit-ordering.
* oki-adpcm : OKI (a.k.a. VOX, Dialogic, or Intel) 4-bit ADPCM;
it has a precision equivalent to roughly 12-bit PCM. ADPCM
is a form of audio compression that has a good compromise
between audio quality and encoding/decoding speed.
* ima-adpcm : IMA (a.k.a. DVI) 4-bit ADPCM; it has a precision
equivalent to roughly 13-bit PCM.
* ms-adpcm : Microsoft 4-bit ADPCM; it has a precision
equivalent to roughly 14-bit PCM.
* gsm-full-rate : GSM is currently used for the vast majority
of the world’s digital wireless telephone calls. It
utilises several audio formats with different bit-rates and
associated speech quality. SoX has support for GSM’s
original 13kbps ‘Full Rate’ audio format. It is usually
CPU-intensive to work with GSM audio.
ignore_length : bool, default=False
If True, overrides an (incorrect) audio length given in an audio
file’s header. If this option is given then SoX will keep reading
audio until it reaches the end of the input file. | [
"Sets",
"input",
"file",
"format",
"arguments",
".",
"This",
"is",
"primarily",
"useful",
"when",
"dealing",
"with",
"audio",
"files",
"without",
"a",
"file",
"extension",
".",
"Overwrites",
"any",
"previously",
"set",
"input",
"file",
"arguments",
"."
] | python | valid |
thebigmunch/gmusicapi-wrapper | gmusicapi_wrapper/musicmanager.py | https://github.com/thebigmunch/gmusicapi-wrapper/blob/8708683cd33955def1378fc28319ef37805b851d/gmusicapi_wrapper/musicmanager.py#L35-L73 | def login(self, oauth_filename="oauth", uploader_id=None):
"""Authenticate the gmusicapi Musicmanager instance.
Parameters:
oauth_filename (str): The filename of the oauth credentials file to use/create for login.
Default: ``oauth``
uploader_id (str): A unique id as a MAC address (e.g. ``'00:11:22:33:AA:BB'``).
This should only be provided in cases where the default (host MAC address incremented by 1) won't work.
Returns:
``True`` on successful login, ``False`` on unsuccessful login.
"""
cls_name = type(self).__name__
oauth_cred = os.path.join(os.path.dirname(OAUTH_FILEPATH), oauth_filename + '.cred')
try:
if not self.api.login(oauth_credentials=oauth_cred, uploader_id=uploader_id):
try:
self.api.perform_oauth(storage_filepath=oauth_cred)
except OSError:
logger.exception("\nUnable to login with specified oauth code.")
self.api.login(oauth_credentials=oauth_cred, uploader_id=uploader_id)
except (OSError, ValueError):
logger.exception("{} authentication failed.".format(cls_name))
return False
if not self.is_authenticated:
logger.warning("{} authentication failed.".format(cls_name))
return False
logger.info("{} authentication succeeded.\n".format(cls_name))
return True | [
"def",
"login",
"(",
"self",
",",
"oauth_filename",
"=",
"\"oauth\"",
",",
"uploader_id",
"=",
"None",
")",
":",
"cls_name",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
"oauth_cred",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".... | Authenticate the gmusicapi Musicmanager instance.
Parameters:
oauth_filename (str): The filename of the oauth credentials file to use/create for login.
Default: ``oauth``
uploader_id (str): A unique id as a MAC address (e.g. ``'00:11:22:33:AA:BB'``).
This should only be provided in cases where the default (host MAC address incremented by 1) won't work.
Returns:
``True`` on successful login, ``False`` on unsuccessful login. | [
"Authenticate",
"the",
"gmusicapi",
"Musicmanager",
"instance",
"."
] | python | valid |
askedrelic/libgreader | libgreader/auth.py | https://github.com/askedrelic/libgreader/blob/7b668ee291c2464ea172ef44393100c369efa970/libgreader/auth.py#L303-L316 | def get(self, url, parameters=None):
"""
Convenience method for requesting to google with proper cookies/params.
"""
if not self.access_token:
raise IOError("No authorized client available.")
if parameters is None:
parameters = {}
parameters.update({'access_token': self.access_token, 'alt': 'json'})
request = requests.get(url + '?' + self.getParameters(parameters))
if request.status_code != 200:
return None
else:
return toUnicode(request.text) | [
"def",
"get",
"(",
"self",
",",
"url",
",",
"parameters",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"access_token",
":",
"raise",
"IOError",
"(",
"\"No authorized client available.\"",
")",
"if",
"parameters",
"is",
"None",
":",
"parameters",
"=",
"... | Convenience method for requesting to google with proper cookies/params. | [
"Convenience",
"method",
"for",
"requesting",
"to",
"google",
"with",
"proper",
"cookies",
"/",
"params",
"."
] | python | train |
Microsoft/nni | src/sdk/pynni/nni/networkmorphism_tuner/layer_transformer.py | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/networkmorphism_tuner/layer_transformer.py#L223-L229 | def add_noise(weights, other_weights):
'''add noise to the layer.
'''
w_range = np.ptp(other_weights.flatten())
noise_range = NOISE_RATIO * w_range
noise = np.random.uniform(-noise_range / 2.0, noise_range / 2.0, weights.shape)
return np.add(noise, weights) | [
"def",
"add_noise",
"(",
"weights",
",",
"other_weights",
")",
":",
"w_range",
"=",
"np",
".",
"ptp",
"(",
"other_weights",
".",
"flatten",
"(",
")",
")",
"noise_range",
"=",
"NOISE_RATIO",
"*",
"w_range",
"noise",
"=",
"np",
".",
"random",
".",
"uniform... | add noise to the layer. | [
"add",
"noise",
"to",
"the",
"layer",
"."
] | python | train |
klahnakoski/pyLibrary | jx_base/query.py | https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_base/query.py#L200-L245 | def wrap(query, container, namespace):
"""
NORMALIZE QUERY SO IT CAN STILL BE JSON
"""
if is_op(query, QueryOp) or query == None:
return query
query = wrap(query)
table = container.get_table(query['from'])
schema = table.schema
output = QueryOp(
frum=table,
format=query.format,
limit=mo_math.min(MAX_LIMIT, coalesce(query.limit, DEFAULT_LIMIT))
)
if query.select or isinstance(query.select, (Mapping, list)):
output.select = _normalize_selects(query.select, query.frum, schema=schema)
else:
if query.edges or query.groupby:
output.select = DEFAULT_SELECT
else:
output.select = _normalize_selects(".", query.frum)
if query.groupby and query.edges:
Log.error("You can not use both the `groupby` and `edges` clauses in the same query!")
elif query.edges:
output.edges = _normalize_edges(query.edges, limit=output.limit, schema=schema)
output.groupby = Null
elif query.groupby:
output.edges = Null
output.groupby = _normalize_groupby(query.groupby, limit=output.limit, schema=schema)
else:
output.edges = Null
output.groupby = Null
output.where = _normalize_where(query.where, schema=schema)
output.window = [_normalize_window(w) for w in listwrap(query.window)]
output.having = None
output.sort = _normalize_sort(query.sort)
if not mo_math.is_integer(output.limit) or output.limit < 0:
Log.error("Expecting limit >= 0")
output.isLean = query.isLean
return output | [
"def",
"wrap",
"(",
"query",
",",
"container",
",",
"namespace",
")",
":",
"if",
"is_op",
"(",
"query",
",",
"QueryOp",
")",
"or",
"query",
"==",
"None",
":",
"return",
"query",
"query",
"=",
"wrap",
"(",
"query",
")",
"table",
"=",
"container",
".",... | NORMALIZE QUERY SO IT CAN STILL BE JSON | [
"NORMALIZE",
"QUERY",
"SO",
"IT",
"CAN",
"STILL",
"BE",
"JSON"
] | python | train |
googleapis/google-cloud-python | bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py#L542-L652 | def check_and_mutate_row(
self,
table_name,
row_key,
app_profile_id=None,
predicate_filter=None,
true_mutations=None,
false_mutations=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Mutates a row atomically based on the output of a predicate Reader filter.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `row_key`:
>>> row_key = b''
>>>
>>> response = client.check_and_mutate_row(table_name, row_key)
Args:
table_name (str): The unique name of the table to which the conditional mutation should be
applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key (bytes): The key of the row to which the conditional mutation should be applied.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending
on whether or not any results are yielded, either ``true_mutations`` or
``false_mutations`` will be executed. If unset, checks that the row
contains any values at all.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` yields at least one cell when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``false_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` does not yield any cells when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``true_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "check_and_mutate_row" not in self._inner_api_calls:
self._inner_api_calls[
"check_and_mutate_row"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.check_and_mutate_row,
default_retry=self._method_configs["CheckAndMutateRow"].retry,
default_timeout=self._method_configs["CheckAndMutateRow"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.CheckAndMutateRowRequest(
table_name=table_name,
row_key=row_key,
app_profile_id=app_profile_id,
predicate_filter=predicate_filter,
true_mutations=true_mutations,
false_mutations=false_mutations,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["check_and_mutate_row"](
request, retry=retry, timeout=timeout, metadata=metadata
) | [
"def",
"check_and_mutate_row",
"(",
"self",
",",
"table_name",
",",
"row_key",
",",
"app_profile_id",
"=",
"None",
",",
"predicate_filter",
"=",
"None",
",",
"true_mutations",
"=",
"None",
",",
"false_mutations",
"=",
"None",
",",
"retry",
"=",
"google",
".",
... | Mutates a row atomically based on the output of a predicate Reader filter.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `row_key`:
>>> row_key = b''
>>>
>>> response = client.check_and_mutate_row(table_name, row_key)
Args:
table_name (str): The unique name of the table to which the conditional mutation should be
applied. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
row_key (bytes): The key of the row to which the conditional mutation should be applied.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending
on whether or not any results are yielded, either ``true_mutations`` or
``false_mutations`` will be executed. If unset, checks that the row
contains any values at all.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` yields at least one cell when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``false_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if
``predicate_filter`` does not yield any cells when applied to
``row_key``. Entries are applied in order, meaning that earlier
mutations can be masked by later ones. Must contain at least one entry
if ``true_mutations`` is empty, and at most 100000.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Mutation`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | [
"Mutates",
"a",
"row",
"atomically",
"based",
"on",
"the",
"output",
"of",
"a",
"predicate",
"Reader",
"filter",
"."
] | python | train |
craffel/mir_eval | mir_eval/pattern.py | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L306-L387 | def occurrence_FPR(reference_patterns, estimated_patterns, thres=.75,
similarity_metric="cardinality_score"):
"""Establishment F1 Score, Precision and Recall.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.occurrence_FPR(ref_patterns,
... est_patterns)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
thres : float
How similar two occcurrences must be in order to be considered
equal
(Default value = .75)
similarity_metric : str
A string representing the metric to be used
when computing the similarity matrix. Accepted values:
- "cardinality_score": Count of the intersection
between occurrences.
(Default value = "cardinality_score")
Returns
-------
f_measure : float
The establishment F1 Score
precision : float
The establishment Precision
recall : float
The establishment Recall
"""
validate(reference_patterns, estimated_patterns)
# Number of elements in reference
nP = len(reference_patterns)
# Number of elements in estimation
nQ = len(estimated_patterns)
# Occurrence matrix with Precision and recall in its last dimension
O_PR = np.zeros((nP, nQ, 2))
# Index of the values that are greater than the specified threshold
rel_idx = np.empty((0, 2), dtype=int)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
for iP, ref_pattern in enumerate(reference_patterns):
for iQ, est_pattern in enumerate(estimated_patterns):
s = _compute_score_matrix(ref_pattern, est_pattern,
similarity_metric)
if np.max(s) >= thres:
O_PR[iP, iQ, 0] = np.mean(np.max(s, axis=0))
O_PR[iP, iQ, 1] = np.mean(np.max(s, axis=1))
rel_idx = np.vstack((rel_idx, [iP, iQ]))
# Compute the scores
if len(rel_idx) == 0:
precision = 0
recall = 0
else:
P = O_PR[:, :, 0]
precision = np.mean(np.max(P[np.ix_(rel_idx[:, 0], rel_idx[:, 1])],
axis=0))
R = O_PR[:, :, 1]
recall = np.mean(np.max(R[np.ix_(rel_idx[:, 0], rel_idx[:, 1])],
axis=1))
f_measure = util.f_measure(precision, recall)
return f_measure, precision, recall | [
"def",
"occurrence_FPR",
"(",
"reference_patterns",
",",
"estimated_patterns",
",",
"thres",
"=",
".75",
",",
"similarity_metric",
"=",
"\"cardinality_score\"",
")",
":",
"validate",
"(",
"reference_patterns",
",",
"estimated_patterns",
")",
"# Number of elements in refer... | Establishment F1 Score, Precision and Recall.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.occurrence_FPR(ref_patterns,
... est_patterns)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
thres : float
How similar two occcurrences must be in order to be considered
equal
(Default value = .75)
similarity_metric : str
A string representing the metric to be used
when computing the similarity matrix. Accepted values:
- "cardinality_score": Count of the intersection
between occurrences.
(Default value = "cardinality_score")
Returns
-------
f_measure : float
The establishment F1 Score
precision : float
The establishment Precision
recall : float
The establishment Recall | [
"Establishment",
"F1",
"Score",
"Precision",
"and",
"Recall",
"."
] | python | train |
vlukes/dicom2fem | dicom2fem/mesh.py | https://github.com/vlukes/dicom2fem/blob/3056c977ca7119e01984d3aa0c4448a1c6c2430f/dicom2fem/mesh.py#L67-L94 | def merge_mesh( x1, ngroups1, conns1, x2, ngroups2, conns2, cmap, eps = 1e-8 ):
"""Merge two meshes in common coordinates found in x1, x2."""
n1 = x1.shape[0]
n2 = x2.shape[0]
err = nm.sum( nm.sum( nm.abs( x1[cmap[:,0],:-1] - x2[cmap[:,1],:-1] ) ) )
if abs( err ) > (10.0 * eps):
print 'nonmatching meshes!', err
raise ValueError
mask = nm.ones( (n2,), dtype = nm.int32 )
mask[cmap[:,1]] = 0
# print mask, nm.cumsum( mask )
remap = nm.cumsum( mask ) + n1 - 1
remap[cmap[:,1]] = cmap[:,0]
# print remap
i2 = nm.setdiff1d( nm.arange( n2, dtype = nm.int32 ),
cmap[:,1] )
xx = nm.r_[x1, x2[i2]]
ngroups = nm.r_[ngroups1, ngroups2[i2]]
conns = []
for ii in xrange( len( conns1 ) ):
conn = nm.vstack( (conns1[ii], remap[conns2[ii]]) )
conns.append( conn )
return xx, ngroups, conns | [
"def",
"merge_mesh",
"(",
"x1",
",",
"ngroups1",
",",
"conns1",
",",
"x2",
",",
"ngroups2",
",",
"conns2",
",",
"cmap",
",",
"eps",
"=",
"1e-8",
")",
":",
"n1",
"=",
"x1",
".",
"shape",
"[",
"0",
"]",
"n2",
"=",
"x2",
".",
"shape",
"[",
"0",
... | Merge two meshes in common coordinates found in x1, x2. | [
"Merge",
"two",
"meshes",
"in",
"common",
"coordinates",
"found",
"in",
"x1",
"x2",
"."
] | python | train |
spyder-ide/conda-manager | conda_manager/api/conda_api.py | https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/conda_api.py#L727-L737 | def _setup_config_from_kwargs(kwargs):
"""Setup config commands for conda."""
cmd_list = ['--json', '--force']
if 'file' in kwargs:
cmd_list.extend(['--file', kwargs['file']])
if 'system' in kwargs:
cmd_list.append('--system')
return cmd_list | [
"def",
"_setup_config_from_kwargs",
"(",
"kwargs",
")",
":",
"cmd_list",
"=",
"[",
"'--json'",
",",
"'--force'",
"]",
"if",
"'file'",
"in",
"kwargs",
":",
"cmd_list",
".",
"extend",
"(",
"[",
"'--file'",
",",
"kwargs",
"[",
"'file'",
"]",
"]",
")",
"if",... | Setup config commands for conda. | [
"Setup",
"config",
"commands",
"for",
"conda",
"."
] | python | train |
trailofbits/protofuzz | protofuzz/values.py | https://github.com/trailofbits/protofuzz/blob/589492d34de9a0da6cc5554094e2588b893b2fd8/protofuzz/values.py#L94-L102 | def get_floats(bitwidth, limit=0):
'''
Return a number of interesting floating point values
'''
assert bitwidth in (32, 64, 80)
values = [0.0, -1.0, 1.0, -1231231231231.0123, 123123123123123.123]
for val in _limit_helper(values, limit):
yield val | [
"def",
"get_floats",
"(",
"bitwidth",
",",
"limit",
"=",
"0",
")",
":",
"assert",
"bitwidth",
"in",
"(",
"32",
",",
"64",
",",
"80",
")",
"values",
"=",
"[",
"0.0",
",",
"-",
"1.0",
",",
"1.0",
",",
"-",
"1231231231231.0123",
",",
"123123123123123.12... | Return a number of interesting floating point values | [
"Return",
"a",
"number",
"of",
"interesting",
"floating",
"point",
"values"
] | python | train |
globality-corp/microcosm-flask | microcosm_flask/conventions/build_info.py | https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/conventions/build_info.py#L63-L74 | def configure_build_info(graph):
"""
Configure the build info endpoint.
"""
ns = Namespace(
subject=BuildInfo,
)
convention = BuildInfoConvention(graph)
convention.configure(ns, retrieve=tuple())
return convention.build_info | [
"def",
"configure_build_info",
"(",
"graph",
")",
":",
"ns",
"=",
"Namespace",
"(",
"subject",
"=",
"BuildInfo",
",",
")",
"convention",
"=",
"BuildInfoConvention",
"(",
"graph",
")",
"convention",
".",
"configure",
"(",
"ns",
",",
"retrieve",
"=",
"tuple",
... | Configure the build info endpoint. | [
"Configure",
"the",
"build",
"info",
"endpoint",
"."
] | python | train |
newville/asteval | asteval/asteval.py | https://github.com/newville/asteval/blob/bb7d3a95079f96ead75ea55662014bbcc82f9b28/asteval/asteval.py#L528-L533 | def on_augassign(self, node): # ('target', 'op', 'value')
"""Augmented assign."""
return self.on_assign(ast.Assign(targets=[node.target],
value=ast.BinOp(left=node.target,
op=node.op,
right=node.value))) | [
"def",
"on_augassign",
"(",
"self",
",",
"node",
")",
":",
"# ('target', 'op', 'value')",
"return",
"self",
".",
"on_assign",
"(",
"ast",
".",
"Assign",
"(",
"targets",
"=",
"[",
"node",
".",
"target",
"]",
",",
"value",
"=",
"ast",
".",
"BinOp",
"(",
... | Augmented assign. | [
"Augmented",
"assign",
"."
] | python | train |
Yubico/python-yubico | yubico/yubikey_config.py | https://github.com/Yubico/python-yubico/blob/a72e8eddb90da6ee96e29f60912ca1f2872c9aea/yubico/yubikey_config.py#L362-L383 | def ticket_flag(self, which, new=None):
"""
Get or set a ticket flag.
'which' can be either a string ('APPEND_CR' etc.), or an integer.
You should ALWAYS use a string, unless you really know what you are doing.
"""
flag = _get_flag(which, TicketFlags)
if flag:
if not self.capabilities.have_ticket_flag(flag):
raise yubikey_base.YubiKeyVersionError('Ticket flag %s requires %s, and this is %s %d.%d'
% (which, flag.req_string(self.capabilities.model), \
self.capabilities.model, self.ykver[0], self.ykver[1]))
req_major, req_minor = flag.req_version()
self._require_version(major=req_major, minor=req_minor)
value = flag.to_integer()
else:
if type(which) is not int:
raise yubico_exception.InputError('Unknown non-integer TicketFlag (%s)' % which)
value = which
return self.ticket_flags.get_set(value, new) | [
"def",
"ticket_flag",
"(",
"self",
",",
"which",
",",
"new",
"=",
"None",
")",
":",
"flag",
"=",
"_get_flag",
"(",
"which",
",",
"TicketFlags",
")",
"if",
"flag",
":",
"if",
"not",
"self",
".",
"capabilities",
".",
"have_ticket_flag",
"(",
"flag",
")",... | Get or set a ticket flag.
'which' can be either a string ('APPEND_CR' etc.), or an integer.
You should ALWAYS use a string, unless you really know what you are doing. | [
"Get",
"or",
"set",
"a",
"ticket",
"flag",
"."
] | python | train |
saltstack/salt | salt/states/postgres_cluster.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/postgres_cluster.py#L117-L151 | def absent(version,
name):
'''
Ensure that the named cluster is absent
version
Version of the postgresql server of the cluster to remove
name
The name of the cluster to remove
.. versionadded:: 2015.XX
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
#check if cluster exists and remove it
if __salt__['postgres.cluster_exists'](version, name):
if __opts__.get('test'):
ret['result'] = None
msg = 'Cluster {0}/{1} is set to be removed'
ret['comment'] = msg.format(version, name)
return ret
if __salt__['postgres.cluster_remove'](version, name, True):
msg = 'Cluster {0}/{1} has been removed'
ret['comment'] = msg.format(version, name)
ret['changes'][name] = 'Absent'
return ret
# fallback
ret['comment'] = 'Cluster {0}/{1} is not present, so it cannot ' \
'be removed'.format(version, name)
return ret | [
"def",
"absent",
"(",
"version",
",",
"name",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
"}",
"#check if cluster exists and remove it",
"if",
"__salt__",
"... | Ensure that the named cluster is absent
version
Version of the postgresql server of the cluster to remove
name
The name of the cluster to remove
.. versionadded:: 2015.XX | [
"Ensure",
"that",
"the",
"named",
"cluster",
"is",
"absent"
] | python | train |
secynic/ipwhois | ipwhois/scripts/ipwhois_cli.py | https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L489-L532 | def generate_output_entities(self, json_data=None, hr=True,
show_name=False, colorize=True):
"""
The function for generating CLI output RDAP entity results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
"""
output = ''
short = HR_RDAP['entities']['_short'] if hr else 'entities'
name = HR_RDAP['entities']['_name'] if (hr and show_name) else None
output += generate_output(
line='0',
short=short,
name=name,
is_parent=False if (json_data is None or
json_data['entities'] is None) else True,
value='None' if (json_data is None or
json_data['entities'] is None) else None,
colorize=colorize
)
if json_data is not None:
for ent in json_data['entities']:
output += generate_output(
line='1',
value=ent,
colorize=colorize
)
return output | [
"def",
"generate_output_entities",
"(",
"self",
",",
"json_data",
"=",
"None",
",",
"hr",
"=",
"True",
",",
"show_name",
"=",
"False",
",",
"colorize",
"=",
"True",
")",
":",
"output",
"=",
"''",
"short",
"=",
"HR_RDAP",
"[",
"'entities'",
"]",
"[",
"'... | The function for generating CLI output RDAP entity results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output. | [
"The",
"function",
"for",
"generating",
"CLI",
"output",
"RDAP",
"entity",
"results",
"."
] | python | train |
pytroll/satpy | satpy/readers/goes_imager_nc.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/goes_imager_nc.py#L926-L954 | def _update_metadata(self, data, ds_info):
    """Update metadata of the given DataArray.

    Mutates ``data.attrs`` in place: merges the dataset-definition info,
    drops the unneeded ``file_type`` entry, and adds metadata discovered
    from the file (``self.meta``).

    :param data: array whose ``attrs`` dict is updated
        (presumably an xarray DataArray -- see the file_type comment below)
    :param ds_info: dataset-definition metadata merged into ``data.attrs``
    """
    # Metadata from the dataset definition
    data.attrs.update(ds_info)

    # If the file_type attribute is a list and the data is xarray
    # the concat of the dataset will not work. As the file_type is
    # not needed this will be popped here.
    if 'file_type' in data.attrs:
        data.attrs.pop('file_type')

    # Metadata discovered from the file
    data.attrs.update(
        {'platform_name': self.platform_name,
         'sensor': self.sensor,
         'sector': self.sector,
         'yaw_flip': self.meta['yaw_flip']}
    )

    if self.meta['lon0'] is not None:
        # Attributes only available for full disc images. YAML reader
        # doesn't like it if satellite_* is present but None
        data.attrs.update(
            {'satellite_longitude': self.meta['lon0'],
             'satellite_latitude': self.meta['lat0'],
             'satellite_altitude': ALTITUDE,
             'nadir_row': self.meta['nadir_row'],
             'nadir_col': self.meta['nadir_col'],
             'area_def_uniform_sampling': self.meta['area_def_uni']}
        ) | [
"def",
"_update_metadata",
"(",
"self",
",",
"data",
",",
"ds_info",
")",
":",
"# Metadata from the dataset definition",
"data",
".",
"attrs",
".",
"update",
"(",
"ds_info",
")",
"# If the file_type attribute is a list and the data is xarray",
"# the concat of the dataset wil... | Update metadata of the given DataArray | [
"Update",
"metadata",
"of",
"the",
"given",
"DataArray"
] | python | train |
pyupio/changelogs | changelogs/finder.py | https://github.com/pyupio/changelogs/blob/0cdb929ac4546c766cd7eef9ae4eb4baaa08f452/changelogs/finder.py#L62-L95 | def find_repo_urls(session, name, candidates):
    """
    Visits the given URL candidates and searches the page for valid links to a repository.

    Generator: yields each validated repository URL as it is found.

    :param session: requests Session instance
    :param name: str, project name
    :param candidates: list, list of URL candidates
    :return: str, URL to a repo
    """
    for _url in candidates:
        if validate_url(_url):
            try:
                resp = session.get(_url)
                if resp.status_code == 200:
                    tree = etree.HTML(resp.content)
                    if tree:
                        # Deduplicate hrefs before filtering them.
                        for link in frozenset([str(l) for l in tree.xpath("//a/@href")]):
                            # check if the link 1) is to github.com / bitbucket.org AND 2) somewhat
                            # contains the project name
                            if ("github.com" in link or "bitbucket.org" in link or
                                    "sourceforge.net" in link) \
                                    and contains_project_name(name, link):
                                # Normalize the repo URL, then re-validate it.
                                link = validate_url(validate_repo_url(url=link))
                                if link:
                                    logger.debug("Found repo URL {}".format(link))
                                    yield link
            # NOTE(review): this catches the builtin ConnectionError; if
            # `session` is a requests.Session, requests.exceptions.ConnectionError
            # may be the intended target -- confirm against the imports.
            except ConnectionError:
                # we really don't care about connection errors here. a lot of project pages are simply
                # down because the project is no longer maintained
                pass
            except etree.XMLSyntaxError:
                # unable to parse HTML
                pass
            except UnicodeEncodeError:
                pass | [
"def",
"find_repo_urls",
"(",
"session",
",",
"name",
",",
"candidates",
")",
":",
"for",
"_url",
"in",
"candidates",
":",
"if",
"validate_url",
"(",
"_url",
")",
":",
"try",
":",
"resp",
"=",
"session",
".",
"get",
"(",
"_url",
")",
"if",
"resp",
".... | Visits the given URL candidates and searches the page for valid links to a repository.
:param session: requests Session instance
:param name: str, project name
:param candidates: list, list of URL candidates
:return: str, URL to a repo | [
"Visits",
"the",
"given",
"URL",
"candidates",
"and",
"searches",
"the",
"page",
"for",
"valid",
"links",
"to",
"a",
"repository",
".",
":",
"param",
"session",
":",
"requests",
"Session",
"instance",
":",
"param",
"name",
":",
"str",
"project",
"name",
":... | python | train |
pip-services3-python/pip-services3-commons-python | pip_services3_commons/random/RandomDateTime.py | https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/random/RandomDateTime.py#L66-L79 | def next_datetime(min_year = None, max_year = None):
"""
Generates a random Date and time in the range ['minYear', 'maxYear'].
This method generate dates without time (or time set to 00:00:00)
:param min_year: (optional) minimum range value
:param max_year: max range value
:return: a random Date and time value.
"""
date = RandomDateTime.next_date(min_year, max_year).date()
time = RandomDateTime.next_time()
return datetime.datetime.combine(date, time) | [
"def",
"next_datetime",
"(",
"min_year",
"=",
"None",
",",
"max_year",
"=",
"None",
")",
":",
"date",
"=",
"RandomDateTime",
".",
"next_date",
"(",
"min_year",
",",
"max_year",
")",
".",
"date",
"(",
")",
"time",
"=",
"RandomDateTime",
".",
"next_time",
... | Generates a random Date and time in the range ['minYear', 'maxYear'].
This method generate dates without time (or time set to 00:00:00)
:param min_year: (optional) minimum range value
:param max_year: max range value
:return: a random Date and time value. | [
"Generates",
"a",
"random",
"Date",
"and",
"time",
"in",
"the",
"range",
"[",
"minYear",
"maxYear",
"]",
".",
"This",
"method",
"generate",
"dates",
"without",
"time",
"(",
"or",
"time",
"set",
"to",
"00",
":",
"00",
":",
"00",
")"
] | python | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.