code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_services_with_resources(cls, request=None):
""" Get a list of services and resources endpoints.
{
...
"GitLab": {
"url": "/api/gitlab/",
"service_project_link_url": "/api/gitlab-service-project-link/",
"resources": {
"Project": "/api/gitlab-projects/",
"Group": "/api/gitlab-groups/"
}
},
...
}
"""
from django.apps import apps
data = {}
for service in cls._registry.values():
service_model = apps.get_model(service['model_name'])
service_project_link = service_model.projects.through
service_project_link_url = reverse(cls.get_list_view_for_model(service_project_link), request=request)
data[service['name']] = {
'url': reverse(service['list_view'], request=request),
'service_project_link_url': service_project_link_url,
'resources': {resource['name']: reverse(resource['list_view'], request=request)
for resource in service['resources'].values()},
'properties': {resource['name']: reverse(resource['list_view'], request=request)
for resource in service.get('properties', {}).values()},
'is_public_service': cls.is_public_service(service_model)
}
return data | Get a list of services and resources endpoints.
{
...
"GitLab": {
"url": "/api/gitlab/",
"service_project_link_url": "/api/gitlab-service-project-link/",
"resources": {
"Project": "/api/gitlab-projects/",
"Group": "/api/gitlab-groups/"
}
},
...
} | Below is the instruction that describes the task:
### Input:
Get a list of services and resources endpoints.
{
...
"GitLab": {
"url": "/api/gitlab/",
"service_project_link_url": "/api/gitlab-service-project-link/",
"resources": {
"Project": "/api/gitlab-projects/",
"Group": "/api/gitlab-groups/"
}
},
...
}
### Response:
def get_services_with_resources(cls, request=None):
    """ Get a list of services and resources endpoints.
    {
        ...
        "GitLab": {
            "url": "/api/gitlab/",
            "service_project_link_url": "/api/gitlab-service-project-link/",
            "resources": {
                "Project": "/api/gitlab-projects/",
                "Group": "/api/gitlab-groups/"
            }
        },
        ...
    }
    """
    # Imported lazily so the registry module does not depend on app loading order.
    from django.apps import apps
    endpoints = {}
    for service in cls._registry.values():
        service_model = apps.get_model(service['model_name'])
        link_model = service_model.projects.through
        link_url = reverse(cls.get_list_view_for_model(link_model), request=request)

        resource_urls = {}
        for resource in service['resources'].values():
            resource_urls[resource['name']] = reverse(resource['list_view'], request=request)

        property_urls = {}
        for prop in service.get('properties', {}).values():
            property_urls[prop['name']] = reverse(prop['list_view'], request=request)

        endpoints[service['name']] = {
            'url': reverse(service['list_view'], request=request),
            'service_project_link_url': link_url,
            'resources': resource_urls,
            'properties': property_urls,
            'is_public_service': cls.is_public_service(service_model),
        }
    return endpoints
def main(prog: str = None,
subcommand_overrides: Dict[str, Subcommand] = {}) -> None:
"""
The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp``
codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't
work for them, unless you use the ``--include-package`` flag.
"""
# pylint: disable=dangerous-default-value
parser = ArgumentParserWithDefaults(description="Run AllenNLP", usage='%(prog)s', prog=prog)
parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
subparsers = parser.add_subparsers(title='Commands', metavar='')
subcommands = {
# Default commands
"configure": Configure(),
"train": Train(),
"evaluate": Evaluate(),
"predict": Predict(),
"make-vocab": MakeVocab(),
"elmo": Elmo(),
"fine-tune": FineTune(),
"dry-run": DryRun(),
"test-install": TestInstall(),
"find-lr": FindLearningRate(),
"print-results": PrintResults(),
# Superseded by overrides
**subcommand_overrides
}
for name, subcommand in subcommands.items():
subparser = subcommand.add_subparser(name, subparsers)
# configure doesn't need include-package because it imports
# whatever classes it needs.
if name != "configure":
subparser.add_argument('--include-package',
type=str,
action='append',
default=[],
help='additional packages to include')
args = parser.parse_args()
# If a subparser is triggered, it adds its work as `args.func`.
# So if no such attribute has been added, no subparser was triggered,
# so give the user some help.
if 'func' in dir(args):
# Import any additional modules needed (to register custom classes).
for package_name in getattr(args, 'include_package', ()):
import_submodules(package_name)
args.func(args)
else:
parser.print_help() | The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp``
codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't
work for them, unless you use the ``--include-package`` flag. | Below is the instruction that describes the task:
### Input:
The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp``
codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't
work for them, unless you use the ``--include-package`` flag.
### Response:
def main(prog: str = None,
         subcommand_overrides: Dict[str, Subcommand] = {}) -> None:
    """
    The :mod:`~allennlp.run` command only knows about the registered classes in the ``allennlp``
    codebase. In particular, once you start creating your own ``Model`` s and so forth, it won't
    work for them, unless you use the ``--include-package`` flag.

    ``prog`` overrides the program name shown in help output; ``subcommand_overrides``
    maps subcommand names to ``Subcommand`` instances that replace (or extend) the
    default command set.
    """
    # pylint: disable=dangerous-default-value
    # NOTE: the mutable default above is deliberate (read-only merge below); see the
    # pylint disable marker.
    parser = ArgumentParserWithDefaults(description="Run AllenNLP", usage='%(prog)s', prog=prog)
    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
    subparsers = parser.add_subparsers(title='Commands', metavar='')
    subcommands = {
            # Default commands
            "configure": Configure(),
            "train": Train(),
            "evaluate": Evaluate(),
            "predict": Predict(),
            "make-vocab": MakeVocab(),
            "elmo": Elmo(),
            "fine-tune": FineTune(),
            "dry-run": DryRun(),
            "test-install": TestInstall(),
            "find-lr": FindLearningRate(),
            "print-results": PrintResults(),
            # Superseded by overrides
            **subcommand_overrides
    }
    for name, subcommand in subcommands.items():
        subparser = subcommand.add_subparser(name, subparsers)
        # configure doesn't need include-package because it imports
        # whatever classes it needs.
        if name != "configure":
            subparser.add_argument('--include-package',
                                   type=str,
                                   action='append',
                                   default=[],
                                   help='additional packages to include')
    args = parser.parse_args()
    # If a subparser is triggered, it adds its work as `args.func`.
    # So if no such attribute has been added, no subparser was triggered,
    # so give the user some help.
    if 'func' in dir(args):
        # Import any additional modules needed (to register custom classes).
        for package_name in getattr(args, 'include_package', ()):
            import_submodules(package_name)
        args.func(args)
    else:
        parser.print_help()
def sort_reverse_chronologically(self):
"""
Sorts the measurements of this buffer in reverse chronological order
"""
self.measurements.sort(key=lambda m: m.timestamp, reverse=True) | Sorts the measurements of this buffer in reverse chronological order | Below is the the instruction that describes the task:
### Input:
Sorts the measurements of this buffer in reverse chronological order
### Response:
def sort_reverse_chronologically(self):
    """Sort this buffer's measurements in place, newest first.

    Ordering is by each measurement's ``timestamp`` attribute, descending.
    """
    self.measurements.sort(key=lambda measurement: measurement.timestamp,
                           reverse=True)
def _timestamps_eq(a, b):
"""Compares two timestamp operands for equivalence under the Ion data model."""
assert isinstance(a, datetime)
if not isinstance(b, datetime):
return False
# Local offsets must be equivalent.
if (a.tzinfo is None) ^ (b.tzinfo is None):
return False
if a.utcoffset() != b.utcoffset():
return False
for a, b in ((a, b), (b, a)):
if isinstance(a, Timestamp):
if isinstance(b, Timestamp):
# Both operands declare their precisions. They are only equivalent if their precisions are the same.
if a.precision is b.precision and a.fractional_precision is b.fractional_precision:
break
return False
elif a.precision is not TimestampPrecision.SECOND or a.fractional_precision != MICROSECOND_PRECISION:
# Only one of the operands declares its precision. It is only equivalent to the other (a naive datetime)
# if it has full microseconds precision.
return False
return a == b | Compares two timestamp operands for equivalence under the Ion data model. | Below is the the instruction that describes the task:
### Input:
Compares two timestamp operands for equivalence under the Ion data model.
### Response:
def _timestamps_eq(a, b):
    """Compares two timestamp operands for equivalence under the Ion data model.

    ``a`` must be a ``datetime`` (asserted). Returns ``False`` when ``b`` is not
    a ``datetime``, when the operands disagree on naive-vs-aware status or on
    UTC offset, or when their declared precisions are incompatible; otherwise
    falls through to ordinary ``datetime`` equality.
    """
    assert isinstance(a, datetime)
    if not isinstance(b, datetime):
        return False
    # Local offsets must be equivalent.
    if (a.tzinfo is None) ^ (b.tzinfo is None):
        return False
    if a.utcoffset() != b.utcoffset():
        return False
    # Check precision symmetrically: first (a, b), then the swapped pair, so the
    # Timestamp-specific rules apply no matter which operand is the Timestamp.
    # NOTE: this deliberately rebinds a and b; after the loop they may be swapped,
    # which is harmless because == is symmetric.
    for a, b in ((a, b), (b, a)):
        if isinstance(a, Timestamp):
            if isinstance(b, Timestamp):
                # Both operands declare their precisions. They are only equivalent if their precisions are the same.
                if a.precision is b.precision and a.fractional_precision is b.fractional_precision:
                    break
                return False
            elif a.precision is not TimestampPrecision.SECOND or a.fractional_precision != MICROSECOND_PRECISION:
                # Only one of the operands declares its precision. It is only equivalent to the other (a naive datetime)
                # if it has full microseconds precision.
                return False
    return a == b
def ystep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`.
"""
self.Y = np.asarray(sp.prox_l1(self.S - self.AX - self.U,
self.lmbda/self.rho), dtype=self.dtype) | r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`. | Below is the the instruction that describes the task:
### Input:
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`.
### Response:
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{y}`.
    """
    # Soft-threshold the residual; cast keeps the solver's working dtype.
    residual = self.S - self.AX - self.U
    self.Y = np.asarray(sp.prox_l1(residual, self.lmbda / self.rho),
                        dtype=self.dtype)
def set_slotname(slot, name, host=None,
admin_username=None, admin_password=None):
'''
Set the name of a slot in a chassis.
slot
The slot number to change.
name
The name to set. Can only be 15 characters long.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_slotname 2 my-slotname host=111.222.333.444
admin_username=root admin_password=secret
'''
return __execute_cmd('config -g cfgServerInfo -o cfgServerName -i {0} {1}'.format(slot, name),
host=host, admin_username=admin_username,
admin_password=admin_password) | Set the name of a slot in a chassis.
slot
The slot number to change.
name
The name to set. Can only be 15 characters long.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_slotname 2 my-slotname host=111.222.333.444
admin_username=root admin_password=secret | Below is the the instruction that describes the task:
### Input:
Set the name of a slot in a chassis.
slot
The slot number to change.
name
The name to set. Can only be 15 characters long.
host
The chassis host.
admin_username
The username used to access the chassis.
admin_password
The password used to access the chassis.
CLI Example:
.. code-block:: bash
salt '*' dracr.set_slotname 2 my-slotname host=111.222.333.444
admin_username=root admin_password=secret
### Response:
def set_slotname(slot, name, host=None,
                 admin_username=None, admin_password=None):
    '''
    Set the name of a slot in a chassis.
    slot
        The slot number to change.
    name
        The name to set. Can only be 15 characters long.
    host
        The chassis host.
    admin_username
        The username used to access the chassis.
    admin_password
        The password used to access the chassis.
    CLI Example:
    .. code-block:: bash
        salt '*' dracr.set_slotname 2 my-slotname host=111.222.333.444
        admin_username=root admin_password=secret
    '''
    # Build the racadm command first, then delegate execution to the helper.
    command = 'config -g cfgServerInfo -o cfgServerName -i {0} {1}'.format(slot, name)
    return __execute_cmd(command,
                         host=host,
                         admin_username=admin_username,
                         admin_password=admin_password)
def is_compatible_space(space, base_space):
"""Check compatibility of a (power) space with a base space.
Compatibility here means that the spaces are equal or ``space``
is a non-empty power space of ``base_space`` up to different
data types.
Parameters
----------
space, base_space : `LinearSpace`
Spaces to check for compatibility. ``base_space`` cannot be a
`ProductSpace`.
Returns
-------
is_compatible : bool
``True`` if
- ``space == base_space`` or
- ``space.astype(base_space.dtype) == base_space``, provided that
these properties exist, or
- ``space`` is a power space of nonzero length and one of the three
situations applies to ``space[0]`` (recursively).
Otherwise ``False``.
Examples
--------
Scalar spaces:
>>> base = odl.rn(2)
>>> is_compatible_space(odl.rn(2), base)
True
>>> is_compatible_space(odl.rn(3), base)
False
>>> is_compatible_space(odl.rn(2, dtype='float32'), base)
True
Power spaces:
>>> is_compatible_space(odl.rn(2) ** 2, base)
True
>>> is_compatible_space(odl.rn(2) * odl.rn(3), base) # no power space
False
>>> is_compatible_space(odl.rn(2, dtype='float32') ** 2, base)
True
"""
if isinstance(base_space, ProductSpace):
return False
if isinstance(space, ProductSpace):
if not space.is_power_space:
return False
elif len(space) == 0:
return False
else:
return is_compatible_space(space[0], base_space)
else:
if hasattr(space, 'astype') and hasattr(base_space, 'dtype'):
# TODO: maybe only the shape should play a role?
comp_space = space.astype(base_space.dtype)
else:
comp_space = space
return comp_space == base_space | Check compatibility of a (power) space with a base space.
Compatibility here means that the spaces are equal or ``space``
is a non-empty power space of ``base_space`` up to different
data types.
Parameters
----------
space, base_space : `LinearSpace`
Spaces to check for compatibility. ``base_space`` cannot be a
`ProductSpace`.
Returns
-------
is_compatible : bool
``True`` if
- ``space == base_space`` or
- ``space.astype(base_space.dtype) == base_space``, provided that
these properties exist, or
- ``space`` is a power space of nonzero length and one of the three
situations applies to ``space[0]`` (recursively).
Otherwise ``False``.
Examples
--------
Scalar spaces:
>>> base = odl.rn(2)
>>> is_compatible_space(odl.rn(2), base)
True
>>> is_compatible_space(odl.rn(3), base)
False
>>> is_compatible_space(odl.rn(2, dtype='float32'), base)
True
Power spaces:
>>> is_compatible_space(odl.rn(2) ** 2, base)
True
>>> is_compatible_space(odl.rn(2) * odl.rn(3), base) # no power space
False
>>> is_compatible_space(odl.rn(2, dtype='float32') ** 2, base)
True | Below is the the instruction that describes the task:
### Input:
Check compatibility of a (power) space with a base space.
Compatibility here means that the spaces are equal or ``space``
is a non-empty power space of ``base_space`` up to different
data types.
Parameters
----------
space, base_space : `LinearSpace`
Spaces to check for compatibility. ``base_space`` cannot be a
`ProductSpace`.
Returns
-------
is_compatible : bool
``True`` if
- ``space == base_space`` or
- ``space.astype(base_space.dtype) == base_space``, provided that
these properties exist, or
- ``space`` is a power space of nonzero length and one of the three
situations applies to ``space[0]`` (recursively).
Otherwise ``False``.
Examples
--------
Scalar spaces:
>>> base = odl.rn(2)
>>> is_compatible_space(odl.rn(2), base)
True
>>> is_compatible_space(odl.rn(3), base)
False
>>> is_compatible_space(odl.rn(2, dtype='float32'), base)
True
Power spaces:
>>> is_compatible_space(odl.rn(2) ** 2, base)
True
>>> is_compatible_space(odl.rn(2) * odl.rn(3), base) # no power space
False
>>> is_compatible_space(odl.rn(2, dtype='float32') ** 2, base)
True
### Response:
def is_compatible_space(space, base_space):
    """Check compatibility of a (power) space with a base space.

    The spaces are compatible when they are equal, equal up to data type,
    or when ``space`` is a non-empty power space whose base satisfies one
    of those conditions (checked recursively).

    Parameters
    ----------
    space, base_space : `LinearSpace`
        Spaces to check for compatibility. ``base_space`` cannot be a
        `ProductSpace`.

    Returns
    -------
    is_compatible : bool
        ``True`` if

        - ``space == base_space`` or
        - ``space.astype(base_space.dtype) == base_space``, provided that
          these properties exist, or
        - ``space`` is a power space of nonzero length and one of the three
          situations applies to ``space[0]`` (recursively).

        Otherwise ``False``.

    Examples
    --------
    Scalar spaces:

    >>> base = odl.rn(2)
    >>> is_compatible_space(odl.rn(2), base)
    True
    >>> is_compatible_space(odl.rn(3), base)
    False
    >>> is_compatible_space(odl.rn(2, dtype='float32'), base)
    True

    Power spaces:

    >>> is_compatible_space(odl.rn(2) ** 2, base)
    True
    >>> is_compatible_space(odl.rn(2) * odl.rn(3), base)  # no power space
    False
    >>> is_compatible_space(odl.rn(2, dtype='float32') ** 2, base)
    True
    """
    # A product base space is never compatible with anything.
    if isinstance(base_space, ProductSpace):
        return False

    if isinstance(space, ProductSpace):
        # Only non-empty power spaces can be compatible; recurse on the base.
        if not space.is_power_space or len(space) == 0:
            return False
        return is_compatible_space(space[0], base_space)

    # Scalar case: compare up to dtype when both spaces support conversion.
    # TODO: maybe only the shape should play a role?
    if hasattr(space, 'astype') and hasattr(base_space, 'dtype'):
        space = space.astype(base_space.dtype)
    return space == base_space
def vtquery(apikey, checksums):
"""Performs the query dealing with errors and throttling requests."""
data = {'apikey': apikey,
'resource': isinstance(checksums, str) and checksums
or ', '.join(checksums)}
while 1:
response = requests.post(VT_REPORT_URL, data=data)
response.raise_for_status()
if response.status_code == 200:
return response.json()
elif response.status_code == 204:
logging.debug("API key request rate limit reached, throttling.")
time.sleep(VT_THROTTLE)
else:
raise RuntimeError("Response status code %s" % response.status_code) | Performs the query dealing with errors and throttling requests. | Below is the the instruction that describes the task:
### Input:
Performs the query dealing with errors and throttling requests.
### Response:
def vtquery(apikey, checksums):
    """Performs the query dealing with errors and throttling requests.

    Parameters
    ----------
    apikey : str
        VirusTotal API key.
    checksums : str or iterable of str
        A single checksum, or a collection of checksums which is joined
        into one comma-separated ``resource`` parameter.

    Returns
    -------
    dict
        The parsed JSON report returned by VirusTotal.

    Raises
    ------
    requests.HTTPError
        For 4xx/5xx responses (via ``raise_for_status``).
    RuntimeError
        For any other unexpected status code.
    """
    # The original `isinstance(...) and checksums or ', '.join(...)` idiom is the
    # fragile and/or pattern (misbehaves for falsy operands); use an explicit
    # conditional instead.
    if isinstance(checksums, str):
        resource = checksums
    else:
        resource = ', '.join(checksums)
    data = {'apikey': apikey, 'resource': resource}
    while True:
        response = requests.post(VT_REPORT_URL, data=data)
        response.raise_for_status()
        if response.status_code == 200:
            return response.json()
        elif response.status_code == 204:
            # 204 means the public-API request rate was exceeded; back off and retry.
            logging.debug("API key request rate limit reached, throttling.")
            time.sleep(VT_THROTTLE)
        else:
            raise RuntimeError("Response status code %s" % response.status_code)
def _get_csv_from_model(models, crumbs, csvs):
"""
Get csv from model data
:param dict models: Metadata
:param str crumbs: Crumbs
:param dict csvs: Csv
:return dict models: Metadata
:return dict csvs: Csv
"""
logger_csvs.info("enter get_csv_from_model: {}".format(crumbs))
_idx = 0
try:
for _name, _model in models.items():
if "distributionTable" in _model:
models[_name]["distributionTable"], csvs = _get_csv_from_table(_model["distributionTable"], "{}{}{}".format(crumbs, _idx, "distribution"), csvs)
if "summaryTable" in _model:
models[_name]["summaryTable"], csvs = _get_csv_from_table(_model["summaryTable"], "{}{}{}".format(crumbs, _idx, "summary"), csvs)
if "ensembleTable" in _model:
models[_name]["ensembleTable"], csvs = _get_csv_from_table(_model["ensembleTable"], "{}{}{}".format(crumbs, _idx, "ensemble"), csvs)
_idx += 1
except Exception as e:
print("Error: get_csv_from_model: {}, {}".format(crumbs, e))
logger_csvs.error("Error: get_csv_from_model: {}, {}".format(crumbs, e))
return models, csvs | Get csv from model data
:param dict models: Metadata
:param str crumbs: Crumbs
:param dict csvs: Csv
:return dict models: Metadata
:return dict csvs: Csv | Below is the the instruction that describes the task:
### Input:
Get csv from model data
:param dict models: Metadata
:param str crumbs: Crumbs
:param dict csvs: Csv
:return dict models: Metadata
:return dict csvs: Csv
### Response:
def _get_csv_from_model(models, crumbs, csvs):
    """
    Get csv from model data
    :param dict models: Metadata
    :param str crumbs: Crumbs
    :param dict csvs: Csv
    :return dict models: Metadata
    :return dict csvs: Csv
    """
    logger_csvs.info("enter get_csv_from_model: {}".format(crumbs))
    _idx = 0
    try:
        # Each model may carry up to three table kinds; process them uniformly.
        table_kinds = (("distributionTable", "distribution"),
                       ("summaryTable", "summary"),
                       ("ensembleTable", "ensemble"))
        for _name, _model in models.items():
            for table_key, label in table_kinds:
                if table_key in _model:
                    models[_name][table_key], csvs = _get_csv_from_table(
                        _model[table_key],
                        "{}{}{}".format(crumbs, _idx, label),
                        csvs)
            _idx += 1
    except Exception as e:
        print("Error: get_csv_from_model: {}, {}".format(crumbs, e))
        logger_csvs.error("Error: get_csv_from_model: {}, {}".format(crumbs, e))
    return models, csvs
def magic_fields(self):
"""the magic fields for the schema"""
return {f:v for f, v in self.fields.items() if f.startswith('_')} | the magic fields for the schema | Below is the the instruction that describes the task:
### Input:
the magic fields for the schema
### Response:
def magic_fields(self):
    """Return the subset of this schema's fields whose names begin with an underscore."""
    return {name: field
            for name, field in self.fields.items()
            if name.startswith('_')}
def load_npy_to_any(path='', name='file.npy'):
"""Load `.npy` file.
Parameters
------------
path : str
Path to the file (optional).
name : str
File name.
Examples
---------
- see tl.files.save_any_to_npy()
"""
file_path = os.path.join(path, name)
try:
return np.load(file_path).item()
except Exception:
return np.load(file_path)
raise Exception("[!] Fail to load %s" % file_path) | Load `.npy` file.
Parameters
------------
path : str
Path to the file (optional).
name : str
File name.
Examples
---------
- see tl.files.save_any_to_npy() | Below is the the instruction that describes the task:
### Input:
Load `.npy` file.
Parameters
------------
path : str
Path to the file (optional).
name : str
File name.
Examples
---------
- see tl.files.save_any_to_npy()
### Response:
def load_npy_to_any(path='', name='file.npy'):
    """Load `.npy` file.

    Parameters
    ------------
    path : str
        Path to the file (optional).
    name : str
        File name.

    Returns
    -------
    The unwrapped Python object for 0-d arrays (via ``ndarray.item()``, e.g. a
    saved dict), otherwise the loaded ``ndarray`` itself.

    Examples
    ---------
    - see tl.files.save_any_to_npy()
    """
    file_path = os.path.join(path, name)
    try:
        # ``.item()`` unwraps 0-d object arrays (e.g. a dict saved with np.save);
        # it raises ValueError for arrays with more than one element.
        return np.load(file_path).item()
    except ValueError:
        # Regular array: return it as-is. (The original code caught every
        # Exception here and then had an unreachable ``raise`` after this
        # return; narrowing to ValueError lets real I/O errors propagate
        # from the first ``np.load`` call.)
        return np.load(file_path)
def read_json_file(cls, path):
"""
Construct a VariantCollection from a JSON file.
"""
with open(path, 'r') as f:
json_string = f.read()
return cls.from_json(json_string) | Construct a VariantCollection from a JSON file. | Below is the the instruction that describes the task:
### Input:
Construct a VariantCollection from a JSON file.
### Response:
def read_json_file(cls, path):
    """
    Construct a VariantCollection from a JSON file.
    """
    with open(path, 'r') as handle:
        contents = handle.read()
    return cls.from_json(contents)
def write_wdata(self, address, register, value):
"""Write a word (two bytes) value to a device's register. """
warnings.warn("write_wdata() is deprecated and will be removed in future versions replace with write_word_data()", DeprecationWarning)
LOGGER.debug("Writing word data %s to register %s on device %s",
bin(value), hex(register), hex(address))
return self.driver.write_word_data(address, register, value) | Write a word (two bytes) value to a device's register. | Below is the the instruction that describes the task:
### Input:
Write a word (two bytes) value to a device's register.
### Response:
def write_wdata(self, address, register, value):
    """Write a word (two bytes) value to a device's register.

    .. deprecated::
        Use :meth:`write_word_data` instead; this wrapper only emits a
        ``DeprecationWarning``, logs the write, and forwards the call
        unchanged to the underlying driver.
    """
    warnings.warn("write_wdata() is deprecated and will be removed in future versions replace with write_word_data()", DeprecationWarning)
    LOGGER.debug("Writing word data %s to register %s on device %s",
                 bin(value), hex(register), hex(address))
    return self.driver.write_word_data(address, register, value)
def deploy_sandbox_shared_setup(log, verbose=True, app=None, exp_config=None):
"""Set up Git, push to Heroku, and launch the app."""
if verbose:
out = None
else:
out = open(os.devnull, "w")
config = get_config()
if not config.ready:
config.load()
heroku.sanity_check(config)
(id, tmp) = setup_experiment(log, debug=False, app=app, exp_config=exp_config)
# Register the experiment using all configured registration services.
if config.get("mode") == "live":
log("Registering the experiment on configured services...")
registration.register(id, snapshot=None)
# Log in to Heroku if we aren't already.
log("Making sure that you are logged in to Heroku.")
heroku.log_in()
config.set("heroku_auth_token", heroku.auth_token())
log("", chevrons=False)
# Change to temporary directory.
cwd = os.getcwd()
os.chdir(tmp)
# Commit Heroku-specific files to tmp folder's git repo.
git = GitClient(output=out)
git.init()
git.add("--all")
git.commit('"Experiment {}"'.format(id))
# Initialize the app on Heroku.
log("Initializing app on Heroku...")
team = config.get("heroku_team", None)
heroku_app = HerokuApp(dallinger_uid=id, output=out, team=team)
heroku_app.bootstrap()
heroku_app.buildpack("https://github.com/stomita/heroku-buildpack-phantomjs")
# Set up add-ons and AWS environment variables.
database_size = config.get("database_size")
redis_size = config.get("redis_size")
addons = [
"heroku-postgresql:{}".format(quote(database_size)),
"heroku-redis:{}".format(quote(redis_size)),
"papertrail",
]
if config.get("sentry"):
addons.append("sentry")
for name in addons:
heroku_app.addon(name)
heroku_config = {
"aws_access_key_id": config["aws_access_key_id"],
"aws_secret_access_key": config["aws_secret_access_key"],
"aws_region": config["aws_region"],
"auto_recruit": config["auto_recruit"],
"smtp_username": config["smtp_username"],
"smtp_password": config["smtp_password"],
"whimsical": config["whimsical"],
}
heroku_app.set_multiple(**heroku_config)
# Wait for Redis database to be ready.
log("Waiting for Redis...")
ready = False
while not ready:
try:
r = redis.from_url(heroku_app.redis_url)
r.set("foo", "bar")
ready = True
except (ValueError, redis.exceptions.ConnectionError):
time.sleep(2)
log("Saving the URL of the postgres database...")
# Set the notification URL and database URL in the config file.
config.extend(
{
"notification_url": heroku_app.url + "/notifications",
"database_url": heroku_app.db_url,
}
)
config.write()
git.add("config.txt")
time.sleep(0.25)
git.commit("Save URLs for database and notifications")
time.sleep(0.25)
# Launch the Heroku app.
log("Pushing code to Heroku...")
git.push(remote="heroku", branch="HEAD:master")
log("Scaling up the dynos...")
size = config.get("dyno_type")
for process in ["web", "worker"]:
qty = config.get("num_dynos_" + process)
heroku_app.scale_up_dyno(process, qty, size)
if config.get("clock_on"):
heroku_app.scale_up_dyno("clock", 1, size)
time.sleep(8)
# Launch the experiment.
log("Launching the experiment on the remote server and starting recruitment...")
launch_data = _handle_launch_data("{}/launch".format(heroku_app.url), error=log)
result = {
"app_name": heroku_app.name,
"app_home": heroku_app.url,
"recruitment_msg": launch_data.get("recruitment_msg", None),
}
log("Experiment details:")
log("App home: {}".format(result["app_home"]), chevrons=False)
log("Recruiter info:")
log(result["recruitment_msg"], chevrons=False)
# Return to the branch whence we came.
os.chdir(cwd)
log("Completed deployment of experiment " + id + ".")
return result | Set up Git, push to Heroku, and launch the app. | Below is the the instruction that describes the task:
### Input:
Set up Git, push to Heroku, and launch the app.
### Response:
def deploy_sandbox_shared_setup(log, verbose=True, app=None, exp_config=None):
    """Set up Git, push to Heroku, and launch the app.

    ``log`` is a callable used for progress output (accepts a ``chevrons``
    keyword); ``app`` optionally names an existing app; ``exp_config``
    optionally overrides the experiment configuration. Returns a dict with
    ``app_name``, ``app_home`` and ``recruitment_msg``.

    NOTE(review): this function changes the process working directory and
    performs many external side effects (git, Heroku API, Redis) in a
    specific order — treat the sequencing as load-bearing.
    """
    if verbose:
        out = None
    else:
        # Swallow subprocess output when not verbose.
        out = open(os.devnull, "w")
    config = get_config()
    if not config.ready:
        config.load()
    heroku.sanity_check(config)
    # NOTE(review): ``id`` shadows the builtin of the same name throughout.
    (id, tmp) = setup_experiment(log, debug=False, app=app, exp_config=exp_config)
    # Register the experiment using all configured registration services.
    if config.get("mode") == "live":
        log("Registering the experiment on configured services...")
        registration.register(id, snapshot=None)
    # Log in to Heroku if we aren't already.
    log("Making sure that you are logged in to Heroku.")
    heroku.log_in()
    config.set("heroku_auth_token", heroku.auth_token())
    log("", chevrons=False)
    # Change to temporary directory.
    cwd = os.getcwd()
    os.chdir(tmp)
    # Commit Heroku-specific files to tmp folder's git repo.
    git = GitClient(output=out)
    git.init()
    git.add("--all")
    git.commit('"Experiment {}"'.format(id))
    # Initialize the app on Heroku.
    log("Initializing app on Heroku...")
    team = config.get("heroku_team", None)
    heroku_app = HerokuApp(dallinger_uid=id, output=out, team=team)
    heroku_app.bootstrap()
    heroku_app.buildpack("https://github.com/stomita/heroku-buildpack-phantomjs")
    # Set up add-ons and AWS environment variables.
    database_size = config.get("database_size")
    redis_size = config.get("redis_size")
    addons = [
        "heroku-postgresql:{}".format(quote(database_size)),
        "heroku-redis:{}".format(quote(redis_size)),
        "papertrail",
    ]
    if config.get("sentry"):
        addons.append("sentry")
    for name in addons:
        heroku_app.addon(name)
    heroku_config = {
        "aws_access_key_id": config["aws_access_key_id"],
        "aws_secret_access_key": config["aws_secret_access_key"],
        "aws_region": config["aws_region"],
        "auto_recruit": config["auto_recruit"],
        "smtp_username": config["smtp_username"],
        "smtp_password": config["smtp_password"],
        "whimsical": config["whimsical"],
    }
    heroku_app.set_multiple(**heroku_config)
    # Wait for Redis database to be ready.
    log("Waiting for Redis...")
    ready = False
    while not ready:
        try:
            # A trivial round-trip write confirms the add-on is provisioned.
            r = redis.from_url(heroku_app.redis_url)
            r.set("foo", "bar")
            ready = True
        except (ValueError, redis.exceptions.ConnectionError):
            time.sleep(2)
    log("Saving the URL of the postgres database...")
    # Set the notification URL and database URL in the config file.
    config.extend(
        {
            "notification_url": heroku_app.url + "/notifications",
            "database_url": heroku_app.db_url,
        }
    )
    config.write()
    git.add("config.txt")
    time.sleep(0.25)
    git.commit("Save URLs for database and notifications")
    time.sleep(0.25)
    # Launch the Heroku app.
    log("Pushing code to Heroku...")
    git.push(remote="heroku", branch="HEAD:master")
    log("Scaling up the dynos...")
    size = config.get("dyno_type")
    for process in ["web", "worker"]:
        qty = config.get("num_dynos_" + process)
        heroku_app.scale_up_dyno(process, qty, size)
    if config.get("clock_on"):
        heroku_app.scale_up_dyno("clock", 1, size)
    time.sleep(8)
    # Launch the experiment.
    log("Launching the experiment on the remote server and starting recruitment...")
    launch_data = _handle_launch_data("{}/launch".format(heroku_app.url), error=log)
    result = {
        "app_name": heroku_app.name,
        "app_home": heroku_app.url,
        "recruitment_msg": launch_data.get("recruitment_msg", None),
    }
    log("Experiment details:")
    log("App home: {}".format(result["app_home"]), chevrons=False)
    log("Recruiter info:")
    log(result["recruitment_msg"], chevrons=False)
    # Return to the branch whence we came.
    os.chdir(cwd)
    log("Completed deployment of experiment " + id + ".")
    return result
def set(self, name, msg) :
"fills in the error name and message."
dbus.dbus_set_error(self._dbobj, name.encode(), b"%s", msg.encode()) | fills in the error name and message. | Below is the the instruction that describes the task:
### Input:
fills in the error name and message.
### Response:
def set(self, name, msg):
    "fills in the error name and message."
    # D-Bus wants C strings; encode before handing off. The b"%s" format
    # keeps the message from being interpreted as a printf format string.
    encoded_name = name.encode()
    encoded_msg = msg.encode()
    dbus.dbus_set_error(self._dbobj, encoded_name, b"%s", encoded_msg)
def load_mode_builder(obs_mode, node):
"""Load observing mode OB builder"""
# Check 'builder' and 'builder_options'
nval1 = node.get('builder')
if nval1 is not None:
if isinstance(nval1, str):
# override method
newmethod = import_object(nval1)
obs_mode.build_ob = newmethod.__get__(obs_mode)
else:
raise TypeError('builder must be None or a string')
else:
nval2 = node.get('builder_options')
if nval2 is not None:
if isinstance(nval2, list):
for opt_dict in nval2:
if 'result_of' in opt_dict:
fields = opt_dict['result_of']
obs_mode.build_ob_options = ResultOf(**fields)
break
else:
raise TypeError('builder_options must be None or a list')
return obs_mode | Load observing mode OB builder | Below is the instruction that describes the task:
### Input:
Load observing mode OB builder
### Response:
def load_mode_builder(obs_mode, node):
    """Load observing mode OB builder.

    :param obs_mode: observing mode object to configure (mutated in place).
    :param node: mapping that may provide 'builder' and/or 'builder_options'.
    :return: the same ``obs_mode`` object.
    :raises TypeError: if 'builder' is neither None nor a string, or
        'builder_options' is neither None nor a list.
    """
    # Check 'builder' and 'builder_options'
    nval1 = node.get('builder')
    if nval1 is not None:
        if isinstance(nval1, str):
            # override method: bind the imported callable as a bound build_ob
            newmethod = import_object(nval1)
            obs_mode.build_ob = newmethod.__get__(obs_mode)
        else:
            raise TypeError('builder must be None or a string')
    else:
        # 'builder_options' is only consulted when no 'builder' is given.
        nval2 = node.get('builder_options')
        if nval2 is not None:
            if isinstance(nval2, list):
                for opt_dict in nval2:
                    # Only the first 'result_of' entry is used.
                    if 'result_of' in opt_dict:
                        fields = opt_dict['result_of']
                        obs_mode.build_ob_options = ResultOf(**fields)
                        break
            else:
                raise TypeError('builder_options must be None or a list')
    return obs_mode
def connect(cls, region, session=None, access_key=None, secret_key=None,
host=None, port=80, is_secure=True, **kwargs):
"""
Connect to an AWS region.
Parameters
----------
region : str
Name of an AWS region
session : :class:`~botocore.session.Session`, optional
The Session object to use for the connection
access_key : str, optional
If session is None, set this access key when creating the session
secret_key : str, optional
If session is None, set this secret key when creating the session
host : str, optional
Address of the host. Use this to connect to a local instance.
port : int, optional
Connect to the host on this port (default 80)
is_secure : bool, optional
Enforce https connection (default True)
**kwargs : dict
Keyword arguments to pass to the constructor
"""
if session is None:
session = botocore.session.get_session()
if access_key is not None:
session.set_credentials(access_key, secret_key)
url = None
if host is not None:
protocol = 'https' if is_secure else 'http'
url = "%s://%s:%d" % (protocol, host, port)
client = session.create_client('dynamodb', region, endpoint_url=url,
use_ssl=is_secure)
return cls(client, **kwargs) | Connect to an AWS region.
Parameters
----------
region : str
Name of an AWS region
session : :class:`~botocore.session.Session`, optional
The Session object to use for the connection
access_key : str, optional
If session is None, set this access key when creating the session
secret_key : str, optional
If session is None, set this secret key when creating the session
host : str, optional
Address of the host. Use this to connect to a local instance.
port : int, optional
Connect to the host on this port (default 80)
is_secure : bool, optional
Enforce https connection (default True)
**kwargs : dict
Keyword arguments to pass to the constructor | Below is the the instruction that describes the task:
### Input:
Connect to an AWS region.
Parameters
----------
region : str
Name of an AWS region
session : :class:`~botocore.session.Session`, optional
The Session object to use for the connection
access_key : str, optional
If session is None, set this access key when creating the session
secret_key : str, optional
If session is None, set this secret key when creating the session
host : str, optional
Address of the host. Use this to connect to a local instance.
port : int, optional
Connect to the host on this port (default 80)
is_secure : bool, optional
Enforce https connection (default True)
**kwargs : dict
Keyword arguments to pass to the constructor
### Response:
def connect(cls, region, session=None, access_key=None, secret_key=None,
            host=None, port=80, is_secure=True, **kwargs):
    """
    Create a connection to an AWS region and wrap it in this class.
    Parameters
    ----------
    region : str
        Name of an AWS region
    session : :class:`~botocore.session.Session`, optional
        The Session object to use for the connection
    access_key : str, optional
        If session is None, set this access key when creating the session
    secret_key : str, optional
        If session is None, set this secret key when creating the session
    host : str, optional
        Address of the host. Use this to connect to a local instance.
    port : int, optional
        Connect to the host on this port (default 80)
    is_secure : bool, optional
        Enforce https connection (default True)
    **kwargs : dict
        Keyword arguments to pass to the constructor
    """
    if session is None:
        session = botocore.session.get_session()
        # Explicit credentials only apply to sessions created here.
        if access_key is not None:
            session.set_credentials(access_key, secret_key)
    endpoint = None
    if host is not None:
        scheme = 'https' if is_secure else 'http'
        endpoint = '{0}://{1}:{2:d}'.format(scheme, host, port)
    client = session.create_client('dynamodb', region, endpoint_url=endpoint,
                                   use_ssl=is_secure)
    return cls(client, **kwargs)
def get_relative_error(self):
"""
Returns the relative error statistic (e_rel), defined by Frohlich &
Davis (1999): `e_rel = sqrt((U:U) / (M:M))` where M is the moment
tensor, U is the uncertainty tensor and : is the tensor dot product
"""
if not self.moment_tensor:
raise ValueError('Moment tensor not defined!')
numer = np.tensordot(self.moment_tensor.tensor_sigma,
self.moment_tensor.tensor_sigma)
denom = np.tensordot(self.moment_tensor.tensor,
self.moment_tensor.tensor)
self.e_rel = sqrt(numer / denom)
return self.e_rel | Returns the relative error statistic (e_rel), defined by Frohlich &
Davis (1999): `e_rel = sqrt((U:U) / (M:M))` where M is the moment
tensor, U is the uncertainty tensor and : is the tensor dot product | Below is the instruction that describes the task:
### Input:
Returns the relative error statistic (e_rel), defined by Frohlich &
Davis (1999): `e_rel = sqrt((U:U) / (M:M))` where M is the moment
tensor, U is the uncertainty tensor and : is the tensor dot product
### Response:
def get_relative_error(self):
    """
    Compute the relative error statistic ``e_rel`` of Frohlich & Davis
    (1999): ``e_rel = sqrt((U:U) / (M:M))``, where M is the moment tensor,
    U is the uncertainty tensor and ':' is the tensor dot product.

    Stores the result on ``self.e_rel`` and returns it.
    """
    if not self.moment_tensor:
        raise ValueError('Moment tensor not defined!')
    mt = self.moment_tensor
    uncertainty_sq = np.tensordot(mt.tensor_sigma, mt.tensor_sigma)
    magnitude_sq = np.tensordot(mt.tensor, mt.tensor)
    self.e_rel = sqrt(uncertainty_sq / magnitude_sq)
    return self.e_rel
def del_results_for_stopped_hosts(self, scan_id):
""" Remove results from the result table for those host
"""
unfinished_hosts = self.get_hosts_unfinished(scan_id)
for result in self.results_iterator(scan_id, False):
if result['host'] in unfinished_hosts:
self.remove_single_result(scan_id, result) | Remove results from the result table for those host | Below is the the instruction that describes the task:
### Input:
Remove results from the result table for those host
### Response:
def del_results_for_stopped_hosts(self, scan_id):
    """Drop stored results belonging to hosts whose scan did not finish.

    :param scan_id: identifier of the scan whose results are pruned.
    """
    pending_hosts = self.get_hosts_unfinished(scan_id)
    for res in self.results_iterator(scan_id, False):
        if res['host'] in pending_hosts:
            self.remove_single_result(scan_id, res)
def describe_version(self):
"""
Query the Cassandra server for the version.
:returns: string -- the version tag
"""
def _vers(client):
return client.describe_version()
d = self._connection()
d.addCallback(_vers)
return d | Query the Cassandra server for the version.
:returns: string -- the version tag | Below is the the instruction that describes the task:
### Input:
Query the Cassandra server for the version.
:returns: string -- the version tag
### Response:
def describe_version(self):
    """
    Query the Cassandra server for the version.
    :returns: Deferred -- fires with the version tag string
    """
    def _vers(client):
        # Runs once the connection Deferred yields a ready client.
        return client.describe_version()
    d = self._connection()
    d.addCallback(_vers)
    return d
return d |
def _compute_childtab_next_l_index(self, lcptab):
"""Computes the child 'next l index' array in O(n) based on the LCP table.
Abouelhoda et al. (2004).
"""
stack = [0]
n = len(lcptab)
childtab_next_l_index = np.zeros(n, dtype=np.int) # Zeros / -1 ?
for i in xrange(n):
while lcptab[i] < lcptab[stack[-1]]:
stack.pop()
if lcptab[i] == lcptab[stack[-1]]:
last_index = stack.pop()
childtab_next_l_index[last_index] = i
stack.append(i)
return childtab_next_l_index | Computes the child 'next l index' array in O(n) based on the LCP table.
Abouelhoda et al. (2004). | Below is the the instruction that describes the task:
### Input:
Computes the child 'next l index' array in O(n) based on the LCP table.
Abouelhoda et al. (2004).
### Response:
def _compute_childtab_next_l_index(self, lcptab):
"""Computes the child 'next l index' array in O(n) based on the LCP table.
Abouelhoda et al. (2004).
"""
stack = [0]
n = len(lcptab)
childtab_next_l_index = np.zeros(n, dtype=np.int) # Zeros / -1 ?
for i in xrange(n):
while lcptab[i] < lcptab[stack[-1]]:
stack.pop()
if lcptab[i] == lcptab[stack[-1]]:
last_index = stack.pop()
childtab_next_l_index[last_index] = i
stack.append(i)
return childtab_next_l_index |
def add_translation(self, rna: Rna, protein: Protein) -> str:
"""Add a translation relation from a RNA to a protein.
:param rna: An RNA node
:param protein: A protein node
"""
return self.add_unqualified_edge(rna, protein, TRANSLATED_TO) | Add a translation relation from a RNA to a protein.
:param rna: An RNA node
:param protein: A protein node | Below is the the instruction that describes the task:
### Input:
Add a translation relation from a RNA to a protein.
:param rna: An RNA node
:param protein: A protein node
### Response:
def add_translation(self, rna: Rna, protein: Protein) -> str:
    """Add a translation relation from a RNA to a protein.

    :param rna: An RNA node
    :param protein: A protein node
    :return: The key of the added unqualified ``TRANSLATED_TO`` edge,
        as returned by ``add_unqualified_edge``.
    """
    return self.add_unqualified_edge(rna, protein, TRANSLATED_TO)
def upload_file(self, file_name, file_obj, *args, **kwargs):
""" :meth:`.WNetworkClientProto.upload_file` method implementation
"""
self.dav_client().upload_to(file_obj, self.join_path(self.session_path(), file_name)) | :meth:`.WNetworkClientProto.upload_file` method implementation | Below is the the instruction that describes the task:
### Input:
:meth:`.WNetworkClientProto.upload_file` method implementation
### Response:
def upload_file(self, file_name, file_obj, *args, **kwargs):
    """ :meth:`.WNetworkClientProto.upload_file` method implementation
    """
    target_path = self.join_path(self.session_path(), file_name)
    self.dav_client().upload_to(file_obj, target_path)
def report_fit(self):
"""
Print a report of the fit results.
"""
if not self.fitted:
print('Model not yet fit.')
return
print('Null Log-liklihood: {0:.3f}'.format(
self.log_likelihoods['null']))
print('Log-liklihood at convergence: {0:.3f}'.format(
self.log_likelihoods['convergence']))
print('Log-liklihood Ratio: {0:.3f}\n'.format(
self.log_likelihoods['ratio']))
tbl = PrettyTable(
['Component', ])
tbl = PrettyTable()
tbl.add_column('Component', self.fit_parameters.index.values)
for col in ('Coefficient', 'Std. Error', 'T-Score'):
tbl.add_column(col, self.fit_parameters[col].values)
tbl.align['Component'] = 'l'
tbl.float_format = '.3'
print(tbl) | Print a report of the fit results. | Below is the the instruction that describes the task:
### Input:
Print a report of the fit results.
### Response:
def report_fit(self):
    """
    Print a report of the fit results.

    Shows the null/convergence log-likelihoods, the likelihood ratio,
    and a table of fitted coefficients. Prints a notice and returns
    early if the model has not been fit yet.
    """
    if not self.fitted:
        print('Model not yet fit.')
        return
    # Fixed the misspelled 'liklihood' labels in the user-facing output.
    print('Null Log-likelihood: {0:.3f}'.format(
        self.log_likelihoods['null']))
    print('Log-likelihood at convergence: {0:.3f}'.format(
        self.log_likelihoods['convergence']))
    print('Log-likelihood Ratio: {0:.3f}\n'.format(
        self.log_likelihoods['ratio']))
    # The original built a throwaway PrettyTable(['Component']) that was
    # immediately overwritten; construct the table exactly once.
    tbl = PrettyTable()
    tbl.add_column('Component', self.fit_parameters.index.values)
    for col in ('Coefficient', 'Std. Error', 'T-Score'):
        tbl.add_column(col, self.fit_parameters[col].values)
    tbl.align['Component'] = 'l'
    tbl.float_format = '.3'
    print(tbl)
def astype(self, dtype):
"""Return a copy of this space with new ``dtype``.
Parameters
----------
dtype :
Scalar data type of the returned space. Can be provided
in any way the `numpy.dtype` constructor understands, e.g.
as built-in type or as a string. Data types with non-trivial
shapes are not allowed.
Returns
-------
newspace : `TensorSpace`
Version of this space with given data type.
"""
if dtype is None:
# Need to filter this out since Numpy iterprets it as 'float'
raise ValueError('`None` is not a valid data type')
dtype = np.dtype(dtype)
if dtype == self.dtype:
return self
if is_numeric_dtype(self.dtype):
# Caching for real and complex versions (exact dtype mappings)
if dtype == self.__real_dtype:
if self.__real_space is None:
self.__real_space = self._astype(dtype)
return self.__real_space
elif dtype == self.__complex_dtype:
if self.__complex_space is None:
self.__complex_space = self._astype(dtype)
return self.__complex_space
else:
return self._astype(dtype)
else:
return self._astype(dtype) | Return a copy of this space with new ``dtype``.
Parameters
----------
dtype :
Scalar data type of the returned space. Can be provided
in any way the `numpy.dtype` constructor understands, e.g.
as built-in type or as a string. Data types with non-trivial
shapes are not allowed.
Returns
-------
newspace : `TensorSpace`
Version of this space with given data type. | Below is the the instruction that describes the task:
### Input:
Return a copy of this space with new ``dtype``.
Parameters
----------
dtype :
Scalar data type of the returned space. Can be provided
in any way the `numpy.dtype` constructor understands, e.g.
as built-in type or as a string. Data types with non-trivial
shapes are not allowed.
Returns
-------
newspace : `TensorSpace`
Version of this space with given data type.
### Response:
def astype(self, dtype):
    """Return a copy of this space with new ``dtype``.
    Parameters
    ----------
    dtype :
        Scalar data type of the returned space. Can be provided
        in any way the `numpy.dtype` constructor understands, e.g.
        as built-in type or as a string. Data types with non-trivial
        shapes are not allowed.
    Returns
    -------
    newspace : `TensorSpace`
        Version of this space with given data type.
    """
    if dtype is None:
        # Need to filter this out since Numpy interprets it as 'float'
        raise ValueError('`None` is not a valid data type')
    dtype = np.dtype(dtype)
    if dtype == self.dtype:
        # Unchanged dtype: reuse this space instead of allocating a new one.
        return self
    if is_numeric_dtype(self.dtype):
        # Caching for real and complex versions (exact dtype mappings)
        if dtype == self.__real_dtype:
            if self.__real_space is None:
                self.__real_space = self._astype(dtype)
            return self.__real_space
        elif dtype == self.__complex_dtype:
            if self.__complex_space is None:
                self.__complex_space = self._astype(dtype)
            return self.__complex_space
        else:
            # Numeric but neither the real nor complex counterpart: no caching.
            return self._astype(dtype)
    else:
        return self._astype(dtype)
def contains_sender_names(sender):
'''Returns a functions to search sender\'s name or it\'s part.
>>> feature = contains_sender_names("Sergey N. Obukhov <xxx@example.com>")
>>> feature("Sergey Obukhov")
1
>>> feature("BR, Sergey N.")
1
>>> feature("Sergey")
1
>>> contains_sender_names("<serobnic@mail.ru>")("Serobnic")
1
>>> contains_sender_names("<serobnic@mail.ru>")("serobnic")
1
'''
names = '( |$)|'.join(flatten_list([[e, e.capitalize()]
for e in extract_names(sender)]))
names = names or sender
if names != '':
return binary_regex_search(re.compile(names))
return lambda s: 0 | Returns a functions to search sender\'s name or it\'s part.
>>> feature = contains_sender_names("Sergey N. Obukhov <xxx@example.com>")
>>> feature("Sergey Obukhov")
1
>>> feature("BR, Sergey N.")
1
>>> feature("Sergey")
1
>>> contains_sender_names("<serobnic@mail.ru>")("Serobnic")
1
>>> contains_sender_names("<serobnic@mail.ru>")("serobnic")
1 | Below is the the instruction that describes the task:
### Input:
Returns a functions to search sender\'s name or it\'s part.
>>> feature = contains_sender_names("Sergey N. Obukhov <xxx@example.com>")
>>> feature("Sergey Obukhov")
1
>>> feature("BR, Sergey N.")
1
>>> feature("Sergey")
1
>>> contains_sender_names("<serobnic@mail.ru>")("Serobnic")
1
>>> contains_sender_names("<serobnic@mail.ru>")("serobnic")
1
### Response:
def contains_sender_names(sender):
    '''Return a function that searches for the sender's name or a part of it.
    >>> feature = contains_sender_names("Sergey N. Obukhov <xxx@example.com>")
    >>> feature("Sergey Obukhov")
    1
    >>> feature("BR, Sergey N.")
    1
    >>> feature("Sergey")
    1
    >>> contains_sender_names("<serobnic@mail.ru>")("Serobnic")
    1
    >>> contains_sender_names("<serobnic@mail.ru>")("serobnic")
    1
    '''
    # Build an alternation of every extracted name plus its capitalized
    # form, each (except the last) required to be followed by a space or
    # end of line.
    # NOTE(review): the names are interpolated into the regex unescaped and
    # the final alternative lacks the '( |$)' suffix -- confirm intentional.
    names = '( |$)|'.join(flatten_list([[e, e.capitalize()]
                                        for e in extract_names(sender)]))
    # Fall back to matching the raw sender string when no names were found.
    names = names or sender
    if names != '':
        return binary_regex_search(re.compile(names))
    # No usable pattern: the feature is constantly 0.
    return lambda s: 0
def call(func, args):
"""Call the function with args normalized and cast to the correct types.
Args:
func: The function to call.
args: The arguments parsed by docopt.
Returns:
The return value of func.
"""
assert hasattr(func, '__call__'), 'Cannot call func: {}'.format(
func.__name__)
raw_func = (
func if isinstance(func, FunctionType) else func.__class__.__call__)
hints = collections.defaultdict(lambda: Any, get_type_hints(raw_func))
argspec = _getargspec(raw_func)
named_args = {}
varargs = ()
for k, nk, v in _normalize(args):
if nk == argspec.varargs:
hints[nk] = Tuple[hints[nk], ...]
elif nk not in argspec.args and argspec.varkw in hints:
hints[nk] = hints[argspec.varkw]
try:
value = cast(hints[nk], v)
except TypeError as e:
_LOGGER.exception(e)
six.raise_from(exc.InvalidCliValueError(k, v), e)
if nk == argspec.varargs:
varargs = value
elif (nk in argspec.args or argspec.varkw) and (
nk not in named_args or named_args[nk] is None):
named_args[nk] = value
return func(*varargs, **named_args) | Call the function with args normalized and cast to the correct types.
Args:
func: The function to call.
args: The arguments parsed by docopt.
Returns:
The return value of func. | Below is the the instruction that describes the task:
### Input:
Call the function with args normalized and cast to the correct types.
Args:
func: The function to call.
args: The arguments parsed by docopt.
Returns:
The return value of func.
### Response:
def call(func, args):
    """Call the function with args normalized and cast to the correct types.
    Args:
        func: The function to call.
        args: The arguments parsed by docopt.
    Returns:
        The return value of func.
    Raises:
        InvalidCliValueError: if a value cannot be cast to its annotated type.
    """
    assert hasattr(func, '__call__'), 'Cannot call func: {}'.format(
        func.__name__)
    # For callable objects, inspect __call__ rather than the instance itself.
    raw_func = (
        func if isinstance(func, FunctionType) else func.__class__.__call__)
    # Unannotated parameters default to Any, i.e. no casting constraint.
    hints = collections.defaultdict(lambda: Any, get_type_hints(raw_func))
    argspec = _getargspec(raw_func)
    named_args = {}
    varargs = ()
    for k, nk, v in _normalize(args):
        if nk == argspec.varargs:
            # *args receives a tuple of the annotated element type.
            hints[nk] = Tuple[hints[nk], ...]
        elif nk not in argspec.args and argspec.varkw in hints:
            # Unknown names fall back to the **kwargs annotation.
            hints[nk] = hints[argspec.varkw]
        try:
            value = cast(hints[nk], v)
        except TypeError as e:
            _LOGGER.exception(e)
            six.raise_from(exc.InvalidCliValueError(k, v), e)
        if nk == argspec.varargs:
            varargs = value
        elif (nk in argspec.args or argspec.varkw) and (
                nk not in named_args or named_args[nk] is None):
            # Keep the first non-None value seen for each keyword argument.
            named_args[nk] = value
    return func(*varargs, **named_args)
def getParentAddress(self):
"""get Thread device's parent extended address and rloc16 short address
Returns:
The extended address of parent in hex format
"""
print '%s call getParentAddress' % self.port
parentInfo = []
parentInfo = self.__sendCommand('parent')
for line in parentInfo:
if 'Done' in line:
break
elif 'Ext Addr' in line:
eui = line.split()[2]
print eui
#elif 'Rloc' in line:
# rloc16 = line.split()[1]
# print rloc16
else:
pass
return int(eui, 16) | get Thread device's parent extended address and rloc16 short address
Returns:
The extended address of parent in hex format | Below is the the instruction that describes the task:
### Input:
get Thread device's parent extended address and rloc16 short address
Returns:
The extended address of parent in hex format
### Response:
def getParentAddress(self):
    """get Thread device's parent extended address and rloc16 short address
    Returns:
        The extended address of parent in hex format
    """
    print '%s call getParentAddress' % self.port
    # NOTE(review): this empty-list assignment is immediately overwritten.
    parentInfo = []
    parentInfo = self.__sendCommand('parent')
    for line in parentInfo:
        if 'Done' in line:
            break
        elif 'Ext Addr' in line:
            # e.g. "Ext Addr: <hex>" -- take the third whitespace token.
            eui = line.split()[2]
            print eui
        #elif 'Rloc' in line:
        #    rloc16 = line.split()[1]
        #    print rloc16
        else:
            pass
    # NOTE(review): if no 'Ext Addr' line appears before 'Done', 'eui' is
    # unbound here and this raises NameError -- confirm the command output
    # always contains it. The hex string is returned parsed as an int.
    return int(eui, 16)
def economic_qs_zeros(n):
"""Eigen decomposition of a zero matrix."""
Q0 = empty((n, 0))
Q1 = eye(n)
S0 = empty(0)
return ((Q0, Q1), S0) | Eigen decomposition of a zero matrix. | Below is the the instruction that describes the task:
### Input:
Eigen decomposition of a zero matrix.
### Response:
def economic_qs_zeros(n):
    """Economic eigendecomposition of an ``n``-by-``n`` zero matrix.

    Returns ``((Q0, Q1), S0)`` where the span of nonzero eigenvalues is
    empty: ``Q0`` is n-by-0, ``Q1`` is the n-by-n identity, and ``S0``
    has length zero.
    """
    Q0, Q1 = empty((n, 0)), eye(n)
    return ((Q0, Q1), empty(0))
def update():
# type: () -> None
""" Update the feature with updates committed to develop.
This will merge current develop into the current branch.
"""
branch = git.current_branch(refresh=True)
develop = conf.get('git.devel_branch', 'develop')
common.assert_branch_type('feature')
common.git_checkout(develop)
common.git_pull(develop)
common.git_checkout(branch.name)
common.git_merge(branch.name, develop) | Update the feature with updates committed to develop.
This will merge current develop into the current branch. | Below is the the instruction that describes the task:
### Input:
Update the feature with updates committed to develop.
This will merge current develop into the current branch.
### Response:
def update():
    # type: () -> None
    """ Update the feature with updates committed to develop.
    This will merge current develop into the current branch.
    """
    branch = git.current_branch(refresh=True)
    develop = conf.get('git.devel_branch', 'develop')
    # Only feature branches may be updated this way.
    common.assert_branch_type('feature')
    # Bring the local develop branch up to date first ...
    common.git_checkout(develop)
    common.git_pull(develop)
    # ... then return to the feature branch and merge develop into it.
    common.git_checkout(branch.name)
    common.git_merge(branch.name, develop)
def fill_predictive_missing_parameters(self):
"""define state with initial_state
:return: None
"""
if self.initial_state == 'w':
self.state = u'WARNING'
elif self.initial_state == 'u':
self.state = u'UNKNOWN'
elif self.initial_state == 'c':
self.state = u'CRITICAL'
elif self.initial_state == 'x':
self.state = u'UNREACHABLE' | define state with initial_state
:return: None | Below is the the instruction that describes the task:
### Input:
define state with initial_state
:return: None
### Response:
def fill_predictive_missing_parameters(self):
    """Derive ``state`` from the configured ``initial_state`` letter code.

    Unrecognized codes leave ``state`` untouched.
    :return: None
    """
    code_to_state = {
        'w': u'WARNING',
        'u': u'UNKNOWN',
        'c': u'CRITICAL',
        'x': u'UNREACHABLE',
    }
    mapped = code_to_state.get(self.initial_state)
    if mapped is not None:
        self.state = mapped
def __make_var(self, name: str, shape: list):
"""
Creates a tensorflow variable with the given name and shape.
:param name: name to set for the variable.
:param shape: list defining the shape of the variable.
:return: created TF variable.
"""
return tf.get_variable(name, shape, trainable=self.__network.is_trainable()) | Creates a tensorflow variable with the given name and shape.
:param name: name to set for the variable.
:param shape: list defining the shape of the variable.
:return: created TF variable. | Below is the the instruction that describes the task:
### Input:
Creates a tensorflow variable with the given name and shape.
:param name: name to set for the variable.
:param shape: list defining the shape of the variable.
:return: created TF variable.
### Response:
def __make_var(self, name: str, shape: list):
    """
    Create a tensorflow variable with the given name and shape.

    :param name: name to set for the variable.
    :param shape: list defining the shape of the variable.
    :return: the created TF variable.
    """
    trainable = self.__network.is_trainable()
    return tf.get_variable(name, shape, trainable=trainable)
def update_entry(self, anime_id, status=None, privacy=None, rating=None,
sane_rating_update=None, rewatched_times=None, notes=None,
episodes_watched=None, increment_episodes=None):
"""Creates or updates the Library entry with the provided values.
:param anime_id: The Anime ID or Slug.
:type anime_id: int or str
:param str auth_token: User authentication token.
:param str status:
Can be one of `'currently-watching'`, `'plan-to-watch'`,
`'completed'`, `'on-hold'`, `'dropped'`.
:param str privacy: Can be one of `'public'`, `'private'`. Making an
entry private will hide it from public view.
:param rating: Can be one of `0`, `0.5`, `1`, `1.5`, `2`, `2.5`, `3`,
`3.5`, `4`, `4.5`, `5`. Setting it to the current value or 0 will
remove the rating.
:type rating: str, int or float
:param sane_rating_update: Can be any one of the values for rating.
Setting it to 0 will remove the rating. This should be used instead
of rating if you don't want to unset the rating when setting it to
its current value.
:type sane_rating_update: str, int or float
:param int rewatched_times: Number of rewatches. Can be 0 or above.
:param str notes: The personal notes for the entry.
:param int episodes_watched: Number of watched episodes.
Can be between 0 and the total number of episodes. If equal to
total number of episodes, status should be set to completed.
:param bool increment_episodes: If set to true, increments watched
episodes by one. If used along with episodes_watched, provided
value will be incremented.
:raises: ValueError -- if Authentication Token is invalid
(it shouldn't be), or if there is a `500 Internal Server Error`
or if the response is `Invalid JSON Object`.
"""
r = self._query_('/libraries/%s' % anime_id, 'POST', {
'auth_token': self.auth_token,
'status': status,
'privacy': privacy,
'rating': rating,
'sane_rating_update': sane_rating_update,
'rewatched_times': rewatched_times,
'notes': notes,
'episodes_watched': episodes_watched,
'increment_episodes': increment_episodes})
if not (r.status_code == 200 or r.status_code == 201):
raise ValueError | Creates or updates the Library entry with the provided values.
:param anime_id: The Anime ID or Slug.
:type anime_id: int or str
:param str auth_token: User authentication token.
:param str status:
Can be one of `'currently-watching'`, `'plan-to-watch'`,
`'completed'`, `'on-hold'`, `'dropped'`.
:param str privacy: Can be one of `'public'`, `'private'`. Making an
entry private will hide it from public view.
:param rating: Can be one of `0`, `0.5`, `1`, `1.5`, `2`, `2.5`, `3`,
`3.5`, `4`, `4.5`, `5`. Setting it to the current value or 0 will
remove the rating.
:type rating: str, int or float
:param sane_rating_update: Can be any one of the values for rating.
Setting it to 0 will remove the rating. This should be used instead
of rating if you don't want to unset the rating when setting it to
its current value.
:type sane_rating_update: str, int or float
:param int rewatched_times: Number of rewatches. Can be 0 or above.
:param str notes: The personal notes for the entry.
:param int episodes_watched: Number of watched episodes.
Can be between 0 and the total number of episodes. If equal to
total number of episodes, status should be set to completed.
:param bool increment_episodes: If set to true, increments watched
episodes by one. If used along with episodes_watched, provided
value will be incremented.
:raises: ValueError -- if Authentication Token is invalid
(it shouldn't be), or if there is a `500 Internal Server Error`
or if the response is `Invalid JSON Object`. | Below is the the instruction that describes the task:
### Input:
Creates or updates the Library entry with the provided values.
:param anime_id: The Anime ID or Slug.
:type anime_id: int or str
:param str auth_token: User authentication token.
:param str status:
Can be one of `'currently-watching'`, `'plan-to-watch'`,
`'completed'`, `'on-hold'`, `'dropped'`.
:param str privacy: Can be one of `'public'`, `'private'`. Making an
entry private will hide it from public view.
:param rating: Can be one of `0`, `0.5`, `1`, `1.5`, `2`, `2.5`, `3`,
`3.5`, `4`, `4.5`, `5`. Setting it to the current value or 0 will
remove the rating.
:type rating: str, int or float
:param sane_rating_update: Can be any one of the values for rating.
Setting it to 0 will remove the rating. This should be used instead
of rating if you don't want to unset the rating when setting it to
its current value.
:type sane_rating_update: str, int or float
:param int rewatched_times: Number of rewatches. Can be 0 or above.
:param str notes: The personal notes for the entry.
:param int episodes_watched: Number of watched episodes.
Can be between 0 and the total number of episodes. If equal to
total number of episodes, status should be set to completed.
:param bool increment_episodes: If set to true, increments watched
episodes by one. If used along with episodes_watched, provided
value will be incremented.
:raises: ValueError -- if Authentication Token is invalid
(it shouldn't be), or if there is a `500 Internal Server Error`
or if the response is `Invalid JSON Object`.
### Response:
def update_entry(self, anime_id, status=None, privacy=None, rating=None,
                 sane_rating_update=None, rewatched_times=None, notes=None,
                 episodes_watched=None, increment_episodes=None):
    """Creates or updates the Library entry with the provided values.
    :param anime_id: The Anime ID or Slug.
    :type anime_id: int or str
    :param str status:
        Can be one of `'currently-watching'`, `'plan-to-watch'`,
        `'completed'`, `'on-hold'`, `'dropped'`.
    :param str privacy: Can be one of `'public'`, `'private'`. Making an
        entry private will hide it from public view.
    :param rating: Can be one of `0`, `0.5`, ..., `5`. Setting it to the
        current value or 0 will remove the rating.
    :type rating: str, int or float
    :param sane_rating_update: Like rating, but setting it to its current
        value does not unset it; 0 removes the rating.
    :type sane_rating_update: str, int or float
    :param int rewatched_times: Number of rewatches. Can be 0 or above.
    :param str notes: The personal notes for the entry.
    :param int episodes_watched: Number of watched episodes, between 0 and
        the total number of episodes.
    :param bool increment_episodes: If true, increments watched episodes by
        one (on top of episodes_watched if both are given).
    :raises: ValueError -- if the server does not answer with 200 or 201.
    """
    payload = {
        'auth_token': self.auth_token,
        'status': status,
        'privacy': privacy,
        'rating': rating,
        'sane_rating_update': sane_rating_update,
        'rewatched_times': rewatched_times,
        'notes': notes,
        'episodes_watched': episodes_watched,
        'increment_episodes': increment_episodes,
    }
    response = self._query_('/libraries/%s' % anime_id, 'POST', payload)
    if response.status_code not in (200, 201):
        raise ValueError
def train_model(
self,
L_train,
Y_dev=None,
deps=[],
class_balance=None,
log_writer=None,
**kwargs,
):
"""Train the model (i.e. estimate mu) in one of two ways, depending on
whether source dependencies are provided or not:
Args:
L_train: An [n,m] scipy.sparse matrix with values in {0,1,...,k}
corresponding to labels from supervision sources on the
training set
Y_dev: Target labels for the dev set, for estimating class_balance
deps: (list of tuples) known dependencies between supervision
sources. If not provided, sources are assumed to be independent.
TODO: add automatic dependency-learning code
class_balance: (np.array) each class's percentage of the population
(1) No dependencies (conditionally independent sources): Estimate mu
subject to constraints:
(1a) O_{B(i,j)} - (mu P mu.T)_{B(i,j)} = 0, for i != j, where B(i,j)
is the block of entries corresponding to sources i,j
(1b) np.sum( mu P, 1 ) = diag(O)
(2) Source dependencies:
- First, estimate Z subject to the inverse form
constraint:
(2a) O_\Omega + (ZZ.T)_\Omega = 0, \Omega is the deps mask
- Then, compute Q = mu P mu.T
- Finally, estimate mu subject to mu P mu.T = Q and (1b)
"""
self.config = recursive_merge_dicts(self.config, kwargs, misses="ignore")
train_config = self.config["train_config"]
# TODO: Implement logging for label model?
if log_writer is not None:
raise NotImplementedError("Logging for LabelModel.")
# Note that the LabelModel class implements its own (centered) L2 reg.
l2 = train_config.get("l2", 0)
self._set_class_balance(class_balance, Y_dev)
self._set_constants(L_train)
self._set_dependencies(deps)
self._check_L(L_train)
# Whether to take the simple conditionally independent approach, or the
# "inverse form" approach for handling dependencies
# This flag allows us to eg test the latter even with no deps present
self.inv_form = len(self.deps) > 0
# Creating this faux dataset is necessary for now because the LabelModel
# loss functions do not accept inputs, but Classifer._train_model()
# expects training data to feed to the loss functions.
dataset = MetalDataset([0], [0])
train_loader = DataLoader(dataset)
if self.inv_form:
# Compute O, O^{-1}, and initialize params
if self.config["verbose"]:
print("Computing O^{-1}...")
self._generate_O_inv(L_train)
self._init_params()
# Estimate Z, compute Q = \mu P \mu^T
if self.config["verbose"]:
print("Estimating Z...")
self._train_model(train_loader, self.loss_inv_Z)
self.Q = torch.from_numpy(self.get_Q()).float()
# Estimate \mu
if self.config["verbose"]:
print("Estimating \mu...")
self._train_model(train_loader, partial(self.loss_inv_mu, l2=l2))
else:
# Compute O and initialize params
if self.config["verbose"]:
print("Computing O...")
self._generate_O(L_train)
self._init_params()
# Estimate \mu
if self.config["verbose"]:
print("Estimating \mu...")
self._train_model(train_loader, partial(self.loss_mu, l2=l2)) | Train the model (i.e. estimate mu) in one of two ways, depending on
whether source dependencies are provided or not:
Args:
L_train: An [n,m] scipy.sparse matrix with values in {0,1,...,k}
corresponding to labels from supervision sources on the
training set
Y_dev: Target labels for the dev set, for estimating class_balance
deps: (list of tuples) known dependencies between supervision
sources. If not provided, sources are assumed to be independent.
TODO: add automatic dependency-learning code
class_balance: (np.array) each class's percentage of the population
(1) No dependencies (conditionally independent sources): Estimate mu
subject to constraints:
(1a) O_{B(i,j)} - (mu P mu.T)_{B(i,j)} = 0, for i != j, where B(i,j)
is the block of entries corresponding to sources i,j
(1b) np.sum( mu P, 1 ) = diag(O)
(2) Source dependencies:
- First, estimate Z subject to the inverse form
constraint:
(2a) O_\Omega + (ZZ.T)_\Omega = 0, \Omega is the deps mask
- Then, compute Q = mu P mu.T
- Finally, estimate mu subject to mu P mu.T = Q and (1b) | Below is the the instruction that describes the task:
### Input:
Train the model (i.e. estimate mu) in one of two ways, depending on
whether source dependencies are provided or not:
Args:
L_train: An [n,m] scipy.sparse matrix with values in {0,1,...,k}
corresponding to labels from supervision sources on the
training set
Y_dev: Target labels for the dev set, for estimating class_balance
deps: (list of tuples) known dependencies between supervision
sources. If not provided, sources are assumed to be independent.
TODO: add automatic dependency-learning code
class_balance: (np.array) each class's percentage of the population
(1) No dependencies (conditionally independent sources): Estimate mu
subject to constraints:
(1a) O_{B(i,j)} - (mu P mu.T)_{B(i,j)} = 0, for i != j, where B(i,j)
is the block of entries corresponding to sources i,j
(1b) np.sum( mu P, 1 ) = diag(O)
(2) Source dependencies:
- First, estimate Z subject to the inverse form
constraint:
(2a) O_\Omega + (ZZ.T)_\Omega = 0, \Omega is the deps mask
- Then, compute Q = mu P mu.T
- Finally, estimate mu subject to mu P mu.T = Q and (1b)
### Response:
def train_model(
    self,
    L_train,
    Y_dev=None,
    deps=None,
    class_balance=None,
    log_writer=None,
    **kwargs,
):
    r"""Train the model (i.e. estimate mu) in one of two ways, depending on
    whether source dependencies are provided or not:

    Args:
        L_train: An [n,m] scipy.sparse matrix with values in {0,1,...,k}
            corresponding to labels from supervision sources on the
            training set
        Y_dev: Target labels for the dev set, for estimating class_balance
        deps: (list of tuples) known dependencies between supervision
            sources. If None or empty, sources are assumed to be
            independent.
            TODO: add automatic dependency-learning code
        class_balance: (np.array) each class's percentage of the population

    (1) No dependencies (conditionally independent sources): Estimate mu
    subject to constraints:
        (1a) O_{B(i,j)} - (mu P mu.T)_{B(i,j)} = 0, for i != j, where B(i,j)
            is the block of entries corresponding to sources i,j
        (1b) np.sum( mu P, 1 ) = diag(O)

    (2) Source dependencies:
        - First, estimate Z subject to the inverse form constraint:
            (2a) O_\Omega + (ZZ.T)_\Omega = 0, \Omega is the deps mask
        - Then, compute Q = mu P mu.T
        - Finally, estimate mu subject to mu P mu.T = Q and (1b)
    """
    # Default is None rather than a mutable [] to avoid the shared
    # mutable-default-argument pitfall; normalize to a list here.
    if deps is None:
        deps = []
    self.config = recursive_merge_dicts(self.config, kwargs, misses="ignore")
    train_config = self.config["train_config"]
    # TODO: Implement logging for label model?
    if log_writer is not None:
        raise NotImplementedError("Logging for LabelModel.")
    # Note that the LabelModel class implements its own (centered) L2 reg.
    l2 = train_config.get("l2", 0)
    self._set_class_balance(class_balance, Y_dev)
    self._set_constants(L_train)
    self._set_dependencies(deps)
    self._check_L(L_train)
    # Whether to take the simple conditionally independent approach, or the
    # "inverse form" approach for handling dependencies.
    # This flag allows us to eg test the latter even with no deps present.
    self.inv_form = len(self.deps) > 0
    # Creating this faux dataset is necessary for now because the LabelModel
    # loss functions do not accept inputs, but Classifier._train_model()
    # expects training data to feed to the loss functions.
    dataset = MetalDataset([0], [0])
    train_loader = DataLoader(dataset)
    if self.inv_form:
        # Compute O, O^{-1}, and initialize params
        if self.config["verbose"]:
            print("Computing O^{-1}...")
        self._generate_O_inv(L_train)
        self._init_params()
        # Estimate Z, then compute Q = \mu P \mu^T
        if self.config["verbose"]:
            print("Estimating Z...")
        self._train_model(train_loader, self.loss_inv_Z)
        self.Q = torch.from_numpy(self.get_Q()).float()
        # Estimate \mu ('\\mu' prints the same bytes as the old '\mu'
        # literal, without the invalid-escape-sequence warning)
        if self.config["verbose"]:
            print("Estimating \\mu...")
        self._train_model(train_loader, partial(self.loss_inv_mu, l2=l2))
    else:
        # Compute O and initialize params
        if self.config["verbose"]:
            print("Computing O...")
        self._generate_O(L_train)
        self._init_params()
        # Estimate \mu
        if self.config["verbose"]:
            print("Estimating \\mu...")
        self._train_model(train_loader, partial(self.loss_mu, l2=l2))
def serve(self, server=None):
"""Serve app using wsgiref or provided server.
Args:
- server (callable): An callable
"""
if server is None:
from wsgiref.simple_server import make_server
server = lambda app: make_server('', 8000, app).serve_forever()
print('Listening on 0.0.0.0:8000')
try:
server(self)
finally:
server.socket.close() | Serve app using wsgiref or provided server.
Args:
- server (callable): An callable | Below is the the instruction that describes the task:
### Input:
Serve app using wsgiref or provided server.
Args:
- server (callable): An callable
### Response:
def serve(self, server=None):
    """Serve the app using wsgiref, or a provided server callable.

    Args:
        server (callable): a callable taking the WSGI app as its only
            argument. When omitted, a wsgiref server listening on port
            8000 is created and run until interrupted.
    """
    print('Listening on 0.0.0.0:8000')
    if server is None:
        from wsgiref.simple_server import make_server
        httpd = make_server('', 8000, self)
        try:
            httpd.serve_forever()
        finally:
            # Close the listening socket on shutdown (e.g. KeyboardInterrupt).
            # The old code called .socket.close() on the server *callable*,
            # which raised AttributeError instead of cleaning up.
            httpd.socket.close()
    else:
        # Custom server callables own their own resources; just invoke them.
        server(self)
def rgb_to_greyscale(r, g=None, b=None):
"""Convert the color from RGB to its greyscale equivalent
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % rgb_to_greyscale(1, 0.8, 0)
'(0.6, 0.6, 0.6)'
"""
if type(r) in [list,tuple]:
r, g, b = r
v = (r + g + b) / 3.0
return (v, v, v) | Convert the color from RGB to its greyscale equivalent
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % rgb_to_greyscale(1, 0.8, 0)
'(0.6, 0.6, 0.6)' | Below is the the instruction that describes the task:
### Input:
Convert the color from RGB to its greyscale equivalent
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % rgb_to_greyscale(1, 0.8, 0)
'(0.6, 0.6, 0.6)'
### Response:
def rgb_to_greyscale(r, g=None, b=None):
    """Convert the color from RGB to its greyscale equivalent

    Parameters:
        :r:
            The Red component value [0...1], or an (r, g, b) sequence.
        :g:
            The Green component value [0...1] (ignored if ``r`` is a sequence).
        :b:
            The Blue component value [0...1] (ignored if ``r`` is a sequence).

    Returns:
        The color as an (r, g, b) tuple in the range:
        r[0...1],
        g[0...1],
        b[0...1]
        where each component is the arithmetic mean of the inputs.

    >>> '(%g, %g, %g)' % rgb_to_greyscale(1, 0.8, 0)
    '(0.6, 0.6, 0.6)'
    """
    # isinstance is the idiomatic check and also accepts list/tuple
    # subclasses, which `type(r) in [list, tuple]` did not.
    if isinstance(r, (list, tuple)):
        r, g, b = r
    v = (r + g + b) / 3.0
    return (v, v, v)
def bookmark(ctx, username): # pylint:disable=redefined-outer-name
"""Commands for bookmarks."""
ctx.obj = ctx.obj or {}
    ctx.obj['username'] = username | Commands for bookmarks. | Below is the instruction that describes the task:
### Input:
Commands for bookmarks.
### Response:
def bookmark(ctx, username):  # pylint:disable=redefined-outer-name
    """Commands for bookmarks."""
    # Lazily create the shared context object before storing the user.
    if not ctx.obj:
        ctx.obj = {}
    ctx.obj['username'] = username
def validate_k8s_version(namespace):
"""Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server
to use its default version."""
if namespace.kubernetes_version:
k8s_release_regex = re.compile(r'^[v|V]?(\d+\.\d+\.\d+.*)$')
found = k8s_release_regex.findall(namespace.kubernetes_version)
if found:
namespace.kubernetes_version = found[0]
else:
raise CLIError('--kubernetes-version should be the full version number, '
'such as "1.7.12" or "1.8.7"') | Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server
to use its default version. | Below is the the instruction that describes the task:
### Input:
Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server
to use its default version.
### Response:
def validate_k8s_version(namespace):
    """Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server
    to use its default version."""
    if namespace.kubernetes_version:
        # [vV], not [v|V]: inside a character class '|' is a literal pipe,
        # so the old pattern wrongly accepted versions prefixed with '|'.
        k8s_release_regex = re.compile(r'^[vV]?(\d+\.\d+\.\d+.*)$')
        found = k8s_release_regex.findall(namespace.kubernetes_version)
        if found:
            # Normalize by stripping the optional leading 'v'/'V'.
            namespace.kubernetes_version = found[0]
        else:
            raise CLIError('--kubernetes-version should be the full version number, '
                           'such as "1.7.12" or "1.8.7"')
def infix_filename(self, name, default, infix, ext=None):
"""Unless *name* is provided, insert *infix* before the extension *ext* of *default*."""
if name is None:
p, oldext = os.path.splitext(default)
if ext is None:
ext = oldext
if ext.startswith(os.extsep):
ext = ext[1:]
name = self.filename(p+infix, ext=ext)
    return name | Unless *name* is provided, insert *infix* before the extension *ext* of *default*. | Below is the instruction that describes the task:
### Input:
Unless *name* is provided, insert *infix* before the extension *ext* of *default*.
### Response:
def infix_filename(self, name, default, infix, ext=None):
    """Unless *name* is provided, insert *infix* before the extension *ext* of *default*."""
    # An explicit name wins outright.
    if name is not None:
        return name
    stem, default_ext = os.path.splitext(default)
    suffix = default_ext if ext is None else ext
    # self.filename expects the extension without the leading separator.
    if suffix.startswith(os.extsep):
        suffix = suffix[1:]
    return self.filename(stem + infix, ext=suffix)
def write_directory_data(fp, offset, name_bytes, data_len, crc32, dt):
"""
Write a zip fie directory entry at the current file position
:param fp: the file point to which to write the header
:param offset: the offset of the associated local file header
:param name: the name of the file
:param data_len: the length of data that will be written to the archive
:param crc32: the crc32 of the data to be written
:param dt: the datetime to write to the archive
"""
fp.write(struct.pack('I', 0x02014b50)) # central directory header
fp.write(struct.pack('H', 10)) # made by version (default)
fp.write(struct.pack('H', 10)) # extract version (default)
fp.write(struct.pack('H', 0)) # general purpose bits
fp.write(struct.pack('H', 0)) # compression method
msdos_date = int(dt.year - 1980) << 9 | int(dt.month) << 5 | int(dt.day)
msdos_time = int(dt.hour) << 11 | int(dt.minute) << 5 | int(dt.second)
fp.write(struct.pack('H', msdos_time)) # extract version (default)
fp.write(struct.pack('H', msdos_date)) # extract version (default)
fp.write(struct.pack('I', crc32)) # crc32
fp.write(struct.pack('I', data_len)) # compressed length
fp.write(struct.pack('I', data_len)) # uncompressed length
fp.write(struct.pack('H', len(name_bytes))) # name length
fp.write(struct.pack('H', 0)) # extra length
fp.write(struct.pack('H', 0)) # comments length
fp.write(struct.pack('H', 0)) # disk number
fp.write(struct.pack('H', 0)) # internal file attributes
fp.write(struct.pack('I', 0)) # external file attributes
fp.write(struct.pack('I', offset)) # relative offset of file header
fp.write(name_bytes) | Write a zip fie directory entry at the current file position
:param fp: the file point to which to write the header
:param offset: the offset of the associated local file header
:param name: the name of the file
:param data_len: the length of data that will be written to the archive
:param crc32: the crc32 of the data to be written
:param dt: the datetime to write to the archive | Below is the the instruction that describes the task:
### Input:
Write a zip fie directory entry at the current file position
:param fp: the file point to which to write the header
:param offset: the offset of the associated local file header
:param name: the name of the file
:param data_len: the length of data that will be written to the archive
:param crc32: the crc32 of the data to be written
:param dt: the datetime to write to the archive
### Response:
def write_directory_data(fp, offset, name_bytes, data_len, crc32, dt):
    """
    Write a zip file central directory entry at the current file position
    :param fp: the file object to which to write the header
    :param offset: the offset of the associated local file header
    :param name_bytes: the name of the file, as bytes
    :param data_len: the length of data that will be written to the archive
    :param crc32: the crc32 of the data to be written
    :param dt: the datetime to write to the archive
    """
    # The ZIP format is little-endian: '<' fixes the byte order (and
    # disables native alignment). Bare 'I'/'H' used the host's native
    # order, producing corrupt archives on big-endian machines.
    fp.write(struct.pack('<I', 0x02014b50))  # central directory header signature
    fp.write(struct.pack('<H', 10))  # version made by (default)
    fp.write(struct.pack('<H', 10))  # version needed to extract (default)
    fp.write(struct.pack('<H', 0))  # general purpose bits
    fp.write(struct.pack('<H', 0))  # compression method (stored)
    # MS-DOS packed date/time: year is offset from 1980, and the time field
    # stores seconds with 2-second resolution (seconds // 2), per the spec.
    msdos_date = int(dt.year - 1980) << 9 | int(dt.month) << 5 | int(dt.day)
    msdos_time = int(dt.hour) << 11 | int(dt.minute) << 5 | int(dt.second) // 2
    fp.write(struct.pack('<H', msdos_time))  # last mod file time
    fp.write(struct.pack('<H', msdos_date))  # last mod file date
    fp.write(struct.pack('<I', crc32))  # crc32
    fp.write(struct.pack('<I', data_len))  # compressed length
    fp.write(struct.pack('<I', data_len))  # uncompressed length
    fp.write(struct.pack('<H', len(name_bytes)))  # name length
    fp.write(struct.pack('<H', 0))  # extra field length
    fp.write(struct.pack('<H', 0))  # file comment length
    fp.write(struct.pack('<H', 0))  # disk number start
    fp.write(struct.pack('<H', 0))  # internal file attributes
    fp.write(struct.pack('<I', 0))  # external file attributes
    fp.write(struct.pack('<I', offset))  # relative offset of local file header
    fp.write(name_bytes)
def update_job(self, job_details):
"""Update a job in the `JobArchive` """
other = self.get_details(job_details.jobname,
job_details.jobkey)
other.timestamp = job_details.timestamp
other.status = job_details.status
other.update_table_row(self._table, other.dbkey - 1)
    return other | Update a job in the `JobArchive` | Below is the instruction that describes the task:
### Input:
Update a job in the `JobArchive`
### Response:
def update_job(self, job_details):
    """Update a job in the `JobArchive`."""
    # Look up the archived record matching the incoming details.
    existing = self.get_details(job_details.jobname, job_details.jobkey)
    # Copy over the mutable fields, then persist the row (dbkey is 1-based).
    existing.timestamp = job_details.timestamp
    existing.status = job_details.status
    existing.update_table_row(self._table, existing.dbkey - 1)
    return existing
def get_xdg_dir():
"""Return the XDG_CONFIG_HOME, if it is defined and exists, else None.
This is only for non-OS X posix (Linux,Unix,etc.) systems.
"""
env = os.environ
if os.name == 'posix' and sys.platform != 'darwin':
# Linux, Unix, AIX, etc.
# use ~/.config if empty OR not set
xdg = env.get("XDG_CONFIG_HOME", None) or os.path.join(get_home_dir(), '.config')
if xdg and _writable_dir(xdg):
return py3compat.cast_unicode(xdg, fs_encoding)
return None | Return the XDG_CONFIG_HOME, if it is defined and exists, else None.
This is only for non-OS X posix (Linux,Unix,etc.) systems. | Below is the the instruction that describes the task:
### Input:
Return the XDG_CONFIG_HOME, if it is defined and exists, else None.
This is only for non-OS X posix (Linux,Unix,etc.) systems.
### Response:
def get_xdg_dir():
    """Return the XDG_CONFIG_HOME, if it is defined and exists, else None.

    This is only for non-OS X posix (Linux, Unix, etc.) systems.
    """
    # Only meaningful on posix platforms other than macOS.
    if os.name != 'posix' or sys.platform == 'darwin':
        return None
    # Fall back to ~/.config when XDG_CONFIG_HOME is unset *or* empty.
    xdg = os.environ.get("XDG_CONFIG_HOME", None) or os.path.join(get_home_dir(), '.config')
    if xdg and _writable_dir(xdg):
        return py3compat.cast_unicode(xdg, fs_encoding)
    return None
def flush_buffers(self):
"""Default implementation, calls Read() until it blocks."""
while True:
try:
self.read(FLUSH_READ_SIZE, timeout_ms=10)
except usb_exceptions.LibusbWrappingError as exception:
if exception.is_timeout():
break
            raise | Default implementation, calls Read() until it blocks. | Below is the instruction that describes the task:
### Input:
Default implementation, calls Read() until it blocks.
### Response:
def flush_buffers(self):
    """Default implementation, calls Read() until it blocks."""
    while True:
        try:
            self.read(FLUSH_READ_SIZE, timeout_ms=10)
        except usb_exceptions.LibusbWrappingError as exc:
            # Any non-timeout USB error is a real failure: propagate it.
            if not exc.is_timeout():
                raise
            # A timeout means the device has nothing left to send.
            break
def split(self, amount):
"""Split the value given by amount according to the RecurringCostSplit's portions
Args:
amount (Decimal):
Returns:
list[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal)
"""
split_objs = list(self.all())
if not split_objs:
raise NoSplitsFoundForRecurringCost()
portions = [split_obj.portion for split_obj in split_objs]
split_amounts = ratio_split(amount, portions)
return [
(split_objs[i], split_amount)
for i, split_amount
in enumerate(split_amounts)
] | Split the value given by amount according to the RecurringCostSplit's portions
Args:
amount (Decimal):
Returns:
list[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal) | Below is the the instruction that describes the task:
### Input:
Split the value given by amount according to the RecurringCostSplit's portions
Args:
amount (Decimal):
Returns:
list[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal)
### Response:
def split(self, amount):
    """Split the value given by amount according to the RecurringCostSplit's portions

    Args:
        amount (Decimal):

    Returns:
        list[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal)
    """
    splits = list(self.all())
    if not splits:
        raise NoSplitsFoundForRecurringCost()
    # Divide the amount proportionally to each split's portion.
    shares = ratio_split(amount, [split.portion for split in splits])
    paired = []
    for index, share in enumerate(shares):
        paired.append((splits[index], share))
    return paired
def _get_ref(data, position, obj_end, opts, element_name):
"""Decode (deprecated) BSON DBPointer to bson.dbref.DBRef."""
collection, position = _get_string(
data, position, obj_end, opts, element_name)
oid, position = _get_oid(data, position, obj_end, opts, element_name)
    return DBRef(collection, oid), position | Decode (deprecated) BSON DBPointer to bson.dbref.DBRef. | Below is the instruction that describes the task:
### Input:
Decode (deprecated) BSON DBPointer to bson.dbref.DBRef.
### Response:
def _get_ref(data, position, obj_end, opts, element_name):
    """Decode (deprecated) BSON DBPointer to bson.dbref.DBRef."""
    # A DBPointer is a string (collection name) immediately followed by an
    # ObjectId; decode both in sequence, threading the cursor position.
    collection, pos = _get_string(data, position, obj_end, opts, element_name)
    oid, pos = _get_oid(data, pos, obj_end, opts, element_name)
    return DBRef(collection, oid), pos
def render_summary(self, include_title=True):
"""Render the traceback for the interactive console."""
title = ''
description = ''
frames = []
classes = ['traceback']
if not self.frames:
classes.append('noframe-traceback')
if include_title:
if self.is_syntax_error:
title = text_('Syntax Error')
else:
title = text_('Traceback <em>(most recent call last)</em>:')
for frame in self.frames:
frames.append(text_('<li%s>%s') % (
frame.info and text_(' title="%s"') % escape(frame.info) or text_(''),
frame.render()
))
if self.is_syntax_error:
description_wrapper = text_('<pre class=syntaxerror>%s</pre>')
else:
description_wrapper = text_('<blockquote>%s</blockquote>')
return SUMMARY_HTML % {
'classes': text_(' '.join(classes)),
'title': title and text_('<h3>%s</h3>' % title) or text_(''),
'frames': text_('\n'.join(frames)),
'description': description_wrapper % escape(self.exception)
    } | Render the traceback for the interactive console. | Below is the instruction that describes the task:
### Input:
Render the traceback for the interactive console.
### Response:
def render_summary(self, include_title=True):
    """Render the traceback for the interactive console.

    :param include_title: when True, prepend a heading ("Syntax Error" or
        "Traceback (most recent call last):") above the frames.
    :return: an HTML fragment produced by filling in ``SUMMARY_HTML``.
    """
    # NOTE: the old code also initialized an unused local `description`;
    # it has been removed (the wrapper below is built independently).
    title = ''
    frames = []
    classes = ['traceback']
    if not self.frames:
        classes.append('noframe-traceback')
    if include_title:
        if self.is_syntax_error:
            title = text_('Syntax Error')
        else:
            title = text_('Traceback <em>(most recent call last)</em>:')
    for frame in self.frames:
        # Attach the frame's info (if any) as an escaped title attribute.
        frames.append(text_('<li%s>%s') % (
            frame.info and text_(' title="%s"') % escape(frame.info) or text_(''),
            frame.render()
        ))
    if self.is_syntax_error:
        description_wrapper = text_('<pre class=syntaxerror>%s</pre>')
    else:
        description_wrapper = text_('<blockquote>%s</blockquote>')
    return SUMMARY_HTML % {
        'classes': text_(' '.join(classes)),
        'title': title and text_('<h3>%s</h3>' % title) or text_(''),
        'frames': text_('\n'.join(frames)),
        'description': description_wrapper % escape(self.exception)
    }
def reflect_overhang(self, clip):
"""
Compute the overhang and reflect it internally so respect periodic
padding rules (see states._tile_from_particle_change). Returns both
the inner tile and the inner tile with necessary pad.
"""
orig = self.copy()
tile = self.copy()
hangl, hangr = tile.overhang(clip)
tile = tile.pad(hangl)
tile = tile.pad(hangr)
inner = Tile.intersection([clip, orig])
outer = Tile.intersection([clip, tile])
return inner, outer | Compute the overhang and reflect it internally so respect periodic
padding rules (see states._tile_from_particle_change). Returns both
the inner tile and the inner tile with necessary pad. | Below is the the instruction that describes the task:
### Input:
Compute the overhang and reflect it internally so respect periodic
padding rules (see states._tile_from_particle_change). Returns both
the inner tile and the inner tile with necessary pad.
### Response:
def reflect_overhang(self, clip):
    """
    Compute the overhang and reflect it internally so respect periodic
    padding rules (see states._tile_from_particle_change). Returns both
    the inner tile and the inner tile with necessary pad.
    """
    original = self.copy()
    padded = self.copy()
    # Grow the tile by however much it hangs over the clip on each side.
    hang_left, hang_right = padded.overhang(clip)
    padded = padded.pad(hang_left).pad(hang_right)
    inner = Tile.intersection([clip, original])
    outer = Tile.intersection([clip, padded])
    return inner, outer
def issue(self, issue_id):
"""Get the issue data by its ID"""
path = urijoin("bugs", str(issue_id))
url_issue = self.__get_url(path)
raw_text = self.__send_request(url_issue)
    return raw_text | Get the issue data by its ID | Below is the instruction that describes the task:
### Input:
Get the issue data by its ID
### Response:
def issue(self, issue_id):
    """Get the issue data by its ID"""
    # Build the bug endpoint URL, then fetch its raw payload.
    url = self.__get_url(urijoin("bugs", str(issue_id)))
    return self.__send_request(url)
def normalize(self, var):
"""Perform sequence variants normalization for single variant
"""
assert isinstance(var, hgvs.sequencevariant.SequenceVariant
), "variant must be a parsed HGVS sequence variant object"
if self.validator:
self.validator.validate(var)
init_met = False
if var.posedit is not None and isinstance(var.posedit, hgvs.edit.AARefAlt):
init_met = var.posedit.init_met
if var.posedit is None or var.posedit.uncertain or init_met or var.posedit.pos is None:
return var
type = var.type
if type == "p":
raise HGVSUnsupportedOperationError(
"Unsupported normalization of protein level variants: {0}".format(var))
if var.posedit.edit.type == "con":
raise HGVSUnsupportedOperationError(
"Unsupported normalization of conversion variants: {0}", format(var))
var.fill_ref(self.hdp)
if var.posedit.edit.type == "identity":
var_norm = copy.deepcopy(var)
return var_norm
# For c. variants normalization, first convert to n. variant
# and perform normalization at the n. level, then convert the
# normalized n. variant back to c. variant.
if type == "c":
var = self.hm.c_to_n(var)
if var.type in "nr":
if var.posedit.pos.start.offset != 0 or var.posedit.pos.end.offset != 0:
raise HGVSUnsupportedOperationError(
"Normalization of intronic variants is not supported")
# g, m, n, r sequences all use sequence start as the datum
# That"s an essential assumption herein
# (this is why we may have converted from c to n above)
assert var.type in "gmnr", "Internal Error: variant must be of type g, m, n, r"
bound_s, bound_e = self._get_boundary(var)
boundary = (bound_s, bound_e)
start, end, (ref, alt) = self._normalize_alleles(var, boundary)
ref_len = len(ref)
alt_len = len(alt)
# Generate normalized variant
if alt_len == ref_len:
ref_start = start
ref_end = end - 1
# inversion
if ref_len > 1 and ref == reverse_complement(alt):
edit = hgvs.edit.Inv(ref=ref)
# ident
elif ref_len == 0 and alt_len == 0:
ref_start = ref_end
edit = hgvs.edit.NARefAlt(ref=ref, alt=alt)
# substitution or delins
else:
edit = hgvs.edit.NARefAlt(ref=ref, alt=alt)
if alt_len < ref_len:
# del or delins
ref_start = start
ref_end = end - 1
edit = hgvs.edit.NARefAlt(ref=ref, alt=None if alt_len == 0 else alt)
elif alt_len > ref_len:
# ins or dup
if ref_len == 0:
if self.shuffle_direction == 3:
adj_seq = self._fetch_bounded_seq(var, start - alt_len - 1, end - 1, 0,
boundary)
else:
adj_seq = self._fetch_bounded_seq(var, start - 1, start + alt_len - 1, 0,
boundary)
# ins
if alt != adj_seq:
ref_start = start - 1
ref_end = end
edit = hgvs.edit.NARefAlt(ref=None, alt=alt)
# dup
else:
if self.shuffle_direction == 3:
ref_start = start - alt_len
ref_end = end - 1
edit = hgvs.edit.Dup(ref=alt)
else:
ref_start = start
ref_end = start + alt_len - 1
edit = hgvs.edit.Dup(ref=alt)
# delins
else:
ref_start = start
ref_end = end - 1
edit = hgvs.edit.NARefAlt(ref=ref, alt=alt)
# ensure the start is not 0
if ref_start == 0:
ref = self._fetch_bounded_seq(var, 0, 1, 0, boundary)
alt = alt + ref
edit = hgvs.edit.NARefAlt(ref=ref, alt=alt)
ref_start = 1
ref_end = 1
# ensure the end is not outside of reference sequence
tgt_len = self._get_tgt_length(var)
if ref_end == tgt_len + 1:
ref = self._fetch_bounded_seq(var, tgt_len - 1, tgt_len, 0, boundary)
alt = ref + alt
edit = hgvs.edit.NARefAlt(ref=ref, alt=alt)
ref_start = tgt_len
ref_end = tgt_len
var_norm = copy.deepcopy(var)
var_norm.posedit.edit = edit
var_norm.posedit.pos.start.base = ref_start
var_norm.posedit.pos.end.base = ref_end
if type == "c":
var_norm = self.hm.n_to_c(var_norm)
    return var_norm | Perform sequence variants normalization for single variant | Below is the instruction that describes the task:
### Input:
Perform sequence variants normalization for single variant
### Response:
def normalize(self, var):
    """Perform sequence variants normalization for single variant.

    The variant is shuffled toward the 3' or 5' end (per
    ``self.shuffle_direction``) and rewritten as a canonical edit
    (substitution, del, ins, dup, inv, or delins).

    :param var: a parsed :class:`hgvs.sequencevariant.SequenceVariant`
    :returns: a new, normalized ``SequenceVariant``; a deep copy is made,
        so the caller's object is not mutated (aside from ``fill_ref``)
    :raises HGVSUnsupportedOperationError: for protein-level (p.),
        conversion (con), or intronic variants
    """
    assert isinstance(var, hgvs.sequencevariant.SequenceVariant
                      ), "variant must be a parsed HGVS sequence variant object"
    if self.validator:
        self.validator.validate(var)
    init_met = False
    if var.posedit is not None and isinstance(var.posedit, hgvs.edit.AARefAlt):
        init_met = var.posedit.init_met
    # Nothing to normalize for uncertain/empty/init-met variants.
    if var.posedit is None or var.posedit.uncertain or init_met or var.posedit.pos is None:
        return var
    variant_type = var.type  # renamed from `type` to avoid shadowing the builtin
    if variant_type == "p":
        raise HGVSUnsupportedOperationError(
            "Unsupported normalization of protein level variants: {0}".format(var))
    if var.posedit.edit.type == "con":
        # BUGFIX: was `"...{0}", format(var)` — a stray comma passed two
        # arguments to the exception instead of formatting the message.
        raise HGVSUnsupportedOperationError(
            "Unsupported normalization of conversion variants: {0}".format(var))
    var.fill_ref(self.hdp)
    if var.posedit.edit.type == "identity":
        var_norm = copy.deepcopy(var)
        return var_norm
    # For c. variants normalization, first convert to n. variant
    # and perform normalization at the n. level, then convert the
    # normalized n. variant back to c. variant.
    if variant_type == "c":
        var = self.hm.c_to_n(var)
    if var.type in "nr":
        if var.posedit.pos.start.offset != 0 or var.posedit.pos.end.offset != 0:
            raise HGVSUnsupportedOperationError(
                "Normalization of intronic variants is not supported")
    # g, m, n, r sequences all use sequence start as the datum
    # That's an essential assumption herein
    # (this is why we may have converted from c to n above)
    assert var.type in "gmnr", "Internal Error: variant must be of type g, m, n, r"
    bound_s, bound_e = self._get_boundary(var)
    boundary = (bound_s, bound_e)
    start, end, (ref, alt) = self._normalize_alleles(var, boundary)
    ref_len = len(ref)
    alt_len = len(alt)
    # Generate normalized variant
    if alt_len == ref_len:
        ref_start = start
        ref_end = end - 1
        # inversion
        if ref_len > 1 and ref == reverse_complement(alt):
            edit = hgvs.edit.Inv(ref=ref)
        # ident
        elif ref_len == 0 and alt_len == 0:
            ref_start = ref_end
            edit = hgvs.edit.NARefAlt(ref=ref, alt=alt)
        # substitution or delins
        else:
            edit = hgvs.edit.NARefAlt(ref=ref, alt=alt)
    if alt_len < ref_len:
        # del or delins
        ref_start = start
        ref_end = end - 1
        edit = hgvs.edit.NARefAlt(ref=ref, alt=None if alt_len == 0 else alt)
    elif alt_len > ref_len:
        # ins or dup
        if ref_len == 0:
            # Fetch the sequence adjacent to the insertion point; an
            # insertion identical to its neighbor is really a duplication.
            if self.shuffle_direction == 3:
                adj_seq = self._fetch_bounded_seq(var, start - alt_len - 1, end - 1, 0,
                                                  boundary)
            else:
                adj_seq = self._fetch_bounded_seq(var, start - 1, start + alt_len - 1, 0,
                                                  boundary)
            # ins
            if alt != adj_seq:
                ref_start = start - 1
                ref_end = end
                edit = hgvs.edit.NARefAlt(ref=None, alt=alt)
            # dup
            else:
                if self.shuffle_direction == 3:
                    ref_start = start - alt_len
                    ref_end = end - 1
                    edit = hgvs.edit.Dup(ref=alt)
                else:
                    ref_start = start
                    ref_end = start + alt_len - 1
                    edit = hgvs.edit.Dup(ref=alt)
        # delins
        else:
            ref_start = start
            ref_end = end - 1
            edit = hgvs.edit.NARefAlt(ref=ref, alt=alt)
    # ensure the start is not 0
    if ref_start == 0:
        ref = self._fetch_bounded_seq(var, 0, 1, 0, boundary)
        alt = alt + ref
        edit = hgvs.edit.NARefAlt(ref=ref, alt=alt)
        ref_start = 1
        ref_end = 1
    # ensure the end is not outside of reference sequence
    tgt_len = self._get_tgt_length(var)
    if ref_end == tgt_len + 1:
        ref = self._fetch_bounded_seq(var, tgt_len - 1, tgt_len, 0, boundary)
        alt = ref + alt
        edit = hgvs.edit.NARefAlt(ref=ref, alt=alt)
        ref_start = tgt_len
        ref_end = tgt_len
    var_norm = copy.deepcopy(var)
    var_norm.posedit.edit = edit
    var_norm.posedit.pos.start.base = ref_start
    var_norm.posedit.pos.end.base = ref_end
    if variant_type == "c":
        var_norm = self.hm.n_to_c(var_norm)
    return var_norm
def lru_cache(maxsize=100, typed=False):
    """Least-recently-used cache decorator.
    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.
    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.
    Arguments to the cached function must be hashable.
    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
    f.cache_info(). Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.
    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    # Users should only access the lru_cache through its public API:
    # cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).
    def decorating_function(user_function):
        # Recency is tracked with a circular doubly-linked list of
        # [PREV, NEXT, KEY, RESULT] links; ``cache`` maps key -> link.
        cache = dict()
        stats = [0, 0]  # make statistics updateable non-locally
        HITS, MISSES = 0, 1  # names for the stats fields
        make_key = _make_key
        cache_get = cache.get  # bound method to lookup key or return None
        _len = len  # localize the global len() function
        lock = RLock()  # because linkedlist updates aren't threadsafe
        root = []  # root of the circular doubly linked list
        root[:] = [root, root, None, None]  # initialize by pointing to self
        nonlocal_root = [root]  # make updateable non-locally
        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3  # names for the link fields
        # Three wrapper variants: no caching at all, unbounded cache,
        # and LRU-bounded cache.
        if maxsize == 0:
            def wrapper(*args, **kwds):
                # no caching, just do a statistics update after a successful call
                result = user_function(*args, **kwds)
                stats[MISSES] += 1
                return result
        elif maxsize is None:
            def wrapper(*args, **kwds):
                # simple caching without ordering or size limit
                key = make_key(args, kwds, typed)
                result = cache_get(key, root)  # root used here as a unique not-found sentinel
                if result is not root:
                    stats[HITS] += 1
                    return result
                result = user_function(*args, **kwds)
                cache[key] = result
                stats[MISSES] += 1
                return result
        else:
            def wrapper(*args, **kwds):
                # size limited caching that tracks accesses by recency
                # Fast path: a plain positional call can use the args tuple
                # itself as the cache key.
                key = make_key(args, kwds, typed) if kwds or typed else args
                with lock:
                    link = cache_get(key)
                    if link is not None:
                        # record recent use of the key by moving it to the front of the list
                        root, = nonlocal_root
                        link_prev, link_next, key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = root[PREV]
                        last[NEXT] = root[PREV] = link
                        link[PREV] = last
                        link[NEXT] = root
                        stats[HITS] += 1
                        return result
                # Call the user function with the lock released so slow
                # functions don't serialize unrelated cache users.
                result = user_function(*args, **kwds)
                with lock:
                    root, = nonlocal_root
                    if key in cache:
                        # getting here means that this same key was added to the
                        # cache while the lock was released.  since the link
                        # update is already done, we need only return the
                        # computed result and update the count of misses.
                        pass
                    elif _len(cache) >= maxsize:
                        # use the old root to store the new key and result
                        oldroot = root
                        oldroot[KEY] = key
                        oldroot[RESULT] = result
                        # empty the oldest link and make it the new root
                        root = nonlocal_root[0] = oldroot[NEXT]
                        oldkey = root[KEY]
                        root[KEY] = root[RESULT] = None
                        # now update the cache dictionary for the new links
                        del cache[oldkey]
                        cache[key] = oldroot
                    else:
                        # put result in a new link at the front of the list
                        last = root[PREV]
                        link = [last, root, key, result]
                        last[NEXT] = root[PREV] = cache[key] = link
                stats[MISSES] += 1
                return result
        def cache_info():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
        def cache_clear():
            """Clear the cache and cache statistics"""
            with lock:
                cache.clear()
                root = nonlocal_root[0]
                # re-point the root at itself, dropping every link
                root[:] = [root, root, None, None]
                stats[:] = [0, 0]
        wrapper.__wrapped__ = user_function
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)
    return decorating_function | Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used | Below is the instruction that describes the task:
### Input:
Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
### Response:
def lru_cache(maxsize=100, typed=False):
    """Least-recently-used cache decorator.
    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.
    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.
    Arguments to the cached function must be hashable.
    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
    f.cache_info(). Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.
    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    # Users should only access the lru_cache through its public API:
    # cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).
    def decorating_function(user_function):
        # Recency is tracked with a circular doubly-linked list of
        # [PREV, NEXT, KEY, RESULT] links; ``cache`` maps key -> link.
        cache = dict()
        stats = [0, 0]  # make statistics updateable non-locally
        HITS, MISSES = 0, 1  # names for the stats fields
        make_key = _make_key
        cache_get = cache.get  # bound method to lookup key or return None
        _len = len  # localize the global len() function
        lock = RLock()  # because linkedlist updates aren't threadsafe
        root = []  # root of the circular doubly linked list
        root[:] = [root, root, None, None]  # initialize by pointing to self
        nonlocal_root = [root]  # make updateable non-locally
        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3  # names for the link fields
        # Three wrapper variants: no caching at all, unbounded cache,
        # and LRU-bounded cache.
        if maxsize == 0:
            def wrapper(*args, **kwds):
                # no caching, just do a statistics update after a successful call
                result = user_function(*args, **kwds)
                stats[MISSES] += 1
                return result
        elif maxsize is None:
            def wrapper(*args, **kwds):
                # simple caching without ordering or size limit
                key = make_key(args, kwds, typed)
                result = cache_get(key, root)  # root used here as a unique not-found sentinel
                if result is not root:
                    stats[HITS] += 1
                    return result
                result = user_function(*args, **kwds)
                cache[key] = result
                stats[MISSES] += 1
                return result
        else:
            def wrapper(*args, **kwds):
                # size limited caching that tracks accesses by recency
                # Fast path: a plain positional call can use the args tuple
                # itself as the cache key.
                key = make_key(args, kwds, typed) if kwds or typed else args
                with lock:
                    link = cache_get(key)
                    if link is not None:
                        # record recent use of the key by moving it to the front of the list
                        root, = nonlocal_root
                        link_prev, link_next, key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = root[PREV]
                        last[NEXT] = root[PREV] = link
                        link[PREV] = last
                        link[NEXT] = root
                        stats[HITS] += 1
                        return result
                # Call the user function with the lock released so slow
                # functions don't serialize unrelated cache users.
                result = user_function(*args, **kwds)
                with lock:
                    root, = nonlocal_root
                    if key in cache:
                        # getting here means that this same key was added to the
                        # cache while the lock was released.  since the link
                        # update is already done, we need only return the
                        # computed result and update the count of misses.
                        pass
                    elif _len(cache) >= maxsize:
                        # use the old root to store the new key and result
                        oldroot = root
                        oldroot[KEY] = key
                        oldroot[RESULT] = result
                        # empty the oldest link and make it the new root
                        root = nonlocal_root[0] = oldroot[NEXT]
                        oldkey = root[KEY]
                        root[KEY] = root[RESULT] = None
                        # now update the cache dictionary for the new links
                        del cache[oldkey]
                        cache[key] = oldroot
                    else:
                        # put result in a new link at the front of the list
                        last = root[PREV]
                        link = [last, root, key, result]
                        last[NEXT] = root[PREV] = cache[key] = link
                stats[MISSES] += 1
                return result
        def cache_info():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
        def cache_clear():
            """Clear the cache and cache statistics"""
            with lock:
                cache.clear()
                root = nonlocal_root[0]
                # re-point the root at itself, dropping every link
                root[:] = [root, root, None, None]
                stats[:] = [0, 0]
        wrapper.__wrapped__ = user_function
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)
    return decorating_function
def get_peer_creds(self): # LRU cached on per-instance basis, see __init__
"""Return the PID/UID/GID tuple of the peer socket for UNIX sockets.
This function uses SO_PEERCRED to query the UNIX PID, UID, GID
of the peer, which is only available if the bind address is
a UNIX domain socket.
Raises:
NotImplementedError: in case of unsupported socket type
RuntimeError: in case of SO_PEERCRED lookup unsupported or disabled
"""
PEERCRED_STRUCT_DEF = '3i'
if IS_WINDOWS or self.socket.family != socket.AF_UNIX:
raise NotImplementedError(
'SO_PEERCRED is only supported in Linux kernel and WSL',
)
elif not self.peercreds_enabled:
raise RuntimeError(
'Peer creds lookup is disabled within this server',
)
try:
peer_creds = self.socket.getsockopt(
# FIXME: Use LOCAL_CREDS for BSD-like OSs
# Ref: https://gist.github.com/LucaFilipozzi/e4f1e118202aff27af6aadebda1b5d91 # noqa
socket.SOL_SOCKET, socket.SO_PEERCRED,
struct.calcsize(PEERCRED_STRUCT_DEF),
)
except socket.error as socket_err:
"""Non-Linux kernels don't support SO_PEERCRED.
Refs:
http://welz.org.za/notes/on-peer-cred.html
https://github.com/daveti/tcpSockHack
msdn.microsoft.com/en-us/commandline/wsl/release_notes#build-15025
"""
six.raise_from( # 3.6+: raise RuntimeError from socket_err
RuntimeError,
socket_err,
)
else:
pid, uid, gid = struct.unpack(PEERCRED_STRUCT_DEF, peer_creds)
return pid, uid, gid | Return the PID/UID/GID tuple of the peer socket for UNIX sockets.
This function uses SO_PEERCRED to query the UNIX PID, UID, GID
of the peer, which is only available if the bind address is
a UNIX domain socket.
Raises:
NotImplementedError: in case of unsupported socket type
    RuntimeError: in case of SO_PEERCRED lookup unsupported or disabled | Below is the instruction that describes the task:
### Input:
Return the PID/UID/GID tuple of the peer socket for UNIX sockets.
This function uses SO_PEERCRED to query the UNIX PID, UID, GID
of the peer, which is only available if the bind address is
a UNIX domain socket.
Raises:
NotImplementedError: in case of unsupported socket type
RuntimeError: in case of SO_PEERCRED lookup unsupported or disabled
### Response:
def get_peer_creds(self):  # LRU cached on per-instance basis, see __init__
    """Look up the UNIX peer credentials of the connected socket.

    Uses the ``SO_PEERCRED`` socket option, which is only meaningful
    for UNIX domain sockets on a Linux kernel (including WSL).

    Returns:
        tuple: the ``(pid, uid, gid)`` of the peer process.

    Raises:
        NotImplementedError: if running on Windows or the bound socket
            is not an ``AF_UNIX`` socket
        RuntimeError: if peer-creds lookup is disabled on this server,
            or the kernel does not support ``SO_PEERCRED``
    """
    PEERCRED_STRUCT_DEF = '3i'
    if IS_WINDOWS or self.socket.family != socket.AF_UNIX:
        raise NotImplementedError(
            'SO_PEERCRED is only supported in Linux kernel and WSL',
        )
    if not self.peercreds_enabled:
        raise RuntimeError(
            'Peer creds lookup is disabled within this server',
        )
    creds_size = struct.calcsize(PEERCRED_STRUCT_DEF)
    try:
        # FIXME: Use LOCAL_CREDS for BSD-like OSs
        # Ref: https://gist.github.com/LucaFilipozzi/e4f1e118202aff27af6aadebda1b5d91  # noqa
        raw_creds = self.socket.getsockopt(
            socket.SOL_SOCKET, socket.SO_PEERCRED, creds_size,
        )
    except socket.error as socket_err:
        # Non-Linux kernels don't support SO_PEERCRED.  Refs:
        # http://welz.org.za/notes/on-peer-cred.html
        # https://github.com/daveti/tcpSockHack
        # msdn.microsoft.com/en-us/commandline/wsl/release_notes#build-15025
        six.raise_from(  # 3.6+: raise RuntimeError from socket_err
            RuntimeError,
            socket_err,
        )
    pid, uid, gid = struct.unpack(PEERCRED_STRUCT_DEF, raw_creds)
    return pid, uid, gid
def init():
'''
Return the list of svn remotes and their configuration information
'''
bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
new_remote = False
repos = []
per_remote_defaults = {}
for param in PER_REMOTE_OVERRIDES:
per_remote_defaults[param] = \
six.text_type(__opts__['svnfs_{0}'.format(param)])
for remote in __opts__['svnfs_remotes']:
repo_conf = copy.deepcopy(per_remote_defaults)
if isinstance(remote, dict):
repo_url = next(iter(remote))
per_remote_conf = dict(
[(key, six.text_type(val)) for key, val in
six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))]
)
if not per_remote_conf:
log.error(
'Invalid per-remote configuration for remote %s. If no '
'per-remote parameters are being specified, there may be '
'a trailing colon after the URL, which should be removed. '
'Check the master configuration file.', repo_url
)
_failhard()
per_remote_errors = False
for param in (x for x in per_remote_conf
if x not in PER_REMOTE_OVERRIDES):
log.error(
'Invalid configuration parameter \'%s\' for remote %s. '
'Valid parameters are: %s. See the documentation for '
'further information.',
param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)
)
per_remote_errors = True
if per_remote_errors:
_failhard()
repo_conf.update(per_remote_conf)
else:
repo_url = remote
if not isinstance(repo_url, six.string_types):
log.error(
'Invalid svnfs remote %s. Remotes must be strings, you may '
'need to enclose the URL in quotes', repo_url
)
_failhard()
try:
repo_conf['mountpoint'] = salt.utils.url.strip_proto(
repo_conf['mountpoint']
)
except TypeError:
# mountpoint not specified
pass
hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
repo_hash = hash_type(repo_url).hexdigest()
rp_ = os.path.join(bp_, repo_hash)
if not os.path.isdir(rp_):
os.makedirs(rp_)
if not os.listdir(rp_):
# Only attempt a new checkout if the directory is empty.
try:
CLIENT.checkout(repo_url, rp_)
repos.append(rp_)
new_remote = True
except pysvn._pysvn.ClientError as exc:
log.error(
'Failed to initialize svnfs remote \'%s\': %s',
repo_url, exc
)
_failhard()
else:
# Confirm that there is an svn checkout at the necessary path by
# running pysvn.Client().status()
try:
CLIENT.status(rp_)
except pysvn._pysvn.ClientError as exc:
log.error(
'Cache path %s (corresponding remote: %s) exists but is '
'not a valid subversion checkout. You will need to '
'manually delete this directory on the master to continue '
'to use this svnfs remote.', rp_, repo_url
)
_failhard()
repo_conf.update({
'repo': rp_,
'url': repo_url,
'hash': repo_hash,
'cachedir': rp_,
'lockfile': os.path.join(rp_, 'update.lk')
})
repos.append(repo_conf)
if new_remote:
remote_map = os.path.join(__opts__['cachedir'], 'svnfs/remote_map.txt')
try:
with salt.utils.files.fopen(remote_map, 'w+') as fp_:
timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
fp_.write('# svnfs_remote map as of {0}\n'.format(timestamp))
for repo_conf in repos:
fp_.write(
salt.utils.stringutils.to_str(
'{0} = {1}\n'.format(
repo_conf['hash'], repo_conf['url']
)
)
)
except OSError:
pass
else:
log.info('Wrote new svnfs_remote map to %s', remote_map)
return repos | Return the list of svn remotes and their configuration information | Below is the the instruction that describes the task:
### Input:
Return the list of svn remotes and their configuration information
### Response:
def init():
    '''
    Return the list of svn remotes and their configuration information

    Each entry of the returned list is the remote's per-remote config
    dict augmented with the keys ``repo``, ``url``, ``hash``,
    ``cachedir`` and ``lockfile``.  Invalid configuration aborts the
    fileserver via ``_failhard()``.
    '''
    bp_ = os.path.join(__opts__['cachedir'], 'svnfs')
    new_remote = False
    repos = []
    # Global svnfs_* options provide the defaults for every remote.
    per_remote_defaults = {}
    for param in PER_REMOTE_OVERRIDES:
        per_remote_defaults[param] = \
            six.text_type(__opts__['svnfs_{0}'.format(param)])
    for remote in __opts__['svnfs_remotes']:
        repo_conf = copy.deepcopy(per_remote_defaults)
        if isinstance(remote, dict):
            # Dict form: {url: [{param: value}, ...]} with per-remote overrides
            repo_url = next(iter(remote))
            per_remote_conf = dict(
                [(key, six.text_type(val)) for key, val in
                 six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))]
            )
            if not per_remote_conf:
                log.error(
                    'Invalid per-remote configuration for remote %s. If no '
                    'per-remote parameters are being specified, there may be '
                    'a trailing colon after the URL, which should be removed. '
                    'Check the master configuration file.', repo_url
                )
                _failhard()
            per_remote_errors = False
            for param in (x for x in per_remote_conf
                          if x not in PER_REMOTE_OVERRIDES):
                log.error(
                    'Invalid configuration parameter \'%s\' for remote %s. '
                    'Valid parameters are: %s. See the documentation for '
                    'further information.',
                    param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)
                )
                per_remote_errors = True
            if per_remote_errors:
                _failhard()
            repo_conf.update(per_remote_conf)
        else:
            repo_url = remote
        if not isinstance(repo_url, six.string_types):
            log.error(
                'Invalid svnfs remote %s. Remotes must be strings, you may '
                'need to enclose the URL in quotes', repo_url
            )
            _failhard()
        try:
            repo_conf['mountpoint'] = salt.utils.url.strip_proto(
                repo_conf['mountpoint']
            )
        except TypeError:
            # mountpoint not specified
            pass
        hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
        # NOTE(review): hashing a str raises TypeError on Python 3; this
        # presumably relies on Python 2 semantics or an upstream encode --
        # confirm before relying on it under Python 3.
        repo_hash = hash_type(repo_url).hexdigest()
        rp_ = os.path.join(bp_, repo_hash)
        if not os.path.isdir(rp_):
            os.makedirs(rp_)
        if not os.listdir(rp_):
            # Only attempt a new checkout if the directory is empty.
            try:
                CLIENT.checkout(repo_url, rp_)
                # BUGFIX: previously ``repos.append(rp_)`` also ran here,
                # inserting a bare path string into ``repos``.  The full
                # repo_conf dict is appended below for every remote, and a
                # stray string entry made the remote_map write crash on
                # ``repo_conf['hash']`` (TypeError, not caught by the
                # OSError handler).
                new_remote = True
            except pysvn._pysvn.ClientError as exc:
                log.error(
                    'Failed to initialize svnfs remote \'%s\': %s',
                    repo_url, exc
                )
                _failhard()
        else:
            # Confirm that there is an svn checkout at the necessary path by
            # running pysvn.Client().status()
            try:
                CLIENT.status(rp_)
            except pysvn._pysvn.ClientError as exc:
                log.error(
                    'Cache path %s (corresponding remote: %s) exists but is '
                    'not a valid subversion checkout. You will need to '
                    'manually delete this directory on the master to continue '
                    'to use this svnfs remote.', rp_, repo_url
                )
                _failhard()
        repo_conf.update({
            'repo': rp_,
            'url': repo_url,
            'hash': repo_hash,
            'cachedir': rp_,
            'lockfile': os.path.join(rp_, 'update.lk')
        })
        repos.append(repo_conf)
    if new_remote:
        # Persist a human-readable hash -> URL map for the cache dirs.
        remote_map = os.path.join(__opts__['cachedir'], 'svnfs/remote_map.txt')
        try:
            with salt.utils.files.fopen(remote_map, 'w+') as fp_:
                timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
                fp_.write('# svnfs_remote map as of {0}\n'.format(timestamp))
                for repo_conf in repos:
                    fp_.write(
                        salt.utils.stringutils.to_str(
                            '{0} = {1}\n'.format(
                                repo_conf['hash'], repo_conf['url']
                            )
                        )
                    )
        except OSError:
            # Best-effort map file; failure to write it is non-fatal.
            pass
        else:
            log.info('Wrote new svnfs_remote map to %s', remote_map)
    return repos
def get_td_from_freqtau(template=None, taper=None, **kwargs):
    """Return time domain ringdown with all the modes specified.
    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    taper: {None, float}, optional
        Tapering at the beginning of the waveform with duration taper * tau.
        This option is recommended with timescales taper=1./2 or 1. for
        time-domain ringdown-only injections.
        The abrupt turn on of the ringdown can cause issues on the waveform
        when doing the fourier transform to the frequency domain. Setting
        taper will add a rapid ringup with timescale tau/10.
        Each mode and overtone will have a different taper depending on its tau,
        the final taper being the superposition of all the tapers.
    lmns : list
        Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55).
        The n specifies the number of overtones desired for the corresponding
        lm pair (maximum n=8).
        Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330
    f_lmn: float
        Central frequency of the lmn overtone, as many as number of modes.
    tau_lmn: float
        Damping time of the lmn overtone, as many as number of modes.
    amp220 : float
        Amplitude of the fundamental 220 mode.
    amplmn : float
        Fraction of the amplitude of the lmn overtone relative to the
        fundamental mode, as many as the number of subdominant modes.
    philmn : float
        Phase of the lmn overtone, as many as the number of modes. Should also
        include the information from the azimuthal angle (phi + m*Phi).
    inclination : {None, float}, optional
        Inclination of the system in radians. If None, the spherical harmonics
        will be set to 1.
    delta_t : {None, float}, optional
        The time step used to generate the ringdown.
        If None, it will be set to the inverse of the frequency at which the
        amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
    t_final : {None, float}, optional
        The ending time of the output frequency series.
        If None, it will be set to the time at which the amplitude
        is 1/1000 of the peak amplitude (the maximum of all modes).
    Returns
    -------
    hplus: TimeSeries
        The plus polarization of a ringdown with the lm modes specified and
        n overtones, in the time domain.
    hcross: TimeSeries
        The cross polarization of a ringdown with the lm modes specified and
        n overtones, in the time domain.
    """
    input_params = props(template, freqtau_required_args, **kwargs)
    # Get required args
    f_0, tau = lm_freqs_taus(**input_params)
    lmns = input_params['lmns']
    for lmn in lmns:
        if int(lmn[2]) == 0:
            raise ValueError('Number of overtones (nmodes) must be greater '
                             'than zero.')
    # following may not be in input_params
    inc = input_params.pop('inclination', None)
    delta_t = input_params.pop('delta_t', None)
    t_final = input_params.pop('t_final', None)
    if not delta_t:
        delta_t = lm_deltat(f_0, tau, lmns)
    if not t_final:
        t_final = lm_tfinal(tau, lmns)
    kmax = int(t_final / delta_t) + 1
    # Different overtones will have different tapering window-size
    # Find maximum window size to create long enough output vector
    if taper:
        taper_window = int(taper*max(tau.values())/delta_t)
        kmax += taper_window
    outplus = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t)
    outcross = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t)
    if taper:
        # Shift the epoch so t=0 is the start of the ringdown proper,
        # with the taper ramp occupying negative times.
        start = - taper * max(tau.values())
        outplus._epoch, outcross._epoch = start, start
    for lmn in lmns:
        l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
        hplus, hcross = get_td_lm(freqs=f_0, taus=tau, l=l, m=m, nmodes=nmodes,
                                  taper=taper, inclination=inc, delta_t=delta_t,
                                  t_final=t_final, **input_params)
        # Superpose each (l, m) contribution onto the output vectors
        if not taper:
            outplus.data += hplus.data
            outcross.data += hcross.data
        else:
            outplus = taper_shift(hplus, outplus)
            outcross = taper_shift(hcross, outcross)
    return outplus, outcross | Return time domain ringdown with all the modes specified.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
taper: {None, float}, optional
Tapering at the beginning of the waveform with duration taper * tau.
This option is recommended with timescales taper=1./2 or 1. for
time-domain ringdown-only injections.
The abrupt turn on of the ringdown can cause issues on the waveform
when doing the fourier transform to the frequency domain. Setting
taper will add a rapid ringup with timescale tau/10.
Each mode and overtone will have a different taper depending on its tau,
the final taper being the superposition of all the tapers.
lmns : list
Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55).
The n specifies the number of overtones desired for the corresponding
lm pair (maximum n=8).
Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330
f_lmn: float
Central frequency of the lmn overtone, as many as number of modes.
tau_lmn: float
Damping time of the lmn overtone, as many as number of modes.
amp220 : float
Amplitude of the fundamental 220 mode.
amplmn : float
Fraction of the amplitude of the lmn overtone relative to the
fundamental mode, as many as the number of subdominant modes.
philmn : float
Phase of the lmn overtone, as many as the number of modes. Should also
include the information from the azimuthal angle (phi + m*Phi).
inclination : {None, float}, optional
Inclination of the system in radians. If None, the spherical harmonics
will be set to 1.
delta_t : {None, float}, optional
The time step used to generate the ringdown.
If None, it will be set to the inverse of the frequency at which the
amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
t_final : {None, float}, optional
The ending time of the output frequency series.
If None, it will be set to the time at which the amplitude
is 1/1000 of the peak amplitude (the maximum of all modes).
Returns
-------
hplustilde: FrequencySeries
The plus phase of a ringdown with the lm modes specified and
n overtones in frequency domain.
hcrosstilde: FrequencySeries
The cross phase of a ringdown with the lm modes specified and
        n overtones in frequency domain. | Below is the instruction that describes the task:
### Input:
Return time domain ringdown with all the modes specified.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
taper: {None, float}, optional
Tapering at the beginning of the waveform with duration taper * tau.
This option is recommended with timescales taper=1./2 or 1. for
time-domain ringdown-only injections.
The abrupt turn on of the ringdown can cause issues on the waveform
when doing the fourier transform to the frequency domain. Setting
taper will add a rapid ringup with timescale tau/10.
Each mode and overtone will have a different taper depending on its tau,
the final taper being the superposition of all the tapers.
lmns : list
Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55).
The n specifies the number of overtones desired for the corresponding
lm pair (maximum n=8).
Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330
f_lmn: float
Central frequency of the lmn overtone, as many as number of modes.
tau_lmn: float
Damping time of the lmn overtone, as many as number of modes.
amp220 : float
Amplitude of the fundamental 220 mode.
amplmn : float
Fraction of the amplitude of the lmn overtone relative to the
fundamental mode, as many as the number of subdominant modes.
philmn : float
Phase of the lmn overtone, as many as the number of modes. Should also
include the information from the azimuthal angle (phi + m*Phi).
inclination : {None, float}, optional
Inclination of the system in radians. If None, the spherical harmonics
will be set to 1.
delta_t : {None, float}, optional
The time step used to generate the ringdown.
If None, it will be set to the inverse of the frequency at which the
amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
t_final : {None, float}, optional
The ending time of the output frequency series.
If None, it will be set to the time at which the amplitude
is 1/1000 of the peak amplitude (the maximum of all modes).
Returns
-------
hplustilde: FrequencySeries
The plus phase of a ringdown with the lm modes specified and
n overtones in frequency domain.
hcrosstilde: FrequencySeries
The cross phase of a ringdown with the lm modes specified and
n overtones in frequency domain.
### Response:
def get_td_from_freqtau(template=None, taper=None, **kwargs):
    """Return time domain ringdown with all the modes specified.
    Parameters
    ----------
    template: object
        An object that has attached properties. This can be used to substitute
        for keyword arguments. A common example would be a row in an xml table.
    taper: {None, float}, optional
        Tapering at the beginning of the waveform with duration taper * tau.
        This option is recommended with timescales taper=1./2 or 1. for
        time-domain ringdown-only injections.
        The abrupt turn on of the ringdown can cause issues on the waveform
        when doing the fourier transform to the frequency domain. Setting
        taper will add a rapid ringup with timescale tau/10.
        Each mode and overtone will have a different taper depending on its tau,
        the final taper being the superposition of all the tapers.
    lmns : list
        Desired lmn modes as strings (lm modes available: 22, 21, 33, 44, 55).
        The n specifies the number of overtones desired for the corresponding
        lm pair (maximum n=8).
        Example: lmns = ['223','331'] are the modes 220, 221, 222, and 330
    f_lmn: float
        Central frequency of the lmn overtone, as many as number of modes.
    tau_lmn: float
        Damping time of the lmn overtone, as many as number of modes.
    amp220 : float
        Amplitude of the fundamental 220 mode.
    amplmn : float
        Fraction of the amplitude of the lmn overtone relative to the
        fundamental mode, as many as the number of subdominant modes.
    philmn : float
        Phase of the lmn overtone, as many as the number of modes. Should also
        include the information from the azimuthal angle (phi + m*Phi).
    inclination : {None, float}, optional
        Inclination of the system in radians. If None, the spherical harmonics
        will be set to 1.
    delta_t : {None, float}, optional
        The time step used to generate the ringdown.
        If None, it will be set to the inverse of the frequency at which the
        amplitude is 1/1000 of the peak amplitude (the minimum of all modes).
    t_final : {None, float}, optional
        The ending time of the output time series.
        If None, it will be set to the time at which the amplitude
        is 1/1000 of the peak amplitude (the maximum of all modes).
    Returns
    -------
    hplus: TimeSeries
        The plus polarization of a ringdown with the lm modes specified and
        n overtones, in the time domain (NOTE: despite older docs naming
        these "hplustilde: FrequencySeries", this function returns
        TimeSeries objects).
    hcross: TimeSeries
        The cross polarization of the same ringdown in the time domain.
    """
    input_params = props(template, freqtau_required_args, **kwargs)
    # Get required args
    f_0, tau = lm_freqs_taus(**input_params)
    lmns = input_params['lmns']
    for lmn in lmns:
        if int(lmn[2]) == 0:
            raise ValueError('Number of overtones (nmodes) must be greater '
                             'than zero.')
    # following may not be in input_params
    inc = input_params.pop('inclination', None)
    delta_t = input_params.pop('delta_t', None)
    t_final = input_params.pop('t_final', None)
    # Derive the sampling step / duration from the mode frequencies and
    # damping times when the caller did not fix them explicitly.
    if not delta_t:
        delta_t = lm_deltat(f_0, tau, lmns)
    if not t_final:
        t_final = lm_tfinal(tau, lmns)
    kmax = int(t_final / delta_t) + 1
    # Different overtones will have different tapering window-size
    # Find maximum window size to create long enough output vector
    if taper:
        taper_window = int(taper*max(tau.values())/delta_t)
        kmax += taper_window
    outplus = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t)
    outcross = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t)
    if taper:
        # Shift the epoch back so the tapered ring-up starts before t=0.
        start = - taper * max(tau.values())
        outplus._epoch, outcross._epoch = start, start
    for lmn in lmns:
        l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2])
        hplus, hcross = get_td_lm(freqs=f_0, taus=tau, l=l, m=m, nmodes=nmodes,
                                  taper=taper, inclination=inc, delta_t=delta_t,
                                  t_final=t_final, **input_params)
        # Superpose this mode onto the output; tapered modes need an
        # epoch-aware shifted add instead of a plain array sum.
        if not taper:
            outplus.data += hplus.data
            outcross.data += hcross.data
        else:
            outplus = taper_shift(hplus, outplus)
            outcross = taper_shift(hcross, outcross)
    return outplus, outcross
def user_info(user, host='localhost', **connection_args):
    '''
    Get full info on a MySQL user
    CLI Example:
    .. code-block:: bash
        salt '*' mysql.user_info root localhost
    '''
    dbc = _connect(**connection_args)
    if dbc is None:
        # No usable connection; report failure.
        return False
    cur = dbc.cursor(MySQLdb.cursors.DictCursor)
    qry = ('SELECT * FROM mysql.user WHERE User = %(user)s AND '
           'Host = %(host)s')
    args = {'user': user, 'host': host}
    try:
        _execute(cur, qry, args)
    except MySQLdb.OperationalError as exc:
        err = 'MySQL Error {0}: {1}'.format(*exc.args)
        __context__['mysql.error'] = err
        log.error(err)
        return False
    # DictCursor yields the user row as a dict (or None if absent).
    user_row = cur.fetchone()
    log.debug(user_row)
    return user_row
CLI Example:
.. code-block:: bash
salt '*' mysql.user_info root localhost | Below is the the instruction that describes the task:
### Input:
Get full info on a MySQL user
CLI Example:
.. code-block:: bash
salt '*' mysql.user_info root localhost
### Response:
def user_info(user, host='localhost', **connection_args):
'''
Get full info on a MySQL user
CLI Example:
.. code-block:: bash
salt '*' mysql.user_info root localhost
'''
dbc = _connect(**connection_args)
if dbc is None:
return False
cur = dbc.cursor(MySQLdb.cursors.DictCursor)
qry = ('SELECT * FROM mysql.user WHERE User = %(user)s AND '
'Host = %(host)s')
args = {}
args['user'] = user
args['host'] = host
try:
_execute(cur, qry, args)
except MySQLdb.OperationalError as exc:
err = 'MySQL Error {0}: {1}'.format(*exc.args)
__context__['mysql.error'] = err
log.error(err)
return False
result = cur.fetchone()
log.debug(result)
return result |
def _get_temperature(self, decoded):
    '''Return temperature in celsius'''
    # decoded[2]: low 7 bits are the integer degrees, high bit is the sign;
    # decoded[3] carries hundredths of a degree.
    magnitude = (decoded[2] & 0x7F) + decoded[3] / 100
    is_negative = bool(decoded[2] & 0x80)
    value = -magnitude if is_negative else magnitude
    return round(value, 2)
### Input:
Return temperature in celsius
### Response:
def _get_temperature(self, decoded):
'''Return temperature in celsius'''
temp = (decoded[2] & 127) + decoded[3] / 100
sign = (decoded[2] >> 7) & 1
if sign == 0:
return round(temp, 2)
return round(-1 * temp, 2) |
def start_stop_video(self):
    """Start and stop the video, and change the button.
    """
    if self.parent.info.dataset is None:
        self.parent.statusBar().showMessage('No Dataset Loaded')
        return
    # PyQt inserts '&' accelerator markers into button captions; strip
    # them before matching against the expected label.
    caption = self.idx_button.text().replace('&', '')
    if 'Start' in caption:
        try:
            self.update_video()
        except IndexError as er:
            lg.debug(er)
            self.idx_button.setText('Not Available / Start')
            return
        except OSError as er:
            lg.debug(er)
            self.idx_button.setText('NO VIDEO for this dataset')
            return
        self.idx_button.setText('Stop')
    elif 'Stop' in self.idx_button.text():
        self.idx_button.setText('Start')
        self.medialistplayer.stop()
        self.t.stop()
### Input:
Start and stop the video, and change the button.
### Response:
def start_stop_video(self):
"""Start and stop the video, and change the button.
"""
if self.parent.info.dataset is None:
self.parent.statusBar().showMessage('No Dataset Loaded')
return
# & is added automatically by PyQt, it seems
if 'Start' in self.idx_button.text().replace('&', ''):
try:
self.update_video()
except IndexError as er:
lg.debug(er)
self.idx_button.setText('Not Available / Start')
return
except OSError as er:
lg.debug(er)
self.idx_button.setText('NO VIDEO for this dataset')
return
self.idx_button.setText('Stop')
elif 'Stop' in self.idx_button.text():
self.idx_button.setText('Start')
self.medialistplayer.stop()
self.t.stop() |
def __send_notification(self, message, title, title_link='', color='good',
                        fields='', log_level=LogLv.INFO):
    """Send a message to a channel.
    Args:
        title: Message title.
        title_link: Link of the message title.
        message: Message body.
        color: Message line color on Slack. This parameter should be one of the following values: 'good', 'warning',
            'danger' or any hex color code.
        fields: Extra attachment fields forwarded to the payload builder.
        log_level: Messages below ``self.log_level`` are dropped silently.
    Returns:
        response: Response of Slack API, or None when the message was
            filtered out by the log level.
    Raises:
        Exception: wraps any failure of the HTTP post; the original
            exception is kept as ``__cause__``.
    """
    # Filter out messages below the configured severity threshold.
    if log_level < self.log_level:
        return None
    payload = self.__build_payload(message, title, title_link, color, fields)
    try:
        response = self.__post(payload)
    except Exception as exc:
        # Keep the historical behavior of raising a generic Exception with
        # the full traceback text, but chain the original exception so the
        # root cause is not discarded.
        raise Exception(traceback.format_exc()) from exc
    return response
Args:
title: Message title.
title_link: Link of the message title.
message: Message body.
color: Message line color on Slack. This parameter should be one of the following values: 'good', 'warning',
'danger' or any hex color code.
Returns:
response: Response of Slack API.
Raises:
Exception: | Below is the the instruction that describes the task:
### Input:
Send a message to a channel.
Args:
title: Message title.
title_link: Link of the message title.
message: Message body.
color: Message line color on Slack. This parameter should be one of the following values: 'good', 'warning',
'danger' or any hex color code.
Returns:
response: Response of Slack API.
Raises:
Exception:
### Response:
def __send_notification(self, message, title, title_link='', color='good',
fields='', log_level=LogLv.INFO):
"""Send a message to a channel.
Args:
title: Message title.
title_link: Link of the message title.
message: Message body.
color: Message line color on Slack. This parameter should be one of the following values: 'good', 'warning',
'danger' or any hex color code.
Returns:
response: Response of Slack API.
Raises:
Exception:
"""
if log_level < self.log_level:
return None
payload = self.__build_payload(message, title, title_link, color, fields)
try:
response = self.__post(payload)
except Exception:
raise Exception(traceback.format_exc())
return response |
def downsample(data, percent):
    """
    Downsample the data by removing a given percentage of the reads.

    Args:
        data: genes x cells array or sparse matrix
        percent: float between 0 and 1 - fraction of the total read count
            to remove

    Returns:
        A copy of ``data`` with approximately ``percent`` of the reads
        removed; no entry is allowed to go below zero.
    """
    new_data = data.copy()
    total_count = float(data.sum())
    # multinomial requires an integer trial count; round instead of
    # passing a float (which newer numpy versions reject).
    to_remove = int(round(total_count * percent))
    # sum of read counts per cell
    cell_sums = data.sum(0).astype(float)
    # probability of selecting genes per cell
    cell_gene_probs = data / cell_sums
    # probability of selecting cells
    cell_probs = np.array(cell_sums / total_count).flatten()
    # First decide how many reads each cell loses...
    cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)
    for i, num_selected in enumerate(cells_selected):
        # ...then which genes lose those reads within the cell.
        cell_gene = np.array(cell_gene_probs[:, i]).flatten()
        genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)
        if sparse.issparse(data):
            genes_selected = sparse.csc_matrix(genes_selected).T
        new_data[:, i] -= genes_selected
    new_data[new_data < 0] = 0
    return new_data
Args:
data: genes x cells array or sparse matrix
percent: float between 0 and 1 | Below is the the instruction that describes the task:
### Input:
downsample the data by removing a given percentage of the reads.
Args:
data: genes x cells array or sparse matrix
percent: float between 0 and 1
### Response:
def downsample(data, percent):
"""
downsample the data by removing a given percentage of the reads.
Args:
data: genes x cells array or sparse matrix
percent: float between 0 and 1
"""
n_genes = data.shape[0]
n_cells = data.shape[1]
new_data = data.copy()
total_count = float(data.sum())
to_remove = total_count*percent
# sum of read counts per cell
cell_sums = data.sum(0).astype(float)
# probability of selecting genes per cell
cell_gene_probs = data/cell_sums
# probability of selecting cells
cell_probs = np.array(cell_sums/total_count).flatten()
cells_selected = np.random.multinomial(to_remove, pvals=cell_probs)
for i, num_selected in enumerate(cells_selected):
cell_gene = np.array(cell_gene_probs[:,i]).flatten()
genes_selected = np.random.multinomial(num_selected, pvals=cell_gene)
if sparse.issparse(data):
genes_selected = sparse.csc_matrix(genes_selected).T
new_data[:,i] -= genes_selected
new_data[new_data < 0] = 0
return new_data |
def scale(s, dtype=None):
    """Non-uniform scaling along the x, y, and z axes
    Parameters
    ----------
    s : array-like, shape (3,)
        Scaling in x, y, z.
    dtype : dtype | None
        Output type (if None, don't cast).
    Returns
    -------
    M : ndarray
        4x4 homogeneous transformation matrix describing the scaling.
    """
    assert len(s) == 3
    # Append 1 for the homogeneous coordinate so w is left unscaled.
    diagonal = np.concatenate([s, (1.,)])
    return np.array(np.diag(diagonal), dtype)
Parameters
----------
s : array-like, shape (3,)
Scaling in x, y, z.
dtype : dtype | None
Output type (if None, don't cast).
Returns
-------
M : ndarray
Transformation matrix describing the scaling. | Below is the the instruction that describes the task:
### Input:
Non-uniform scaling along the x, y, and z axes
Parameters
----------
s : array-like, shape (3,)
Scaling in x, y, z.
dtype : dtype | None
Output type (if None, don't cast).
Returns
-------
M : ndarray
Transformation matrix describing the scaling.
### Response:
def scale(s, dtype=None):
"""Non-uniform scaling along the x, y, and z axes
Parameters
----------
s : array-like, shape (3,)
Scaling in x, y, z.
dtype : dtype | None
Output type (if None, don't cast).
Returns
-------
M : ndarray
Transformation matrix describing the scaling.
"""
assert len(s) == 3
return np.array(np.diag(np.concatenate([s, (1.,)])), dtype) |
def expand_composites (properties):
    """ Expand all composite properties in the set so that all components
        are explicitly expressed.

        :param properties: iterable of Property instances to expand
        :return: list of properties with every composite expanded
        :raises FeatureConflict: when two expansions, or an expansion and
            an explicit property, disagree on the value of a non-free
            feature
    """
    if __debug__:
        from .property import Property
        assert is_iterable_typed(properties, Property)
    # Features the caller specified explicitly; expanded values must never
    # silently override these.
    explicit_features = set(p.feature for p in properties)
    result = []
    # now expand composite features
    for p in properties:
        expanded = expand_composite(p)
        for x in expanded:
            if not x in result:
                f = x.feature
                if f.free:
                    # Free features may carry multiple values, so no
                    # conflict check is needed.
                    result.append (x)
                elif not x in properties: # x is the result of expansion
                    if not f in explicit_features: # not explicitly-specified
                        if any(r.feature == f for r in result):
                            # Two composites expanded to different values
                            # of the same non-free feature.
                            raise FeatureConflict(
                                "expansions of composite features result in "
                                "conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" %
                                (f.name, [r.value for r in result if r.feature == f] + [x.value], p))
                        else:
                            result.append (x)
                elif any(r.feature == f for r in result):
                    # The same explicitly-given non-free feature appears
                    # with two different values.
                    raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n"
                                           "existing values: '%s'\nvalue from expanding '%s': '%s'" % (f,
                                           [r.value for r in result if r.feature == f], p, x.value))
                else:
                    result.append (x)
    return result
are explicitly expressed. | Below is the the instruction that describes the task:
### Input:
Expand all composite properties in the set so that all components
are explicitly expressed.
### Response:
def expand_composites (properties):
""" Expand all composite properties in the set so that all components
are explicitly expressed.
"""
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
explicit_features = set(p.feature for p in properties)
result = []
# now expand composite features
for p in properties:
expanded = expand_composite(p)
for x in expanded:
if not x in result:
f = x.feature
if f.free:
result.append (x)
elif not x in properties: # x is the result of expansion
if not f in explicit_features: # not explicitly-specified
if any(r.feature == f for r in result):
raise FeatureConflict(
"expansions of composite features result in "
"conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" %
(f.name, [r.value for r in result if r.feature == f] + [x.value], p))
else:
result.append (x)
elif any(r.feature == f for r in result):
raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n"
"existing values: '%s'\nvalue from expanding '%s': '%s'" % (f,
[r.value for r in result if r.feature == f], p, x.value))
else:
result.append (x)
return result |
def compute_process_sigmas(self, dt, fx=None, **fx_args):
    """
    Propagate the sigma points through the process model, storing the
    results in ``self.sigmas_f``.

    Normally a user would not call this, but it is useful if update must
    be called more than once between calls to predict (to update for
    multiple simultaneous measurements), so the sigmas correctly reflect
    the updated state x, P.
    """
    fx = self.fx if fx is None else fx
    # Regenerate the sigma points from the current mean/covariance and
    # push each one through the (possibly overridden) process function.
    for idx, point in enumerate(self.points_fn.sigma_points(self.x, self.P)):
        self.sigmas_f[idx] = fx(point, dt, **fx_args)
this, but it is useful if you need to call update more than once
between calls to predict (to update for multiple simultaneous
measurements), so the sigmas correctly reflect the updated state
x, P. | Below is the the instruction that describes the task:
### Input:
computes the values of sigmas_f. Normally a user would not call
this, but it is useful if you need to call update more than once
between calls to predict (to update for multiple simultaneous
measurements), so the sigmas correctly reflect the updated state
x, P.
### Response:
def compute_process_sigmas(self, dt, fx=None, **fx_args):
"""
computes the values of sigmas_f. Normally a user would not call
this, but it is useful if you need to call update more than once
between calls to predict (to update for multiple simultaneous
measurements), so the sigmas correctly reflect the updated state
x, P.
"""
if fx is None:
fx = self.fx
# calculate sigma points for given mean and covariance
sigmas = self.points_fn.sigma_points(self.x, self.P)
for i, s in enumerate(sigmas):
self.sigmas_f[i] = fx(s, dt, **fx_args) |
def _get_data(self, fields=None, compare=True):
    """
    Get the changed property, it'll be used to save the object
    If compare is False, then it'll include all data not only changed property

    :param fields: optional whitelist of property names to include
    :param compare: for existing records, only include values that differ
        from the snapshot in ``self._old_values`` when True
    :return: dict of property name -> value ready for the datastore
    """
    fields = fields or []
    if self._key is None or self._key == '' or self._key == 0:
        # New object: no primary key assigned yet, so gather every
        # storable property with defaults filled in.
        d = {}
        for k, v in self.properties.items():
            #test fields
            if fields and k not in fields:
                continue
            # if not isinstance(v, ManyToMany):
            if v.property_type == 'compound':
                continue
            if v.sequence:
                continue
            if not isinstance(v, ManyToMany):
                x = v.get_value_for_datastore(self)
                if isinstance(x, Model):
                    # Store related objects by key, not by value.
                    x = x._key
                elif x is None or (k==self._primary_field and not x):
                    # Unset value: fall back to auto timestamps or the
                    # property's declared default.
                    if isinstance(v, DateTimeProperty) and v.auto_now_add:
                        x = v.now()
                    elif (v.auto_add or (not v.auto and not v.auto_add)):
                        x = v.default_value()
            else:
                x = v.get_value_for_datastore(self, cached=True)
            if x is not None and not x is Lazy:
                d[k] = x
    else:
        # Existing object: always carry the primary key, then add only
        # values that changed since load (unless compare is False).
        d = {}
        d[self._primary_field] = self._key
        for k, v in self.properties.items():
            if fields and k not in fields:
                continue
            if v.property_type == 'compound':
                continue
            t = self._old_values.get(k, None)
            if not isinstance(v, ManyToMany):
                x = v.get_value_for_datastore(self)
                if isinstance(x, Model):
                    x = x._key
            else:
                x = v.get_value_for_datastore(self, cached=True)
            if not x is Lazy:
                # _old_values appears to hold stringified snapshots, hence
                # the field_str() comparison — confirm against the loader.
                if (compare and t != self.field_str(x)) or not compare:
                    d[k] = x
    return d
If compare is False, then it'll include all data not only changed property | Below is the the instruction that describes the task:
### Input:
Get the changed property, it'll be used to save the object
If compare is False, then it'll include all data not only changed property
### Response:
def _get_data(self, fields=None, compare=True):
"""
Get the changed property, it'll be used to save the object
If compare is False, then it'll include all data not only changed property
"""
fields = fields or []
if self._key is None or self._key == '' or self._key == 0:
d = {}
for k, v in self.properties.items():
#test fields
if fields and k not in fields:
continue
# if not isinstance(v, ManyToMany):
if v.property_type == 'compound':
continue
if v.sequence:
continue
if not isinstance(v, ManyToMany):
x = v.get_value_for_datastore(self)
if isinstance(x, Model):
x = x._key
elif x is None or (k==self._primary_field and not x):
if isinstance(v, DateTimeProperty) and v.auto_now_add:
x = v.now()
elif (v.auto_add or (not v.auto and not v.auto_add)):
x = v.default_value()
else:
x = v.get_value_for_datastore(self, cached=True)
if x is not None and not x is Lazy:
d[k] = x
else:
d = {}
d[self._primary_field] = self._key
for k, v in self.properties.items():
if fields and k not in fields:
continue
if v.property_type == 'compound':
continue
t = self._old_values.get(k, None)
if not isinstance(v, ManyToMany):
x = v.get_value_for_datastore(self)
if isinstance(x, Model):
x = x._key
else:
x = v.get_value_for_datastore(self, cached=True)
if not x is Lazy:
if (compare and t != self.field_str(x)) or not compare:
d[k] = x
return d |
def ensure_data():
    '''
    Ensure that the Garuda directory and its package files exist.

    Creates ``GARUDA_DIR`` if needed and touches an empty ``__init__.py``
    inside it so the directory is importable as a package.
    '''
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard.
    os.makedirs(GARUDA_DIR, exist_ok=True)
    Path(f'{GARUDA_DIR}/__init__.py').touch()
### Input:
Ensure that the Garuda directory and files
### Response:
def ensure_data():
'''
Ensure that the Garuda directory and files
'''
if not os.path.exists(GARUDA_DIR):
os.makedirs(GARUDA_DIR)
Path(f'{GARUDA_DIR}/__init__.py').touch() |
def delete_all_config_for_vlan(self, vlan_id, port_profile,
                               trunk_vlans):
    """Top level method to delete all config for vlan_id."""
    for ucsm_ip in list(CONF.ml2_cisco_ucsm.ucsms):
        with self.ucsm_connect_disconnect(ucsm_ip) as handle:
            LOG.debug('Deleting config for VLAN %d from UCSM %s', vlan_id,
                      ucsm_ip)
            if port_profile:
                self._delete_port_profile(handle, port_profile, ucsm_ip)
            ucsm = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
            # Template-based configs and per-service-profile configs are
            # mutually exclusive: only fall back to service profiles when
            # neither template list is configured on this UCSM.
            if ucsm.sp_template_list:
                self._remove_vlan_from_all_sp_templates(handle, vlan_id,
                                                        ucsm_ip)
            if ucsm.vnic_template_list:
                self._remove_vlan_from_vnic_templates(handle, vlan_id,
                                                      ucsm_ip)
            if not (ucsm.sp_template_list and ucsm.vnic_template_list):
                self._remove_vlan_from_all_service_profiles(handle, vlan_id,
                                                            ucsm_ip)
            self._delete_vlan_profile(handle, vlan_id, ucsm_ip)
            if trunk_vlans:
                # Use a distinct name here; the original shadowed vlan_id.
                for trunk_vlan in trunk_vlans:
                    self._delete_vlan_profile(handle, trunk_vlan, ucsm_ip)
### Input:
Top level method to delete all config for vlan_id.
### Response:
def delete_all_config_for_vlan(self, vlan_id, port_profile,
trunk_vlans):
"""Top level method to delete all config for vlan_id."""
ucsm_ips = list(CONF.ml2_cisco_ucsm.ucsms)
for ucsm_ip in ucsm_ips:
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
LOG.debug('Deleting config for VLAN %d from UCSM %s', vlan_id,
ucsm_ip)
if (port_profile):
self._delete_port_profile(handle, port_profile, ucsm_ip)
ucsm = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
if ucsm.sp_template_list:
self._remove_vlan_from_all_sp_templates(handle,
vlan_id,
ucsm_ip)
if ucsm.vnic_template_list:
self._remove_vlan_from_vnic_templates(handle,
vlan_id,
ucsm_ip)
if not (ucsm.sp_template_list and
ucsm.vnic_template_list):
self._remove_vlan_from_all_service_profiles(handle,
vlan_id,
ucsm_ip)
self._delete_vlan_profile(handle, vlan_id, ucsm_ip)
if trunk_vlans:
for vlan_id in trunk_vlans:
self._delete_vlan_profile(handle, vlan_id, ucsm_ip) |
def _init_required_options(self, **kwargs):
    """
    Expose every required option as an instance attribute.

    Each name listed in ``self.required_options`` becomes an attribute
    whose value is taken from ``kwargs`` (``None`` when absent), so the
    options remain editable after the instance has been created; each is
    then recorded via ``_store_option``.
    """
    for option_name in self.required_options:
        setattr(self, option_name, kwargs.get(option_name))
        self._store_option(option_name)
either None or the specified value in the kwargs or __init__. The logic
here is to make the required options accesible to edit after a class
instance has been created. | Below is the the instruction that describes the task:
### Input:
Initialize the required option as class members. The value will be
either None or the specified value in the kwargs or __init__. The logic
here is to make the required options accesible to edit after a class
instance has been created.
### Response:
def _init_required_options(self, **kwargs):
"""
Initialize the required option as class members. The value will be
either None or the specified value in the kwargs or __init__. The logic
here is to make the required options accesible to edit after a class
instance has been created.
"""
for field in self.required_options:
setattr(self, field, kwargs.get(field))
self._store_option(field) |
def reset_frequencies(self, frequency=0):
    """Resets all stored frequencies for the cache
    :keyword int frequency: Frequency to reset to, must be >= 0"""
    if frequency < 0:
        # Clamp: negative frequencies are not meaningful.
        frequency = 0
    for key in self._store:
        stored_value = self._store[key][0]
        self._store[key] = (stored_value, frequency)
    return frequency
:keyword int frequency: Frequency to reset to, must be >= 0 | Below is the the instruction that describes the task:
### Input:
Resets all stored frequencies for the cache
:keyword int frequency: Frequency to reset to, must be >= 0
### Response:
def reset_frequencies(self, frequency=0):
"""Resets all stored frequencies for the cache
:keyword int frequency: Frequency to reset to, must be >= 0"""
frequency = max(frequency, 0)
for key in self._store.keys():
self._store[key] = (self._store[key][0], frequency)
return frequency |
def program_select(self, chan, sfid, bank, preset):
    """Select a program (SoundFont preset) on a channel.

    Thin wrapper delegating to the FluidSynth C binding
    ``fluid_synth_program_select``.

    Args:
        chan: channel number to change the program on.
        sfid: ID of the loaded SoundFont to pick the program from.
        bank: bank number within the SoundFont.
        preset: preset (program) number within the bank.

    Returns:
        The status code returned by the underlying C call.
    """
    return fluid_synth_program_select(self.synth, chan, sfid, bank, preset)
### Input:
Select a program.
### Response:
def program_select(self, chan, sfid, bank, preset):
"""Select a program."""
return fluid_synth_program_select(self.synth, chan, sfid, bank, preset) |
def size(value):
    """ValueRef : number, area of the mark in pixels
    This is the total area of a symbol. For example, a value of 500 and
    a ``shape`` of ``'circle'`` would result in circles with an area of
    500 square pixels. Only used if ``type`` is ``'symbol'``.

    Raises:
        ValueError: if the size is negative.
    """
    # Compare against None explicitly: the previous truthiness test
    # skipped validation entirely for a legitimate size of 0.
    if value.value is not None:
        _assert_is_type('size.value', value.value, int)
        if value.value < 0:
            raise ValueError('size cannot be negative')
This is the total area of a symbol. For example, a value of 500 and
a ``shape`` of ``'circle'`` would result in circles with an area of
500 square pixels. Only used if ``type`` is ``'symbol'``. | Below is the the instruction that describes the task:
### Input:
ValueRef : number, area of the mark in pixels
This is the total area of a symbol. For example, a value of 500 and
a ``shape`` of ``'circle'`` would result in circles with an area of
500 square pixels. Only used if ``type`` is ``'symbol'``.
### Response:
def size(value):
"""ValueRef : number, area of the mark in pixels
This is the total area of a symbol. For example, a value of 500 and
a ``shape`` of ``'circle'`` would result in circles with an area of
500 square pixels. Only used if ``type`` is ``'symbol'``.
"""
if value.value:
_assert_is_type('size.value', value.value, int)
if value.value < 0:
raise ValueError('size cannot be negative') |
def showEvent(self, event):
    """
    Sets the visible state for this widget. If it is the first time this
    widget will be visible, the initialized signal will be emitted.
    :param state | <bool>
    """
    super(XView, self).showEvent(event)
    # Track visibility separately from Qt so other code can know this view
    # WILL be visible once event processing finishes; the visible slot
    # delegation validates signals against this flag.
    self._visibleState = True
    if self.isInitialized():
        # Only after the initial show: announce the visibility change.
        if not self.signalsBlocked():
            self.visibleStateChanged.emit(True)
            QTimer.singleShot(0, self.shown)
    else:
        self.initialize()
widget will be visible, the initialized signal will be emitted.
:param state | <bool> | Below is the the instruction that describes the task:
### Input:
Sets the visible state for this widget. If it is the first time this
widget will be visible, the initialized signal will be emitted.
:param state | <bool>
### Response:
def showEvent(self, event):
"""
Sets the visible state for this widget. If it is the first time this
widget will be visible, the initialized signal will be emitted.
:param state | <bool>
"""
super(XView, self).showEvent(event)
# record the visible state for this widget to be separate of Qt's
# system to know if this view WILL be visible or not once the
# system is done processing. This will affect how signals are
# validated as part of the visible slot delegation
self._visibleState = True
if not self.isInitialized():
self.initialize()
# after the initial time the view is loaded, the visibleStateChanged
# signal will be emitted
elif not self.signalsBlocked():
self.visibleStateChanged.emit(True)
QTimer.singleShot(0, self.shown) |
def add_bigger_box(self):
    """
    Sets the size of the figure by expanding the space of the molecule.svg
    file. These dimensions have been previously determined (stored on
    ``self.molecule``). Also makes the lines of the molecule thicker.

    Side effects: rewrites ``molecule.svg`` in place via
    ``change_lines_in_svg`` and populates ``self.filestart``,
    ``self.draw_molecule`` and ``self.end_symbol``.
    """
    # Original header fragments emitted by the SVG writer, to be replaced.
    start1 = "width='"+str(int(self.molecule.molsize1))+"px' height='"+str(int(self.molecule.molsize2))+"px' >"
    start2 = "<rect style='opacity:1.0;fill:#FFFFFF;stroke:none' width='"+str(int(self.molecule.molsize1))+"' height='"+str(int(self.molecule.molsize2))+"' x='0' y='0'> </rect>"
    # Replacement header: full-size viewbox plus a <g> that centers the
    # molecule drawing within the larger canvas.
    bigger_box ="width='100%' height='100%' viewbox='0 0 "+str(int(self.molecule.x_dim))+" "+str(int(self.molecule.y_dim))+"' > "
    big_box2= "<rect style='opacity:1.0;fill:white;stroke:none' width='"+str(int(self.molecule.x_dim))+"px' height='"+str(int(self.molecule.y_dim))+"px' x='0' y='0'> </rect> <g id='molecularDrawing' transform='translate("+str((self.molecule.x_dim-self.molecule.molsize1)/2)+","+str((self.molecule.y_dim-self.molecule.molsize2)/2)+")'>'<rect style='opacity:1.0;fill:#ffffff;stroke:none' width='"+str(self.molecule.molsize1)+"' height='"+str(self.molecule.molsize2)+"' x='0' y='0' /> "
    self.end_symbol = "</svg>"
    no_end_symbol = "</g>"
    #Make the lines in molecule drawing thicker to look better with the large plots
    linewidth1 = "stroke-width:2px"
    linewidth2 = "stroke-width:5px"
    self.change_lines_in_svg("molecule.svg", linewidth1,linewidth2)
    self.change_lines_in_svg("molecule.svg", start1, bigger_box)
    self.change_lines_in_svg("molecule.svg", start2, big_box2)
    self.change_lines_in_svg("molecule.svg", self.end_symbol, no_end_symbol)
    with open("molecule.svg","r") as f:
        lines = f.readlines()
    # Split the rewritten file: the first 8 lines form the header, the
    # remainder is the drawing body to be embedded in larger plots.
    self.filestart = " ".join(map(str,lines[0:8]))
    self.draw_molecule ="".join(map(str,lines[8:]))
    # NOTE(review): redundant — the `with` block above already closed f.
    f.close()
previously determined. Also makes the lines of the molecule thicker. | Below is the the instruction that describes the task:
### Input:
Sets the size of the figure by expanding the space of molecule.svg file. These dimension have been
previously determined. Also makes the lines of the molecule thicker.
### Response:
def add_bigger_box(self):
"""
Sets the size of the figure by expanding the space of molecule.svg file. These dimension have been
previously determined. Also makes the lines of the molecule thicker.
"""
start1 = "width='"+str(int(self.molecule.molsize1))+"px' height='"+str(int(self.molecule.molsize2))+"px' >"
start2 = "<rect style='opacity:1.0;fill:#FFFFFF;stroke:none' width='"+str(int(self.molecule.molsize1))+"' height='"+str(int(self.molecule.molsize2))+"' x='0' y='0'> </rect>"
bigger_box ="width='100%' height='100%' viewbox='0 0 "+str(int(self.molecule.x_dim))+" "+str(int(self.molecule.y_dim))+"' > "
big_box2= "<rect style='opacity:1.0;fill:white;stroke:none' width='"+str(int(self.molecule.x_dim))+"px' height='"+str(int(self.molecule.y_dim))+"px' x='0' y='0'> </rect> <g id='molecularDrawing' transform='translate("+str((self.molecule.x_dim-self.molecule.molsize1)/2)+","+str((self.molecule.y_dim-self.molecule.molsize2)/2)+")'>'<rect style='opacity:1.0;fill:#ffffff;stroke:none' width='"+str(self.molecule.molsize1)+"' height='"+str(self.molecule.molsize2)+"' x='0' y='0' /> "
self.end_symbol = "</svg>"
no_end_symbol = "</g>"
#Make the lines in molecule drawing thicker to look better with the large plots
linewidth1 = "stroke-width:2px"
linewidth2 = "stroke-width:5px"
self.change_lines_in_svg("molecule.svg", linewidth1,linewidth2)
self.change_lines_in_svg("molecule.svg", start1, bigger_box)
self.change_lines_in_svg("molecule.svg", start2, big_box2)
self.change_lines_in_svg("molecule.svg", self.end_symbol, no_end_symbol)
with open("molecule.svg","r") as f:
lines = f.readlines()
self.filestart = " ".join(map(str,lines[0:8]))
self.draw_molecule ="".join(map(str,lines[8:]))
f.close() |
def close(self):
"""gym api close"""
try:
# Purge last token from head node with <Close> message.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.server, self.port))
self._hello(sock)
comms.send_message(sock, ("<Close>" + self._get_token() + "</Close>").encode())
reply = comms.recv_message(sock)
ok, = struct.unpack('!I', reply)
assert ok
sock.close()
except Exception as e:
self._log_error(e)
if self.client_socket:
self.client_socket.close()
self.client_socket = None | gym api close | Below is the the instruction that describes the task:
### Input:
gym api close
### Response:
def close(self):
"""gym api close"""
try:
# Purge last token from head node with <Close> message.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.server, self.port))
self._hello(sock)
comms.send_message(sock, ("<Close>" + self._get_token() + "</Close>").encode())
reply = comms.recv_message(sock)
ok, = struct.unpack('!I', reply)
assert ok
sock.close()
except Exception as e:
self._log_error(e)
if self.client_socket:
self.client_socket.close()
self.client_socket = None |
def check_all(self, expected=None, line_offset=0):
"""Run all checks on the input file."""
self.report.init_file(self.filename, self.lines, expected, line_offset)
self.total_lines = len(self.lines)
if self._ast_checks:
self.check_ast()
self.line_number = 0
self.indent_char = None
self.indent_level = self.previous_indent_level = 0
self.previous_logical = ''
self.previous_unindented_logical_line = ''
self.tokens = []
self.blank_lines = self.blank_before = 0
parens = 0
for token in self.generate_tokens():
self.tokens.append(token)
token_type, text = token[0:2]
if self.verbose >= 3:
if token[2][0] == token[3][0]:
pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
else:
pos = 'l.%s' % token[3][0]
print('l.%s\t%s\t%s\t%r' %
(token[2][0], pos, tokenize.tok_name[token[0]], text))
if token_type == tokenize.OP:
if text in '([{':
parens += 1
elif text in '}])':
parens -= 1
elif not parens:
if token_type in NEWLINE:
if token_type == tokenize.NEWLINE:
self.check_logical()
self.blank_before = 0
elif len(self.tokens) == 1:
# The physical line contains only this token.
self.blank_lines += 1
del self.tokens[0]
else:
self.check_logical()
elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
if len(self.tokens) == 1:
# The comment also ends a physical line
token = list(token)
token[1] = text.rstrip('\r\n')
token[3] = (token[2][0], token[2][1] + len(token[1]))
self.tokens = [tuple(token)]
self.check_logical()
if self.tokens:
self.check_physical(self.lines[-1])
self.check_logical()
return self.report.get_file_results() | Run all checks on the input file. | Below is the the instruction that describes the task:
### Input:
Run all checks on the input file.
### Response:
def check_all(self, expected=None, line_offset=0):
"""Run all checks on the input file."""
self.report.init_file(self.filename, self.lines, expected, line_offset)
self.total_lines = len(self.lines)
if self._ast_checks:
self.check_ast()
self.line_number = 0
self.indent_char = None
self.indent_level = self.previous_indent_level = 0
self.previous_logical = ''
self.previous_unindented_logical_line = ''
self.tokens = []
self.blank_lines = self.blank_before = 0
parens = 0
for token in self.generate_tokens():
self.tokens.append(token)
token_type, text = token[0:2]
if self.verbose >= 3:
if token[2][0] == token[3][0]:
pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
else:
pos = 'l.%s' % token[3][0]
print('l.%s\t%s\t%s\t%r' %
(token[2][0], pos, tokenize.tok_name[token[0]], text))
if token_type == tokenize.OP:
if text in '([{':
parens += 1
elif text in '}])':
parens -= 1
elif not parens:
if token_type in NEWLINE:
if token_type == tokenize.NEWLINE:
self.check_logical()
self.blank_before = 0
elif len(self.tokens) == 1:
# The physical line contains only this token.
self.blank_lines += 1
del self.tokens[0]
else:
self.check_logical()
elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
if len(self.tokens) == 1:
# The comment also ends a physical line
token = list(token)
token[1] = text.rstrip('\r\n')
token[3] = (token[2][0], token[2][1] + len(token[1]))
self.tokens = [tuple(token)]
self.check_logical()
if self.tokens:
self.check_physical(self.lines[-1])
self.check_logical()
return self.report.get_file_results() |
def facade(projectmainfn, **kwargs):
# (Callable[[None], None], Any) -> None
"""Facade to simplify project setup that calls project main function
Args:
projectmainfn ((None) -> None): main function of project
**kwargs: configuration parameters to pass to HDX Configuration class
Returns:
None
"""
#
# Setting up configuration
#
site_url = Configuration._create(**kwargs)
logger.info('--------------------------------------------------')
logger.info('> Using HDX Python API Library %s' % Configuration.apiversion)
logger.info('> HDX Site: %s' % site_url)
UserAgent.user_agent = Configuration.read().user_agent
projectmainfn() | Facade to simplify project setup that calls project main function
Args:
projectmainfn ((None) -> None): main function of project
**kwargs: configuration parameters to pass to HDX Configuration class
Returns:
None | Below is the the instruction that describes the task:
### Input:
Facade to simplify project setup that calls project main function
Args:
projectmainfn ((None) -> None): main function of project
**kwargs: configuration parameters to pass to HDX Configuration class
Returns:
None
### Response:
def facade(projectmainfn, **kwargs):
# (Callable[[None], None], Any) -> None
"""Facade to simplify project setup that calls project main function
Args:
projectmainfn ((None) -> None): main function of project
**kwargs: configuration parameters to pass to HDX Configuration class
Returns:
None
"""
#
# Setting up configuration
#
site_url = Configuration._create(**kwargs)
logger.info('--------------------------------------------------')
logger.info('> Using HDX Python API Library %s' % Configuration.apiversion)
logger.info('> HDX Site: %s' % site_url)
UserAgent.user_agent = Configuration.read().user_agent
projectmainfn() |
def _validate_class(self, cl):
"""return error if class `cl` is not found in the ontology"""
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num']) | return error if class `cl` is not found in the ontology | Below is the the instruction that describes the task:
### Input:
return error if class `cl` is not found in the ontology
### Response:
def _validate_class(self, cl):
"""return error if class `cl` is not found in the ontology"""
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
err['line'], err['num']) |
def border_pixels_from_mask(mask):
"""Compute a 1D array listing all borders pixel indexes in the masks. A borders pixel is a pixel which:
1) is not fully surrounding by False masks values.
2) Can reach the edge of the array without hitting a masked pixel in one of four directions (upwards, downwards,
left, right).
The borders pixels are thus pixels which are on the exterior edge of the masks. For example, the inner ring of edge \
pixels in an annular masks are edge pixels but not borders pixels."""
edge_pixels = edge_pixels_from_mask(mask)
masked_grid_index_to_pixel = masked_grid_1d_index_to_2d_pixel_index_from_mask(mask)
border_pixel_total = total_border_pixels_from_mask_and_edge_pixels(mask, edge_pixels, masked_grid_index_to_pixel)
border_pixels = np.zeros(border_pixel_total)
border_pixel_index = 0
for edge_pixel_index in range(edge_pixels.shape[0]):
if check_if_border_pixel(mask, edge_pixels[edge_pixel_index], masked_grid_index_to_pixel):
border_pixels[border_pixel_index] = edge_pixels[edge_pixel_index]
border_pixel_index += 1
return border_pixels | Compute a 1D array listing all borders pixel indexes in the masks. A borders pixel is a pixel which:
1) is not fully surrounding by False masks values.
2) Can reach the edge of the array without hitting a masked pixel in one of four directions (upwards, downwards,
left, right).
The borders pixels are thus pixels which are on the exterior edge of the masks. For example, the inner ring of edge \
pixels in an annular masks are edge pixels but not borders pixels. | Below is the the instruction that describes the task:
### Input:
Compute a 1D array listing all borders pixel indexes in the masks. A borders pixel is a pixel which:
1) is not fully surrounding by False masks values.
2) Can reach the edge of the array without hitting a masked pixel in one of four directions (upwards, downwards,
left, right).
The borders pixels are thus pixels which are on the exterior edge of the masks. For example, the inner ring of edge \
pixels in an annular masks are edge pixels but not borders pixels.
### Response:
def border_pixels_from_mask(mask):
"""Compute a 1D array listing all borders pixel indexes in the masks. A borders pixel is a pixel which:
1) is not fully surrounding by False masks values.
2) Can reach the edge of the array without hitting a masked pixel in one of four directions (upwards, downwards,
left, right).
The borders pixels are thus pixels which are on the exterior edge of the masks. For example, the inner ring of edge \
pixels in an annular masks are edge pixels but not borders pixels."""
edge_pixels = edge_pixels_from_mask(mask)
masked_grid_index_to_pixel = masked_grid_1d_index_to_2d_pixel_index_from_mask(mask)
border_pixel_total = total_border_pixels_from_mask_and_edge_pixels(mask, edge_pixels, masked_grid_index_to_pixel)
border_pixels = np.zeros(border_pixel_total)
border_pixel_index = 0
for edge_pixel_index in range(edge_pixels.shape[0]):
if check_if_border_pixel(mask, edge_pixels[edge_pixel_index], masked_grid_index_to_pixel):
border_pixels[border_pixel_index] = edge_pixels[edge_pixel_index]
border_pixel_index += 1
return border_pixels |
def service_absent(name, namespace='default', **kwargs):
'''
Ensures that the named service is absent from the given namespace.
name
The name of the service
namespace
The name of the namespace
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
service = __salt__['kubernetes.show_service'](name, namespace, **kwargs)
if service is None:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The service does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The service is going to be deleted'
ret['result'] = None
return ret
res = __salt__['kubernetes.delete_service'](name, namespace, **kwargs)
if res['code'] == 200:
ret['result'] = True
ret['changes'] = {
'kubernetes.service': {
'new': 'absent', 'old': 'present'}}
ret['comment'] = res['message']
else:
ret['comment'] = 'Something went wrong, response: {0}'.format(res)
return ret | Ensures that the named service is absent from the given namespace.
name
The name of the service
namespace
The name of the namespace | Below is the the instruction that describes the task:
### Input:
Ensures that the named service is absent from the given namespace.
name
The name of the service
namespace
The name of the namespace
### Response:
def service_absent(name, namespace='default', **kwargs):
'''
Ensures that the named service is absent from the given namespace.
name
The name of the service
namespace
The name of the namespace
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
service = __salt__['kubernetes.show_service'](name, namespace, **kwargs)
if service is None:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The service does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The service is going to be deleted'
ret['result'] = None
return ret
res = __salt__['kubernetes.delete_service'](name, namespace, **kwargs)
if res['code'] == 200:
ret['result'] = True
ret['changes'] = {
'kubernetes.service': {
'new': 'absent', 'old': 'present'}}
ret['comment'] = res['message']
else:
ret['comment'] = 'Something went wrong, response: {0}'.format(res)
return ret |
def set_away(self, away=True):
"""
:param away: a boolean of true (away) or false ('home')
:return nothing
This function handles both ecobee and nest thermostats
which use a different field for away/home status.
"""
if self.profile() is not None:
if away:
desired_state = {"profile": "away"}
else:
desired_state = {"profile": "home"}
else:
desired_state = {"users_away": away}
response = self.api_interface.set_device_state(self, {
"desired_state": desired_state
})
self._update_state_from_response(response) | :param away: a boolean of true (away) or false ('home')
:return nothing
This function handles both ecobee and nest thermostats
which use a different field for away/home status. | Below is the the instruction that describes the task:
### Input:
:param away: a boolean of true (away) or false ('home')
:return nothing
This function handles both ecobee and nest thermostats
which use a different field for away/home status.
### Response:
def set_away(self, away=True):
"""
:param away: a boolean of true (away) or false ('home')
:return nothing
This function handles both ecobee and nest thermostats
which use a different field for away/home status.
"""
if self.profile() is not None:
if away:
desired_state = {"profile": "away"}
else:
desired_state = {"profile": "home"}
else:
desired_state = {"users_away": away}
response = self.api_interface.set_device_state(self, {
"desired_state": desired_state
})
self._update_state_from_response(response) |
def eventFilter(self, object, event):
"""
Ignore all events for the text label.
:param object | <QObject>
event | <QEvent>
"""
if object == self._richTextLabel:
if event.type() in (event.MouseButtonPress,
event.MouseMove,
event.MouseButtonRelease,
event.MouseButtonDblClick):
event.ignore()
return True
return False | Ignore all events for the text label.
:param object | <QObject>
event | <QEvent> | Below is the the instruction that describes the task:
### Input:
Ignore all events for the text label.
:param object | <QObject>
event | <QEvent>
### Response:
def eventFilter(self, object, event):
"""
Ignore all events for the text label.
:param object | <QObject>
event | <QEvent>
"""
if object == self._richTextLabel:
if event.type() in (event.MouseButtonPress,
event.MouseMove,
event.MouseButtonRelease,
event.MouseButtonDblClick):
event.ignore()
return True
return False |
def main():
"""
NAME
quick_hyst.py
DESCRIPTION
makes plots of hysteresis data
SYNTAX
quick_hyst.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is measurements.txt
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg]
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
pltspec = ""
verbose = pmagplotlib.verbose
dir_path = pmag.get_named_arg('-WD', '.')
dir_path = os.path.realpath(dir_path)
meas_file = pmag.get_named_arg('-f', 'measurements.txt')
fmt = pmag.get_named_arg('-fmt', 'png')
save_plots = False
interactive = True
if '-sav' in args:
verbose = False
save_plots = True
interactive = False
if '-spc' in args:
ind = args.index("-spc")
pltspec = args[ind+1]
verbose = False
save_plots = True
ipmag.quick_hyst(dir_path, meas_file, save_plots,
interactive, fmt, pltspec, verbose) | NAME
quick_hyst.py
DESCRIPTION
makes plots of hysteresis data
SYNTAX
quick_hyst.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is measurements.txt
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg] | Below is the the instruction that describes the task:
### Input:
NAME
quick_hyst.py
DESCRIPTION
makes plots of hysteresis data
SYNTAX
quick_hyst.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is measurements.txt
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg]
### Response:
def main():
"""
NAME
quick_hyst.py
DESCRIPTION
makes plots of hysteresis data
SYNTAX
quick_hyst.py [command line options]
OPTIONS
-h prints help message and quits
-f: specify input file, default is measurements.txt
-spc SPEC: specify specimen name to plot and quit
-sav save all plots and quit
-fmt [png,svg,eps,jpg]
"""
args = sys.argv
if "-h" in args:
print(main.__doc__)
sys.exit()
pltspec = ""
verbose = pmagplotlib.verbose
dir_path = pmag.get_named_arg('-WD', '.')
dir_path = os.path.realpath(dir_path)
meas_file = pmag.get_named_arg('-f', 'measurements.txt')
fmt = pmag.get_named_arg('-fmt', 'png')
save_plots = False
interactive = True
if '-sav' in args:
verbose = False
save_plots = True
interactive = False
if '-spc' in args:
ind = args.index("-spc")
pltspec = args[ind+1]
verbose = False
save_plots = True
ipmag.quick_hyst(dir_path, meas_file, save_plots,
interactive, fmt, pltspec, verbose) |
def bbox_from_point(point, distance=1000, project_utm=False, return_crs=False):
"""
Create a bounding box some distance in each direction (north, south, east,
and west) from some (lat, lng) point.
Parameters
----------
point : tuple
the (lat, lon) point to create the bounding box around
distance : int
how many meters the north, south, east, and west sides of the box should
each be from the point
project_utm : bool
if True return bbox as UTM coordinates
return_crs : bool
if True and project_utm=True, return the projected CRS
Returns
-------
north, south, east, west : tuple, if return_crs=False
north, south, east, west, crs_proj : tuple, if return_crs=True
"""
# reverse the order of the (lat,lng) point so it is (x,y) for shapely, then
# project to UTM and buffer in meters
lat, lng = point
point_proj, crs_proj = project_geometry(Point((lng, lat)))
buffer_proj = point_proj.buffer(distance)
if project_utm:
west, south, east, north = buffer_proj.bounds
log('Created bounding box {} meters in each direction from {} and projected it: {},{},{},{}'.format(distance, point, north, south, east, west))
else:
# if project_utm is False, project back to lat-long then get the
# bounding coordinates
buffer_latlong, _ = project_geometry(buffer_proj, crs=crs_proj, to_latlong=True)
west, south, east, north = buffer_latlong.bounds
log('Created bounding box {} meters in each direction from {}: {},{},{},{}'.format(distance, point, north, south, east, west))
if return_crs:
return north, south, east, west, crs_proj
else:
return north, south, east, west | Create a bounding box some distance in each direction (north, south, east,
and west) from some (lat, lng) point.
Parameters
----------
point : tuple
the (lat, lon) point to create the bounding box around
distance : int
how many meters the north, south, east, and west sides of the box should
each be from the point
project_utm : bool
if True return bbox as UTM coordinates
return_crs : bool
if True and project_utm=True, return the projected CRS
Returns
-------
north, south, east, west : tuple, if return_crs=False
north, south, east, west, crs_proj : tuple, if return_crs=True | Below is the the instruction that describes the task:
### Input:
Create a bounding box some distance in each direction (north, south, east,
and west) from some (lat, lng) point.
Parameters
----------
point : tuple
the (lat, lon) point to create the bounding box around
distance : int
how many meters the north, south, east, and west sides of the box should
each be from the point
project_utm : bool
if True return bbox as UTM coordinates
return_crs : bool
if True and project_utm=True, return the projected CRS
Returns
-------
north, south, east, west : tuple, if return_crs=False
north, south, east, west, crs_proj : tuple, if return_crs=True
### Response:
def bbox_from_point(point, distance=1000, project_utm=False, return_crs=False):
"""
Create a bounding box some distance in each direction (north, south, east,
and west) from some (lat, lng) point.
Parameters
----------
point : tuple
the (lat, lon) point to create the bounding box around
distance : int
how many meters the north, south, east, and west sides of the box should
each be from the point
project_utm : bool
if True return bbox as UTM coordinates
return_crs : bool
if True and project_utm=True, return the projected CRS
Returns
-------
north, south, east, west : tuple, if return_crs=False
north, south, east, west, crs_proj : tuple, if return_crs=True
"""
# reverse the order of the (lat,lng) point so it is (x,y) for shapely, then
# project to UTM and buffer in meters
lat, lng = point
point_proj, crs_proj = project_geometry(Point((lng, lat)))
buffer_proj = point_proj.buffer(distance)
if project_utm:
west, south, east, north = buffer_proj.bounds
log('Created bounding box {} meters in each direction from {} and projected it: {},{},{},{}'.format(distance, point, north, south, east, west))
else:
# if project_utm is False, project back to lat-long then get the
# bounding coordinates
buffer_latlong, _ = project_geometry(buffer_proj, crs=crs_proj, to_latlong=True)
west, south, east, north = buffer_latlong.bounds
log('Created bounding box {} meters in each direction from {}: {},{},{},{}'.format(distance, point, north, south, east, west))
if return_crs:
return north, south, east, west, crs_proj
else:
return north, south, east, west |
def subscribe(self, sr):
"""Login required. Send POST to subscribe to a subreddit. If ``sr`` is the name of the subreddit, a GET request is sent to retrieve the full id of the subreddit, which is necessary for this API call. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.
URL: ``http://www.reddit.com/api/subscribe/``
:param sr: full id of subreddit or name of subreddit (full id is preferred)
"""
if not sr.startswith('t5_'):
sr = self.subreddit(sr).name
data = dict(action='sub', sr=sr)
j = self.post('api', 'subscribe', data=data)
return assert_truthy(j) | Login required. Send POST to subscribe to a subreddit. If ``sr`` is the name of the subreddit, a GET request is sent to retrieve the full id of the subreddit, which is necessary for this API call. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.
URL: ``http://www.reddit.com/api/subscribe/``
:param sr: full id of subreddit or name of subreddit (full id is preferred) | Below is the the instruction that describes the task:
### Input:
Login required. Send POST to subscribe to a subreddit. If ``sr`` is the name of the subreddit, a GET request is sent to retrieve the full id of the subreddit, which is necessary for this API call. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.
URL: ``http://www.reddit.com/api/subscribe/``
:param sr: full id of subreddit or name of subreddit (full id is preferred)
### Response:
def subscribe(self, sr):
"""Login required. Send POST to subscribe to a subreddit. If ``sr`` is the name of the subreddit, a GET request is sent to retrieve the full id of the subreddit, which is necessary for this API call. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.
URL: ``http://www.reddit.com/api/subscribe/``
:param sr: full id of subreddit or name of subreddit (full id is preferred)
"""
if not sr.startswith('t5_'):
sr = self.subreddit(sr).name
data = dict(action='sub', sr=sr)
j = self.post('api', 'subscribe', data=data)
return assert_truthy(j) |
def wait_for_compute_global_operation(project_name, operation):
"""Poll for global compute operation until finished."""
logger.info("wait_for_compute_global_operation: "
"Waiting for operation {} to finish...".format(
operation["name"]))
for _ in range(MAX_POLLS):
result = compute.globalOperations().get(
project=project_name,
operation=operation["name"],
).execute()
if "error" in result:
raise Exception(result["error"])
if result["status"] == "DONE":
logger.info("wait_for_compute_global_operation: "
"Operation done.")
break
time.sleep(POLL_INTERVAL)
return result | Poll for global compute operation until finished. | Below is the the instruction that describes the task:
### Input:
Poll for global compute operation until finished.
### Response:
def wait_for_compute_global_operation(project_name, operation):
"""Poll for global compute operation until finished."""
logger.info("wait_for_compute_global_operation: "
"Waiting for operation {} to finish...".format(
operation["name"]))
for _ in range(MAX_POLLS):
result = compute.globalOperations().get(
project=project_name,
operation=operation["name"],
).execute()
if "error" in result:
raise Exception(result["error"])
if result["status"] == "DONE":
logger.info("wait_for_compute_global_operation: "
"Operation done.")
break
time.sleep(POLL_INTERVAL)
return result |
def read_data_sets(train_dir, data_type="train"):
"""
Parse or download mnist data if train_dir is empty.
:param: train_dir: The directory storing the mnist data
:param: data_type: Reading training set or testing set.It can be either "train" or "test"
:return:
```
(ndarray, ndarray) representing (features, labels)
features is a 4D unit8 numpy array [index, y, x, depth] representing each pixel valued from 0 to 255.
labels is 1D unit8 nunpy array representing the label valued from 0 to 9.
```
"""
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
if data_type == "train":
local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
SOURCE_URL + TRAIN_IMAGES)
with open(local_file, 'rb') as f:
train_images = extract_images(f)
local_file = base.maybe_download(TRAIN_LABELS, train_dir,
SOURCE_URL + TRAIN_LABELS)
with open(local_file, 'rb') as f:
train_labels = extract_labels(f)
return train_images, train_labels
else:
local_file = base.maybe_download(TEST_IMAGES, train_dir,
SOURCE_URL + TEST_IMAGES)
with open(local_file, 'rb') as f:
test_images = extract_images(f)
local_file = base.maybe_download(TEST_LABELS, train_dir,
SOURCE_URL + TEST_LABELS)
with open(local_file, 'rb') as f:
test_labels = extract_labels(f)
return test_images, test_labels | Parse or download mnist data if train_dir is empty.
:param: train_dir: The directory storing the mnist data
:param: data_type: Reading training set or testing set.It can be either "train" or "test"
:return:
```
(ndarray, ndarray) representing (features, labels)
features is a 4D unit8 numpy array [index, y, x, depth] representing each pixel valued from 0 to 255.
labels is 1D unit8 nunpy array representing the label valued from 0 to 9.
``` | Below is the the instruction that describes the task:
### Input:
Parse or download mnist data if train_dir is empty.
:param: train_dir: The directory storing the mnist data
:param: data_type: Reading training set or testing set.It can be either "train" or "test"
:return:
```
(ndarray, ndarray) representing (features, labels)
features is a 4D unit8 numpy array [index, y, x, depth] representing each pixel valued from 0 to 255.
labels is 1D unit8 nunpy array representing the label valued from 0 to 9.
```
### Response:
def read_data_sets(train_dir, data_type="train"):
"""
Parse or download mnist data if train_dir is empty.
:param: train_dir: The directory storing the mnist data
:param: data_type: Reading training set or testing set.It can be either "train" or "test"
:return:
```
(ndarray, ndarray) representing (features, labels)
features is a 4D unit8 numpy array [index, y, x, depth] representing each pixel valued from 0 to 255.
labels is 1D unit8 nunpy array representing the label valued from 0 to 9.
```
"""
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
if data_type == "train":
local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
SOURCE_URL + TRAIN_IMAGES)
with open(local_file, 'rb') as f:
train_images = extract_images(f)
local_file = base.maybe_download(TRAIN_LABELS, train_dir,
SOURCE_URL + TRAIN_LABELS)
with open(local_file, 'rb') as f:
train_labels = extract_labels(f)
return train_images, train_labels
else:
local_file = base.maybe_download(TEST_IMAGES, train_dir,
SOURCE_URL + TEST_IMAGES)
with open(local_file, 'rb') as f:
test_images = extract_images(f)
local_file = base.maybe_download(TEST_LABELS, train_dir,
SOURCE_URL + TEST_LABELS)
with open(local_file, 'rb') as f:
test_labels = extract_labels(f)
return test_images, test_labels |
def generate_acyclic_graph(self):
"""
Generates an acyclic graph for the given words.
Adds the graph, and a list of edge-word associations to the object.
"""
# Maximum length of each table, respectively.
# Hardcoded n = cm, where c = 3
# There might be a good way to choose an appropriate C,
# but [1] suggests the average amount of iterations needed
# to generate an acyclic graph is sqrt(3).
self.n = 3 * len(self.words)
max_tries = len(self.words) ** 2
for trial in range(max_tries):
try:
self.generate_or_fail()
except forest.InvariantError:
continue
else:
# Generated successfully!
self.trials_taken = trial + 1
return
raise RuntimeError("Could not generate graph in "
"{} tries".format(max_tries)) | Generates an acyclic graph for the given words.
Adds the graph, and a list of edge-word associations to the object. | Below is the the instruction that describes the task:
### Input:
Generates an acyclic graph for the given words.
Adds the graph, and a list of edge-word associations to the object.
### Response:
def generate_acyclic_graph(self):
"""
Generates an acyclic graph for the given words.
Adds the graph, and a list of edge-word associations to the object.
"""
# Maximum length of each table, respectively.
# Hardcoded n = cm, where c = 3
# There might be a good way to choose an appropriate C,
# but [1] suggests the average amount of iterations needed
# to generate an acyclic graph is sqrt(3).
self.n = 3 * len(self.words)
max_tries = len(self.words) ** 2
for trial in range(max_tries):
try:
self.generate_or_fail()
except forest.InvariantError:
continue
else:
# Generated successfully!
self.trials_taken = trial + 1
return
raise RuntimeError("Could not generate graph in "
"{} tries".format(max_tries)) |
def _login(session):
"""Login to Fedex Delivery Manager."""
session.get(LOGIN_REFERER)
resp = session.post(LOGIN_URL, {
'user': session.auth.username,
'pwd': session.auth.password
}, headers={
'Referer': LOGIN_REFERER,
'X-Requested-With': 'XMLHttpRequest'
})
if resp.status_code != 200:
raise FedexError('could not login')
data = resp.json()
if not data['successful']:
raise FedexError(data['errorList'][0]['error']['message'])
_save_cookies(session.cookies, session.auth.cookie_path) | Login to Fedex Delivery Manager. | Below is the the instruction that describes the task:
### Input:
Login to Fedex Delivery Manager.
### Response:
def _login(session):
"""Login to Fedex Delivery Manager."""
session.get(LOGIN_REFERER)
resp = session.post(LOGIN_URL, {
'user': session.auth.username,
'pwd': session.auth.password
}, headers={
'Referer': LOGIN_REFERER,
'X-Requested-With': 'XMLHttpRequest'
})
if resp.status_code != 200:
raise FedexError('could not login')
data = resp.json()
if not data['successful']:
raise FedexError(data['errorList'][0]['error']['message'])
_save_cookies(session.cookies, session.auth.cookie_path) |
def reference_preprocessing(job, config):
"""
Creates a genome fasta index and sequence dictionary file if not already present in the pipeline config.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Pipeline configuration options and shared files.
Requires FileStoreID for genome fasta file as config.genome_fasta
:return: Updated config with reference index files
:rtype: Namespace
"""
job.fileStore.logToMaster('Preparing Reference Files')
genome_id = config.genome_fasta
if getattr(config, 'genome_fai', None) is None:
config.genome_fai = job.addChildJobFn(run_samtools_faidx,
genome_id,
cores=config.cores).rv()
if getattr(config, 'genome_dict', None) is None:
config.genome_dict = job.addChildJobFn(run_picard_create_sequence_dictionary,
genome_id,
cores=config.cores,
memory=config.xmx).rv()
return config | Creates a genome fasta index and sequence dictionary file if not already present in the pipeline config.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Pipeline configuration options and shared files.
Requires FileStoreID for genome fasta file as config.genome_fasta
:return: Updated config with reference index files
:rtype: Namespace | Below is the the instruction that describes the task:
### Input:
Creates a genome fasta index and sequence dictionary file if not already present in the pipeline config.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Pipeline configuration options and shared files.
Requires FileStoreID for genome fasta file as config.genome_fasta
:return: Updated config with reference index files
:rtype: Namespace
### Response:
def reference_preprocessing(job, config):
"""
Creates a genome fasta index and sequence dictionary file if not already present in the pipeline config.
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Pipeline configuration options and shared files.
Requires FileStoreID for genome fasta file as config.genome_fasta
:return: Updated config with reference index files
:rtype: Namespace
"""
job.fileStore.logToMaster('Preparing Reference Files')
genome_id = config.genome_fasta
if getattr(config, 'genome_fai', None) is None:
config.genome_fai = job.addChildJobFn(run_samtools_faidx,
genome_id,
cores=config.cores).rv()
if getattr(config, 'genome_dict', None) is None:
config.genome_dict = job.addChildJobFn(run_picard_create_sequence_dictionary,
genome_id,
cores=config.cores,
memory=config.xmx).rv()
return config |
def _is_inventory_group(key, value):
'''
Verify that a module-level variable (key = value) is a valid inventory group.
'''
if (
key.startswith('_')
or not isinstance(value, (list, tuple, GeneratorType))
):
return False
# If the group is a tuple of (hosts, data), check the hosts
if isinstance(value, tuple):
value = value[0]
# Expand any generators of hosts
if isinstance(value, GeneratorType):
value = list(value)
return all(
isinstance(item, ALLOWED_HOST_TYPES)
for item in value
) | Verify that a module-level variable (key = value) is a valid inventory group. | Below is the the instruction that describes the task:
### Input:
Verify that a module-level variable (key = value) is a valid inventory group.
### Response:
def _is_inventory_group(key, value):
'''
Verify that a module-level variable (key = value) is a valid inventory group.
'''
if (
key.startswith('_')
or not isinstance(value, (list, tuple, GeneratorType))
):
return False
# If the group is a tuple of (hosts, data), check the hosts
if isinstance(value, tuple):
value = value[0]
# Expand any generators of hosts
if isinstance(value, GeneratorType):
value = list(value)
return all(
isinstance(item, ALLOWED_HOST_TYPES)
for item in value
) |
async def set_conversation_notification_level(
self, set_conversation_notification_level_request
):
"""Set the notification level of a conversation."""
response = hangouts_pb2.SetConversationNotificationLevelResponse()
await self._pb_request(
'conversations/setconversationnotificationlevel',
set_conversation_notification_level_request, response
)
return response | Set the notification level of a conversation. | Below is the the instruction that describes the task:
### Input:
Set the notification level of a conversation.
### Response:
async def set_conversation_notification_level(
self, set_conversation_notification_level_request
):
"""Set the notification level of a conversation."""
response = hangouts_pb2.SetConversationNotificationLevelResponse()
await self._pb_request(
'conversations/setconversationnotificationlevel',
set_conversation_notification_level_request, response
)
return response |
def build(self, builder):
"""Build XML by appending to builder"""
params = dict(SoftHard=self.soft_hard.value, Comparator=self.comparator.value)
builder.start("RangeCheck", params)
if self.check_value is not None:
self.check_value.build(builder)
if self.measurement_unit_ref is not None:
self.measurement_unit_ref.build(builder)
builder.end("RangeCheck") | Build XML by appending to builder | Below is the the instruction that describes the task:
### Input:
Build XML by appending to builder
### Response:
def build(self, builder):
"""Build XML by appending to builder"""
params = dict(SoftHard=self.soft_hard.value, Comparator=self.comparator.value)
builder.start("RangeCheck", params)
if self.check_value is not None:
self.check_value.build(builder)
if self.measurement_unit_ref is not None:
self.measurement_unit_ref.build(builder)
builder.end("RangeCheck") |
def background_color(self):
"""Background color."""
if self._has_real():
return self._data.real_background_color
return self._data.background_color | Background color. | Below is the the instruction that describes the task:
### Input:
Background color.
### Response:
def background_color(self):
"""Background color."""
if self._has_real():
return self._data.real_background_color
return self._data.background_color |
def rpoplpush(self, sourcekey, destkey, *, encoding=_NOTSET):
"""Atomically returns and removes the last element (tail) of the
list stored at source, and pushes the element at the first element
(head) of the list stored at destination.
"""
return self.execute(b'RPOPLPUSH', sourcekey, destkey,
encoding=encoding) | Atomically returns and removes the last element (tail) of the
list stored at source, and pushes the element at the first element
(head) of the list stored at destination. | Below is the the instruction that describes the task:
### Input:
Atomically returns and removes the last element (tail) of the
list stored at source, and pushes the element at the first element
(head) of the list stored at destination.
### Response:
def rpoplpush(self, sourcekey, destkey, *, encoding=_NOTSET):
"""Atomically returns and removes the last element (tail) of the
list stored at source, and pushes the element at the first element
(head) of the list stored at destination.
"""
return self.execute(b'RPOPLPUSH', sourcekey, destkey,
encoding=encoding) |
def __get_grants(self, target_file, all_grant_data):
"""
Return grant permission, grant owner, grant owner email and grant id as a list.
It needs you to set k.key to a key on amazon (file path) before running this.
note that Amazon returns a list of grants for each file.
options:
- private: Owner gets FULL_CONTROL. No one else has any access rights.
- public-read: Owners gets FULL_CONTROL and the anonymous principal is granted READ access.
- public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
- authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access
"""
self.k.key = target_file
the_grants = self.k.get_acl().acl.grants
grant_list = []
for grant in the_grants:
if all_grant_data:
grant_list.append(
{"permission": grant.permission, "name": grant.display_name, "email": grant.email_address, "id": grant.id})
else:
grant_list.append({"permission": grant.permission, "name": grant.display_name})
return grant_list | Return grant permission, grant owner, grant owner email and grant id as a list.
It needs you to set k.key to a key on amazon (file path) before running this.
note that Amazon returns a list of grants for each file.
options:
- private: Owner gets FULL_CONTROL. No one else has any access rights.
- public-read: Owners gets FULL_CONTROL and the anonymous principal is granted READ access.
- public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
- authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access | Below is the the instruction that describes the task:
### Input:
Return grant permission, grant owner, grant owner email and grant id as a list.
It needs you to set k.key to a key on amazon (file path) before running this.
note that Amazon returns a list of grants for each file.
options:
- private: Owner gets FULL_CONTROL. No one else has any access rights.
- public-read: Owners gets FULL_CONTROL and the anonymous principal is granted READ access.
- public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
- authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access
### Response:
def __get_grants(self, target_file, all_grant_data):
"""
Return grant permission, grant owner, grant owner email and grant id as a list.
It needs you to set k.key to a key on amazon (file path) before running this.
note that Amazon returns a list of grants for each file.
options:
- private: Owner gets FULL_CONTROL. No one else has any access rights.
- public-read: Owners gets FULL_CONTROL and the anonymous principal is granted READ access.
- public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
- authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access
"""
self.k.key = target_file
the_grants = self.k.get_acl().acl.grants
grant_list = []
for grant in the_grants:
if all_grant_data:
grant_list.append(
{"permission": grant.permission, "name": grant.display_name, "email": grant.email_address, "id": grant.id})
else:
grant_list.append({"permission": grant.permission, "name": grant.display_name})
return grant_list |
def slerp(cls, q0, q1, amount=0.5):
"""Spherical Linear Interpolation between quaternions.
Implemented as described in https://en.wikipedia.org/wiki/Slerp
Find a valid quaternion rotation at a specified distance along the
minor arc of a great circle passing through any two existing quaternion
endpoints lying on the unit radius hypersphere.
This is a class method and is called as a method of the class itself rather than on a particular instance.
Params:
q0: first endpoint rotation as a Quaternion object
q1: second endpoint rotation as a Quaternion object
amount: interpolation parameter between 0 and 1. This describes the linear placement position of
the result along the arc between endpoints; 0 being at `q0` and 1 being at `q1`.
Defaults to the midpoint (0.5).
Returns:
A new Quaternion object representing the interpolated rotation. This is guaranteed to be a unit quaternion.
Note:
This feature only makes sense when interpolating between unit quaternions (those lying on the unit radius hypersphere).
Calling this method will implicitly normalise the endpoints to unit quaternions if they are not already unit length.
"""
# Ensure quaternion inputs are unit quaternions and 0 <= amount <=1
q0._fast_normalise()
q1._fast_normalise()
amount = np.clip(amount, 0, 1)
dot = np.dot(q0.q, q1.q)
# If the dot product is negative, slerp won't take the shorter path.
# Note that v1 and -v1 are equivalent when the negation is applied to all four components.
# Fix by reversing one quaternion
if (dot < 0.0):
q0.q = -q0.q
dot = -dot
# sin_theta_0 can not be zero
if (dot > 0.9995):
qr = Quaternion(q0.q + amount*(q1.q - q0.q))
qr._fast_normalise()
return qr
theta_0 = np.arccos(dot) # Since dot is in range [0, 0.9995], np.arccos() is safe
sin_theta_0 = np.sin(theta_0)
theta = theta_0*amount
sin_theta = np.sin(theta)
s0 = np.cos(theta) - dot * sin_theta / sin_theta_0
s1 = sin_theta / sin_theta_0
qr = Quaternion((s0 * q0.q) + (s1 * q1.q))
qr._fast_normalise()
return qr | Spherical Linear Interpolation between quaternions.
Implemented as described in https://en.wikipedia.org/wiki/Slerp
Find a valid quaternion rotation at a specified distance along the
minor arc of a great circle passing through any two existing quaternion
endpoints lying on the unit radius hypersphere.
This is a class method and is called as a method of the class itself rather than on a particular instance.
Params:
q0: first endpoint rotation as a Quaternion object
q1: second endpoint rotation as a Quaternion object
amount: interpolation parameter between 0 and 1. This describes the linear placement position of
the result along the arc between endpoints; 0 being at `q0` and 1 being at `q1`.
Defaults to the midpoint (0.5).
Returns:
A new Quaternion object representing the interpolated rotation. This is guaranteed to be a unit quaternion.
Note:
This feature only makes sense when interpolating between unit quaternions (those lying on the unit radius hypersphere).
Calling this method will implicitly normalise the endpoints to unit quaternions if they are not already unit length. | Below is the the instruction that describes the task:
### Input:
Spherical Linear Interpolation between quaternions.
Implemented as described in https://en.wikipedia.org/wiki/Slerp
Find a valid quaternion rotation at a specified distance along the
minor arc of a great circle passing through any two existing quaternion
endpoints lying on the unit radius hypersphere.
This is a class method and is called as a method of the class itself rather than on a particular instance.
Params:
q0: first endpoint rotation as a Quaternion object
q1: second endpoint rotation as a Quaternion object
amount: interpolation parameter between 0 and 1. This describes the linear placement position of
the result along the arc between endpoints; 0 being at `q0` and 1 being at `q1`.
Defaults to the midpoint (0.5).
Returns:
A new Quaternion object representing the interpolated rotation. This is guaranteed to be a unit quaternion.
Note:
This feature only makes sense when interpolating between unit quaternions (those lying on the unit radius hypersphere).
Calling this method will implicitly normalise the endpoints to unit quaternions if they are not already unit length.
### Response:
def slerp(cls, q0, q1, amount=0.5):
"""Spherical Linear Interpolation between quaternions.
Implemented as described in https://en.wikipedia.org/wiki/Slerp
Find a valid quaternion rotation at a specified distance along the
minor arc of a great circle passing through any two existing quaternion
endpoints lying on the unit radius hypersphere.
This is a class method and is called as a method of the class itself rather than on a particular instance.
Params:
q0: first endpoint rotation as a Quaternion object
q1: second endpoint rotation as a Quaternion object
amount: interpolation parameter between 0 and 1. This describes the linear placement position of
the result along the arc between endpoints; 0 being at `q0` and 1 being at `q1`.
Defaults to the midpoint (0.5).
Returns:
A new Quaternion object representing the interpolated rotation. This is guaranteed to be a unit quaternion.
Note:
This feature only makes sense when interpolating between unit quaternions (those lying on the unit radius hypersphere).
Calling this method will implicitly normalise the endpoints to unit quaternions if they are not already unit length.
"""
# Ensure quaternion inputs are unit quaternions and 0 <= amount <=1
q0._fast_normalise()
q1._fast_normalise()
amount = np.clip(amount, 0, 1)
dot = np.dot(q0.q, q1.q)
# If the dot product is negative, slerp won't take the shorter path.
# Note that v1 and -v1 are equivalent when the negation is applied to all four components.
# Fix by reversing one quaternion
if (dot < 0.0):
q0.q = -q0.q
dot = -dot
# sin_theta_0 can not be zero
if (dot > 0.9995):
qr = Quaternion(q0.q + amount*(q1.q - q0.q))
qr._fast_normalise()
return qr
theta_0 = np.arccos(dot) # Since dot is in range [0, 0.9995], np.arccos() is safe
sin_theta_0 = np.sin(theta_0)
theta = theta_0*amount
sin_theta = np.sin(theta)
s0 = np.cos(theta) - dot * sin_theta / sin_theta_0
s1 = sin_theta / sin_theta_0
qr = Quaternion((s0 * q0.q) + (s1 * q1.q))
qr._fast_normalise()
return qr |
def replace(self):
"""
Performs conversion to the regular Task object, referenced by the
stored UUID.
"""
replacement = self._tw.tasks.get(uuid=self._uuid)
self.__class__ = replacement.__class__
self.__dict__ = replacement.__dict__ | Performs conversion to the regular Task object, referenced by the
stored UUID. | Below is the the instruction that describes the task:
### Input:
Performs conversion to the regular Task object, referenced by the
stored UUID.
### Response:
def replace(self):
"""
Performs conversion to the regular Task object, referenced by the
stored UUID.
"""
replacement = self._tw.tasks.get(uuid=self._uuid)
self.__class__ = replacement.__class__
self.__dict__ = replacement.__dict__ |
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm')
'''
def compare_dunn(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
A = n * (n + 1.) / 12.
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
z_value = diff / np.sqrt((A - x_ties) * B)
p_value = 2. * ss.norm.sf(np.abs(z_value))
return p_value
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
n = len(x.index)
x_groups_unique = np.unique(x[_group_col])
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = tie_sum / (12. * (n - 1))
vs = np.zeros((x_len, x_len))
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i,j in combs:
vs[i, j] = compare_dunn(x_groups_unique[i], x_groups_unique[j])
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique) | Post hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm') | Below is the the instruction that describes the task:
### Input:
Post hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm')
### Response:
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm')
'''
def compare_dunn(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
A = n * (n + 1.) / 12.
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
z_value = diff / np.sqrt((A - x_ties) * B)
p_value = 2. * ss.norm.sf(np.abs(z_value))
return p_value
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
n = len(x.index)
x_groups_unique = np.unique(x[_group_col])
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = tie_sum / (12. * (n - 1))
vs = np.zeros((x_len, x_len))
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i,j in combs:
vs[i, j] = compare_dunn(x_groups_unique[i], x_groups_unique[j])
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique) |
def build_vep_annotation(csq_info, reference, alternatives, vep_columns):
"""
Build a dictionary with the vep information from the vep annotation.
Indels are handled different by vep depending on the number of
alternative alleles there is for a variant.
If only one alternative:
Insertion: vep represents the alternative by removing the first
base from the vcf alternative.
Deletion: vep represents the alternative with '-'
If there are several alternatives:
Insertion:
vep represents the alternative by removing the first
base from the vcf alternative(Like above).
Deletion:
If there are multiple alternative deletions vep represents them by
removing the first base from the vcf alternative.
If the vcf line looks like:
1 970549 . TGGG TG,TGG
vep annotation for alternatives will be: G,GG
Args:
csq_info (list): A list with the raw vep annotations from the vcf line.
reference (str): A string that represents the vcf reference
alternatives (list): A list of strings that represents the vcf formated
alternatives
vep_columns (list): A list of strings that represents the vep comluns
defined in the vcf header.
Returns:
vep_dict (dict): A dictionary with the alternative alleles (in vcf form)
as keys and a list of annotations for each alternative
alleles.
One key named 'gene_ids',
value is a set with the genes found.
"""
logger = getLogger(__name__)
# The keys in the vep dict are the vcf formatted alternatives, values are the
# dictionaries with vep annotations
vep_dict = {}
# If we have several alternatives we need to check what types of
# alternatives we have
vep_to_vcf = {}
number_of_deletions = 0
for alternative in alternatives:
if len(alternative) < len(reference):
number_of_deletions += 1
logger.debug("Number of deletions found: {0}".format(number_of_deletions))
for alternative in alternatives:
# We store the annotations with keys from the vcf alternatives
vep_dict[alternative] = []
# If substitutuion reference and alternative have the same length
if len(alternative) == len(reference):
vep_to_vcf[alternative] = alternative
# If deletion alternative is shorter that the reference
else:
# If there is a deletion then the alternative will be '-' in vep entry
if len(alternative) == 1:
vep_to_vcf['-'] = alternative
else:
vep_to_vcf[alternative[1:]] = alternative
for vep_annotation in csq_info:
logger.debug("Parsing vep annotation: {0}".format(vep_annotation))
splitted_vep = vep_annotation.split('|')
if len(splitted_vep) != len(vep_columns):
raise SyntaxError("Csq info for variant does not match csq info in "\
"header. {0}, {1}".format(
'|'.join(splitted_vep), '|'.join(vep_columns)))
# Build the vep dict:
vep_info = dict(zip(vep_columns, splitted_vep))
# If no allele is found we can not determine what allele
if vep_info.get('Allele', None):
vep_allele = vep_info['Allele']
try:
vcf_allele = vep_to_vcf[vep_allele]
except KeyError as e:
vcf_allele = vep_allele
if vcf_allele in vep_dict:
vep_dict[vcf_allele].append(vep_info)
else:
vep_dict[vcf_allele] = [vep_info]
else:
logger.warning("No allele found in vep annotation! Skipping annotation")
return vep_dict | Build a dictionary with the vep information from the vep annotation.
Indels are handled different by vep depending on the number of
alternative alleles there is for a variant.
If only one alternative:
Insertion: vep represents the alternative by removing the first
base from the vcf alternative.
Deletion: vep represents the alternative with '-'
If there are several alternatives:
Insertion:
vep represents the alternative by removing the first
base from the vcf alternative(Like above).
Deletion:
If there are multiple alternative deletions vep represents them by
removing the first base from the vcf alternative.
If the vcf line looks like:
1 970549 . TGGG TG,TGG
vep annotation for alternatives will be: G,GG
Args:
csq_info (list): A list with the raw vep annotations from the vcf line.
reference (str): A string that represents the vcf reference
alternatives (list): A list of strings that represents the vcf formated
alternatives
vep_columns (list): A list of strings that represents the vep comluns
defined in the vcf header.
Returns:
vep_dict (dict): A dictionary with the alternative alleles (in vcf form)
as keys and a list of annotations for each alternative
alleles.
One key named 'gene_ids',
value is a set with the genes found. | Below is the the instruction that describes the task:
### Input:
Build a dictionary with the vep information from the vep annotation.
Indels are handled different by vep depending on the number of
alternative alleles there is for a variant.
If only one alternative:
Insertion: vep represents the alternative by removing the first
base from the vcf alternative.
Deletion: vep represents the alternative with '-'
If there are several alternatives:
Insertion:
vep represents the alternative by removing the first
base from the vcf alternative(Like above).
Deletion:
If there are multiple alternative deletions vep represents them by
removing the first base from the vcf alternative.
If the vcf line looks like:
1 970549 . TGGG TG,TGG
vep annotation for alternatives will be: G,GG
Args:
csq_info (list): A list with the raw vep annotations from the vcf line.
reference (str): A string that represents the vcf reference
alternatives (list): A list of strings that represents the vcf formated
alternatives
vep_columns (list): A list of strings that represents the vep comluns
defined in the vcf header.
Returns:
vep_dict (dict): A dictionary with the alternative alleles (in vcf form)
as keys and a list of annotations for each alternative
alleles.
One key named 'gene_ids',
value is a set with the genes found.
### Response:
def build_vep_annotation(csq_info, reference, alternatives, vep_columns):
    """
    Build a dictionary with the vep information from the vep annotation.

    Indels are handled differently by vep depending on the number of
    alternative alleles there is for a variant.

    If only one alternative:
        Insertion: vep represents the alternative by removing the first
                   base from the vcf alternative.
        Deletion:  vep represents the alternative with '-'

    If there are several alternatives:
        Insertion: vep represents the alternative by removing the first
                   base from the vcf alternative (like above).
        Deletion:  if there are multiple alternative deletions vep
                   represents them by removing the first base from the
                   vcf alternative. If the vcf line looks like:
                       1   970549  .   TGGG    TG,TGG
                   the vep annotation for the alternatives will be: G,GG

    Args:
        csq_info (list): A list with the raw vep annotations from the vcf line.
        reference (str): A string that represents the vcf reference.
        alternatives (list): A list of strings that represents the vcf
                             formatted alternatives.
        vep_columns (list): A list of strings that represents the vep columns
                            defined in the vcf header.

    Returns:
        vep_dict (dict): A dictionary with the alternative alleles (in vcf
                         form) as keys and a list of vep annotation dicts
                         for each alternative.
                         NOTE(review): an earlier docstring also promised a
                         'gene_ids' key, but this function never adds one --
                         confirm against callers.
    """
    logger = getLogger(__name__)
    # The keys in the vep dict are the vcf formatted alternatives, values are
    # the dictionaries with vep annotations
    vep_dict = {}
    # vep_to_vcf maps the vep spelling of each allele back to its vcf
    # spelling, so annotations can be filed under the vcf alternative.
    vep_to_vcf = {}
    # Only used for the debug log below; the per-allele translation is
    # decided by the length comparisons in the next loop.
    number_of_deletions = 0
    for alternative in alternatives:
        if len(alternative) < len(reference):
            number_of_deletions += 1
    logger.debug("Number of deletions found: {0}".format(number_of_deletions))
    for alternative in alternatives:
        # We store the annotations with keys from the vcf alternatives
        vep_dict[alternative] = []
        # Substitution: reference and alternative have the same length, so
        # vep uses the vcf spelling unchanged.
        if len(alternative) == len(reference):
            vep_to_vcf[alternative] = alternative
        # Indel: the lengths differ, so vep rewrites the allele.
        else:
            # A single-base alternative is a pure deletion, which vep
            # writes as '-'.
            if len(alternative) == 1:
                vep_to_vcf['-'] = alternative
            else:
                # Otherwise vep drops the shared leading base.
                vep_to_vcf[alternative[1:]] = alternative
    for vep_annotation in csq_info:
        logger.debug("Parsing vep annotation: {0}".format(vep_annotation))
        splitted_vep = vep_annotation.split('|')
        if len(splitted_vep) != len(vep_columns):
            raise SyntaxError("Csq info for variant does not match csq info in "\
                "header. {0}, {1}".format(
                    '|'.join(splitted_vep), '|'.join(vep_columns)))
        # Build the vep dict:
        vep_info = dict(zip(vep_columns, splitted_vep))
        # If no allele is found we can not determine what allele the
        # annotation belongs to, so the entry is skipped.
        if vep_info.get('Allele', None):
            vep_allele = vep_info['Allele']
            try:
                vcf_allele = vep_to_vcf[vep_allele]
            except KeyError as e:
                # Allele spelling we did not predict above; fall back to
                # using the vep spelling itself as the key.
                vcf_allele = vep_allele
            if vcf_allele in vep_dict:
                vep_dict[vcf_allele].append(vep_info)
            else:
                vep_dict[vcf_allele] = [vep_info]
        else:
            logger.warning("No allele found in vep annotation! Skipping annotation")
    return vep_dict |
def wc(filename, contents, parsed=None, is_jekyll=False):
"""Count the words, characters, and paragraphs in a string.
Args:
contents: the original string to count
filename (optional): the filename as provided to the CLI
parsed (optional): a parsed string, expected to be plaintext only
is_jekyll: whether the original contents were from a Jekyll file
Returns:
An object containing the various counts
"""
if is_jekyll:
fmt = 'jekyll'
else:
fmt = 'md/txt'
body = parsed.strip() if parsed else contents.strip()
# Strip the body down to just words
words = re.sub(r'\s+', ' ', body, re.MULTILINE)
for punctuation in INTERSTITIAL_PUNCTUATION:
words = re.sub(punctuation, ' ', words)
punct = re.compile('[^\w\s]', re.U)
words = punct.sub('', words)
# Retrieve only non-space characters
real_characters = re.sub(r'\s', '', words)
# Count paragraphs in an intelligent way
paragraphs = [1 if len(x) == 0 else 0 for x in
contents.strip().splitlines()]
for index, paragraph in enumerate(paragraphs):
if paragraph == 1 and paragraphs[index + 1] == 1:
paragraphs[index] = 0
return {
'counts': {
'file': filename,
'type': fmt,
'paragraphs': sum(paragraphs) + 1,
'words': len(re.split('\s+', words)),
'characters_real': len(real_characters),
'characters_total': len(words),
}
} | Count the words, characters, and paragraphs in a string.
Args:
contents: the original string to count
filename (optional): the filename as provided to the CLI
parsed (optional): a parsed string, expected to be plaintext only
is_jekyll: whether the original contents were from a Jekyll file
Returns:
An object containing the various counts | Below is the instruction that describes the task:
### Input:
Count the words, characters, and paragraphs in a string.
Args:
contents: the original string to count
filename (optional): the filename as provided to the CLI
parsed (optional): a parsed string, expected to be plaintext only
is_jekyll: whether the original contents were from a Jekyll file
Returns:
An object containing the various counts
### Response:
def wc(filename, contents, parsed=None, is_jekyll=False):
"""Count the words, characters, and paragraphs in a string.
Args:
contents: the original string to count
filename (optional): the filename as provided to the CLI
parsed (optional): a parsed string, expected to be plaintext only
is_jekyll: whether the original contents were from a Jekyll file
Returns:
An object containing the various counts
"""
if is_jekyll:
fmt = 'jekyll'
else:
fmt = 'md/txt'
body = parsed.strip() if parsed else contents.strip()
# Strip the body down to just words
words = re.sub(r'\s+', ' ', body, re.MULTILINE)
for punctuation in INTERSTITIAL_PUNCTUATION:
words = re.sub(punctuation, ' ', words)
punct = re.compile('[^\w\s]', re.U)
words = punct.sub('', words)
# Retrieve only non-space characters
real_characters = re.sub(r'\s', '', words)
# Count paragraphs in an intelligent way
paragraphs = [1 if len(x) == 0 else 0 for x in
contents.strip().splitlines()]
for index, paragraph in enumerate(paragraphs):
if paragraph == 1 and paragraphs[index + 1] == 1:
paragraphs[index] = 0
return {
'counts': {
'file': filename,
'type': fmt,
'paragraphs': sum(paragraphs) + 1,
'words': len(re.split('\s+', words)),
'characters_real': len(real_characters),
'characters_total': len(words),
}
} |
def set(self, value):
"""
Sets the value of the string
:param value:
A unicode string
"""
if not isinstance(value, str_cls):
raise TypeError(unwrap(
'''
%s value must be a unicode string, not %s
''',
type_name(self),
type_name(value)
))
self._unicode = value
self.contents = iri_to_uri(value)
self._header = None
if self._trailer != b'':
self._trailer = b'' | Sets the value of the string
:param value:
A unicode string | Below is the instruction that describes the task:
### Input:
Sets the value of the string
:param value:
A unicode string
### Response:
def set(self, value):
"""
Sets the value of the string
:param value:
A unicode string
"""
if not isinstance(value, str_cls):
raise TypeError(unwrap(
'''
%s value must be a unicode string, not %s
''',
type_name(self),
type_name(value)
))
self._unicode = value
self.contents = iri_to_uri(value)
self._header = None
if self._trailer != b'':
self._trailer = b'' |
def bounds_tree(triangles):
"""
Given a list of triangles, create an r-tree for broad- phase
collision detection
Parameters
---------
triangles : (n, 3, 3) float
Triangles in space
Returns
---------
tree : rtree.Rtree
One node per triangle
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
# the (n,6) interleaved bounding box for every triangle
triangle_bounds = np.column_stack((triangles.min(axis=1),
triangles.max(axis=1)))
tree = util.bounds_tree(triangle_bounds)
return tree | Given a list of triangles, create an r-tree for broad- phase
collision detection
Parameters
---------
triangles : (n, 3, 3) float
Triangles in space
Returns
---------
tree : rtree.Rtree
One node per triangle | Below is the instruction that describes the task:
### Input:
Given a list of triangles, create an r-tree for broad- phase
collision detection
Parameters
---------
triangles : (n, 3, 3) float
Triangles in space
Returns
---------
tree : rtree.Rtree
One node per triangle
### Response:
def bounds_tree(triangles):
"""
Given a list of triangles, create an r-tree for broad- phase
collision detection
Parameters
---------
triangles : (n, 3, 3) float
Triangles in space
Returns
---------
tree : rtree.Rtree
One node per triangle
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
# the (n,6) interleaved bounding box for every triangle
triangle_bounds = np.column_stack((triangles.min(axis=1),
triangles.max(axis=1)))
tree = util.bounds_tree(triangle_bounds)
return tree |
def get_cluster_threshold(self):
''' getter '''
if isinstance(self.__cluster_threshold, int) is False:
raise TypeError("The type of __cluster_threshold must be int.")
return self.__cluster_threshold | getter | Below is the the instruction that describes the task:
### Input:
getter
### Response:
def get_cluster_threshold(self):
''' getter '''
if isinstance(self.__cluster_threshold, int) is False:
raise TypeError("The type of __cluster_threshold must be int.")
return self.__cluster_threshold |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.