code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def fill_rect(framebuf, x, y, width, height, color):
"""Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws
both the outline and interior."""
# pylint: disable=too-many-arguments
while height > 0:
index = (y >> 3) * framebuf.stride + x
offset = y & 0x07
for w_w in range(width):
framebuf.buf[index + w_w] = (framebuf.buf[index + w_w] & ~(0x01 << offset)) |\
((color != 0) << offset)
y += 1
height -= 1 | Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws
    both the outline and interior. | Below is the instruction that describes the task:
### Input:
Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws
both the outline and interior.
### Response:
def fill_rect(framebuf, x, y, width, height, color):
"""Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws
both the outline and interior."""
# pylint: disable=too-many-arguments
while height > 0:
index = (y >> 3) * framebuf.stride + x
offset = y & 0x07
for w_w in range(width):
framebuf.buf[index + w_w] = (framebuf.buf[index + w_w] & ~(0x01 << offset)) |\
((color != 0) << offset)
y += 1
height -= 1 |
def cdf(self, y, f, var):
r"""
Cumulative density function of the likelihood.
Parameters
----------
y: ndarray
query quantiles, i.e.\ :math:`P(Y \leq y)`.
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
var: float, ndarray, optional
The variance of the distribution, if not input, the initial value
of variance is used.
Returns
-------
cdf: ndarray
Cumulative density function evaluated at y.
"""
var = self._check_param(var)
return norm.cdf(y, loc=f, scale=np.sqrt(var)) | r"""
Cumulative density function of the likelihood.
Parameters
----------
y: ndarray
query quantiles, i.e.\ :math:`P(Y \leq y)`.
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
var: float, ndarray, optional
The variance of the distribution, if not input, the initial value
of variance is used.
Returns
-------
cdf: ndarray
            Cumulative density function evaluated at y. | Below is the instruction that describes the task:
### Input:
r"""
Cumulative density function of the likelihood.
Parameters
----------
y: ndarray
query quantiles, i.e.\ :math:`P(Y \leq y)`.
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
var: float, ndarray, optional
The variance of the distribution, if not input, the initial value
of variance is used.
Returns
-------
cdf: ndarray
Cumulative density function evaluated at y.
### Response:
def cdf(self, y, f, var):
r"""
Cumulative density function of the likelihood.
Parameters
----------
y: ndarray
query quantiles, i.e.\ :math:`P(Y \leq y)`.
f: ndarray
latent function from the GLM prior (:math:`\mathbf{f} =
\boldsymbol\Phi \mathbf{w}`)
var: float, ndarray, optional
The variance of the distribution, if not input, the initial value
of variance is used.
Returns
-------
cdf: ndarray
Cumulative density function evaluated at y.
"""
var = self._check_param(var)
return norm.cdf(y, loc=f, scale=np.sqrt(var)) |
def get_interval(x, intervals):
"""
finds interval of the interpolation in which x lies.
:param x:
:param intervals: the interpolation intervals
:return:
"""
n = len(intervals)
if n < 2:
return intervals[0]
n2 = n / 2
if x < intervals[n2][0]:
return spline.get_interval(x, intervals[:n2])
else:
return spline.get_interval(x, intervals[n2:]) | finds interval of the interpolation in which x lies.
:param x:
:param intervals: the interpolation intervals
    :return: | Below is the instruction that describes the task:
### Input:
finds interval of the interpolation in which x lies.
:param x:
:param intervals: the interpolation intervals
:return:
### Response:
def get_interval(x, intervals):
"""
finds interval of the interpolation in which x lies.
:param x:
:param intervals: the interpolation intervals
:return:
"""
n = len(intervals)
if n < 2:
return intervals[0]
n2 = n / 2
if x < intervals[n2][0]:
return spline.get_interval(x, intervals[:n2])
else:
return spline.get_interval(x, intervals[n2:]) |
def list_backups(path, limit=None):
'''
.. versionadded:: 0.17.0
Lists the previous versions of a file backed up using Salt's :ref:`file
state backup <file-state-backups>` system.
path
The path on the minion to check for backups
limit
Limit the number of results to the most recent N backups
CLI Example:
.. code-block:: bash
salt '*' file.list_backups /foo/bar/baz.txt
'''
path = os.path.expanduser(path)
try:
limit = int(limit)
except TypeError:
pass
except ValueError:
log.error('file.list_backups: \'limit\' value must be numeric')
limit = None
bkroot = _get_bkroot()
parent_dir, basename = os.path.split(path)
if salt.utils.platform.is_windows():
# ':' is an illegal filesystem path character on Windows
src_dir = parent_dir.replace(':', '_')
else:
src_dir = parent_dir[1:]
# Figure out full path of location of backup file in minion cache
bkdir = os.path.join(bkroot, src_dir)
if not os.path.isdir(bkdir):
return {}
files = {}
for fname in [x for x in os.listdir(bkdir)
if os.path.isfile(os.path.join(bkdir, x))]:
if salt.utils.platform.is_windows():
# ':' is an illegal filesystem path character on Windows
strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(basename)
else:
strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
try:
timestamp = datetime.datetime.strptime(fname, strpfmt)
except ValueError:
# File didn't match the strp format string, so it's not a backup
# for this file. Move on to the next one.
continue
if salt.utils.platform.is_windows():
str_format = '%a %b %d %Y %H-%M-%S.%f'
else:
str_format = '%a %b %d %Y %H:%M:%S.%f'
files.setdefault(timestamp, {})['Backup Time'] = \
timestamp.strftime(str_format)
location = os.path.join(bkdir, fname)
files[timestamp]['Size'] = os.stat(location).st_size
files[timestamp]['Location'] = location
return dict(list(zip(
list(range(len(files))),
[files[x] for x in sorted(files, reverse=True)[:limit]]
))) | .. versionadded:: 0.17.0
Lists the previous versions of a file backed up using Salt's :ref:`file
state backup <file-state-backups>` system.
path
The path on the minion to check for backups
limit
Limit the number of results to the most recent N backups
CLI Example:
.. code-block:: bash
        salt '*' file.list_backups /foo/bar/baz.txt | Below is the instruction that describes the task:
### Input:
.. versionadded:: 0.17.0
Lists the previous versions of a file backed up using Salt's :ref:`file
state backup <file-state-backups>` system.
path
The path on the minion to check for backups
limit
Limit the number of results to the most recent N backups
CLI Example:
.. code-block:: bash
salt '*' file.list_backups /foo/bar/baz.txt
### Response:
def list_backups(path, limit=None):
'''
.. versionadded:: 0.17.0
Lists the previous versions of a file backed up using Salt's :ref:`file
state backup <file-state-backups>` system.
path
The path on the minion to check for backups
limit
Limit the number of results to the most recent N backups
CLI Example:
.. code-block:: bash
salt '*' file.list_backups /foo/bar/baz.txt
'''
path = os.path.expanduser(path)
try:
limit = int(limit)
except TypeError:
pass
except ValueError:
log.error('file.list_backups: \'limit\' value must be numeric')
limit = None
bkroot = _get_bkroot()
parent_dir, basename = os.path.split(path)
if salt.utils.platform.is_windows():
# ':' is an illegal filesystem path character on Windows
src_dir = parent_dir.replace(':', '_')
else:
src_dir = parent_dir[1:]
# Figure out full path of location of backup file in minion cache
bkdir = os.path.join(bkroot, src_dir)
if not os.path.isdir(bkdir):
return {}
files = {}
for fname in [x for x in os.listdir(bkdir)
if os.path.isfile(os.path.join(bkdir, x))]:
if salt.utils.platform.is_windows():
# ':' is an illegal filesystem path character on Windows
strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(basename)
else:
strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
try:
timestamp = datetime.datetime.strptime(fname, strpfmt)
except ValueError:
# File didn't match the strp format string, so it's not a backup
# for this file. Move on to the next one.
continue
if salt.utils.platform.is_windows():
str_format = '%a %b %d %Y %H-%M-%S.%f'
else:
str_format = '%a %b %d %Y %H:%M:%S.%f'
files.setdefault(timestamp, {})['Backup Time'] = \
timestamp.strftime(str_format)
location = os.path.join(bkdir, fname)
files[timestamp]['Size'] = os.stat(location).st_size
files[timestamp]['Location'] = location
return dict(list(zip(
list(range(len(files))),
[files[x] for x in sorted(files, reverse=True)[:limit]]
))) |
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = dict(
version=0,
name=name,
value=value,
port=None,
domain='',
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rest={'HttpOnly': None},
rfc2109=False,)
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return Cookie(**result) | Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie"). | Below is the instruction that describes the task:
### Input:
Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
### Response:
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = dict(
version=0,
name=name,
value=value,
port=None,
domain='',
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rest={'HttpOnly': None},
rfc2109=False,)
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return Cookie(**result) |
def requests(self):
'''return accumulated requests to this bin'''
path = pathjoin(self.path, self.name, Request.path)
response = self.service.send(SRequest('GET', path))
# a bin behaves as a push-down store --- better to return the requests
# in order of appearance
        return list(reversed(Request.from_response(response, bin=self))) | return accumulated requests to this bin | Below is the instruction that describes the task:
### Input:
return accumulated requests to this bin
### Response:
def requests(self):
'''return accumulated requests to this bin'''
path = pathjoin(self.path, self.name, Request.path)
response = self.service.send(SRequest('GET', path))
# a bin behaves as a push-down store --- better to return the requests
# in order of appearance
return list(reversed(Request.from_response(response, bin=self))) |
def select_and_insert(self, name, data):
'''Combines selection and data insertion into one function
Args:
name: the name of the object you want to insert into
data: the data you want to insert
Returns:
None
Raises:
None
'''
self.select_obj(name)
self.insert_into_obj(data) | Combines selection and data insertion into one function
Args:
name: the name of the object you want to insert into
data: the data you want to insert
Returns:
None
Raises:
            None | Below is the instruction that describes the task:
### Input:
Combines selection and data insertion into one function
Args:
name: the name of the object you want to insert into
data: the data you want to insert
Returns:
None
Raises:
None
### Response:
def select_and_insert(self, name, data):
'''Combines selection and data insertion into one function
Args:
name: the name of the object you want to insert into
data: the data you want to insert
Returns:
None
Raises:
None
'''
self.select_obj(name)
self.insert_into_obj(data) |
def run(self):
"""Runs all errors, dependencies and run methods of all *Assistant objects in self.path.
Raises:
devassistant.exceptions.ExecutionException with a cause if something goes wrong
"""
error = None
# run 'pre_run', 'logging', 'dependencies' and 'run'
try: # serve as a central place for error logging
self._logging(self.parsed_args)
if 'deps_only' not in self.parsed_args:
self._run_path_run('pre', self.parsed_args)
self._run_path_dependencies(self.parsed_args)
if 'deps_only' not in self.parsed_args:
self._run_path_run('', self.parsed_args)
except exceptions.ExecutionException as e:
error = self._log_if_not_logged(e)
if isinstance(e, exceptions.YamlError): # if there's a yaml error, just shut down
raise e
# in any case, run post_run
try: # serve as a central place for error logging
self._run_path_run('post', self.parsed_args)
except exceptions.ExecutionException as e:
error = self._log_if_not_logged(e)
# exitfuncs are run all regardless of exceptions; if there is an exception in one
# of them, this function will raise it at the end
try:
utils.run_exitfuncs()
except exceptions.ExecutionException as e:
error = self._log_if_not_logged(e)
if error:
raise error | Runs all errors, dependencies and run methods of all *Assistant objects in self.path.
Raises:
            devassistant.exceptions.ExecutionException with a cause if something goes wrong | Below is the instruction that describes the task:
### Input:
Runs all errors, dependencies and run methods of all *Assistant objects in self.path.
Raises:
devassistant.exceptions.ExecutionException with a cause if something goes wrong
### Response:
def run(self):
"""Runs all errors, dependencies and run methods of all *Assistant objects in self.path.
Raises:
devassistant.exceptions.ExecutionException with a cause if something goes wrong
"""
error = None
# run 'pre_run', 'logging', 'dependencies' and 'run'
try: # serve as a central place for error logging
self._logging(self.parsed_args)
if 'deps_only' not in self.parsed_args:
self._run_path_run('pre', self.parsed_args)
self._run_path_dependencies(self.parsed_args)
if 'deps_only' not in self.parsed_args:
self._run_path_run('', self.parsed_args)
except exceptions.ExecutionException as e:
error = self._log_if_not_logged(e)
if isinstance(e, exceptions.YamlError): # if there's a yaml error, just shut down
raise e
# in any case, run post_run
try: # serve as a central place for error logging
self._run_path_run('post', self.parsed_args)
except exceptions.ExecutionException as e:
error = self._log_if_not_logged(e)
# exitfuncs are run all regardless of exceptions; if there is an exception in one
# of them, this function will raise it at the end
try:
utils.run_exitfuncs()
except exceptions.ExecutionException as e:
error = self._log_if_not_logged(e)
if error:
raise error |
def update_and_reencrypt(self, **kwargs):
"""
Support re-encryption by enforcing that every update triggers a
        new encryption call, even if the original call does not update
the encrypted field.
"""
encrypted_field_name = self.store.model_class.__plaintext__
id_ = kwargs[self.identifier_key]
current_model = self.store.retrieve(id_)
current_value = current_model.plaintext
null_update = (
# Check if the update is for the encrypted field, and if it's explicitly set to null
encrypted_field_name in kwargs
and kwargs.get(encrypted_field_name) is None
)
new_value = kwargs.pop(self.store.model_class.__plaintext__, None)
use_new_value = new_value is not None or null_update
updated_value = new_value if use_new_value else current_value
model_kwargs = {
self.identifier_key: id_,
encrypted_field_name: updated_value,
**kwargs,
}
return super().update(
**model_kwargs,
) | Support re-encryption by enforcing that every update triggers a
        new encryption call, even if the original call does not update
        the encrypted field. | Below is the instruction that describes the task:
### Input:
Support re-encryption by enforcing that every update triggers a
        new encryption call, even if the original call does not update
the encrypted field.
### Response:
def update_and_reencrypt(self, **kwargs):
"""
Support re-encryption by enforcing that every update triggers a
        new encryption call, even if the original call does not update
the encrypted field.
"""
encrypted_field_name = self.store.model_class.__plaintext__
id_ = kwargs[self.identifier_key]
current_model = self.store.retrieve(id_)
current_value = current_model.plaintext
null_update = (
# Check if the update is for the encrypted field, and if it's explicitly set to null
encrypted_field_name in kwargs
and kwargs.get(encrypted_field_name) is None
)
new_value = kwargs.pop(self.store.model_class.__plaintext__, None)
use_new_value = new_value is not None or null_update
updated_value = new_value if use_new_value else current_value
model_kwargs = {
self.identifier_key: id_,
encrypted_field_name: updated_value,
**kwargs,
}
return super().update(
**model_kwargs,
) |
def get_user_by_id(self, id):
"""Retrieve a User object by ID."""
        return self.db_adapter.get_object(self.UserClass, id=id) | Retrieve a User object by ID. | Below is the instruction that describes the task:
### Input:
Retrieve a User object by ID.
### Response:
def get_user_by_id(self, id):
"""Retrieve a User object by ID."""
return self.db_adapter.get_object(self.UserClass, id=id) |
def equal_to(self, key, value):
"""
增加查询条件,查询字段的值必须为指定值。
:param key: 查询条件的字段名
:param value: 查询条件的值
:rtype: Query
"""
self._where[key] = utils.encode(value)
return self | 增加查询条件,查询字段的值必须为指定值。
:param key: 查询条件的字段名
:param value: 查询条件的值
        :rtype: Query | Below is the instruction that describes the task:
### Input:
增加查询条件,查询字段的值必须为指定值。
:param key: 查询条件的字段名
:param value: 查询条件的值
:rtype: Query
### Response:
def equal_to(self, key, value):
"""
增加查询条件,查询字段的值必须为指定值。
:param key: 查询条件的字段名
:param value: 查询条件的值
:rtype: Query
"""
self._where[key] = utils.encode(value)
return self |
def update(self, vips):
"""
Method to update vip's
        :param vips: List containing vip's desired to be updated
:return: None
"""
data = {'vips': vips}
vips_ids = [str(vip.get('id')) for vip in vips]
return super(ApiVipRequest, self).put('api/v3/vip-request/%s/' %
';'.join(vips_ids), data) | Method to update vip's
        :param vips: List containing vip's desired to be updated
        :return: None | Below is the instruction that describes the task:
### Input:
Method to update vip's
        :param vips: List containing vip's desired to be updated
:return: None
### Response:
def update(self, vips):
"""
Method to update vip's
        :param vips: List containing vip's desired to be updated
:return: None
"""
data = {'vips': vips}
vips_ids = [str(vip.get('id')) for vip in vips]
return super(ApiVipRequest, self).put('api/v3/vip-request/%s/' %
';'.join(vips_ids), data) |
def _remove_redundancy(self, log):
"""Removes duplicate data from 'data' inside log dict and brings it
out.
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> log = {'id' : 46846876, 'type' : 'log',
... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
>>> lc._remove_redundancy(log)
{'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
"""
for key in log:
if key in log and key in log['data']:
log[key] = log['data'].pop(key)
return log | Removes duplicate data from 'data' inside log dict and brings it
out.
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> log = {'id' : 46846876, 'type' : 'log',
... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
>>> lc._remove_redundancy(log)
        {'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876} | Below is the instruction that describes the task:
### Input:
Removes duplicate data from 'data' inside log dict and brings it
out.
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> log = {'id' : 46846876, 'type' : 'log',
... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
>>> lc._remove_redundancy(log)
{'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
### Response:
def _remove_redundancy(self, log):
"""Removes duplicate data from 'data' inside log dict and brings it
out.
>>> lc = LogCollector('file=/path/to/log_file.log:formatter=logagg.formatters.basescript', 30)
>>> log = {'id' : 46846876, 'type' : 'log',
... 'data' : {'a' : 1, 'b' : 2, 'type' : 'metric'}}
>>> lc._remove_redundancy(log)
{'data': {'a': 1, 'b': 2}, 'type': 'metric', 'id': 46846876}
"""
for key in log:
if key in log and key in log['data']:
log[key] = log['data'].pop(key)
return log |
def parse_date_input(value):
"""Return datetime based on the user's input.
@param value: User's input
@type value: str
@raise ValueError: If the input is not valid.
@return: Datetime of the beginning of the user's date.
"""
try:
limit = parse_date(value)
except ValueError:
limit = None
if limit is None:
raise ValueError("'{}' is not a valid date.".format(value))
limit = datetime(limit.year, limit.month, limit.day)
if settings.USE_TZ:
limit = make_aware(limit)
return limit | Return datetime based on the user's input.
@param value: User's input
@type value: str
@raise ValueError: If the input is not valid.
    @return: Datetime of the beginning of the user's date. | Below is the instruction that describes the task:
### Input:
Return datetime based on the user's input.
@param value: User's input
@type value: str
@raise ValueError: If the input is not valid.
@return: Datetime of the beginning of the user's date.
### Response:
def parse_date_input(value):
"""Return datetime based on the user's input.
@param value: User's input
@type value: str
@raise ValueError: If the input is not valid.
@return: Datetime of the beginning of the user's date.
"""
try:
limit = parse_date(value)
except ValueError:
limit = None
if limit is None:
raise ValueError("'{}' is not a valid date.".format(value))
limit = datetime(limit.year, limit.month, limit.day)
if settings.USE_TZ:
limit = make_aware(limit)
return limit |
def get_api_client(cls):
"""Get an API client (with configuration)."""
config = cloudsmith_api.Configuration()
client = cls()
client.config = config
client.api_client.rest_client = RestClient()
user_agent = getattr(config, "user_agent", None)
if user_agent:
client.api_client.user_agent = user_agent
headers = getattr(config, "headers", None)
if headers:
for k, v in six.iteritems(headers):
client.api_client.set_default_header(k, v)
        return client | Get an API client (with configuration). | Below is the instruction that describes the task:
### Input:
Get an API client (with configuration).
### Response:
def get_api_client(cls):
"""Get an API client (with configuration)."""
config = cloudsmith_api.Configuration()
client = cls()
client.config = config
client.api_client.rest_client = RestClient()
user_agent = getattr(config, "user_agent", None)
if user_agent:
client.api_client.user_agent = user_agent
headers = getattr(config, "headers", None)
if headers:
for k, v in six.iteritems(headers):
client.api_client.set_default_header(k, v)
return client |
def set_running_std(self, running_std):
"""
Set the running variance of the layer.
Only use this method for a BatchNormalization layer.
:param running_std: a Numpy array.
"""
callBigDlFunc(self.bigdl_type, "setRunningStd",
self.value, JTensor.from_ndarray(running_std))
return self | Set the running variance of the layer.
Only use this method for a BatchNormalization layer.
        :param running_std: a Numpy array. | Below is the instruction that describes the task:
### Input:
Set the running variance of the layer.
Only use this method for a BatchNormalization layer.
:param running_std: a Numpy array.
### Response:
def set_running_std(self, running_std):
"""
Set the running variance of the layer.
Only use this method for a BatchNormalization layer.
:param running_std: a Numpy array.
"""
callBigDlFunc(self.bigdl_type, "setRunningStd",
self.value, JTensor.from_ndarray(running_std))
return self |
def simple_host_process(argslist):
"""Call vamp-simple-host"""
vamp_host = 'vamp-simple-host'
command = [vamp_host]
command.extend(argslist)
# try ?
stdout = subprocess.check_output(command,
stderr=subprocess.STDOUT).splitlines()
    return stdout | Call vamp-simple-host | Below is the instruction that describes the task:
### Input:
Call vamp-simple-host
### Response:
def simple_host_process(argslist):
"""Call vamp-simple-host"""
vamp_host = 'vamp-simple-host'
command = [vamp_host]
command.extend(argslist)
# try ?
stdout = subprocess.check_output(command,
stderr=subprocess.STDOUT).splitlines()
return stdout |
def restart_apppool(name):
'''
Restart an IIS application pool.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS application pool.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.restart_apppool name='MyTestPool'
'''
ps_cmd = ['Restart-WebAppPool', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return cmd_ret['retcode'] == 0 | Restart an IIS application pool.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS application pool.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
        salt '*' win_iis.restart_apppool name='MyTestPool' | Below is the instruction that describes the task:
### Input:
Restart an IIS application pool.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS application pool.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.restart_apppool name='MyTestPool'
### Response:
def restart_apppool(name):
'''
Restart an IIS application pool.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS application pool.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.restart_apppool name='MyTestPool'
'''
ps_cmd = ['Restart-WebAppPool', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return cmd_ret['retcode'] == 0 |
def function_path(func):
"""
This will return the path to the calling function
:param func:
:return:
"""
if getattr(func, 'func_code', None):
return func.__code__.co_filename.replace('\\', '/')
else:
return func.__code__.co_filename.replace('\\', '/') | This will return the path to the calling function
:param func:
    :return: | Below is the instruction that describes the task:
### Input:
This will return the path to the calling function
:param func:
:return:
### Response:
def function_path(func):
"""
This will return the path to the calling function
:param func:
:return:
"""
if getattr(func, 'func_code', None):
return func.__code__.co_filename.replace('\\', '/')
else:
return func.__code__.co_filename.replace('\\', '/') |
def escape_LDAP(ldap_string):
# type: (str) -> str
# pylint: disable=C0103
"""
Escape a string to let it go in an LDAP filter
:param ldap_string: The string to escape
:return: The protected string
"""
if not ldap_string:
# No content
return ldap_string
# Protect escape character previously in the string
assert is_string(ldap_string)
ldap_string = ldap_string.replace(
ESCAPE_CHARACTER, ESCAPE_CHARACTER + ESCAPE_CHARACTER
)
# Leading space
if ldap_string.startswith(" "):
ldap_string = "\\ {0}".format(ldap_string[1:])
# Trailing space
if ldap_string.endswith(" "):
ldap_string = "{0}\\ ".format(ldap_string[:-1])
# Escape other characters
for escaped in ESCAPED_CHARACTERS:
ldap_string = ldap_string.replace(escaped, ESCAPE_CHARACTER + escaped)
return ldap_string | Escape a string to let it go in an LDAP filter
:param ldap_string: The string to escape
    :return: The protected string | Below is the instruction that describes the task:
### Input:
Escape a string to let it go in an LDAP filter
:param ldap_string: The string to escape
:return: The protected string
### Response:
def escape_LDAP(ldap_string):
# type: (str) -> str
# pylint: disable=C0103
"""
Escape a string to let it go in an LDAP filter
:param ldap_string: The string to escape
:return: The protected string
"""
if not ldap_string:
# No content
return ldap_string
# Protect escape character previously in the string
assert is_string(ldap_string)
ldap_string = ldap_string.replace(
ESCAPE_CHARACTER, ESCAPE_CHARACTER + ESCAPE_CHARACTER
)
# Leading space
if ldap_string.startswith(" "):
ldap_string = "\\ {0}".format(ldap_string[1:])
# Trailing space
if ldap_string.endswith(" "):
ldap_string = "{0}\\ ".format(ldap_string[:-1])
# Escape other characters
for escaped in ESCAPED_CHARACTERS:
ldap_string = ldap_string.replace(escaped, ESCAPE_CHARACTER + escaped)
return ldap_string |
def dump_declarations(declarations, file_path):
"""
Dump declarations tree rooted at each of the included nodes to the file
:param declarations: either a single :class:declaration_t object or a list
of :class:declaration_t objects
:param file_path: path to a file
"""
with open(file_path, "w+") as f:
print_declarations(declarations, writer=f.write) | Dump declarations tree rooted at each of the included nodes to the file
:param declarations: either a single :class:declaration_t object or a list
of :class:declaration_t objects
    :param file_path: path to a file | Below is the instruction that describes the task:
### Input:
Dump declarations tree rooted at each of the included nodes to the file
:param declarations: either a single :class:declaration_t object or a list
of :class:declaration_t objects
:param file_path: path to a file
### Response:
def dump_declarations(declarations, file_path):
"""
Dump declarations tree rooted at each of the included nodes to the file
:param declarations: either a single :class:declaration_t object or a list
of :class:declaration_t objects
:param file_path: path to a file
"""
with open(file_path, "w+") as f:
print_declarations(declarations, writer=f.write) |
def getCatalogPixels(self):
"""
Return the catalog pixels spanned by this ROI.
"""
filenames = self.config.getFilenames()
nside_catalog = self.config.params['coords']['nside_catalog']
nside_pixel = self.config.params['coords']['nside_pixel']
# All possible catalog pixels spanned by the ROI
superpix = ugali.utils.skymap.superpixel(self.pixels,nside_pixel,nside_catalog)
superpix = np.unique(superpix)
# Only catalog pixels that exist in catalog files
pixels = np.intersect1d(superpix, filenames['pix'].compressed())
        return pixels | Return the catalog pixels spanned by this ROI. | Below is the instruction that describes the task:
### Input:
Return the catalog pixels spanned by this ROI.
### Response:
def getCatalogPixels(self):
"""
Return the catalog pixels spanned by this ROI.
"""
filenames = self.config.getFilenames()
nside_catalog = self.config.params['coords']['nside_catalog']
nside_pixel = self.config.params['coords']['nside_pixel']
# All possible catalog pixels spanned by the ROI
superpix = ugali.utils.skymap.superpixel(self.pixels,nside_pixel,nside_catalog)
superpix = np.unique(superpix)
# Only catalog pixels that exist in catalog files
pixels = np.intersect1d(superpix, filenames['pix'].compressed())
return pixels |
def get_missing_deps(self, obj):
'''
Returns missing dependencies for provider key.
Missing meaning no instance can be provided at this time.
:param key: Provider key
:type key: object
:return: Missing dependencies
:rtype: list
'''
deps = self.get_deps(obj)
ret = []
for key in deps:
provider = self._providers.get(key)
if provider and provider.providable:
continue
ret.append(key)
return ret | Returns missing dependencies for provider key.
Missing meaning no instance can be provided at this time.
:param key: Provider key
:type key: object
:return: Missing dependencies
        :rtype: list | Below is the instruction that describes the task:
### Input:
Returns missing dependencies for provider key.
Missing meaning no instance can be provided at this time.
:param key: Provider key
:type key: object
:return: Missing dependencies
:rtype: list
### Response:
def get_missing_deps(self, obj):
'''
Returns missing dependencies for provider key.
Missing meaning no instance can be provided at this time.
:param key: Provider key
:type key: object
:return: Missing dependencies
:rtype: list
'''
deps = self.get_deps(obj)
ret = []
for key in deps:
provider = self._providers.get(key)
if provider and provider.providable:
continue
ret.append(key)
return ret |
def start_geoserver(options):
"""
Start GeoServer with GeoNode extensions
"""
from geonode.settings import OGC_SERVER
GEOSERVER_BASE_URL = OGC_SERVER['default']['LOCATION']
url = "http://localhost:8080/geoserver/"
if GEOSERVER_BASE_URL != url:
print 'your GEOSERVER_BASE_URL does not match %s' % url
sys.exit(1)
download_dir = path('downloaded').abspath()
jetty_runner = download_dir / os.path.basename(JETTY_RUNNER_URL)
data_dir = path('geoserver/data').abspath()
web_app = path('geoserver').abspath()
log_file = path('geoserver/jetty.log').abspath()
# @todo - we should not have set workdir to the datadir but a bug in geoserver
# prevents geonode security from initializing correctly otherwise
with pushd(data_dir):
sh(('java -Xmx512m -XX:MaxPermSize=256m'
' -DGEOSERVER_DATA_DIR=%(data_dir)s'
# workaround for JAI sealed jar issue and jetty classloader
' -Dorg.eclipse.jetty.server.webapp.parentLoaderPriority=true'
' -jar %(jetty_runner)s'
' --log %(log_file)s'
' --path /geoserver %(web_app)s'
' > /dev/null &' % locals()
))
info('Starting GeoServer on %s' % url)
# wait for GeoServer to start
started = waitfor(url)
info('The logs are available at %s' % log_file)
if not started:
# If applications did not start in time we will give the user a chance
# to inspect them and stop them manually.
info(('GeoServer never started properly or timed out.'
'It may still be running in the background.'))
sys.exit(1) | Start GeoServer with GeoNode extensions | Below is the the instruction that describes the task:
### Input:
Start GeoServer with GeoNode extensions
### Response:
def start_geoserver(options):
"""
Start GeoServer with GeoNode extensions
"""
from geonode.settings import OGC_SERVER
GEOSERVER_BASE_URL = OGC_SERVER['default']['LOCATION']
url = "http://localhost:8080/geoserver/"
if GEOSERVER_BASE_URL != url:
print 'your GEOSERVER_BASE_URL does not match %s' % url
sys.exit(1)
download_dir = path('downloaded').abspath()
jetty_runner = download_dir / os.path.basename(JETTY_RUNNER_URL)
data_dir = path('geoserver/data').abspath()
web_app = path('geoserver').abspath()
log_file = path('geoserver/jetty.log').abspath()
# @todo - we should not have set workdir to the datadir but a bug in geoserver
# prevents geonode security from initializing correctly otherwise
with pushd(data_dir):
sh(('java -Xmx512m -XX:MaxPermSize=256m'
' -DGEOSERVER_DATA_DIR=%(data_dir)s'
# workaround for JAI sealed jar issue and jetty classloader
' -Dorg.eclipse.jetty.server.webapp.parentLoaderPriority=true'
' -jar %(jetty_runner)s'
' --log %(log_file)s'
' --path /geoserver %(web_app)s'
' > /dev/null &' % locals()
))
info('Starting GeoServer on %s' % url)
# wait for GeoServer to start
started = waitfor(url)
info('The logs are available at %s' % log_file)
if not started:
# If applications did not start in time we will give the user a chance
# to inspect them and stop them manually.
info(('GeoServer never started properly or timed out.'
'It may still be running in the background.'))
sys.exit(1) |
def firehose(self, **params):
"""Stream statuses/firehose
:param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/firehose
"""
url = 'https://stream.twitter.com/%s/statuses/firehose.json' \
% self.streamer.api_version
self.streamer._request(url, params=params) | Stream statuses/firehose
:param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/firehose | Below is the the instruction that describes the task:
### Input:
Stream statuses/firehose
:param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/firehose
### Response:
def firehose(self, **params):
"""Stream statuses/firehose
:param \*\*params: Parameters to send with your stream request
Accepted params found at:
https://dev.twitter.com/docs/api/1.1/get/statuses/firehose
"""
url = 'https://stream.twitter.com/%s/statuses/firehose.json' \
% self.streamer.api_version
self.streamer._request(url, params=params) |
def download(self):
"""Download and extract the tarball, and download each individual photo."""
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
download_url(self.url, self.root, self.filename, self.md5_checksum)
# Extract file
with tarfile.open(os.path.join(self.root, self.filename), 'r:gz') as tar:
tar.extractall(path=self.root)
# Download individual photos
with open(os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, 'dataset'))
except OSError:
# The images point to public images on Flickr.
# Note: Images might be removed by users at anytime.
pass | Download and extract the tarball, and download each individual photo. | Below is the the instruction that describes the task:
### Input:
Download and extract the tarball, and download each individual photo.
### Response:
def download(self):
"""Download and extract the tarball, and download each individual photo."""
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
download_url(self.url, self.root, self.filename, self.md5_checksum)
# Extract file
with tarfile.open(os.path.join(self.root, self.filename), 'r:gz') as tar:
tar.extractall(path=self.root)
# Download individual photos
with open(os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')) as fh:
for line in fh:
url = line.rstrip()
try:
download_url(url, os.path.join(self.root, 'dataset'))
except OSError:
# The images point to public images on Flickr.
# Note: Images might be removed by users at anytime.
pass |
def house_exists(self, complex: str, house: str) -> bool:
"""
Shortcut to check if house exists in our database.
"""
try:
self.check_house(complex, house)
except exceptions.RumetrHouseNotFound:
return False
return True | Shortcut to check if house exists in our database. | Below is the the instruction that describes the task:
### Input:
Shortcut to check if house exists in our database.
### Response:
def house_exists(self, complex: str, house: str) -> bool:
"""
Shortcut to check if house exists in our database.
"""
try:
self.check_house(complex, house)
except exceptions.RumetrHouseNotFound:
return False
return True |
def neval(expression, globals=None, locals=None, **kwargs):
"""Evaluate *expression* using *globals* and *locals* dictionaries as
*global* and *local* namespace. *expression* is transformed using
:class:`.NapiTransformer`."""
try:
import __builtin__ as builtins
except ImportError:
import builtins
from ast import parse
from ast import fix_missing_locations as fml
try:
transformer = kwargs['transformer']
except KeyError:
from napi.transformers import NapiTransformer as transformer
#try:
node = parse(expression, '<string>', 'eval')
#except ImportError:
# builtins.eval(expression)
#else:
if globals is None:
globals = builtins.globals()
if locals is None:
locals = {}
trans = transformer(globals=globals, locals=locals, **kwargs)
trans.visit(node)
code = compile(fml(node), '<string>', 'eval')
return builtins.eval(code, globals, locals) | Evaluate *expression* using *globals* and *locals* dictionaries as
*global* and *local* namespace. *expression* is transformed using
:class:`.NapiTransformer`. | Below is the the instruction that describes the task:
### Input:
Evaluate *expression* using *globals* and *locals* dictionaries as
*global* and *local* namespace. *expression* is transformed using
:class:`.NapiTransformer`.
### Response:
def neval(expression, globals=None, locals=None, **kwargs):
"""Evaluate *expression* using *globals* and *locals* dictionaries as
*global* and *local* namespace. *expression* is transformed using
:class:`.NapiTransformer`."""
try:
import __builtin__ as builtins
except ImportError:
import builtins
from ast import parse
from ast import fix_missing_locations as fml
try:
transformer = kwargs['transformer']
except KeyError:
from napi.transformers import NapiTransformer as transformer
#try:
node = parse(expression, '<string>', 'eval')
#except ImportError:
# builtins.eval(expression)
#else:
if globals is None:
globals = builtins.globals()
if locals is None:
locals = {}
trans = transformer(globals=globals, locals=locals, **kwargs)
trans.visit(node)
code = compile(fml(node), '<string>', 'eval')
return builtins.eval(code, globals, locals) |
def _merge_relative_path(dst_path, rel_path):
"""Merge a relative tar file to a destination (which can be "gs://...")."""
# Convert rel_path to be relative and normalize it to remove ".", "..", "//",
# which are valid directories in fileystems like "gs://".
norm_rel_path = os.path.normpath(rel_path.lstrip("/"))
if norm_rel_path == ".":
return dst_path
# Check that the norm rel path does not starts with "..".
if norm_rel_path.startswith(".."):
raise ValueError("Relative path %r is invalid." % rel_path)
merged = os.path.join(dst_path, norm_rel_path)
# After merging verify that the merged path keeps the original dst_path.
if not merged.startswith(dst_path):
raise ValueError("Relative path %r is invalid. Failed to merge with %r." % (
rel_path, dst_path))
return merged | Merge a relative tar file to a destination (which can be "gs://..."). | Below is the the instruction that describes the task:
### Input:
Merge a relative tar file to a destination (which can be "gs://...").
### Response:
def _merge_relative_path(dst_path, rel_path):
"""Merge a relative tar file to a destination (which can be "gs://...")."""
# Convert rel_path to be relative and normalize it to remove ".", "..", "//",
# which are valid directories in fileystems like "gs://".
norm_rel_path = os.path.normpath(rel_path.lstrip("/"))
if norm_rel_path == ".":
return dst_path
# Check that the norm rel path does not starts with "..".
if norm_rel_path.startswith(".."):
raise ValueError("Relative path %r is invalid." % rel_path)
merged = os.path.join(dst_path, norm_rel_path)
# After merging verify that the merged path keeps the original dst_path.
if not merged.startswith(dst_path):
raise ValueError("Relative path %r is invalid. Failed to merge with %r." % (
rel_path, dst_path))
return merged |
def _set_logger(self, name=None):
"""Adds a logger with a given `name`.
If no name is given, name is constructed as
`type(self).__name__`.
"""
if name is None:
cls = self.__class__
name = '%s.%s' % (cls.__module__, cls.__name__)
self._logger = logging.getLogger(name) | Adds a logger with a given `name`.
If no name is given, name is constructed as
`type(self).__name__`. | Below is the the instruction that describes the task:
### Input:
Adds a logger with a given `name`.
If no name is given, name is constructed as
`type(self).__name__`.
### Response:
def _set_logger(self, name=None):
"""Adds a logger with a given `name`.
If no name is given, name is constructed as
`type(self).__name__`.
"""
if name is None:
cls = self.__class__
name = '%s.%s' % (cls.__module__, cls.__name__)
self._logger = logging.getLogger(name) |
def distcheck(appname='',version='',subdir=''):
'''checks if the sources compile (tarball from 'dist')'''
import tempfile,tarfile
if not appname:appname=Utils.g_module.APPNAME
if not version:version=Utils.g_module.VERSION
waf=os.path.abspath(sys.argv[0])
tarball=dist(appname,version)
path=appname+'-'+version
if os.path.exists(path):
shutil.rmtree(path)
t=tarfile.open(tarball)
for x in t:t.extract(x)
t.close()
if subdir:
build_path=os.path.join(path,subdir)
else:
build_path=path
instdir=tempfile.mkdtemp('.inst','%s-%s'%(appname,version))
ret=Utils.pproc.Popen([waf,'configure','build','install','uninstall','--destdir='+instdir],cwd=build_path).wait()
if ret:
raise Utils.WafError('distcheck failed with code %i'%ret)
if os.path.exists(instdir):
raise Utils.WafError('distcheck succeeded, but files were left in %s'%instdir)
shutil.rmtree(path) | checks if the sources compile (tarball from 'dist') | Below is the the instruction that describes the task:
### Input:
checks if the sources compile (tarball from 'dist')
### Response:
def distcheck(appname='',version='',subdir=''):
'''checks if the sources compile (tarball from 'dist')'''
import tempfile,tarfile
if not appname:appname=Utils.g_module.APPNAME
if not version:version=Utils.g_module.VERSION
waf=os.path.abspath(sys.argv[0])
tarball=dist(appname,version)
path=appname+'-'+version
if os.path.exists(path):
shutil.rmtree(path)
t=tarfile.open(tarball)
for x in t:t.extract(x)
t.close()
if subdir:
build_path=os.path.join(path,subdir)
else:
build_path=path
instdir=tempfile.mkdtemp('.inst','%s-%s'%(appname,version))
ret=Utils.pproc.Popen([waf,'configure','build','install','uninstall','--destdir='+instdir],cwd=build_path).wait()
if ret:
raise Utils.WafError('distcheck failed with code %i'%ret)
if os.path.exists(instdir):
raise Utils.WafError('distcheck succeeded, but files were left in %s'%instdir)
shutil.rmtree(path) |
def parser(self):
"""Create the argparser_ for this configuration by adding all
settings via the :meth:`Setting.add_argument` method.
:rtype: an instance of :class:`ArgumentParser`.
"""
parser = argparse.ArgumentParser(description=self.description,
epilog=self.epilog)
parser.add_argument('--version',
action='version',
version=self.version)
return self.add_to_parser(parser) | Create the argparser_ for this configuration by adding all
settings via the :meth:`Setting.add_argument` method.
:rtype: an instance of :class:`ArgumentParser`. | Below is the the instruction that describes the task:
### Input:
Create the argparser_ for this configuration by adding all
settings via the :meth:`Setting.add_argument` method.
:rtype: an instance of :class:`ArgumentParser`.
### Response:
def parser(self):
"""Create the argparser_ for this configuration by adding all
settings via the :meth:`Setting.add_argument` method.
:rtype: an instance of :class:`ArgumentParser`.
"""
parser = argparse.ArgumentParser(description=self.description,
epilog=self.epilog)
parser.add_argument('--version',
action='version',
version=self.version)
return self.add_to_parser(parser) |
def all_leaders_from(self, leaderboard_name, **options):
'''
Retrieves all leaders from the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param options [Hash] Options to be used when retrieving the leaders from the named leaderboard.
@return the named leaderboard.
'''
raw_leader_data = self._range_method(
self.redis_connection, leaderboard_name, 0, -1, withscores=False)
return self._parse_raw_members(
leaderboard_name, raw_leader_data, **options) | Retrieves all leaders from the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param options [Hash] Options to be used when retrieving the leaders from the named leaderboard.
@return the named leaderboard. | Below is the the instruction that describes the task:
### Input:
Retrieves all leaders from the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param options [Hash] Options to be used when retrieving the leaders from the named leaderboard.
@return the named leaderboard.
### Response:
def all_leaders_from(self, leaderboard_name, **options):
'''
Retrieves all leaders from the named leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param options [Hash] Options to be used when retrieving the leaders from the named leaderboard.
@return the named leaderboard.
'''
raw_leader_data = self._range_method(
self.redis_connection, leaderboard_name, 0, -1, withscores=False)
return self._parse_raw_members(
leaderboard_name, raw_leader_data, **options) |
def rgb_to_hsv(rgb):
"""Input RGB image [0~255] return HSV image [0~1].
Parameters
------------
rgb : numpy.array
An image with values between 0 and 255.
Returns
-------
numpy.array
A processed image.
"""
# Translated from source of colorsys.rgb_to_hsv
# r,g,b should be a numpy arrays with values between 0 and 255
# rgb_to_hsv returns an array of floats between 0.0 and 1.0.
rgb = rgb.astype('float')
hsv = np.zeros_like(rgb)
# in case an RGBA array was passed, just copy the A channel
hsv[..., 3:] = rgb[..., 3:]
r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
maxc = np.max(rgb[..., :3], axis=-1)
minc = np.min(rgb[..., :3], axis=-1)
hsv[..., 2] = maxc
mask = maxc != minc
hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]
rc = np.zeros_like(r)
gc = np.zeros_like(g)
bc = np.zeros_like(b)
rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]
gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]
bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]
hsv[..., 0] = np.select([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)
hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
return hsv | Input RGB image [0~255] return HSV image [0~1].
Parameters
------------
rgb : numpy.array
An image with values between 0 and 255.
Returns
-------
numpy.array
A processed image. | Below is the the instruction that describes the task:
### Input:
Input RGB image [0~255] return HSV image [0~1].
Parameters
------------
rgb : numpy.array
An image with values between 0 and 255.
Returns
-------
numpy.array
A processed image.
### Response:
def rgb_to_hsv(rgb):
"""Input RGB image [0~255] return HSV image [0~1].
Parameters
------------
rgb : numpy.array
An image with values between 0 and 255.
Returns
-------
numpy.array
A processed image.
"""
# Translated from source of colorsys.rgb_to_hsv
# r,g,b should be a numpy arrays with values between 0 and 255
# rgb_to_hsv returns an array of floats between 0.0 and 1.0.
rgb = rgb.astype('float')
hsv = np.zeros_like(rgb)
# in case an RGBA array was passed, just copy the A channel
hsv[..., 3:] = rgb[..., 3:]
r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
maxc = np.max(rgb[..., :3], axis=-1)
minc = np.min(rgb[..., :3], axis=-1)
hsv[..., 2] = maxc
mask = maxc != minc
hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]
rc = np.zeros_like(r)
gc = np.zeros_like(g)
bc = np.zeros_like(b)
rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]
gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]
bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]
hsv[..., 0] = np.select([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)
hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
return hsv |
def get_arguments():
"""
This get us the cli arguments.
Returns the args as parsed from the argsparser.
"""
# https://docs.python.org/3/library/argparse.html
parser = argparse.ArgumentParser(
description='Handles bumping of the artifact version')
parser.add_argument('--log-config',
'-l',
action='store',
dest='logger_config',
help='The location of the logging config json file',
default='')
parser.add_argument('--log-level',
'-L',
help='Provide the log level. Defaults to INFO.',
dest='log_level',
action='store',
default='INFO',
choices=['DEBUG',
'INFO',
'WARNING',
'ERROR',
'CRITICAL'])
parser.add_argument('--major',
help='Bump the major version',
dest='bump_major',
action='store_true',
default=False)
parser.add_argument('--minor',
help='Bump the minor version',
dest='bump_minor',
action='store_true',
default=False)
parser.add_argument('--patch',
help='Bump the patch version',
dest='bump_patch',
action='store_true',
default=False)
parser.add_argument('--version',
help='Set the version',
dest='version',
action='store',
default=False)
args = parser.parse_args()
return args | This get us the cli arguments.
Returns the args as parsed from the argsparser. | Below is the the instruction that describes the task:
### Input:
This get us the cli arguments.
Returns the args as parsed from the argsparser.
### Response:
def get_arguments():
"""
This get us the cli arguments.
Returns the args as parsed from the argsparser.
"""
# https://docs.python.org/3/library/argparse.html
parser = argparse.ArgumentParser(
description='Handles bumping of the artifact version')
parser.add_argument('--log-config',
'-l',
action='store',
dest='logger_config',
help='The location of the logging config json file',
default='')
parser.add_argument('--log-level',
'-L',
help='Provide the log level. Defaults to INFO.',
dest='log_level',
action='store',
default='INFO',
choices=['DEBUG',
'INFO',
'WARNING',
'ERROR',
'CRITICAL'])
parser.add_argument('--major',
help='Bump the major version',
dest='bump_major',
action='store_true',
default=False)
parser.add_argument('--minor',
help='Bump the minor version',
dest='bump_minor',
action='store_true',
default=False)
parser.add_argument('--patch',
help='Bump the patch version',
dest='bump_patch',
action='store_true',
default=False)
parser.add_argument('--version',
help='Set the version',
dest='version',
action='store',
default=False)
args = parser.parse_args()
return args |
def set_unique_child_node(self, name, node):
"""
Add one child node to this node.
Args:
name (str): Name of the child.
node (TreeMapNode): Node to add.
Note:
The name must **not** be in use.
"""
try:
temp = self._nodes[name]
raise RuntimeError("Name '%s' is already used for child node" % name)
except KeyError:
pass
self.set_child_node(name, node) | Add one child node to this node.
Args:
name (str): Name of the child.
node (TreeMapNode): Node to add.
Note:
The name must **not** be in use. | Below is the the instruction that describes the task:
### Input:
Add one child node to this node.
Args:
name (str): Name of the child.
node (TreeMapNode): Node to add.
Note:
The name must **not** be in use.
### Response:
def set_unique_child_node(self, name, node):
"""
Add one child node to this node.
Args:
name (str): Name of the child.
node (TreeMapNode): Node to add.
Note:
The name must **not** be in use.
"""
try:
temp = self._nodes[name]
raise RuntimeError("Name '%s' is already used for child node" % name)
except KeyError:
pass
self.set_child_node(name, node) |
def _createIndexRti(self, index, nodeName):
""" Auxiliary method that creates a PandasIndexRti.
"""
return PandasIndexRti(index=index, nodeName=nodeName, fileName=self.fileName,
iconColor=self._iconColor) | Auxiliary method that creates a PandasIndexRti. | Below is the the instruction that describes the task:
### Input:
Auxiliary method that creates a PandasIndexRti.
### Response:
def _createIndexRti(self, index, nodeName):
""" Auxiliary method that creates a PandasIndexRti.
"""
return PandasIndexRti(index=index, nodeName=nodeName, fileName=self.fileName,
iconColor=self._iconColor) |
def with_source(self, lease):
"""
Init Azure Blob Lease from existing.
"""
super().with_source(lease)
self.offset = lease.offset
self.sequence_number = lease.sequence_number | Init Azure Blob Lease from existing. | Below is the the instruction that describes the task:
### Input:
Init Azure Blob Lease from existing.
### Response:
def with_source(self, lease):
"""
Init Azure Blob Lease from existing.
"""
super().with_source(lease)
self.offset = lease.offset
self.sequence_number = lease.sequence_number |
def kill(self):
"""Kills the running loop and waits till it gets killed."""
assert self.has_started(), "called kill() on a non-active GeventLoop"
self._stop_event.set()
self._greenlet.kill()
self._clear() | Kills the running loop and waits till it gets killed. | Below is the the instruction that describes the task:
### Input:
Kills the running loop and waits till it gets killed.
### Response:
def kill(self):
"""Kills the running loop and waits till it gets killed."""
assert self.has_started(), "called kill() on a non-active GeventLoop"
self._stop_event.set()
self._greenlet.kill()
self._clear() |
def delete_cluster(
self,
cluster_identifier,
skip_final_cluster_snapshot=True,
final_cluster_snapshot_identifier=''):
"""
Delete a cluster and optionally create a snapshot
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
:param skip_final_cluster_snapshot: determines cluster snapshot creation
:type skip_final_cluster_snapshot: bool
:param final_cluster_snapshot_identifier: name of final cluster snapshot
:type final_cluster_snapshot_identifier: str
"""
response = self.get_conn().delete_cluster(
ClusterIdentifier=cluster_identifier,
SkipFinalClusterSnapshot=skip_final_cluster_snapshot,
FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier
)
return response['Cluster'] if response['Cluster'] else None | Delete a cluster and optionally create a snapshot
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
:param skip_final_cluster_snapshot: determines cluster snapshot creation
:type skip_final_cluster_snapshot: bool
:param final_cluster_snapshot_identifier: name of final cluster snapshot
:type final_cluster_snapshot_identifier: str | Below is the the instruction that describes the task:
### Input:
Delete a cluster and optionally create a snapshot
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
:param skip_final_cluster_snapshot: determines cluster snapshot creation
:type skip_final_cluster_snapshot: bool
:param final_cluster_snapshot_identifier: name of final cluster snapshot
:type final_cluster_snapshot_identifier: str
### Response:
def delete_cluster(
self,
cluster_identifier,
skip_final_cluster_snapshot=True,
final_cluster_snapshot_identifier=''):
"""
Delete a cluster and optionally create a snapshot
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
:param skip_final_cluster_snapshot: determines cluster snapshot creation
:type skip_final_cluster_snapshot: bool
:param final_cluster_snapshot_identifier: name of final cluster snapshot
:type final_cluster_snapshot_identifier: str
"""
response = self.get_conn().delete_cluster(
ClusterIdentifier=cluster_identifier,
SkipFinalClusterSnapshot=skip_final_cluster_snapshot,
FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier
)
return response['Cluster'] if response['Cluster'] else None |
def __get_queue_opts(queue=None, backend=None):
'''
Get consistent opts for the queued runners
'''
if queue is None:
queue = __opts__.get('runner_queue', {}).get('queue')
if backend is None:
backend = __opts__.get('runner_queue', {}).get('backend', 'pgjsonb')
return {'backend': backend,
'queue': queue} | Get consistent opts for the queued runners | Below is the the instruction that describes the task:
### Input:
Get consistent opts for the queued runners
### Response:
def __get_queue_opts(queue=None, backend=None):
'''
Get consistent opts for the queued runners
'''
if queue is None:
queue = __opts__.get('runner_queue', {}).get('queue')
if backend is None:
backend = __opts__.get('runner_queue', {}).get('backend', 'pgjsonb')
return {'backend': backend,
'queue': queue} |
def process_resource_schedule(self, i, value, time_type):
"""Does the resource tag schedule and policy match the current time."""
rid = i[self.id_key]
# this is to normalize trailing semicolons which when done allows
# dateutil.parser.parse to process: value='off=(m-f,1);' properly.
# before this normalization, some cases would silently fail.
value = ';'.join(filter(None, value.split(';')))
if self.parser.has_resource_schedule(value, time_type):
schedule = self.parser.parse(value)
elif self.parser.keys_are_valid(value):
# respect timezone from tag
raw_data = self.parser.raw_data(value)
if 'tz' in raw_data:
schedule = dict(self.default_schedule)
schedule['tz'] = raw_data['tz']
else:
schedule = self.default_schedule
else:
schedule = None
if schedule is None:
log.warning(
"Invalid schedule on resource:%s value:%s", rid, value)
self.parse_errors.append((rid, value))
return False
tz = self.get_tz(schedule['tz'])
if not tz:
log.warning(
"Could not resolve tz on resource:%s value:%s", rid, value)
self.parse_errors.append((rid, value))
return False
now = datetime.datetime.now(tz).replace(
minute=0, second=0, microsecond=0)
now_str = now.strftime("%Y-%m-%d")
if 'skip-days-from' in self.data:
values = ValuesFrom(self.data['skip-days-from'], self.manager)
self.skip_days = values.get_values()
else:
self.skip_days = self.data.get('skip-days', [])
if now_str in self.skip_days:
return False
return self.match(now, schedule) | Does the resource tag schedule and policy match the current time. | Below is the the instruction that describes the task:
### Input:
Does the resource tag schedule and policy match the current time.
### Response:
def process_resource_schedule(self, i, value, time_type):
"""Does the resource tag schedule and policy match the current time."""
rid = i[self.id_key]
# this is to normalize trailing semicolons which when done allows
# dateutil.parser.parse to process: value='off=(m-f,1);' properly.
# before this normalization, some cases would silently fail.
value = ';'.join(filter(None, value.split(';')))
if self.parser.has_resource_schedule(value, time_type):
schedule = self.parser.parse(value)
elif self.parser.keys_are_valid(value):
# respect timezone from tag
raw_data = self.parser.raw_data(value)
if 'tz' in raw_data:
schedule = dict(self.default_schedule)
schedule['tz'] = raw_data['tz']
else:
schedule = self.default_schedule
else:
schedule = None
if schedule is None:
log.warning(
"Invalid schedule on resource:%s value:%s", rid, value)
self.parse_errors.append((rid, value))
return False
tz = self.get_tz(schedule['tz'])
if not tz:
log.warning(
"Could not resolve tz on resource:%s value:%s", rid, value)
self.parse_errors.append((rid, value))
return False
now = datetime.datetime.now(tz).replace(
minute=0, second=0, microsecond=0)
now_str = now.strftime("%Y-%m-%d")
if 'skip-days-from' in self.data:
values = ValuesFrom(self.data['skip-days-from'], self.manager)
self.skip_days = values.get_values()
else:
self.skip_days = self.data.get('skip-days', [])
if now_str in self.skip_days:
return False
return self.match(now, schedule) |
def table_dataset_database_table(
table = None,
include_attributes = None,
rows_limit = None,
print_progress = False,
):
"""
Create a pyprel table contents list from a database table of the module
dataset. Attributes to be included in the table can be specified; by
default, all attributes are included. A limit on the number of rows included
can be specified. Progress on building the table can be reported.
"""
if print_progress:
import shijian
progress = shijian.Progress()
progress.engage_quick_calculation_mode()
number_of_rows = len(table)
if include_attributes:
columns = include_attributes
else:
columns = table.columns
table_contents = [columns]
for index_row, row in enumerate(table):
if rows_limit is not None:
if index_row >= rows_limit:
break
row_contents = []
for column in columns:
try:
string_representation = str(row[column])
except:
string_representation = str(row[column].encode("utf-8"))
row_contents.append(string_representation)
table_contents.append(row_contents)
if print_progress:
print(progress.add_datum(
fraction = float(index_row) / float(number_of_rows))
)
return table_contents | Create a pyprel table contents list from a database table of the module
dataset. Attributes to be included in the table can be specified; by
default, all attributes are included. A limit on the number of rows included
can be specified. Progress on building the table can be reported. | Below is the the instruction that describes the task:
### Input:
Create a pyprel table contents list from a database table of the module
dataset. Attributes to be included in the table can be specified; by
default, all attributes are included. A limit on the number of rows included
can be specified. Progress on building the table can be reported.
### Response:
def table_dataset_database_table(
    table = None,
    include_attributes = None,
    rows_limit = None,
    print_progress = False,
):
    """
    Create a pyprel table contents list from a database table of the module
    dataset. Attributes to be included in the table can be specified; by
    default, all attributes are included. A limit on the number of rows included
    can be specified. Progress on building the table can be reported.

    :param table: iterable of row mappings with ``len()`` and a ``columns``
        attribute (``columns`` is only consulted when ``include_attributes``
        is not given)
    :param include_attributes: optional list of column names to include
    :param rows_limit: optional maximum number of rows to include
    :param print_progress: when True, report progress via ``shijian.Progress``
    :return: list of rows, the first being the header row of column names;
        every cell is a string
    """
    if print_progress:
        # Imported lazily so the dependency is only needed when reporting.
        import shijian
        progress = shijian.Progress()
        progress.engage_quick_calculation_mode()
    number_of_rows = len(table)
    if include_attributes:
        columns = include_attributes
    else:
        columns = table.columns
    table_contents = [columns]
    for index_row, row in enumerate(table):
        if rows_limit is not None and index_row >= rows_limit:
            break
        row_contents = []
        for column in columns:
            try:
                string_representation = str(row[column])
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are not swallowed while rendering a cell.
            except Exception:
                # Legacy Python-2-era fallback; presumably intended for
                # unicode values -- TODO confirm this path is still needed.
                string_representation = str(row[column].encode("utf-8"))
            row_contents.append(string_representation)
        table_contents.append(row_contents)
        if print_progress:
            print(progress.add_datum(
                fraction = float(index_row) / float(number_of_rows))
            )
    return table_contents
def query_by_postid(postid, limit=5):
'''
Query history of certian records.
'''
recs = TabPostHist.select().where(
TabPostHist.post_id == postid
).order_by(
TabPostHist.time_update.desc()
).limit(limit)
return recs | Query history of certian records. | Below is the the instruction that describes the task:
### Input:
Query history of certain records.
### Response:
def query_by_postid(postid, limit=5):
    """
    Fetch the revision-history records of a post, newest first,
    capped at ``limit`` entries.
    """
    history = TabPostHist.select().where(TabPostHist.post_id == postid)
    history = history.order_by(TabPostHist.time_update.desc())
    return history.limit(limit)
def writeinfo(self, linelist, colour = None):
"""
We add a longer chunk of text on the upper left corner of the image.
Provide linelist, a list of strings that will be written one below the other.
"""
self.checkforpilimage()
colour = self.defaultcolour(colour)
self.changecolourmode(colour)
self.makedraw()
self.loadinfofont()
for i, line in enumerate(linelist):
topspacing = 5 + (12 + 5)*i
self.draw.text((10, topspacing), line, fill = colour, font = self.infofont)
if self.verbose :
print "I've written some info on the image." | We add a longer chunk of text on the upper left corner of the image.
Provide linelist, a list of strings that will be written one below the other. | Below is the the instruction that describes the task:
### Input:
We add a longer chunk of text on the upper left corner of the image.
Provide linelist, a list of strings that will be written one below the other.
### Response:
def writeinfo(self, linelist, colour = None):
    """
    We add a longer chunk of text on the upper left corner of the image.
    Provide linelist, a list of strings that will be written one below the other.
    """
    # Fail fast if no PIL image has been prepared yet.
    self.checkforpilimage()
    # Fall back to the instance default colour when none is given.
    colour = self.defaultcolour(colour)
    self.changecolourmode(colour)
    self.makedraw()
    self.loadinfofont()
    for i, line in enumerate(linelist):
        # 5 px top margin, then 12 px font height + 5 px gap per line.
        topspacing = 5 + (12 + 5)*i
        self.draw.text((10, topspacing), line, fill = colour, font = self.infofont)
    if self.verbose :
        # NOTE: Python 2 print statement -- this module predates Python 3.
        print "I've written some info on the image."
def ff(items, targets):
"""First-Fit
This is perhaps the simplest packing heuristic;
it simply packs items in the next available bin.
Complexity O(n^2)
"""
bins = [(target, []) for target in targets]
skip = []
for item in items:
for target, content in bins:
if item <= (target - sum(content)):
content.append(item)
break
else:
skip.append(item)
return bins, skip | First-Fit
This is perhaps the simplest packing heuristic;
it simply packs items in the next available bin.
Complexity O(n^2) | Below is the the instruction that describes the task:
### Input:
First-Fit
This is perhaps the simplest packing heuristic;
it simply packs items in the next available bin.
Complexity O(n^2)
### Response:
def ff(items, targets):
    """First-Fit bin packing.

    The simplest packing heuristic: each item goes into the first bin
    with enough remaining capacity; items that fit nowhere are skipped.
    Complexity O(n^2).
    """
    bins = [(capacity, []) for capacity in targets]
    leftovers = []
    for item in items:
        placed = False
        for capacity, content in bins:
            if item <= (capacity - sum(content)):
                content.append(item)
                placed = True
                break
        if not placed:
            leftovers.append(item)
    return bins, leftovers
def drop_all_tables(self):
"""Drop all tables in the database"""
for table_name in self.table_names():
self.execute_sql("DROP TABLE %s" % table_name)
self.connection.commit() | Drop all tables in the database | Below is the the instruction that describes the task:
### Input:
Drop all tables in the database
### Response:
def drop_all_tables(self):
    """Drop every table currently present in the database, then commit."""
    for name in self.table_names():
        self.execute_sql("DROP TABLE %s" % name)
    self.connection.commit()
def do_fake( formatter, *args, **kwargs ):
"""
call a faker format
uses:
{% fake "formatterName" *args **kwargs as myvar %}
{{ myvar }}
or:
{% fake 'name' %}
"""
return Faker.getGenerator().format( formatter, *args, **kwargs ) | call a faker format
uses:
{% fake "formatterName" *args **kwargs as myvar %}
{{ myvar }}
or:
{% fake 'name' %} | Below is the the instruction that describes the task:
### Input:
call a faker format
uses:
{% fake "formatterName" *args **kwargs as myvar %}
{{ myvar }}
or:
{% fake 'name' %}
### Response:
def do_fake( formatter, *args, **kwargs ):
    """
    call a faker format
    uses:
        {% fake "formatterName" *args **kwargs as myvar %}
        {{ myvar }}
    or:
        {% fake 'name' %}
    """
    # Delegate to the shared Faker generator; ``formatter`` names a faker
    # provider method (e.g. 'name') and extra args pass straight through.
    return Faker.getGenerator().format( formatter, *args, **kwargs )
def add_log_level(value, name):
"""
Add a new log level to the :mod:`logging` module.
:param value: The log level's number (an integer).
:param name: The name for the log level (a string).
"""
logging.addLevelName(value, name)
setattr(logging, name, value) | Add a new log level to the :mod:`logging` module.
:param value: The log level's number (an integer).
:param name: The name for the log level (a string). | Below is the the instruction that describes the task:
### Input:
Add a new log level to the :mod:`logging` module.
:param value: The log level's number (an integer).
:param name: The name for the log level (a string).
### Response:
def add_log_level(value, name):
    """Register a custom log level with the :mod:`logging` module.

    :param value: numeric severity of the new level (an integer).
    :param name: symbolic name for the new level (a string).
    """
    # Expose the level as a module attribute (e.g. ``logging.NOTICE``) and
    # register its display name so formatters render it correctly.
    setattr(logging, name, value)
    logging.addLevelName(value, name)
def _matchremove_simple_endings(self, word):
"""Remove the noun, adjective, adverb word endings"""
was_stemmed = False
# noun, adjective, and adverb word endings sorted by charlen, then alph
simple_endings = ['ibus',
'ius',
'ae',
'am',
'as',
'em',
'es',
'ia',
'is',
'nt',
'os',
'ud',
'um',
'us',
'a',
'e',
'i',
'o',
'u']
for ending in simple_endings:
if word.endswith(ending):
word = re.sub(r'{0}$'.format(ending), '', word)
was_stemmed = True
break
return word, was_stemmed | Remove the noun, adjective, adverb word endings | Below is the the instruction that describes the task:
### Input:
Remove the noun, adjective, adverb word endings
### Response:
def _matchremove_simple_endings(self, word):
"""Remove the noun, adjective, adverb word endings"""
was_stemmed = False
# noun, adjective, and adverb word endings sorted by charlen, then alph
simple_endings = ['ibus',
'ius',
'ae',
'am',
'as',
'em',
'es',
'ia',
'is',
'nt',
'os',
'ud',
'um',
'us',
'a',
'e',
'i',
'o',
'u']
for ending in simple_endings:
if word.endswith(ending):
word = re.sub(r'{0}$'.format(ending), '', word)
was_stemmed = True
break
return word, was_stemmed |
def close(self):
"""Implementation of Reporter callback."""
self.emit(self.generate_epilog(self.settings), dest=ReporterDestination.ERR) | Implementation of Reporter callback. | Below is the the instruction that describes the task:
### Input:
Implementation of Reporter callback.
### Response:
def close(self):
    """Implementation of Reporter callback.

    Emits the epilog generated from the current settings to the error
    destination when the reporter shuts down.
    """
    self.emit(self.generate_epilog(self.settings), dest=ReporterDestination.ERR)
def _spin_every(self, interval=1):
"""target func for use in spin_thread"""
while True:
if self._stop_spinning.is_set():
return
time.sleep(interval)
self.spin() | target func for use in spin_thread | Below is the the instruction that describes the task:
### Input:
target func for use in spin_thread
### Response:
def _spin_every(self, interval=1):
"""target func for use in spin_thread"""
while True:
if self._stop_spinning.is_set():
return
time.sleep(interval)
self.spin() |
def digest(dirname, glob=None):
"""Returns the md5 digest of all interesting files (or glob) in `dirname`.
"""
md5 = hashlib.md5()
if glob is None:
fnames = [fname for _, fname in list_files(Path(dirname))]
for fname in sorted(fnames):
fname = os.path.join(dirname, fname)
md5.update(open(fname, 'rb').read())
else:
fnames = Path(dirname).glob(glob)
for fname in sorted(fnames):
md5.update(fname.open('rb').read())
return md5.hexdigest() | Returns the md5 digest of all interesting files (or glob) in `dirname`. | Below is the the instruction that describes the task:
### Input:
Returns the md5 digest of all interesting files (or glob) in `dirname`.
### Response:
def digest(dirname, glob=None):
    """Returns the md5 digest of all interesting files (or glob) in `dirname`.

    Files are hashed in sorted-name order so the digest is deterministic.
    With ``glob=None`` the file list comes from ``list_files``; otherwise
    ``dirname`` is globbed directly.

    :param dirname: directory to hash
    :param glob: optional glob pattern relative to ``dirname``
    :return: hex md5 digest string
    """
    md5 = hashlib.md5()
    if glob is None:
        fnames = [fname for _, fname in list_files(Path(dirname))]
        for fname in sorted(fnames):
            # ``with`` ensures the handle is closed; the original leaked
            # one open file per entry.
            with open(os.path.join(dirname, fname), 'rb') as fobj:
                md5.update(fobj.read())
    else:
        for fname in sorted(Path(dirname).glob(glob)):
            # read_bytes opens and closes the file in one step.
            md5.update(fname.read_bytes())
    return md5.hexdigest()
def _add_games_to_schedule(self, schedule, game_type, year):
"""
Add games instances to schedule.
Create a Game instance for every applicable game in the season and
append the instance to the '_game' property.
Parameters
----------
schedule : PyQuery object
A PyQuery object pertaining to a team's schedule table.
game_type : string
A string constant denoting whether the game is being played as part
of the regular season or the playoffs.
year : string
The requested year to pull stats from.
"""
for item in schedule:
game = Game(item, game_type, year)
self._games.append(game) | Add games instances to schedule.
Create a Game instance for every applicable game in the season and
append the instance to the '_game' property.
Parameters
----------
schedule : PyQuery object
A PyQuery object pertaining to a team's schedule table.
game_type : string
A string constant denoting whether the game is being played as part
of the regular season or the playoffs.
year : string
The requested year to pull stats from. | Below is the the instruction that describes the task:
### Input:
Add games instances to schedule.
Create a Game instance for every applicable game in the season and
append the instance to the '_game' property.
Parameters
----------
schedule : PyQuery object
A PyQuery object pertaining to a team's schedule table.
game_type : string
A string constant denoting whether the game is being played as part
of the regular season or the playoffs.
year : string
The requested year to pull stats from.
### Response:
def _add_games_to_schedule(self, schedule, game_type, year):
    """
    Append a ``Game`` instance to ``self._games`` for every entry in the
    parsed schedule table, preserving document order.

    Parameters
    ----------
    schedule : PyQuery object
        A PyQuery object pertaining to a team's schedule table.
    game_type : string
        Constant marking the game as regular season or playoffs.
    year : string
        The requested year to pull stats from.
    """
    self._games.extend(Game(entry, game_type, year) for entry in schedule)
def get(url, params={}):
"""Invoke an HTTP GET request on a url
Args:
url (string): URL endpoint to request
params (dict): Dictionary of url parameters
Returns:
dict: JSON response as a dictionary
"""
request_url = url
if len(params):
request_url = "{}?{}".format(url, urlencode(params))
try:
req = Request(request_url, headers={'User-Agent': 'Mozilla/5.0'})
response = json.loads(urlopen(req).read().decode("utf-8"))
return response
except HTTPError as err:
raise MtgException(err.read()) | Invoke an HTTP GET request on a url
Args:
url (string): URL endpoint to request
params (dict): Dictionary of url parameters
Returns:
dict: JSON response as a dictionary | Below is the the instruction that describes the task:
### Input:
Invoke an HTTP GET request on a url
Args:
url (string): URL endpoint to request
params (dict): Dictionary of url parameters
Returns:
dict: JSON response as a dictionary
### Response:
def get(url, params=None):
    """Invoke an HTTP GET request on a url

    Args:
        url (string): URL endpoint to request
        params (dict): Dictionary of url parameters (optional; an empty or
            missing dict sends no query string)

    Returns:
        dict: JSON response as a dictionary

    Raises:
        MtgException: wrapping the body of any HTTP error response
    """
    # ``params=None`` replaces the original mutable default argument
    # ``params={}``; behavior for callers is unchanged since the dict was
    # only ever read.
    request_url = url
    if params:
        request_url = "{}?{}".format(url, urlencode(params))
    try:
        req = Request(request_url, headers={'User-Agent': 'Mozilla/5.0'})
        response = json.loads(urlopen(req).read().decode("utf-8"))
        return response
    except HTTPError as err:
        raise MtgException(err.read())
def run_datafind_instance(cp, outputDir, connection, observatory, frameType,
startTime, endTime, ifo, tags=None):
"""
This function will query the datafind server once to find frames between
the specified times for the specified frame type and observatory.
Parameters
----------
cp : ConfigParser instance
Source for any kwargs that should be sent to the datafind module
outputDir : Output cache files will be written here. We also write the
commands for reproducing what is done in this function to this
directory.
connection : datafind connection object
Initialized through the glue.datafind module, this is the open
connection to the datafind server.
observatory : string
The observatory to query frames for. Ex. 'H', 'L' or 'V'. NB: not
'H1', 'L1', 'V1' which denote interferometers.
frameType : string
The frame type to query for.
startTime : int
Integer start time to query the datafind server for frames.
endTime : int
Integer end time to query the datafind server for frames.
ifo : string
The interferometer to use for naming output. Ex. 'H1', 'L1', 'V1'.
Maybe this could be merged with the observatory string, but this
could cause issues if running on old 'H2' and 'H1' data.
tags : list of string, optional (default=None)
Use this to specify tags. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniquify the actual filename.
FIXME: Filenames may not be unique with current codes!
Returns
--------
dfCache : glue.lal.Cache instance
The glue.lal.Cache representation of the call to the datafind
server and the returned frame files.
cacheFile : pycbc.workflow.core.File
Cache file listing all of the datafind output files for use later in the pipeline.
"""
if tags is None:
tags = []
seg = segments.segment([startTime, endTime])
# Take the datafind kwargs from config (usually urltype=file is
# given).
dfKwargs = {}
# By default ignore missing frames, this case is dealt with outside of here
dfKwargs['on_gaps'] = 'ignore'
if cp.has_section("datafind"):
for item, value in cp.items("datafind"):
dfKwargs[item] = value
for tag in tags:
if cp.has_section('datafind-%s' %(tag)):
for item, value in cp.items("datafind-%s" %(tag)):
dfKwargs[item] = value
# It is useful to print the corresponding command to the logs
# directory to check if this was expected.
log_datafind_command(observatory, frameType, startTime, endTime,
os.path.join(outputDir,'logs'), **dfKwargs)
logging.debug("Asking datafind server for frames.")
dfCache = connection.find_frame_urls(observatory, frameType,
startTime, endTime, **dfKwargs)
logging.debug("Frames returned")
# workflow format output file
cache_file = File(ifo, 'DATAFIND', seg, extension='lcf',
directory=outputDir, tags=tags)
cache_file.PFN(cache_file.cache_entry.path, site='local')
dfCache.ifo = ifo
# Dump output to file
fP = open(cache_file.storage_path, "w")
# FIXME: CANNOT use dfCache.tofile because it will print 815901601.00000
# as a gps time which is incompatible with the lal cache format
# (and the C codes) which demand an integer.
#dfCache.tofile(fP)
for entry in dfCache:
start = str(int(entry.segment[0]))
duration = str(int(abs(entry.segment)))
print("%s %s %s %s %s" \
% (entry.observatory, entry.description, start, duration, entry.url), file=fP)
entry.segment = segments.segment(int(entry.segment[0]), int(entry.segment[1]))
fP.close()
return dfCache, cache_file | This function will query the datafind server once to find frames between
the specified times for the specified frame type and observatory.
Parameters
----------
cp : ConfigParser instance
Source for any kwargs that should be sent to the datafind module
outputDir : Output cache files will be written here. We also write the
commands for reproducing what is done in this function to this
directory.
connection : datafind connection object
Initialized through the glue.datafind module, this is the open
connection to the datafind server.
observatory : string
The observatory to query frames for. Ex. 'H', 'L' or 'V'. NB: not
'H1', 'L1', 'V1' which denote interferometers.
frameType : string
The frame type to query for.
startTime : int
Integer start time to query the datafind server for frames.
endTime : int
Integer end time to query the datafind server for frames.
ifo : string
The interferometer to use for naming output. Ex. 'H1', 'L1', 'V1'.
Maybe this could be merged with the observatory string, but this
could cause issues if running on old 'H2' and 'H1' data.
tags : list of string, optional (default=None)
Use this to specify tags. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniquify the actual filename.
FIXME: Filenames may not be unique with current codes!
Returns
--------
dfCache : glue.lal.Cache instance
The glue.lal.Cache representation of the call to the datafind
server and the returned frame files.
cacheFile : pycbc.workflow.core.File
Cache file listing all of the datafind output files for use later in the pipeline. | Below is the the instruction that describes the task:
### Input:
This function will query the datafind server once to find frames between
the specified times for the specified frame type and observatory.
Parameters
----------
cp : ConfigParser instance
Source for any kwargs that should be sent to the datafind module
outputDir : Output cache files will be written here. We also write the
commands for reproducing what is done in this function to this
directory.
connection : datafind connection object
Initialized through the glue.datafind module, this is the open
connection to the datafind server.
observatory : string
The observatory to query frames for. Ex. 'H', 'L' or 'V'. NB: not
'H1', 'L1', 'V1' which denote interferometers.
frameType : string
The frame type to query for.
startTime : int
Integer start time to query the datafind server for frames.
endTime : int
Integer end time to query the datafind server for frames.
ifo : string
The interferometer to use for naming output. Ex. 'H1', 'L1', 'V1'.
Maybe this could be merged with the observatory string, but this
could cause issues if running on old 'H2' and 'H1' data.
tags : list of string, optional (default=None)
Use this to specify tags. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniquify the actual filename.
FIXME: Filenames may not be unique with current codes!
Returns
--------
dfCache : glue.lal.Cache instance
The glue.lal.Cache representation of the call to the datafind
server and the returned frame files.
cacheFile : pycbc.workflow.core.File
Cache file listing all of the datafind output files for use later in the pipeline.
### Response:
def run_datafind_instance(cp, outputDir, connection, observatory, frameType,
                          startTime, endTime, ifo, tags=None):
    """
    This function will query the datafind server once to find frames between
    the specified times for the specified frame type and observatory.
    Parameters
    ----------
    cp : ConfigParser instance
        Source for any kwargs that should be sent to the datafind module
    outputDir : Output cache files will be written here. We also write the
        commands for reproducing what is done in this function to this
        directory.
    connection : datafind connection object
        Initialized through the glue.datafind module, this is the open
        connection to the datafind server.
    observatory : string
        The observatory to query frames for. Ex. 'H', 'L' or 'V'. NB: not
        'H1', 'L1', 'V1' which denote interferometers.
    frameType : string
        The frame type to query for.
    startTime : int
        Integer start time to query the datafind server for frames.
    endTime : int
        Integer end time to query the datafind server for frames.
    ifo : string
        The interferometer to use for naming output. Ex. 'H1', 'L1', 'V1'.
        Maybe this could be merged with the observatory string, but this
        could cause issues if running on old 'H2' and 'H1' data.
    tags : list of string, optional (default=None)
        Use this to specify tags. This can be used if this module is being
        called more than once to give call specific configuration (by setting
        options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
        This is also used to tag the Files returned by the class to uniqueify
        the Files and uniquify the actual filename.
        FIXME: Filenames may not be unique with current codes!
    Returns
    --------
    dfCache : glue.lal.Cache instance
        The glue.lal.Cache representation of the call to the datafind
        server and the returned frame files.
    cacheFile : pycbc.workflow.core.File
        Cache file listing all of the datafind output files for use later in the pipeline.
    """
    if tags is None:
        tags = []
    seg = segments.segment([startTime, endTime])
    # Take the datafind kwargs from config (usually urltype=file is
    # given).
    dfKwargs = {}
    # By default ignore missing frames, this case is dealt with outside of here
    dfKwargs['on_gaps'] = 'ignore'
    if cp.has_section("datafind"):
        for item, value in cp.items("datafind"):
            dfKwargs[item] = value
    # Tag-specific sections override the generic [datafind] options.
    for tag in tags:
        if cp.has_section('datafind-%s' %(tag)):
            for item, value in cp.items("datafind-%s" %(tag)):
                dfKwargs[item] = value
    # It is useful to print the corresponding command to the logs
    # directory to check if this was expected.
    log_datafind_command(observatory, frameType, startTime, endTime,
                         os.path.join(outputDir,'logs'), **dfKwargs)
    logging.debug("Asking datafind server for frames.")
    dfCache = connection.find_frame_urls(observatory, frameType,
                                        startTime, endTime, **dfKwargs)
    logging.debug("Frames returned")
    # workflow format output file
    cache_file = File(ifo, 'DATAFIND', seg, extension='lcf',
                      directory=outputDir, tags=tags)
    cache_file.PFN(cache_file.cache_entry.path, site='local')
    dfCache.ifo = ifo
    # Dump output to file. A context manager replaces the original bare
    # open()/close() pair so the cache file is closed even if writing an
    # entry raises.
    # FIXME: CANNOT use dfCache.tofile because it will print 815901601.00000
    # as a gps time which is incompatible with the lal cache format
    # (and the C codes) which demand an integer.
    #dfCache.tofile(fP)
    with open(cache_file.storage_path, "w") as fP:
        for entry in dfCache:
            start = str(int(entry.segment[0]))
            duration = str(int(abs(entry.segment)))
            print("%s %s %s %s %s" \
                  % (entry.observatory, entry.description, start, duration, entry.url), file=fP)
            # Coerce the segment endpoints to integers in place to match the
            # lal cache convention used above.
            entry.segment = segments.segment(int(entry.segment[0]), int(entry.segment[1]))
    return dfCache, cache_file
def get(self, key, timeout=None):
"""Given a key, returns an element from the redis table"""
key = self.pre_identifier + key
# Check to see if we have this key
unpickled_entry = self.client.get(key)
if not unpickled_entry:
# No hit, return nothing
return None
entry = pickle.loads(unpickled_entry)
# Use provided timeout in arguments if provided
# otherwise use the one provided during init.
if timeout is None:
timeout = self.timeout
# Make sure entry is not expired
if self._is_expired(entry, timeout):
# entry expired, delete and return nothing
self.delete_entry(key)
return None
# entry found and not expired, return it
return entry[1] | Given a key, returns an element from the redis table | Below is the the instruction that describes the task:
### Input:
Given a key, returns an element from the redis table
### Response:
def get(self, key, timeout=None):
    """Given a key, returns an element from the redis table.

    Returns ``None`` when the key is absent or the stored entry has
    expired (expired entries are deleted as a side effect). ``timeout``
    overrides the instance-level timeout when provided.
    """
    full_key = self.pre_identifier + key
    raw = self.client.get(full_key)
    if not raw:
        # Cache miss.
        return None
    # NOTE(review): pickle.loads on data read back from redis -- safe only
    # if the redis instance is trusted; confirm.
    entry = pickle.loads(raw)
    effective_timeout = self.timeout if timeout is None else timeout
    if self._is_expired(entry, effective_timeout):
        # Stale entry: evict it and report a miss.
        self.delete_entry(full_key)
        return None
    # entry is a (timestamp, value) pair; hand back the value.
    return entry[1]
def object_to_bytes(obj):
"""
Convert a object to a bytearray or call get_raw() of the object
if no useful type was found.
"""
if isinstance(obj, str):
return bytearray(obj, "UTF-8")
elif isinstance(obj, bool):
return bytearray()
elif isinstance(obj, int):
return pack("<L", obj)
elif obj is None:
return bytearray()
elif isinstance(obj, bytearray):
return obj
else:
return obj.get_raw() | Convert a object to a bytearray or call get_raw() of the object
if no useful type was found. | Below is the the instruction that describes the task:
### Input:
Convert a object to a bytearray or call get_raw() of the object
if no useful type was found.
### Response:
def object_to_bytes(obj):
    """
    Convert a object to a bytearray or call get_raw() of the object
    if no useful type was found.
    """
    # bool must be handled before int (bool is an int subclass); both bool
    # and None map to an empty bytearray.
    if obj is None or isinstance(obj, bool):
        return bytearray()
    if isinstance(obj, str):
        return bytearray(obj, "UTF-8")
    if isinstance(obj, int):
        # Little-endian 32-bit unsigned encoding.
        return pack("<L", obj)
    if isinstance(obj, bytearray):
        return obj
    return obj.get_raw()
def extract_vars_above(*names):
"""Extract a set of variables by name from another frame.
Similar to extractVars(), but with a specified depth of 1, so that names
are exctracted exactly from above the caller.
This is simply a convenience function so that the very common case (for us)
of skipping exactly 1 frame doesn't have to construct a special dict for
keyword passing."""
callerNS = sys._getframe(2).f_locals
return dict((k,callerNS[k]) for k in names) | Extract a set of variables by name from another frame.
Similar to extractVars(), but with a specified depth of 1, so that names
are exctracted exactly from above the caller.
This is simply a convenience function so that the very common case (for us)
of skipping exactly 1 frame doesn't have to construct a special dict for
keyword passing. | Below is the the instruction that describes the task:
### Input:
Extract a set of variables by name from another frame.
Similar to extractVars(), but with a specified depth of 1, so that names
are exctracted exactly from above the caller.
This is simply a convenience function so that the very common case (for us)
of skipping exactly 1 frame doesn't have to construct a special dict for
keyword passing.
### Response:
def extract_vars_above(*names):
    """Extract a set of variables by name from another frame.

    Like extractVars() with a fixed depth of 1: names are looked up in the
    frame exactly one level above the caller. Convenience for the very
    common case of skipping a single frame without building a kwargs dict.
    """
    # Depth 2: 0 = this function, 1 = caller, 2 = caller's caller.
    frame_locals = sys._getframe(2).f_locals
    return {name: frame_locals[name] for name in names}
def get_asset_search_session_for_repository(self, repository_id, proxy):
"""Gets an asset search session for the given repository.
arg: repository_id (osid.id.Id): the ``Id`` of the repository
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetSearchSession) - an
``AssetSearchSession``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` or ``proxy`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_asset_search()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
if not self.supports_asset_search():
raise errors.Unimplemented()
##
# Also include check to see if the catalog Id is found otherwise raise errors.NotFound
##
# pylint: disable=no-member
return sessions.AssetSearchSession(repository_id, proxy, self._runtime) | Gets an asset search session for the given repository.
arg: repository_id (osid.id.Id): the ``Id`` of the repository
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetSearchSession) - an
``AssetSearchSession``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` or ``proxy`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_asset_search()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_search()`` and
``supports_visible_federation()`` are ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets an asset search session for the given repository.
arg: repository_id (osid.id.Id): the ``Id`` of the repository
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetSearchSession) - an
``AssetSearchSession``
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` or ``proxy`` is
``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_asset_search()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_search()`` and
``supports_visible_federation()`` are ``true``.*
### Response:
def get_asset_search_session_for_repository(self, repository_id, proxy):
    """Gets an asset search session for the given repository.
    arg: repository_id (osid.id.Id): the ``Id`` of the repository
    arg: proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.AssetSearchSession) - an
            ``AssetSearchSession``
    raise: NotFound - ``repository_id`` not found
    raise: NullArgument - ``repository_id`` or ``proxy`` is
            ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_asset_search()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_asset_search()`` and
    ``supports_visible_federation()`` are ``true``.*
    """
    if not self.supports_asset_search():
        raise errors.Unimplemented()
    ##
    # Also include check to see if the catalog Id is found otherwise raise errors.NotFound
    ##
    # NOTE(review): repository_id existence is not actually validated here,
    # so the documented NotFound case is currently unenforced -- confirm.
    # pylint: disable=no-member
    return sessions.AssetSearchSession(repository_id, proxy, self._runtime)
async def continue_conversation(self, reference: ConversationReference, logic):
"""
Continues a conversation with a user. This is often referred to as the bots "Proactive Messaging"
flow as its lets the bot proactively send messages to a conversation or user that its already
communicated with. Scenarios like sending notifications or coupons to a user are enabled by this
method.
:param reference:
:param logic:
:return:
"""
request = TurnContext.apply_conversation_reference(Activity(), reference, is_incoming=True)
context = self.create_context(request)
return await self.run_middleware(context, logic) | Continues a conversation with a user. This is often referred to as the bots "Proactive Messaging"
flow as its lets the bot proactively send messages to a conversation or user that its already
communicated with. Scenarios like sending notifications or coupons to a user are enabled by this
method.
:param reference:
:param logic:
:return: | Below is the the instruction that describes the task:
### Input:
Continues a conversation with a user. This is often referred to as the bots "Proactive Messaging"
flow as its lets the bot proactively send messages to a conversation or user that its already
communicated with. Scenarios like sending notifications or coupons to a user are enabled by this
method.
:param reference:
:param logic:
:return:
### Response:
async def continue_conversation(self, reference: ConversationReference, logic):
"""
Continues a conversation with a user. This is often referred to as the bots "Proactive Messaging"
flow as its lets the bot proactively send messages to a conversation or user that its already
communicated with. Scenarios like sending notifications or coupons to a user are enabled by this
method.
:param reference:
:param logic:
:return:
"""
request = TurnContext.apply_conversation_reference(Activity(), reference, is_incoming=True)
context = self.create_context(request)
return await self.run_middleware(context, logic) |
def wherenotin(self, fieldname, value):
"""
Logical opposite of `wherein`.
"""
return self.wherein(fieldname, value, negate=True) | Logical opposite of `wherein`. | Below is the instruction that describes the task:
### Input:
Logical opposite of `wherein`.
### Response:
def wherenotin(self, fieldname, value):
"""
Logical opposite of `wherein`.
"""
return self.wherein(fieldname, value, negate=True) |
def _generate_field_with_default(**kwargs):
"""Only called if field.default != NOT_PROVIDED"""
field = kwargs['field']
if callable(field.default):
return field.default()
return field.default | Only called if field.default != NOT_PROVIDED | Below is the instruction that describes the task:
### Input:
Only called if field.default != NOT_PROVIDED
### Response:
def _generate_field_with_default(**kwargs):
"""Only called if field.default != NOT_PROVIDED"""
field = kwargs['field']
if callable(field.default):
return field.default()
return field.default |
def path(args):
"""
%prog path input.bed scaffolds.fasta
Construct golden path given a set of genetic maps. The respective weight for
each map is given in file `weights.txt`. The map with the highest weight is
considered the pivot map. The final output is an AGP file that contains
ordered scaffolds.
Please note that BED file and FASTA file cannot share the same prefix.
"""
oargs = args
p = OptionParser(path.__doc__)
p.add_option("-b", "--bedfile", help=SUPPRESS_HELP)
p.add_option("-s", "--fastafile", help=SUPPRESS_HELP)
p.add_option("-w", "--weightsfile", default="weights.txt",
help="Use weights from file")
p.add_option("--compress", default=1e-6, type="float",
help="Compress markers with distance <=")
p.add_option("--noremoveoutliers", default=False, action="store_true",
help="Don't remove outlier markers")
p.add_option("--distance", default="rank", choices=distance_choices,
help="Distance function when building initial consensus")
p.add_option("--linkage", default="double", choices=linkage_choices,
help="Linkage function when building initial consensus")
p.add_option("--gapsize", default=100, type="int",
help="Insert gaps of size between scaffolds")
p.add_option("--seqid", help="Only run partition with this seqid")
p.add_option("--partitions", help="Use predefined partitions of LGs")
p.add_option("--links", default=10, type="int",
help="Only plot matchings more than")
p.add_option("--mincount", default=1, type="int",
help="Minimum markers on a contig")
p.add_option("--noplot", default=False, action="store_true",
help="Do not visualize the alignments")
p.add_option("--skipconcorde", default=False, action="store_true",
help="Skip TSP optimizer, can speed up large cases")
p.add_option("--renumber", default=False, action="store_true",
help="Renumber chromosome based on decreasing sizes")
p.set_cpus(cpus=16)
q = OptionGroup(p, "Genetic algorithm options")
p.add_option_group(q)
q.add_option("--ngen", default=500, type="int",
help="Iterations in GA, higher ~ slower")
q.add_option("--npop", default=100, type="int",
help="Population size in GA, higher ~ slower")
q.add_option("--seed", default=666, type="int",
help="Random seed number")
opts, args, iopts = p.set_image_options(args, figsize="10x6")
if len(args) != 2:
sys.exit(not p.print_help())
inputbed, fastafile = args
inputbed = opts.bedfile or inputbed
fastafile = opts.fastafile or fastafile
pf = inputbed.rsplit(".", 1)[0]
if op.basename(fastafile).split(".")[0] == pf:
print("ERROR: Filename collision `{}`. We suggest to rename `{}`"\
.format(pf, inputbed), file=sys.stderr)
sys.exit(1)
bedfile = pf + ".bed"
weightsfile = opts.weightsfile
partitionsfile = opts.partitions
gapsize = opts.gapsize
mincount = opts.mincount
skipconcorde = opts.skipconcorde
ngen = opts.ngen
npop = opts.npop
cpus = opts.cpus
seed = opts.seed
if sys.version_info[:2] < (2, 7):
logging.debug("Python version: {0}. CPUs set to 1.".\
format(sys.version.splitlines()[0].strip()))
cpus = 1
function = get_function(opts.distance)
cc = Map(bedfile, function=function, compress=opts.compress, \
remove_outliers=(not opts.noremoveoutliers))
mapnames = cc.mapnames
allseqids = cc.seqids
weights = Weights(weightsfile, mapnames)
pivot = weights.pivot
ref = weights.ref
linkage = opts.linkage
oseqid = opts.seqid
logging.debug("Linkage function: {0}-linkage".format(linkage))
linkage = {"single": min, "double": double_linkage, "complete": max,
"average": np.mean, "median": np.median}[linkage]
# Partition the linkage groups into consensus clusters
C = Grouper()
# Initialize the partitions
for mlg in cc.mlgs:
C.join(mlg)
if partitionsfile:
logging.debug("Partition LGs based on `{}`".format(partitionsfile))
fp = open(partitionsfile)
for row in fp:
C.join(*row.strip().split(","))
else:
logging.debug("Partition LGs based on {0}".format(ref))
for mapname in mapnames:
if mapname == ref:
continue
# Compute co-occurrence between LG pairs
G = defaultdict(int)
for s in allseqids:
s = Scaffold(s, cc)
s.add_LG_pairs(G, (ref, mapname))
# Convert edge list to adj list
nodes = defaultdict(list)
for (a, b), w in G.items():
nodes[a].append((b, w))
# Find the best ref LG every non-ref LG matches to
for n, neighbors in nodes.items():
if n.split("-")[0] == ref:
continue
neighbors = dict(neighbors)
best_neighbor, best_value = best_no_ambiguous(neighbors, n)
if best_neighbor is None:
continue
C.join(n, best_neighbor)
partitions = defaultdict(list)
# Partition the scaffolds and assign them to one consensus
for s in allseqids:
s = Scaffold(s, cc)
seqid = s.seqid
counts = {}
for mlg, count in s.mlg_counts.items():
consensus = C[mlg]
mapname = mlg.split("-")[0]
mw = weights[mapname]
if consensus not in counts:
counts[consensus] = 0
if count < mincount:
continue
counts[consensus] += count * mw
best_consensus, best_value = best_no_ambiguous(counts, seqid)
if best_consensus is None:
continue
partitions[best_consensus].append(seqid)
# Perform OO within each partition
agpfile = pf + ".chr.agp"
tourfile = pf + ".tour"
sizes = Sizes(fastafile).mapping
fwagp = must_open(agpfile, "w")
fwtour = must_open(tourfile, "w")
solutions = []
for lgs, scaffolds in natsorted(partitions.items()):
if oseqid and oseqid not in lgs:
continue
tag = "|".join(lgs)
lgs_maps = set(x.split("-")[0] for x in lgs)
if pivot not in lgs_maps:
logging.debug("Skipping {0} ...".format(tag))
continue
logging.debug("Working on {0} ...".format(tag))
s = ScaffoldOO(lgs, scaffolds, cc, pivot, weights, sizes,
function=function, linkage=linkage, fwtour=fwtour,
skipconcorde=skipconcorde,
ngen=ngen, npop=npop, cpus=cpus, seed=seed)
solutions.append(s)
fwtour.close()
# Renumber chromosome based on decreasing size
if opts.renumber:
chrsizes = {}
conversion = {}
for s in solutions:
chrsizes[s.object] = sum(sizes[x] for (x, o) in s.tour) + \
(len(s.tour) - 1) * gapsize
for i, (c, size) in enumerate(sorted(chrsizes.items(), key=lambda x: -x[1])):
newc = "chr{0}".format(i + 1)
logging.debug("{0}: {1} => {2}".format(c, size, newc))
conversion[c] = newc
for s in solutions:
s.object = conversion[s.object]
# meta-data about the run parameters
command = "# COMMAND: python -m jcvi.assembly.allmaps path {0}".\
format(" ".join(oargs))
comment = "Generated by ALLMAPS v{0} ({1})\n{2}".\
format(version, get_today(), command)
AGP.print_header(fwagp, comment=comment)
for s in natsorted(solutions, key=lambda x: x.object):
order_to_agp(s.object, s.tour, sizes, fwagp, gapsize=gapsize,
gaptype="map")
fwagp.close()
logging.debug("AGP file written to `{0}`.".format(agpfile))
logging.debug("Tour file written to `{0}`.".format(tourfile))
build([inputbed, fastafile])
summaryfile = pf + ".summary.txt"
summary([inputbed, fastafile, "--outfile={0}".format(summaryfile)])
if not opts.noplot:
plotall([inputbed, "--links={0}".format(opts.links),
"--figsize={0}".format(opts.figsize)]) | %prog path input.bed scaffolds.fasta
Construct golden path given a set of genetic maps. The respective weight for
each map is given in file `weights.txt`. The map with the highest weight is
considered the pivot map. The final output is an AGP file that contains
ordered scaffolds.
Please note that BED file and FASTA file cannot share the same prefix. | Below is the instruction that describes the task:
### Input:
%prog path input.bed scaffolds.fasta
Construct golden path given a set of genetic maps. The respective weight for
each map is given in file `weights.txt`. The map with the highest weight is
considered the pivot map. The final output is an AGP file that contains
ordered scaffolds.
Please note that BED file and FASTA file cannot share the same prefix.
### Response:
def path(args):
"""
%prog path input.bed scaffolds.fasta
Construct golden path given a set of genetic maps. The respective weight for
each map is given in file `weights.txt`. The map with the highest weight is
considered the pivot map. The final output is an AGP file that contains
ordered scaffolds.
Please note that BED file and FASTA file cannot share the same prefix.
"""
oargs = args
p = OptionParser(path.__doc__)
p.add_option("-b", "--bedfile", help=SUPPRESS_HELP)
p.add_option("-s", "--fastafile", help=SUPPRESS_HELP)
p.add_option("-w", "--weightsfile", default="weights.txt",
help="Use weights from file")
p.add_option("--compress", default=1e-6, type="float",
help="Compress markers with distance <=")
p.add_option("--noremoveoutliers", default=False, action="store_true",
help="Don't remove outlier markers")
p.add_option("--distance", default="rank", choices=distance_choices,
help="Distance function when building initial consensus")
p.add_option("--linkage", default="double", choices=linkage_choices,
help="Linkage function when building initial consensus")
p.add_option("--gapsize", default=100, type="int",
help="Insert gaps of size between scaffolds")
p.add_option("--seqid", help="Only run partition with this seqid")
p.add_option("--partitions", help="Use predefined partitions of LGs")
p.add_option("--links", default=10, type="int",
help="Only plot matchings more than")
p.add_option("--mincount", default=1, type="int",
help="Minimum markers on a contig")
p.add_option("--noplot", default=False, action="store_true",
help="Do not visualize the alignments")
p.add_option("--skipconcorde", default=False, action="store_true",
help="Skip TSP optimizer, can speed up large cases")
p.add_option("--renumber", default=False, action="store_true",
help="Renumber chromosome based on decreasing sizes")
p.set_cpus(cpus=16)
q = OptionGroup(p, "Genetic algorithm options")
p.add_option_group(q)
q.add_option("--ngen", default=500, type="int",
help="Iterations in GA, higher ~ slower")
q.add_option("--npop", default=100, type="int",
help="Population size in GA, higher ~ slower")
q.add_option("--seed", default=666, type="int",
help="Random seed number")
opts, args, iopts = p.set_image_options(args, figsize="10x6")
if len(args) != 2:
sys.exit(not p.print_help())
inputbed, fastafile = args
inputbed = opts.bedfile or inputbed
fastafile = opts.fastafile or fastafile
pf = inputbed.rsplit(".", 1)[0]
if op.basename(fastafile).split(".")[0] == pf:
print("ERROR: Filename collision `{}`. We suggest to rename `{}`"\
.format(pf, inputbed), file=sys.stderr)
sys.exit(1)
bedfile = pf + ".bed"
weightsfile = opts.weightsfile
partitionsfile = opts.partitions
gapsize = opts.gapsize
mincount = opts.mincount
skipconcorde = opts.skipconcorde
ngen = opts.ngen
npop = opts.npop
cpus = opts.cpus
seed = opts.seed
if sys.version_info[:2] < (2, 7):
logging.debug("Python version: {0}. CPUs set to 1.".\
format(sys.version.splitlines()[0].strip()))
cpus = 1
function = get_function(opts.distance)
cc = Map(bedfile, function=function, compress=opts.compress, \
remove_outliers=(not opts.noremoveoutliers))
mapnames = cc.mapnames
allseqids = cc.seqids
weights = Weights(weightsfile, mapnames)
pivot = weights.pivot
ref = weights.ref
linkage = opts.linkage
oseqid = opts.seqid
logging.debug("Linkage function: {0}-linkage".format(linkage))
linkage = {"single": min, "double": double_linkage, "complete": max,
"average": np.mean, "median": np.median}[linkage]
# Partition the linkage groups into consensus clusters
C = Grouper()
# Initialize the partitions
for mlg in cc.mlgs:
C.join(mlg)
if partitionsfile:
logging.debug("Partition LGs based on `{}`".format(partitionsfile))
fp = open(partitionsfile)
for row in fp:
C.join(*row.strip().split(","))
else:
logging.debug("Partition LGs based on {0}".format(ref))
for mapname in mapnames:
if mapname == ref:
continue
# Compute co-occurrence between LG pairs
G = defaultdict(int)
for s in allseqids:
s = Scaffold(s, cc)
s.add_LG_pairs(G, (ref, mapname))
# Convert edge list to adj list
nodes = defaultdict(list)
for (a, b), w in G.items():
nodes[a].append((b, w))
# Find the best ref LG every non-ref LG matches to
for n, neighbors in nodes.items():
if n.split("-")[0] == ref:
continue
neighbors = dict(neighbors)
best_neighbor, best_value = best_no_ambiguous(neighbors, n)
if best_neighbor is None:
continue
C.join(n, best_neighbor)
partitions = defaultdict(list)
# Partition the scaffolds and assign them to one consensus
for s in allseqids:
s = Scaffold(s, cc)
seqid = s.seqid
counts = {}
for mlg, count in s.mlg_counts.items():
consensus = C[mlg]
mapname = mlg.split("-")[0]
mw = weights[mapname]
if consensus not in counts:
counts[consensus] = 0
if count < mincount:
continue
counts[consensus] += count * mw
best_consensus, best_value = best_no_ambiguous(counts, seqid)
if best_consensus is None:
continue
partitions[best_consensus].append(seqid)
# Perform OO within each partition
agpfile = pf + ".chr.agp"
tourfile = pf + ".tour"
sizes = Sizes(fastafile).mapping
fwagp = must_open(agpfile, "w")
fwtour = must_open(tourfile, "w")
solutions = []
for lgs, scaffolds in natsorted(partitions.items()):
if oseqid and oseqid not in lgs:
continue
tag = "|".join(lgs)
lgs_maps = set(x.split("-")[0] for x in lgs)
if pivot not in lgs_maps:
logging.debug("Skipping {0} ...".format(tag))
continue
logging.debug("Working on {0} ...".format(tag))
s = ScaffoldOO(lgs, scaffolds, cc, pivot, weights, sizes,
function=function, linkage=linkage, fwtour=fwtour,
skipconcorde=skipconcorde,
ngen=ngen, npop=npop, cpus=cpus, seed=seed)
solutions.append(s)
fwtour.close()
# Renumber chromosome based on decreasing size
if opts.renumber:
chrsizes = {}
conversion = {}
for s in solutions:
chrsizes[s.object] = sum(sizes[x] for (x, o) in s.tour) + \
(len(s.tour) - 1) * gapsize
for i, (c, size) in enumerate(sorted(chrsizes.items(), key=lambda x: -x[1])):
newc = "chr{0}".format(i + 1)
logging.debug("{0}: {1} => {2}".format(c, size, newc))
conversion[c] = newc
for s in solutions:
s.object = conversion[s.object]
# meta-data about the run parameters
command = "# COMMAND: python -m jcvi.assembly.allmaps path {0}".\
format(" ".join(oargs))
comment = "Generated by ALLMAPS v{0} ({1})\n{2}".\
format(version, get_today(), command)
AGP.print_header(fwagp, comment=comment)
for s in natsorted(solutions, key=lambda x: x.object):
order_to_agp(s.object, s.tour, sizes, fwagp, gapsize=gapsize,
gaptype="map")
fwagp.close()
logging.debug("AGP file written to `{0}`.".format(agpfile))
logging.debug("Tour file written to `{0}`.".format(tourfile))
build([inputbed, fastafile])
summaryfile = pf + ".summary.txt"
summary([inputbed, fastafile, "--outfile={0}".format(summaryfile)])
if not opts.noplot:
plotall([inputbed, "--links={0}".format(opts.links),
"--figsize={0}".format(opts.figsize)]) |
def nvrtcCompileProgram(self, prog, options):
"""
Compiles the NVRTC program object into PTX, using the provided options
array. See the NVRTC API documentation for accepted options.
"""
options_array = (c_char_p * len(options))()
options_array[:] = encode_str_list(options)
code = self._lib.nvrtcCompileProgram(prog, len(options), options_array)
self._throw_on_error(code)
return | Compiles the NVRTC program object into PTX, using the provided options
array. See the NVRTC API documentation for accepted options. | Below is the instruction that describes the task:
### Input:
Compiles the NVRTC program object into PTX, using the provided options
array. See the NVRTC API documentation for accepted options.
### Response:
def nvrtcCompileProgram(self, prog, options):
"""
Compiles the NVRTC program object into PTX, using the provided options
array. See the NVRTC API documentation for accepted options.
"""
options_array = (c_char_p * len(options))()
options_array[:] = encode_str_list(options)
code = self._lib.nvrtcCompileProgram(prog, len(options), options_array)
self._throw_on_error(code)
return |
def is_local_subsection(command_dict):
"""Returns True if command dict is "local subsection", meaning
that it is "if", "else" or "for" (not a real call, but calls
run_section recursively."""
for local_com in ['if ', 'for ', 'else ']:
if list(command_dict.keys())[0].startswith(local_com):
return True
return False | Returns True if command dict is "local subsection", meaning
that it is "if", "else" or "for" (not a real call, but calls
run_section recursively. | Below is the instruction that describes the task:
### Input:
Returns True if command dict is "local subsection", meaning
that it is "if", "else" or "for" (not a real call, but calls
run_section recursively.
### Response:
def is_local_subsection(command_dict):
"""Returns True if command dict is "local subsection", meaning
that it is "if", "else" or "for" (not a real call, but calls
run_section recursively."""
for local_com in ['if ', 'for ', 'else ']:
if list(command_dict.keys())[0].startswith(local_com):
return True
return False |
def ParseForwardedIps(self, forwarded_ips):
"""Parse and validate forwarded IP addresses.
Args:
forwarded_ips: list, the IP address strings to parse.
Returns:
list, the valid IP address strings.
"""
addresses = []
forwarded_ips = forwarded_ips or []
for ip in forwarded_ips:
if ip and (IP_REGEX.match(ip) or IP_ALIAS_REGEX.match(ip)):
addresses.extend([str(addr) for addr in list(netaddr.IPNetwork(ip))])
else:
self.logger.warning('Could not parse IP address: "%s".', ip)
return addresses | Parse and validate forwarded IP addresses.
Args:
forwarded_ips: list, the IP address strings to parse.
Returns:
list, the valid IP address strings. | Below is the instruction that describes the task:
### Input:
Parse and validate forwarded IP addresses.
Args:
forwarded_ips: list, the IP address strings to parse.
Returns:
list, the valid IP address strings.
### Response:
def ParseForwardedIps(self, forwarded_ips):
"""Parse and validate forwarded IP addresses.
Args:
forwarded_ips: list, the IP address strings to parse.
Returns:
list, the valid IP address strings.
"""
addresses = []
forwarded_ips = forwarded_ips or []
for ip in forwarded_ips:
if ip and (IP_REGEX.match(ip) or IP_ALIAS_REGEX.match(ip)):
addresses.extend([str(addr) for addr in list(netaddr.IPNetwork(ip))])
else:
self.logger.warning('Could not parse IP address: "%s".', ip)
return addresses |
def get_limits(self):
"""
Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict
"""
self.connect()
region_name = self.conn._client_config.region_name
if self.limits != {}:
return self.limits
limits = {}
limits['Tables Per Region'] = AwsLimit(
'Tables Per Region',
self,
256,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
limits['Account Max Write Capacity Units'] = AwsLimit(
'Account Max Write Capacity Units',
self,
80000 if region_name == 'us-east-1' else 20000,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table', )
limits['Table Max Write Capacity Units'] = AwsLimit(
'Table Max Write Capacity Units',
self,
40000 if region_name == 'us-east-1' else 10000,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
limits['Account Max Read Capacity Units'] = AwsLimit(
'Account Max Read Capacity Units',
self,
80000 if region_name == 'us-east-1' else 20000,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
limits['Table Max Read Capacity Units'] = AwsLimit(
'Table Max Read Capacity Units',
self,
40000 if region_name == 'us-east-1' else 10000,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
limits['Global Secondary Indexes'] = AwsLimit(
'Global Secondary Indexes',
self,
20,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
limits['Local Secondary Indexes'] = AwsLimit(
'Local Secondary Indexes',
self,
5,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
self.limits = limits
return limits | Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict | Below is the instruction that describes the task:
### Input:
Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict
### Response:
def get_limits(self):
"""
Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict
"""
self.connect()
region_name = self.conn._client_config.region_name
if self.limits != {}:
return self.limits
limits = {}
limits['Tables Per Region'] = AwsLimit(
'Tables Per Region',
self,
256,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
limits['Account Max Write Capacity Units'] = AwsLimit(
'Account Max Write Capacity Units',
self,
80000 if region_name == 'us-east-1' else 20000,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table', )
limits['Table Max Write Capacity Units'] = AwsLimit(
'Table Max Write Capacity Units',
self,
40000 if region_name == 'us-east-1' else 10000,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
limits['Account Max Read Capacity Units'] = AwsLimit(
'Account Max Read Capacity Units',
self,
80000 if region_name == 'us-east-1' else 20000,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
limits['Table Max Read Capacity Units'] = AwsLimit(
'Table Max Read Capacity Units',
self,
40000 if region_name == 'us-east-1' else 10000,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
limits['Global Secondary Indexes'] = AwsLimit(
'Global Secondary Indexes',
self,
20,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
limits['Local Secondary Indexes'] = AwsLimit(
'Local Secondary Indexes',
self,
5,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DynamoDB::Table',
)
self.limits = limits
return limits |
def getPrecision(self, result=None):
"""Returns the precision for the Analysis.
- If ManualUncertainty is set, calculates the precision of the result
in accordance with the manual uncertainty set.
- If Calculate Precision from Uncertainty is set in Analysis Service,
calculates the precision in accordance with the uncertainty infered
from uncertainties ranges.
- If neither Manual Uncertainty nor Calculate Precision from
Uncertainty are set, returns the precision from the Analysis Service
- If you have a number with zero uncertainty: If you roll a pair of
dice and observe five spots, the number of spots is 5. This is a raw
data point, with no uncertainty whatsoever. So just write down the
number. Similarly, the number of centimeters per inch is 2.54,
by definition, with no uncertainty whatsoever. Again: just write
down the number.
Further information at AbstractBaseAnalysis.getPrecision()
"""
allow_manual = self.getAllowManualUncertainty()
precision_unc = self.getPrecisionFromUncertainty()
if allow_manual or precision_unc:
uncertainty = self.getUncertainty(result)
if uncertainty is None:
return self.getField('Precision').get(self)
if uncertainty == 0 and result is None:
return self.getField('Precision').get(self)
if uncertainty == 0:
strres = str(result)
numdecimals = strres[::-1].find('.')
return numdecimals
return get_significant_digits(uncertainty)
return self.getField('Precision').get(self) | Returns the precision for the Analysis.
- If ManualUncertainty is set, calculates the precision of the result
in accordance with the manual uncertainty set.
- If Calculate Precision from Uncertainty is set in Analysis Service,
calculates the precision in accordance with the uncertainty infered
from uncertainties ranges.
- If neither Manual Uncertainty nor Calculate Precision from
Uncertainty are set, returns the precision from the Analysis Service
- If you have a number with zero uncertainty: If you roll a pair of
dice and observe five spots, the number of spots is 5. This is a raw
data point, with no uncertainty whatsoever. So just write down the
number. Similarly, the number of centimeters per inch is 2.54,
by definition, with no uncertainty whatsoever. Again: just write
down the number.
Further information at AbstractBaseAnalysis.getPrecision() | Below is the instruction that describes the task:
### Input:
Returns the precision for the Analysis.
- If ManualUncertainty is set, calculates the precision of the result
in accordance with the manual uncertainty set.
- If Calculate Precision from Uncertainty is set in Analysis Service,
calculates the precision in accordance with the uncertainty infered
from uncertainties ranges.
- If neither Manual Uncertainty nor Calculate Precision from
Uncertainty are set, returns the precision from the Analysis Service
- If you have a number with zero uncertainty: If you roll a pair of
dice and observe five spots, the number of spots is 5. This is a raw
data point, with no uncertainty whatsoever. So just write down the
number. Similarly, the number of centimeters per inch is 2.54,
by definition, with no uncertainty whatsoever. Again: just write
down the number.
Further information at AbstractBaseAnalysis.getPrecision()
### Response:
def getPrecision(self, result=None):
"""Returns the precision for the Analysis.
- If ManualUncertainty is set, calculates the precision of the result
in accordance with the manual uncertainty set.
- If Calculate Precision from Uncertainty is set in Analysis Service,
calculates the precision in accordance with the uncertainty infered
from uncertainties ranges.
- If neither Manual Uncertainty nor Calculate Precision from
Uncertainty are set, returns the precision from the Analysis Service
- If you have a number with zero uncertainty: If you roll a pair of
dice and observe five spots, the number of spots is 5. This is a raw
data point, with no uncertainty whatsoever. So just write down the
number. Similarly, the number of centimeters per inch is 2.54,
by definition, with no uncertainty whatsoever. Again: just write
down the number.
Further information at AbstractBaseAnalysis.getPrecision()
"""
allow_manual = self.getAllowManualUncertainty()
precision_unc = self.getPrecisionFromUncertainty()
if allow_manual or precision_unc:
uncertainty = self.getUncertainty(result)
if uncertainty is None:
return self.getField('Precision').get(self)
if uncertainty == 0 and result is None:
return self.getField('Precision').get(self)
if uncertainty == 0:
strres = str(result)
numdecimals = strres[::-1].find('.')
return numdecimals
return get_significant_digits(uncertainty)
return self.getField('Precision').get(self) |
def _format_msg(self, msg, edata):
"""Substitute parameters in exception message."""
edata = edata if isinstance(edata, list) else [edata]
for fdict in edata:
if "*[{token}]*".format(token=fdict["field"]) not in msg:
raise RuntimeError(
"Field {token} not in exception message".format(
token=fdict["field"]
)
)
msg = msg.replace(
"*[{token}]*".format(token=fdict["field"]), "{value}"
).format(value=fdict["value"])
return msg | Substitute parameters in exception message. | Below is the instruction that describes the task:
### Input:
Substitute parameters in exception message.
### Response:
def _format_msg(self, msg, edata):
"""Substitute parameters in exception message."""
edata = edata if isinstance(edata, list) else [edata]
for fdict in edata:
if "*[{token}]*".format(token=fdict["field"]) not in msg:
raise RuntimeError(
"Field {token} not in exception message".format(
token=fdict["field"]
)
)
msg = msg.replace(
"*[{token}]*".format(token=fdict["field"]), "{value}"
).format(value=fdict["value"])
return msg |
def connect(self):
"""Create connection to CasparCG Server"""
try:
self.connection = telnetlib.Telnet(self.host, self.port, timeout=self.timeout)
except Exception:
log_traceback()
return False
return True | Create connection to CasparCG Server | Below is the instruction that describes the task:
### Input:
Create connection to CasparCG Server
### Response:
def connect(self):
"""Create connection to CasparCG Server"""
try:
self.connection = telnetlib.Telnet(self.host, self.port, timeout=self.timeout)
except Exception:
log_traceback()
return False
return True |
def runner(self, fun, timeout=None, full_return=False, **kwargs):
'''
Run `runner modules <all-salt.runners>` synchronously
Wraps :py:meth:`salt.runner.RunnerClient.cmd_sync`.
Note that runner functions must be called using keyword arguments.
Positional arguments are not supported.
:return: Returns the result from the runner module
'''
kwargs['fun'] = fun
runner = salt.runner.RunnerClient(self.opts)
return runner.cmd_sync(kwargs, timeout=timeout, full_return=full_return) | Run `runner modules <all-salt.runners>` synchronously
Wraps :py:meth:`salt.runner.RunnerClient.cmd_sync`.
Note that runner functions must be called using keyword arguments.
Positional arguments are not supported.
:return: Returns the result from the runner module | Below is the instruction that describes the task:
### Input:
Run `runner modules <all-salt.runners>` synchronously
Wraps :py:meth:`salt.runner.RunnerClient.cmd_sync`.
Note that runner functions must be called using keyword arguments.
Positional arguments are not supported.
:return: Returns the result from the runner module
### Response:
def runner(self, fun, timeout=None, full_return=False, **kwargs):
'''
Run `runner modules <all-salt.runners>` synchronously
Wraps :py:meth:`salt.runner.RunnerClient.cmd_sync`.
Note that runner functions must be called using keyword arguments.
Positional arguments are not supported.
:return: Returns the result from the runner module
'''
kwargs['fun'] = fun
runner = salt.runner.RunnerClient(self.opts)
return runner.cmd_sync(kwargs, timeout=timeout, full_return=full_return) |
def signed_tree_multiplier(A, B, reducer=adders.wallace_reducer, adder_func=adders.kogge_stone):
"""Same as tree_multiplier, but uses two's-complement signed integers"""
if len(A) == 1 or len(B) == 1:
raise pyrtl.PyrtlError("sign bit required, one or both wires too small")
aneg, bneg = A[-1], B[-1]
a = _twos_comp_conditional(A, aneg)
b = _twos_comp_conditional(B, bneg)
res = tree_multiplier(a[:-1], b[:-1]).zero_extended(len(A) + len(B))
return _twos_comp_conditional(res, aneg ^ bneg) | Same as tree_multiplier, but uses two's-complement signed integers | Below is the instruction that describes the task:
### Input:
Same as tree_multiplier, but uses two's-complement signed integers
### Response:
def signed_tree_multiplier(A, B, reducer=adders.wallace_reducer, adder_func=adders.kogge_stone):
    """Same as tree_multiplier, but uses two's-complement signed integers"""
    if len(A) == 1 or len(B) == 1:
        raise pyrtl.PyrtlError("sign bit required, one or both wires too small")
    sign_a, sign_b = A[-1], B[-1]
    mag_a = _twos_comp_conditional(A, sign_a)
    mag_b = _twos_comp_conditional(B, sign_b)
    # multiply the magnitudes (sign bits stripped), then widen the result
    unsigned_prod = tree_multiplier(mag_a[:-1], mag_b[:-1]).zero_extended(len(A) + len(B))
    # negate the product exactly when the operand signs differ
    return _twos_comp_conditional(unsigned_prod, sign_a ^ sign_b)
def save_persistent_attributes(self):
# type: () -> None
"""Save persistent attributes to the persistence layer if a
persistence adapter is provided.
:rtype: None
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to save persistence attributes without persistence adapter
"""
if not self._persistence_adapter:
raise AttributesManagerException(
"Cannot save PersistentAttributes without "
"persistence adapter!")
if self._persistent_attributes_set:
self._persistence_adapter.save_attributes(
request_envelope=self._request_envelope,
attributes=self._persistence_attributes) | Save persistent attributes to the persistence layer if a
persistence adapter is provided.
:rtype: None
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to save persistence attributes without persistence adapter | Below is the the instruction that describes the task:
### Input:
Save persistent attributes to the persistence layer if a
persistence adapter is provided.
:rtype: None
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to save persistence attributes without persistence adapter
### Response:
def save_persistent_attributes(self):
    # type: () -> None
    """Write persistent attributes out through the persistence layer.

    A persistence adapter must be configured; attributes are only
    written if they have actually been set on this manager.

    :rtype: None
    :raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
        if trying to save persistence attributes without persistence adapter
    """
    adapter = self._persistence_adapter
    if not adapter:
        raise AttributesManagerException(
            "Cannot save PersistentAttributes without persistence adapter!")
    if self._persistent_attributes_set:
        adapter.save_attributes(
            request_envelope=self._request_envelope,
            attributes=self._persistence_attributes)
def _numpy_char_to_bytes(arr):
"""Like netCDF4.chartostring, but faster and more flexible.
"""
# based on: http://stackoverflow.com/a/10984878/809705
arr = np.array(arr, copy=False, order='C')
dtype = 'S' + str(arr.shape[-1])
return arr.view(dtype).reshape(arr.shape[:-1]) | Like netCDF4.chartostring, but faster and more flexible. | Below is the the instruction that describes the task:
### Input:
Like netCDF4.chartostring, but faster and more flexible.
### Response:
def _numpy_char_to_bytes(arr):
"""Like netCDF4.chartostring, but faster and more flexible.
"""
# based on: http://stackoverflow.com/a/10984878/809705
arr = np.array(arr, copy=False, order='C')
dtype = 'S' + str(arr.shape[-1])
return arr.view(dtype).reshape(arr.shape[:-1]) |
def read_blob(self,blob_dim,n_blob=0):
"""Read blob from a selection.
"""
n_blobs = self.calc_n_blobs(blob_dim)
if n_blob > n_blobs or n_blob < 0:
raise ValueError('Please provide correct n_blob value. Given %i, but max values is %i'%(n_blob,n_blobs))
# This prevents issues when the last blob is smaller than the others in time.
if blob_dim[self.time_axis]*(n_blob+1) > self.selection_shape[self.time_axis]:
updated_blob_dim = (int(self.selection_shape[self.time_axis] - blob_dim[self.time_axis]*n_blob), 1, int(blob_dim[self.freq_axis]))
else:
updated_blob_dim = [int(i) for i in blob_dim]
blob_start = self._find_blob_start()
blob = np.zeros(updated_blob_dim, dtype=self._d_type)
# EE: For now; also assuming one polarization and one beam.
# Assuming the blob will loop over the whole frequency range.
if self.f_start == self.f_begin and self.f_stop == self.f_end:
blob_flat_size = np.prod(blob_dim)
updated_blob_flat_size = np.prod(updated_blob_dim)
# Load binary data
with open(self.filename, 'rb') as f:
f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_flat_size)))
dd = np.fromfile(f, count=updated_blob_flat_size, dtype=self._d_type)
if dd.shape[0] == updated_blob_flat_size:
blob = dd.reshape(updated_blob_dim)
else:
logger.info('DD shape != blob shape.')
blob = dd.reshape((int(dd.shape[0]/blob_dim[self.freq_axis]),blob_dim[self.beam_axis],blob_dim[self.freq_axis]))
else:
for blobt in range(updated_blob_dim[self.time_axis]):
#Load binary data
with open(self.filename, 'rb') as f:
f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_dim[self.time_axis]*self.n_channels_in_file + blobt*self.n_channels_in_file)))
dd = np.fromfile(f, count=blob_dim[self.freq_axis], dtype=self._d_type)
blob[blobt] = dd
# if self.header[b'foff'] < 0:
# blob = blob[:,:,::-1]
return blob | Read blob from a selection. | Below is the the instruction that describes the task:
### Input:
Read blob from a selection.
### Response:
def read_blob(self,blob_dim,n_blob=0):
    """Read blob from a selection.

    Reads the ``n_blob``-th chunk of shape ``blob_dim`` straight from the
    binary file backing this selection, seeking to the byte offset
    computed from ``self.idx_data`` and ``self._n_bytes``.

    blob_dim is indexed by ``self.time_axis``, ``self.beam_axis`` and
    ``self.freq_axis`` (assumed — confirm against calc_n_blobs).
    Returns a numpy array of dtype ``self._d_type``; the final blob may
    be truncated along the time axis.
    """
    n_blobs = self.calc_n_blobs(blob_dim)
    # NOTE(review): boundary allows n_blob == n_blobs — possible off-by-one; confirm intent.
    if n_blob > n_blobs or n_blob < 0:
        raise ValueError('Please provide correct n_blob value. Given %i, but max values is %i'%(n_blob,n_blobs))
    # This prevents issues when the last blob is smaller than the others in time.
    if blob_dim[self.time_axis]*(n_blob+1) > self.selection_shape[self.time_axis]:
        updated_blob_dim = (int(self.selection_shape[self.time_axis] - blob_dim[self.time_axis]*n_blob), 1, int(blob_dim[self.freq_axis]))
    else:
        updated_blob_dim = [int(i) for i in blob_dim]
    blob_start = self._find_blob_start()
    blob = np.zeros(updated_blob_dim, dtype=self._d_type)
    # EE: For now; also assuming one polarization and one beam.
    # Fast path: selection covers the whole frequency range, so the blob
    # is contiguous on disk and can be read in a single np.fromfile call.
    if self.f_start == self.f_begin and self.f_stop == self.f_end:
        blob_flat_size = np.prod(blob_dim)
        updated_blob_flat_size = np.prod(updated_blob_dim)
        # Load binary data
        with open(self.filename, 'rb') as f:
            f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_flat_size)))
            dd = np.fromfile(f, count=updated_blob_flat_size, dtype=self._d_type)
        if dd.shape[0] == updated_blob_flat_size:
            blob = dd.reshape(updated_blob_dim)
        else:
            # Short read (fewer elements than requested): reshape to as many
            # whole time rows as were actually read.
            logger.info('DD shape != blob shape.')
            blob = dd.reshape((int(dd.shape[0]/blob_dim[self.freq_axis]),blob_dim[self.beam_axis],blob_dim[self.freq_axis]))
    else:
        # Partial frequency selection: rows are not contiguous on disk, so
        # seek and read one time sample at a time.
        for blobt in range(updated_blob_dim[self.time_axis]):
            #Load binary data
            with open(self.filename, 'rb') as f:
                f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_dim[self.time_axis]*self.n_channels_in_file + blobt*self.n_channels_in_file)))
                dd = np.fromfile(f, count=blob_dim[self.freq_axis], dtype=self._d_type)
            blob[blobt] = dd
    # if self.header[b'foff'] < 0:
    #     blob = blob[:,:,::-1]
    return blob
def load_state(cls, path:PathOrStr, state:dict) -> 'LabelList':
"Create a `LabelList` from `state`."
x = state['x_cls']([], path=path, processor=state['x_proc'], ignore_empty=True)
y = state['y_cls']([], path=path, processor=state['y_proc'], ignore_empty=True)
res = cls(x, y, tfms=state['tfms'], tfm_y=state['tfm_y'], **state['tfmargs']).process()
if state.get('tfms_y', False): res.tfms_y = state['tfms_y']
if state.get('tfmargs_y', False): res.tfmargs_y = state['tfmargs_y']
if state.get('normalize', False): res.normalize = state['normalize']
return res | Create a `LabelList` from `state`. | Below is the the instruction that describes the task:
### Input:
Create a `LabelList` from `state`.
### Response:
def load_state(cls, path:PathOrStr, state:dict) -> 'LabelList':
    "Create a `LabelList` from `state`."
    # Rebuild empty x/y item lists from the serialized classes and processors;
    # ignore_empty=True suppresses warnings about the empty item lists.
    x = state['x_cls']([], path=path, processor=state['x_proc'], ignore_empty=True)
    y = state['y_cls']([], path=path, processor=state['y_proc'], ignore_empty=True)
    res = cls(x, y, tfms=state['tfms'], tfm_y=state['tfm_y'], **state['tfmargs']).process()
    # Restore optional per-target transform settings only when they were saved.
    # NOTE(review): falsy saved values (e.g. empty list) are skipped here — confirm intended.
    if state.get('tfms_y', False): res.tfms_y = state['tfms_y']
    if state.get('tfmargs_y', False): res.tfmargs_y = state['tfmargs_y']
    if state.get('normalize', False): res.normalize = state['normalize']
    return res
def update_dns(config, record, sa_version):
"Update the DNS record"
try:
domain = config.get('domain_name', 'sa.baruwa.com.')
dns_key = config.get('domain_key')
dns_ip = config.get('domain_ip', '127.0.0.1')
keyring = tsigkeyring.from_text({domain: dns_key})
transaction = update.Update(
domain,
keyring=keyring,
keyalgorithm=tsig.HMAC_SHA512)
txtrecord = '%s.%s' % (sa_version, domain)
transaction.replace(txtrecord, 120, 'txt', record)
query.tcp(transaction, dns_ip)
return True
except DNSException, msg:
raise SaChannelUpdateDNSError(msg) | Update the DNS record | Below is the the instruction that describes the task:
### Input:
Update the DNS record
### Response:
def update_dns(config, record, sa_version):
    """Update the DNS TXT record advertising a SpamAssassin channel version.

    :param config: mapping with ``domain_name``, ``domain_key`` and
        ``domain_ip`` entries (defaults used when absent)
    :param record: TXT record payload to publish
    :param sa_version: version string used as the record's subdomain label
    :returns: True on success
    :raises SaChannelUpdateDNSError: if the DNS transaction fails
    """
    try:
        domain = config.get('domain_name', 'sa.baruwa.com.')
        dns_key = config.get('domain_key')
        dns_ip = config.get('domain_ip', '127.0.0.1')
        # TSIG-signed dynamic update, keyed on the zone name
        keyring = tsigkeyring.from_text({domain: dns_key})
        transaction = update.Update(
            domain,
            keyring=keyring,
            keyalgorithm=tsig.HMAC_SHA512)
        txtrecord = '%s.%s' % (sa_version, domain)
        # TTL 120s; replace (not add) so stale versions are overwritten
        transaction.replace(txtrecord, 120, 'txt', record)
        query.tcp(transaction, dns_ip)
        return True
    # "except E, name" is Python-2-only syntax; "as" works on 2.6+ and 3.x
    except DNSException as msg:
        raise SaChannelUpdateDNSError(msg)
def patch_text(actions, tree):
"""Takes a string with XML and a string with actions"""
tree = etree.fromstring(tree)
actions = patch.DiffParser().parse(actions)
tree = patch_tree(actions, tree)
return etree.tounicode(tree) | Takes a string with XML and a string with actions | Below is the the instruction that describes the task:
### Input:
Takes a string with XML and a string with actions
### Response:
def patch_text(actions, tree):
    """Apply a textual diff (`actions`) to an XML string and return the patched XML."""
    parsed_tree = etree.fromstring(tree)
    parsed_actions = patch.DiffParser().parse(actions)
    patched = patch_tree(parsed_actions, parsed_tree)
    return etree.tounicode(patched)
def check_base_required_attributes(self, dataset):
'''
Check the global required and highly recommended attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6, ACDD-1.3" ; //............................... REQUIRED - Always try to use latest value. (CF)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:ncei_template_version = "NCEI_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NCEI)
:title = "" ; //............................................... HIGHLY RECOMMENDED - Provide a useful title for the data in the file. (ACDD)
:summary = "" ; //............................................. HIGHLY RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)
:keywords = "" ; //............................................ HIGHLY RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)
:Conventions = "CF-1.6, ACDD-1.3" ; //......................... HIGHLY RECOMMENDED - A comma separated list of the conventions being followed. Always try to use latest version. (CF/ACDD)
'''
test_ctx = TestCtx(BaseCheck.HIGH, 'Required global attributes')
conventions = getattr(dataset, 'Conventions', '')
feature_type = getattr(dataset, 'featureType', '')
# Define conventions
accepted_conventions = ['CF-1.6', 'ACDD-1.3']
dataset_conventions = conventions.replace(' ', '').split(',')
for accepted_convention in accepted_conventions:
if accepted_convention not in dataset_conventions:
test_ctx.assert_true(False, 'Conventions attribute is missing or is not equal to "CF-1.6, ACDD-1.3": {}'.format(conventions))
break
else:
test_ctx.assert_true(True, '')
# Check feature types
test_ctx.assert_true(feature_type in ['point', 'timeSeries', 'trajectory', 'profile', 'timeSeriesProfile', 'trajectoryProfile'],
'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'.format(feature_type))
return test_ctx.to_result() | Check the global required and highly recommended attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6, ACDD-1.3" ; //............................... REQUIRED - Always try to use latest value. (CF)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:ncei_template_version = "NCEI_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NCEI)
:title = "" ; //............................................... HIGHLY RECOMMENDED - Provide a useful title for the data in the file. (ACDD)
:summary = "" ; //............................................. HIGHLY RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)
:keywords = "" ; //............................................ HIGHLY RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)
:Conventions = "CF-1.6, ACDD-1.3" ; //......................... HIGHLY RECOMMENDED - A comma separated list of the conventions being followed. Always try to use latest version. (CF/ACDD) | Below is the the instruction that describes the task:
### Input:
Check the global required and highly recommended attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6, ACDD-1.3" ; //............................... REQUIRED - Always try to use latest value. (CF)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:ncei_template_version = "NCEI_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NCEI)
:title = "" ; //............................................... HIGHLY RECOMMENDED - Provide a useful title for the data in the file. (ACDD)
:summary = "" ; //............................................. HIGHLY RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)
:keywords = "" ; //............................................ HIGHLY RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)
:Conventions = "CF-1.6, ACDD-1.3" ; //......................... HIGHLY RECOMMENDED - A comma separated list of the conventions being followed. Always try to use latest version. (CF/ACDD)
### Response:
def check_base_required_attributes(self, dataset):
    '''
    Check the global required and highly recommended attributes for 2.0 templates. These go an extra step besides
    just checking that they exist.
    :param netCDF4.Dataset dataset: An open netCDF dataset
    :Conventions = "CF-1.6, ACDD-1.3" ; //............................... REQUIRED - Always try to use latest value. (CF)
    :featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
    :cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
    :ncei_template_version = "NCEI_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NCEI)
    :title = "" ; //............................................... HIGHLY RECOMMENDED - Provide a useful title for the data in the file. (ACDD)
    :summary = "" ; //............................................. HIGHLY RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)
    :keywords = "" ; //............................................ HIGHLY RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)
    :Conventions = "CF-1.6, ACDD-1.3" ; //......................... HIGHLY RECOMMENDED - A comma separated list of the conventions being followed. Always try to use latest version. (CF/ACDD)
    '''
    test_ctx = TestCtx(BaseCheck.HIGH, 'Required global attributes')
    # Missing attributes degrade to empty strings so the checks below fail
    # with a message instead of raising AttributeError.
    conventions = getattr(dataset, 'Conventions', '')
    feature_type = getattr(dataset, 'featureType', '')
    # Every accepted convention must be present in the dataset's
    # comma-separated Conventions attribute (whitespace ignored).
    accepted_conventions = ['CF-1.6', 'ACDD-1.3']
    dataset_conventions = conventions.replace(' ', '').split(',')
    # for/else: the else branch records a pass only when no convention was missing
    for accepted_convention in accepted_conventions:
        if accepted_convention not in dataset_conventions:
            test_ctx.assert_true(False, 'Conventions attribute is missing or is not equal to "CF-1.6, ACDD-1.3": {}'.format(conventions))
            break
    else:
        test_ctx.assert_true(True, '')
    # featureType must be one of the discrete sampling geometries defined by CF
    test_ctx.assert_true(feature_type in ['point', 'timeSeries', 'trajectory', 'profile', 'timeSeriesProfile', 'trajectoryProfile'],
                         'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'.format(feature_type))
    return test_ctx.to_result()
def raw_data(tag_value):
"""convert the tag to a dictionary, taking values as is
This method name and purpose are opaque... and not true.
"""
data = {}
pieces = []
for p in tag_value.split(' '):
pieces.extend(p.split(';'))
# parse components
for piece in pieces:
kv = piece.split('=')
# components must by key=value
if not len(kv) == 2:
continue
key, value = kv
data[key] = value
return data | convert the tag to a dictionary, taking values as is
This method name and purpose are opaque... and not true. | Below is the the instruction that describes the task:
### Input:
convert the tag to a dictionary, taking values as is
This method name and purpose are opaque... and not true.
### Response:
def raw_data(tag_value):
    """convert the tag to a dictionary, taking values as is
    This method name and purpose are opaque... and not true.
    """
    # split the tag on both spaces and semicolons
    components = []
    for chunk in tag_value.split(' '):
        components.extend(chunk.split(';'))
    data = {}
    for component in components:
        parts = component.split('=')
        # only well-formed key=value components are kept; anything
        # with zero or multiple '=' is silently dropped
        if len(parts) != 2:
            continue
        key, value = parts
        data[key] = value
    return data
def register_user(self, data):
""" Parses input and register user """
error = False
msg = ""
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE) # domain
# Check input format
if re.match(r"^[-_|~0-9A-Z]{4,}$", data["username"], re.IGNORECASE) is None:
error = True
msg = _("Invalid username format.")
elif email_re.match(data["email"]) is None:
error = True
msg = _("Invalid email format.")
elif len(data["passwd"]) < 6:
error = True
msg = _("Password too short.")
elif data["passwd"] != data["passwd2"]:
error = True
msg = _("Passwords don't match !")
if not error:
existing_user = self.database.users.find_one({"$or": [{"username": data["username"]}, {"email": data["email"]}]})
if existing_user is not None:
error = True
if existing_user["username"] == data["username"]:
msg = _("This username is already taken !")
else:
msg = _("This email address is already in use !")
else:
passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
activate_hash = hashlib.sha512(str(random.getrandbits(256)).encode("utf-8")).hexdigest()
self.database.users.insert({"username": data["username"],
"realname": data["realname"],
"email": data["email"],
"password": passwd_hash,
"activate": activate_hash,
"bindings": {},
"language": self.user_manager._session.get("language", "en")})
try:
web.sendmail(web.config.smtp_sendername, data["email"], _("Welcome on INGInious"),
_("""Welcome on INGInious !
To activate your account, please click on the following link :
""")
+ web.ctx.home + "/register?activate=" + activate_hash)
msg = _("You are succesfully registered. An email has been sent to you for activation.")
except:
error = True
msg = _("Something went wrong while sending you activation email. Please contact the administrator.")
return msg, error | Parses input and register user | Below is the the instruction that describes the task:
### Input:
Parses input and register user
### Response:
def register_user(self, data):
    """ Parses input and register user.

    Validates the username/email/password fields in `data`, checks for
    duplicates, inserts the new user document and sends an activation
    email. Returns a (message, error) tuple where `error` is True when
    registration failed.
    """
    error = False
    msg = ""
    email_re = re.compile(
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"  # dot-atom
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"'  # quoted-string
        r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE)  # domain
    # Check input format
    if re.match(r"^[-_|~0-9A-Z]{4,}$", data["username"], re.IGNORECASE) is None:
        error = True
        msg = _("Invalid username format.")
    elif email_re.match(data["email"]) is None:
        error = True
        msg = _("Invalid email format.")
    elif len(data["passwd"]) < 6:
        error = True
        msg = _("Password too short.")
    elif data["passwd"] != data["passwd2"]:
        error = True
        msg = _("Passwords don't match !")
    if not error:
        # Reject usernames and emails that are already taken
        existing_user = self.database.users.find_one({"$or": [{"username": data["username"]}, {"email": data["email"]}]})
        if existing_user is not None:
            error = True
            if existing_user["username"] == data["username"]:
                msg = _("This username is already taken !")
            else:
                msg = _("This email address is already in use !")
        else:
            passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
            # Random token the user must present to activate the account
            activate_hash = hashlib.sha512(str(random.getrandbits(256)).encode("utf-8")).hexdigest()
            # NOTE(review): pymongo's Collection.insert is deprecated in
            # favor of insert_one — confirm driver version before changing.
            self.database.users.insert({"username": data["username"],
                                        "realname": data["realname"],
                                        "email": data["email"],
                                        "password": passwd_hash,
                                        "activate": activate_hash,
                                        "bindings": {},
                                        "language": self.user_manager._session.get("language", "en")})
            try:
                web.sendmail(web.config.smtp_sendername, data["email"], _("Welcome on INGInious"),
                             _("""Welcome on INGInious !
To activate your account, please click on the following link :
""")
                             + web.ctx.home + "/register?activate=" + activate_hash)
                msg = _("You are succesfully registered. An email has been sent to you for activation.")
            # was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt; narrow to Exception
            except Exception:
                error = True
                msg = _("Something went wrong while sending you activation email. Please contact the administrator.")
    return msg, error
def item_status(self, **kwargs):
"""
Check to see if a movie id is already added to a list.
Args:
movie_id: The id of the movie.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('item_status')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Check to see if a movie id is already added to a list.
Args:
movie_id: The id of the movie.
Returns:
A dict respresentation of the JSON returned from the API. | Below is the the instruction that describes the task:
### Input:
Check to see if a movie id is already added to a list.
Args:
movie_id: The id of the movie.
Returns:
A dict respresentation of the JSON returned from the API.
### Response:
def item_status(self, **kwargs):
    """
    Check whether a movie id has already been added to a list.

    Args:
        movie_id: The id of the movie.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_id_path('item_status')
    api_response = self._GET(path, kwargs)
    self._set_attrs_to_values(api_response)
    return api_response
def toHex(val):
"""Converts the given value (0-255) into its hexadecimal representation"""
hex = "0123456789abcdef"
return hex[int(val / 16)] + hex[int(val - int(val / 16) * 16)] | Converts the given value (0-255) into its hexadecimal representation | Below is the the instruction that describes the task:
### Input:
Converts the given value (0-255) into its hexadecimal representation
### Response:
def toHex(val):
    """Converts the given value (0-255) into its two-digit hexadecimal representation.

    int() truncates, which matches the original digit-by-digit arithmetic
    for float inputs; also avoids shadowing the builtin `hex`.
    """
    return "%02x" % int(val)
def validate(self, value):
"""
Returns a cleaned and validated value. Raises a ValidationError
if there's a problem
"""
if value is None:
if self.required:
raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field))
return value | Returns a cleaned and validated value. Raises a ValidationError
if there's a problem | Below is the the instruction that describes the task:
### Input:
Returns a cleaned and validated value. Raises a ValidationError
if there's a problem
### Response:
def validate(self, value):
    """
    Returns a cleaned and validated value. Raises a ValidationError
    if there's a problem
    """
    # Only None values can fail here; anything else passes through untouched.
    if value is None and self.required:
        raise ValidationError('{} - None values are not allowed'.format(self.column_name or self.db_field))
    return value
def evaluate(contents, jsonnet_library_paths=None):
'''
Evaluate a jsonnet input string.
contents
Raw jsonnet string to evaluate.
jsonnet_library_paths
List of jsonnet library paths.
'''
if not jsonnet_library_paths:
jsonnet_library_paths = __salt__['config.option'](
'jsonnet.library_paths', ['.'])
return salt.utils.json.loads(
_jsonnet.evaluate_snippet(
"snippet",
contents,
import_callback=partial(
_import_callback, library_paths=jsonnet_library_paths))) | Evaluate a jsonnet input string.
contents
Raw jsonnet string to evaluate.
jsonnet_library_paths
List of jsonnet library paths. | Below is the the instruction that describes the task:
### Input:
Evaluate a jsonnet input string.
contents
Raw jsonnet string to evaluate.
jsonnet_library_paths
List of jsonnet library paths.
### Response:
def evaluate(contents, jsonnet_library_paths=None):
    '''
    Evaluate a jsonnet input string.
    contents
        Raw jsonnet string to evaluate.
    jsonnet_library_paths
        List of jsonnet library paths.

    Returns the evaluated jsonnet deserialized into Python objects
    (the jsonnet output is JSON, parsed with salt's json utilities).
    '''
    # Fall back to the salt-configured library paths (default: cwd)
    if not jsonnet_library_paths:
        jsonnet_library_paths = __salt__['config.option'](
            'jsonnet.library_paths', ['.'])
    # _import_callback resolves jsonnet `import` statements against the
    # configured library paths
    return salt.utils.json.loads(
        _jsonnet.evaluate_snippet(
            "snippet",
            contents,
            import_callback=partial(
                _import_callback, library_paths=jsonnet_library_paths)))
def _add_to_ngcorpus(self, corpus, words, count):
"""Build up a corpus entry recursively.
Parameters
----------
corpus : Corpus
The corpus
words : [str]
Words to add to the corpus
count : int
Count of words
"""
if words[0] not in corpus:
corpus[words[0]] = Counter()
if len(words) == 1:
corpus[words[0]][None] += count
else:
self._add_to_ngcorpus(corpus[words[0]], words[1:], count) | Build up a corpus entry recursively.
Parameters
----------
corpus : Corpus
The corpus
words : [str]
Words to add to the corpus
count : int
Count of words | Below is the the instruction that describes the task:
### Input:
Build up a corpus entry recursively.
Parameters
----------
corpus : Corpus
The corpus
words : [str]
Words to add to the corpus
count : int
Count of words
### Response:
def _add_to_ngcorpus(self, corpus, words, count):
"""Build up a corpus entry recursively.
Parameters
----------
corpus : Corpus
The corpus
words : [str]
Words to add to the corpus
count : int
Count of words
"""
if words[0] not in corpus:
corpus[words[0]] = Counter()
if len(words) == 1:
corpus[words[0]][None] += count
else:
self._add_to_ngcorpus(corpus[words[0]], words[1:], count) |
def make_mutant_features(original_feature, index_to_mutate, viz_params):
"""Return a list of `MutantFeatureValue`s that are variants of original."""
lower = viz_params.x_min
upper = viz_params.x_max
examples = viz_params.examples
num_mutants = viz_params.num_mutants
if original_feature.feature_type == 'float_list':
return [
MutantFeatureValue(original_feature, index_to_mutate, value)
for value in np.linspace(lower, upper, num_mutants)
]
elif original_feature.feature_type == 'int64_list':
mutant_values = np.linspace(int(lower), int(upper),
num_mutants).astype(int).tolist()
# Remove duplicates that can occur due to integer constraint.
mutant_values = sorted(set(mutant_values))
return [
MutantFeatureValue(original_feature, index_to_mutate, value)
for value in mutant_values
]
elif original_feature.feature_type == 'bytes_list':
feature_to_samples = get_categorical_features_to_sampling(
examples, num_mutants)
# `mutant_values` looks like:
# [['Married-civ-spouse'], ['Never-married'], ['Divorced'], ['Separated']]
mutant_values = feature_to_samples[original_feature.feature_name]['samples']
return [
MutantFeatureValue(original_feature, None, value)
for value in mutant_values
]
else:
raise ValueError('Malformed original feature had type of: ' +
original_feature.feature_type) | Return a list of `MutantFeatureValue`s that are variants of original. | Below is the the instruction that describes the task:
### Input:
Return a list of `MutantFeatureValue`s that are variants of original.
### Response:
def make_mutant_features(original_feature, index_to_mutate, viz_params):
  """Return a list of `MutantFeatureValue`s that are variants of original."""
  lower = viz_params.x_min
  upper = viz_params.x_max
  examples = viz_params.examples
  num_mutants = viz_params.num_mutants
  # Numeric features: sweep evenly spaced values across [x_min, x_max]
  if original_feature.feature_type == 'float_list':
    return [
        MutantFeatureValue(original_feature, index_to_mutate, value)
        for value in np.linspace(lower, upper, num_mutants)
    ]
  elif original_feature.feature_type == 'int64_list':
    mutant_values = np.linspace(int(lower), int(upper),
                                num_mutants).astype(int).tolist()
    # Remove duplicates that can occur due to integer constraint.
    mutant_values = sorted(set(mutant_values))
    return [
        MutantFeatureValue(original_feature, index_to_mutate, value)
        for value in mutant_values
    ]
  # Categorical features: sample observed values from the examples instead
  # of interpolating; note index_to_mutate is not used in this branch.
  elif original_feature.feature_type == 'bytes_list':
    feature_to_samples = get_categorical_features_to_sampling(
        examples, num_mutants)
    # `mutant_values` looks like:
    # [['Married-civ-spouse'], ['Never-married'], ['Divorced'], ['Separated']]
    mutant_values = feature_to_samples[original_feature.feature_name]['samples']
    return [
        MutantFeatureValue(original_feature, None, value)
        for value in mutant_values
    ]
  else:
    raise ValueError('Malformed original feature had type of: ' +
                     original_feature.feature_type)
def addAnalyses(self, analyses):
"""Adds a collection of analyses to the Worksheet at once
"""
actions_pool = ActionHandlerPool.get_instance()
actions_pool.queue_pool()
for analysis in analyses:
self.addAnalysis(api.get_object(analysis))
actions_pool.resume() | Adds a collection of analyses to the Worksheet at once | Below is the the instruction that describes the task:
### Input:
Adds a collection of analyses to the Worksheet at once
### Response:
def addAnalyses(self, analyses):
    """Adds a collection of analyses to the Worksheet at once.

    Queues the action-handler pool so reindexing happens once for the
    whole batch rather than per analysis.
    """
    pool = ActionHandlerPool.get_instance()
    pool.queue_pool()
    for brain_or_object in analyses:
        self.addAnalysis(api.get_object(brain_or_object))
    pool.resume()
def _get_updated_environment(self, env_dict=None):
"""Returns globals environment with 'magic' variable
Parameters
----------
env_dict: Dict, defaults to {'S': self}
\tDict that maps global variable name to value
"""
if env_dict is None:
env_dict = {'S': self}
env = globals().copy()
env.update(env_dict)
return env | Returns globals environment with 'magic' variable
Parameters
----------
env_dict: Dict, defaults to {'S': self}
\tDict that maps global variable name to value | Below is the the instruction that describes the task:
### Input:
Returns globals environment with 'magic' variable
Parameters
----------
env_dict: Dict, defaults to {'S': self}
\tDict that maps global variable name to value
### Response:
def _get_updated_environment(self, env_dict=None):
"""Returns globals environment with 'magic' variable
Parameters
----------
env_dict: Dict, defaults to {'S': self}
\tDict that maps global variable name to value
"""
if env_dict is None:
env_dict = {'S': self}
env = globals().copy()
env.update(env_dict)
return env |
def evaluate_rmse(self, dataset, target):
"""
Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse
"""
assert target in dataset.column_names(), \
'Provided dataset must contain a target column with the same \
name as the target used during training.'
y = dataset[target]
yhat = self.predict(dataset)
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
result = dataset[[user_column, item_column]]
result['sq_error'] = (y - yhat) * (y - yhat)
rmse_by_user = result.groupby(user_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_user['rmse'] = rmse_by_user['rmse'].apply(lambda x: x**.5)
rmse_by_item = result.groupby(item_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_item['rmse'] = rmse_by_item['rmse'].apply(lambda x: x**.5)
overall_rmse = result['sq_error'].mean() ** .5
return {'rmse_by_user': rmse_by_user,
'rmse_by_item': rmse_by_item,
'rmse_overall': overall_rmse} | Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse | Below is the the instruction that describes the task:
### Input:
Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse
### Response:
def evaluate_rmse(self, dataset, target):
"""
Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse
"""
assert target in dataset.column_names(), \
'Provided dataset must contain a target column with the same \
name as the target used during training.'
y = dataset[target]
yhat = self.predict(dataset)
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
result = dataset[[user_column, item_column]]
result['sq_error'] = (y - yhat) * (y - yhat)
rmse_by_user = result.groupby(user_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_user['rmse'] = rmse_by_user['rmse'].apply(lambda x: x**.5)
rmse_by_item = result.groupby(item_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_item['rmse'] = rmse_by_item['rmse'].apply(lambda x: x**.5)
overall_rmse = result['sq_error'].mean() ** .5
return {'rmse_by_user': rmse_by_user,
'rmse_by_item': rmse_by_item,
'rmse_overall': overall_rmse} |
def main():
"""pyprf_opt_brute entry point."""
# Get list of input arguments (without first one, which is the path to the
# function that is called): --NOTE: This is another way of accessing
# input arguments, but since we use 'argparse' it is redundant.
# lstArgs = sys.argv[1:]
strWelcome = 'pyprf_opt_brute ' + __version__
strDec = '=' * len(strWelcome)
print(strDec + '\n' + strWelcome + '\n' + strDec)
objNspc = get_arg_parse()
# Print info if no config argument is provided.
if any(item is None for item in [objNspc.config, objNspc.strPthPrior,
objNspc.varNumOpt1, objNspc.varNumOpt2]):
print('Please provide the necessary input arguments, i.e.:')
print('-strCsvCnfg -strPthPrior -varNumOpt1 and -varNumOpt2')
else:
# Signal non-test mode to lower functions (needed for pytest):
lgcTest = False
# Perform pRF fitting without suppressive surround
if objNspc.supsur is None:
print('***Mode: Fit pRF models, no suppressive surround***')
# Call to main function, to invoke pRF analysis:
pyprf_opt_brute(objNspc.config, objNspc, lgcTest=lgcTest,
strPathHrf=objNspc.strPathHrf, varRat=None)
# Perform pRF fitting with suppressive surround
else:
print('***Mode: Fit pRF models, suppressive surround***')
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(objNspc.config, lgcTest=lgcTest,
lgcPrint=False)
# Load config parameters from dictionary into namespace.
# We do this on every loop so we have a fresh start in case
# variables are redefined during the prf analysis
cfg = cls_set_config(dicCnfg)
# Make sure that lgcCrteMdl is set to True since we will need
# to loop iteratively over pyprf_feature with different ratios
# for size surround to size center. On every loop models,
# reflecting the new ratio, need to be created from scratch
errorMsg = 'lgcCrteMdl needs to be set to True for -supsur.'
assert cfg.lgcCrteMdl, errorMsg
# Make sure that switchHrf is set to 1. It would not make sense
# to find the negative surround for the hrf deriavtive function
errorMsg = 'switchHrfSet needs to be set to 1 for -supsur.'
assert cfg.switchHrfSet == 1, errorMsg
# Get list with size ratios
lstRat = objNspc.supsur
# Make sure that all ratios are larger than 1.0
errorMsg = 'All provided ratios need to be larger than 1.0'
assert np.all(np.greater(np.array(lstRat), 1.0)), errorMsg
# Append None as the first entry, so fitting without surround
# is performed once as well
lstRat.insert(0, None)
# Loop over ratios and find best pRF
for varRat in lstRat:
# Print to command line, so the user knows which exponent
# is used
print('---Ratio surround to center: ' + str(varRat))
# Call to main function, to invoke pRF analysis:
pyprf_opt_brute(objNspc.config, objNspc, lgcTest=lgcTest,
strPathHrf=objNspc.strPathHrf, varRat=varRat)
# List with name suffices of output images:
lstNiiNames = ['_x_pos',
'_y_pos',
'_SD',
'_R2',
'_polar_angle',
'_eccentricity',
'_Betas']
# Compare results for the different ratios, export nii files
# based on the results of the comparison and delete in-between
# results
# Replace first entry (None) with 1, so it can be saved to nii
lstRat[0] = 1.0
# Append 'hrf' to cfg.strPathOut, if fitting was done with
# custom hrf
if objNspc.strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
cmp_res_R2(lstRat, lstNiiNames, cfg.strPathOut, cfg.strPathMdl,
lgcDel=True, lgcSveMdlTc=False, strNmeExt='_brute') | pyprf_opt_brute entry point. | Below is the the instruction that describes the task:
### Input:
pyprf_opt_brute entry point.
### Response:
def main():
"""pyprf_opt_brute entry point."""
# Get list of input arguments (without first one, which is the path to the
# function that is called): --NOTE: This is another way of accessing
# input arguments, but since we use 'argparse' it is redundant.
# lstArgs = sys.argv[1:]
strWelcome = 'pyprf_opt_brute ' + __version__
strDec = '=' * len(strWelcome)
print(strDec + '\n' + strWelcome + '\n' + strDec)
objNspc = get_arg_parse()
# Print info if no config argument is provided.
if any(item is None for item in [objNspc.config, objNspc.strPthPrior,
objNspc.varNumOpt1, objNspc.varNumOpt2]):
print('Please provide the necessary input arguments, i.e.:')
print('-strCsvCnfg -strPthPrior -varNumOpt1 and -varNumOpt2')
else:
# Signal non-test mode to lower functions (needed for pytest):
lgcTest = False
# Perform pRF fitting without suppressive surround
if objNspc.supsur is None:
print('***Mode: Fit pRF models, no suppressive surround***')
# Call to main function, to invoke pRF analysis:
pyprf_opt_brute(objNspc.config, objNspc, lgcTest=lgcTest,
strPathHrf=objNspc.strPathHrf, varRat=None)
# Perform pRF fitting with suppressive surround
else:
print('***Mode: Fit pRF models, suppressive surround***')
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(objNspc.config, lgcTest=lgcTest,
lgcPrint=False)
# Load config parameters from dictionary into namespace.
# We do this on every loop so we have a fresh start in case
# variables are redefined during the prf analysis
cfg = cls_set_config(dicCnfg)
# Make sure that lgcCrteMdl is set to True since we will need
# to loop iteratively over pyprf_feature with different ratios
# for size surround to size center. On every loop models,
# reflecting the new ratio, need to be created from scratch
errorMsg = 'lgcCrteMdl needs to be set to True for -supsur.'
assert cfg.lgcCrteMdl, errorMsg
# Make sure that switchHrf is set to 1. It would not make sense
# to find the negative surround for the hrf deriavtive function
errorMsg = 'switchHrfSet needs to be set to 1 for -supsur.'
assert cfg.switchHrfSet == 1, errorMsg
# Get list with size ratios
lstRat = objNspc.supsur
# Make sure that all ratios are larger than 1.0
errorMsg = 'All provided ratios need to be larger than 1.0'
assert np.all(np.greater(np.array(lstRat), 1.0)), errorMsg
# Append None as the first entry, so fitting without surround
# is performed once as well
lstRat.insert(0, None)
# Loop over ratios and find best pRF
for varRat in lstRat:
# Print to command line, so the user knows which exponent
# is used
print('---Ratio surround to center: ' + str(varRat))
# Call to main function, to invoke pRF analysis:
pyprf_opt_brute(objNspc.config, objNspc, lgcTest=lgcTest,
strPathHrf=objNspc.strPathHrf, varRat=varRat)
# List with name suffices of output images:
lstNiiNames = ['_x_pos',
'_y_pos',
'_SD',
'_R2',
'_polar_angle',
'_eccentricity',
'_Betas']
# Compare results for the different ratios, export nii files
# based on the results of the comparison and delete in-between
# results
# Replace first entry (None) with 1, so it can be saved to nii
lstRat[0] = 1.0
# Append 'hrf' to cfg.strPathOut, if fitting was done with
# custom hrf
if objNspc.strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
cmp_res_R2(lstRat, lstNiiNames, cfg.strPathOut, cfg.strPathMdl,
lgcDel=True, lgcSveMdlTc=False, strNmeExt='_brute') |
def _dataset_qa(self, dataset):
"""Chequea si el dataset tiene una calidad mínima para cosechar."""
# 1. VALIDACIONES
# chequea que haya por lo menos algún formato de datos reconocido
has_data_format = helpers.dataset_has_data_distributions(dataset)
# chequea que algunos campos tengan longitudes mínimas
has_title = "title" in dataset
has_description = "description" in dataset
if has_title:
has_min_title = len(dataset["title"]) >= MIN_DATASET_TITLE
else:
has_min_title = False
if has_description:
has_min_desc = len(
dataset["description"]) >= MIN_DATASET_DESCRIPTION
else:
has_min_desc = False
# 2. EVALUACION DE COSECHA: evalua si se cosecha o no el dataset
harvest = (has_title and has_description and
has_data_format and has_min_title and has_min_desc)
# 3. NOTAS: genera notas de validación
notes = []
if not has_data_format:
notes.append("No tiene distribuciones con datos.")
if not has_title:
notes.append("Dataset sin titulo {}".format(dataset))
else:
if not has_min_title:
notes.append("Titulo tiene menos de {} caracteres".format(
MIN_DATASET_TITLE))
if not has_description:
notes.append("Dataset sin descripcion {}".format(dataset))
else:
if not has_min_desc:
notes.append("Descripcion tiene menos de {} caracteres".format(
MIN_DATASET_DESCRIPTION))
return harvest, notes | Chequea si el dataset tiene una calidad mínima para cosechar. | Below is the the instruction that describes the task:
### Input:
Chequea si el dataset tiene una calidad mínima para cosechar.
### Response:
def _dataset_qa(self, dataset):
"""Chequea si el dataset tiene una calidad mínima para cosechar."""
# 1. VALIDACIONES
# chequea que haya por lo menos algún formato de datos reconocido
has_data_format = helpers.dataset_has_data_distributions(dataset)
# chequea que algunos campos tengan longitudes mínimas
has_title = "title" in dataset
has_description = "description" in dataset
if has_title:
has_min_title = len(dataset["title"]) >= MIN_DATASET_TITLE
else:
has_min_title = False
if has_description:
has_min_desc = len(
dataset["description"]) >= MIN_DATASET_DESCRIPTION
else:
has_min_desc = False
# 2. EVALUACION DE COSECHA: evalua si se cosecha o no el dataset
harvest = (has_title and has_description and
has_data_format and has_min_title and has_min_desc)
# 3. NOTAS: genera notas de validación
notes = []
if not has_data_format:
notes.append("No tiene distribuciones con datos.")
if not has_title:
notes.append("Dataset sin titulo {}".format(dataset))
else:
if not has_min_title:
notes.append("Titulo tiene menos de {} caracteres".format(
MIN_DATASET_TITLE))
if not has_description:
notes.append("Dataset sin descripcion {}".format(dataset))
else:
if not has_min_desc:
notes.append("Descripcion tiene menos de {} caracteres".format(
MIN_DATASET_DESCRIPTION))
return harvest, notes |
def load_config(filename):
"""Load the event definitions from yaml config file."""
logger.debug("Event Definitions configuration file: %s", filename)
with open(filename, 'r') as cf:
config = cf.read()
try:
events_config = yaml.safe_load(config)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = ("Invalid YAML syntax in Event Definitions file "
"%(file)s at line: %(line)s, column: %(column)s."
% dict(file=filename,
line=mark.line + 1,
column=mark.column + 1))
else:
errmsg = ("YAML error reading Event Definitions file "
"%(file)s"
% dict(file=filename))
logger.error(errmsg)
raise
logger.info("Event Definitions: %s", events_config)
return events_config | Load the event definitions from yaml config file. | Below is the the instruction that describes the task:
### Input:
Load the event definitions from yaml config file.
### Response:
def load_config(filename):
"""Load the event definitions from yaml config file."""
logger.debug("Event Definitions configuration file: %s", filename)
with open(filename, 'r') as cf:
config = cf.read()
try:
events_config = yaml.safe_load(config)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = ("Invalid YAML syntax in Event Definitions file "
"%(file)s at line: %(line)s, column: %(column)s."
% dict(file=filename,
line=mark.line + 1,
column=mark.column + 1))
else:
errmsg = ("YAML error reading Event Definitions file "
"%(file)s"
% dict(file=filename))
logger.error(errmsg)
raise
logger.info("Event Definitions: %s", events_config)
return events_config |
def clear_all_breakpoints(self):
"""Clear breakpoints in all files"""
self.switch_to_plugin()
clear_all_breakpoints()
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
for data in editorstack.data:
data.editor.debugger.clear_breakpoints()
self.refresh_plugin() | Clear breakpoints in all files | Below is the the instruction that describes the task:
### Input:
Clear breakpoints in all files
### Response:
def clear_all_breakpoints(self):
"""Clear breakpoints in all files"""
self.switch_to_plugin()
clear_all_breakpoints()
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
for data in editorstack.data:
data.editor.debugger.clear_breakpoints()
self.refresh_plugin() |
def _list_audio_files(self, sub_dir=""):
"""
Parameters
----------
sub_dir : one of `needed_directories`, optional
Default is "", which means it'll look through all of subdirs.
Returns
-------
audio_files : [str]
A list whose elements are basenames of the present audiofiles whose
formats are `wav`
"""
audio_files = list()
for possibly_audio_file in os.listdir("{}/{}".format(self.src_dir,
sub_dir)):
file_format = ''.join(possibly_audio_file.split('.')[-1])
if file_format.lower() == "wav":
audio_files.append(possibly_audio_file)
return audio_files | Parameters
----------
sub_dir : one of `needed_directories`, optional
Default is "", which means it'll look through all of subdirs.
Returns
-------
audio_files : [str]
A list whose elements are basenames of the present audiofiles whose
formats are `wav` | Below is the the instruction that describes the task:
### Input:
Parameters
----------
sub_dir : one of `needed_directories`, optional
Default is "", which means it'll look through all of subdirs.
Returns
-------
audio_files : [str]
A list whose elements are basenames of the present audiofiles whose
formats are `wav`
### Response:
def _list_audio_files(self, sub_dir=""):
"""
Parameters
----------
sub_dir : one of `needed_directories`, optional
Default is "", which means it'll look through all of subdirs.
Returns
-------
audio_files : [str]
A list whose elements are basenames of the present audiofiles whose
formats are `wav`
"""
audio_files = list()
for possibly_audio_file in os.listdir("{}/{}".format(self.src_dir,
sub_dir)):
file_format = ''.join(possibly_audio_file.split('.')[-1])
if file_format.lower() == "wav":
audio_files.append(possibly_audio_file)
return audio_files |
def add_from_vla_obs (src, Lband, Cband):
"""Add an entry into the models table for a source based on L-band and
C-band flux densities.
"""
if src in models:
raise PKError ('already have a model for ' + src)
fL = np.log10 (1425)
fC = np.log10 (4860)
lL = np.log10 (Lband)
lC = np.log10 (Cband)
A = (lL - lC) / (fL - fC)
B = lL - A * fL
def fluxdens (freq_mhz):
return 10. ** (A * np.log10 (freq_mhz) + B)
def spindex (freq_mhz):
return A
models[src] = fluxdens
spindexes[src] = spindex | Add an entry into the models table for a source based on L-band and
C-band flux densities. | Below is the the instruction that describes the task:
### Input:
Add an entry into the models table for a source based on L-band and
C-band flux densities.
### Response:
def add_from_vla_obs (src, Lband, Cband):
"""Add an entry into the models table for a source based on L-band and
C-band flux densities.
"""
if src in models:
raise PKError ('already have a model for ' + src)
fL = np.log10 (1425)
fC = np.log10 (4860)
lL = np.log10 (Lband)
lC = np.log10 (Cband)
A = (lL - lC) / (fL - fC)
B = lL - A * fL
def fluxdens (freq_mhz):
return 10. ** (A * np.log10 (freq_mhz) + B)
def spindex (freq_mhz):
return A
models[src] = fluxdens
spindexes[src] = spindex |
def _run_flup(app, config, mode):
"""Run WsgiDAV using flup.server.fcgi if Flup is installed."""
# http://trac.saddi.com/flup/wiki/FlupServers
if mode == "flup-fcgi":
from flup.server.fcgi import WSGIServer, __version__ as flupver
elif mode == "flup-fcgi-fork":
from flup.server.fcgi_fork import WSGIServer, __version__ as flupver
else:
raise ValueError
_logger.info(
"Running WsgiDAV/{} {}/{}...".format(
__version__, WSGIServer.__module__, flupver
)
)
server = WSGIServer(
app,
bindAddress=(config["host"], config["port"]),
# debug=True,
)
try:
server.run()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
return | Run WsgiDAV using flup.server.fcgi if Flup is installed. | Below is the the instruction that describes the task:
### Input:
Run WsgiDAV using flup.server.fcgi if Flup is installed.
### Response:
def _run_flup(app, config, mode):
"""Run WsgiDAV using flup.server.fcgi if Flup is installed."""
# http://trac.saddi.com/flup/wiki/FlupServers
if mode == "flup-fcgi":
from flup.server.fcgi import WSGIServer, __version__ as flupver
elif mode == "flup-fcgi-fork":
from flup.server.fcgi_fork import WSGIServer, __version__ as flupver
else:
raise ValueError
_logger.info(
"Running WsgiDAV/{} {}/{}...".format(
__version__, WSGIServer.__module__, flupver
)
)
server = WSGIServer(
app,
bindAddress=(config["host"], config["port"]),
# debug=True,
)
try:
server.run()
except KeyboardInterrupt:
_logger.warning("Caught Ctrl-C, shutting down...")
return |
def getLog(self, remove=True):
""" Retrieve buffered log data
If remove is true the data will be removed from the buffer.
Otherwise it will be left in the buffer
"""
res = self.logs
if remove:
self.logs = []
return res | Retrieve buffered log data
If remove is true the data will be removed from the buffer.
Otherwise it will be left in the buffer | Below is the the instruction that describes the task:
### Input:
Retrieve buffered log data
If remove is true the data will be removed from the buffer.
Otherwise it will be left in the buffer
### Response:
def getLog(self, remove=True):
""" Retrieve buffered log data
If remove is true the data will be removed from the buffer.
Otherwise it will be left in the buffer
"""
res = self.logs
if remove:
self.logs = []
return res |
def som_get_winner_number(som_pointer):
"""!
@brief Returns of number of winner at the last step of learning process.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_get_winner_number.restype = c_size_t
return ccore.som_get_winner_number(som_pointer) | !
@brief Returns of number of winner at the last step of learning process.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map. | Below is the the instruction that describes the task:
### Input:
!
@brief Returns of number of winner at the last step of learning process.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
### Response:
def som_get_winner_number(som_pointer):
"""!
@brief Returns of number of winner at the last step of learning process.
@param[in] som_pointer (c_pointer): pointer to object of self-organized map.
"""
ccore = ccore_library.get()
ccore.som_get_winner_number.restype = c_size_t
return ccore.som_get_winner_number(som_pointer) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.