code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def conn_has_method(conn, method_name):
'''
Find if the provided connection object has a specific method
'''
if method_name in dir(conn):
return True
log.error('Method \'%s\' not yet supported!', method_name)
return False | Find if the provided connection object has a specific method | Below is the the instruction that describes the task:
### Input:
Find if the provided connection object has a specific method
### Response:
def conn_has_method(conn, method_name: str) -> bool:
    '''
    Find if the provided connection object has a specific method

    :param conn: connection object to inspect (any object may be passed)
    :param method_name: name of the method/attribute to look for
    :return: True if ``method_name`` is an attribute of ``conn``, else False
    '''
    # NOTE(review): dir() lists *all* attributes, not only callables, so a
    # plain data attribute with this name would also match.
    if method_name in dir(conn):
        return True
    # ``log`` is expected to be a module-level logger defined elsewhere
    # in this file.
    log.error('Method \'%s\' not yet supported!', method_name)
    return False |
def read_line(self, timeout=0.0, purge_buffer=False):
"""
Reads a line from the device.
:param timeout: read timeout
:type timeout: float
:param purge_buffer: Indicates whether to purge the buffer prior to
reading.
:type purge_buffer: bool
:returns: line that was read
:raises: :py:class:`~alarmdecoder.util.CommError`, :py:class:`~alarmdecoder.util.TimeoutError`
"""
def timeout_event():
"""Handles read timeout event"""
timeout_event.reading = False
timeout_event.reading = True
if purge_buffer:
self._buffer = b''
got_line, ret = False, None
timer = threading.Timer(timeout, timeout_event)
if timeout > 0:
timer.start()
try:
while timeout_event.reading:
buf = self._device.read_data(1)
if buf != b'':
ub = bytes_hack(buf)
self._buffer += ub
if ub == b"\n":
self._buffer = self._buffer.rstrip(b"\r\n")
if len(self._buffer) > 0:
got_line = True
break
else:
time.sleep(0.01)
except (usb.core.USBError, FtdiError) as err:
raise CommError('Error reading from device: {0}'.format(str(err)), err)
else:
if got_line:
ret, self._buffer = self._buffer, b''
self.on_read(data=ret)
else:
raise TimeoutError('Timeout while waiting for line terminator.')
finally:
timer.cancel()
return ret | Reads a line from the device.
:param timeout: read timeout
:type timeout: float
:param purge_buffer: Indicates whether to purge the buffer prior to
reading.
:type purge_buffer: bool
:returns: line that was read
:raises: :py:class:`~alarmdecoder.util.CommError`, :py:class:`~alarmdecoder.util.TimeoutError` | Below is the the instruction that describes the task:
### Input:
Reads a line from the device.
:param timeout: read timeout
:type timeout: float
:param purge_buffer: Indicates whether to purge the buffer prior to
reading.
:type purge_buffer: bool
:returns: line that was read
:raises: :py:class:`~alarmdecoder.util.CommError`, :py:class:`~alarmdecoder.util.TimeoutError`
### Response:
def read_line(self, timeout=0.0, purge_buffer=False):
        """
        Reads a line from the device.

        :param timeout: read timeout in seconds; a value <= 0 disables the
                        timeout timer entirely.
        :type timeout: float
        :param purge_buffer: Indicates whether to purge the internal buffer
                             prior to reading.
        :type purge_buffer: bool
        :returns: line that was read, with the trailing CR/LF stripped
        :raises: :py:class:`~alarmdecoder.util.CommError`, :py:class:`~alarmdecoder.util.TimeoutError`
        """
        def timeout_event():
            """Handles read timeout event"""
            # Flag stored on the function object; shared with the loop below.
            # The Timer thread flips it to stop the read loop.
            timeout_event.reading = False
        timeout_event.reading = True
        if purge_buffer:
            self._buffer = b''
        got_line, ret = False, None
        timer = threading.Timer(timeout, timeout_event)
        # A non-positive timeout means "wait forever": the timer never starts.
        if timeout > 0:
            timer.start()
        try:
            while timeout_event.reading:
                # Read one byte at a time so we can stop exactly at '\n'.
                buf = self._device.read_data(1)
                if buf != b'':
                    ub = bytes_hack(buf)
                    self._buffer += ub
                    if ub == b"\n":
                        self._buffer = self._buffer.rstrip(b"\r\n")
                        # Ignore empty lines (bare CR/LF) and keep reading.
                        if len(self._buffer) > 0:
                            got_line = True
                            break
                else:
                    # Nothing available; back off briefly to avoid busy-spinning.
                    time.sleep(0.01)
        except (usb.core.USBError, FtdiError) as err:
            raise CommError('Error reading from device: {0}'.format(str(err)), err)
        else:
            if got_line:
                # Hand the completed line to the read callback and reset the buffer.
                ret, self._buffer = self._buffer, b''
                self.on_read(data=ret)
            else:
                raise TimeoutError('Timeout while waiting for line terminator.')
        finally:
            # Always stop the timer, even on error paths.
            timer.cancel()
        return ret |
def get_cod_ids(self, formula):
"""
Queries the COD for all cod ids associated with a formula. Requires
mysql executable to be in the path.
Args:
formula (str): Formula.
Returns:
List of cod ids.
"""
# TODO: Remove dependency on external mysql call. MySQL-python package does not support Py3!
# Standardize formula to the version used by COD.
sql = 'select file from data where formula="- %s -"' % \
Composition(formula).hill_formula
text = self.query(sql).split("\n")
cod_ids = []
for l in text:
m = re.search(r"(\d+)", l)
if m:
cod_ids.append(int(m.group(1)))
return cod_ids | Queries the COD for all cod ids associated with a formula. Requires
mysql executable to be in the path.
Args:
formula (str): Formula.
Returns:
List of cod ids. | Below is the the instruction that describes the task:
### Input:
Queries the COD for all cod ids associated with a formula. Requires
mysql executable to be in the path.
Args:
formula (str): Formula.
Returns:
List of cod ids.
### Response:
def get_cod_ids(self, formula):
        """
        Queries the COD for all cod ids associated with a formula. Requires
        mysql executable to be in the path.

        Args:
            formula (str): Formula.

        Returns:
            list[int]: List of cod ids.
        """
        # TODO: Remove dependency on external mysql call. MySQL-python package does not support Py3!
        # Standardize formula to the version used by COD.
        # NOTE(review): formula is interpolated into the SQL text; it passes
        # through Composition() first, which constrains it to a valid chemical
        # formula, but this is still not a parameterized query.
        sql = 'select file from data where formula="- %s -"' % \
              Composition(formula).hill_formula
        text = self.query(sql).split("\n")
        cod_ids = []
        for l in text:
            # Each matching row contains a numeric COD file id; non-matching
            # lines (headers, blanks) are skipped.
            m = re.search(r"(\d+)", l)
            if m:
                cod_ids.append(int(m.group(1)))
        return cod_ids |
def bgrewriteaof(host=None, port=None, db=None, password=None):
'''
Asynchronously rewrite the append-only file
CLI Example:
.. code-block:: bash
salt '*' redis.bgrewriteaof
'''
server = _connect(host, port, db, password)
return server.bgrewriteaof() | Asynchronously rewrite the append-only file
CLI Example:
.. code-block:: bash
salt '*' redis.bgrewriteaof | Below is the instruction that describes the task:
### Input:
Asynchronously rewrite the append-only file
CLI Example:
.. code-block:: bash
salt '*' redis.bgrewriteaof
### Response:
def bgrewriteaof(host=None, port=None, db=None, password=None):
    '''
    Asynchronously rewrite the append-only file

    CLI Example:

    .. code-block:: bash

        salt '*' redis.bgrewriteaof
    '''
    # BGREWRITEAOF returns immediately; the redis server rewrites the AOF
    # in the background.
    server = _connect(host, port, db, password)
    return server.bgrewriteaof() |
def print_usage(self, hint=None):
"""Usage format should be like:
Lineno | Content
1 | Script description (__doc__)
2 | Usage: {script name} [COMMAND] [ARGUMENTS]
3 | \n
4 | Commands:
5 | cmd1 cmd1 description.
6 | cmd2isverylong cmd2 description, and it is also
7 | long as shit.
7 | cmd3 cmd3 description.
"""
buf = []
# Description
if __doc__:
buf.append(__doc__)
# Usage
script_name = sys.argv[0]
buf.append('Usage: %s [COMMAND] [ARGUMENTS]' % script_name)
buf.append('')
buf.append('Commands:')
# Commands
indent_size = 2
tab_size = 4
doc_width = 50
grid_len = max(len(i) for i in list(self._commands.keys())) + tab_size
for name in self._commands_list:
command = self._commands[name]
line = ' ' * indent_size + name + ' ' * (grid_len - len(name))
doc = command.doc
pieces = [doc[i:i + doc_width] for i in range(0, len(doc), doc_width)]
line += pieces[0]
if len(pieces) > 1:
line += '\n'
line += '\n'.join(' ' * (grid_len + 2) + i for i in pieces[1:])
buf.append(line)
print('\n'.join(buf)) | Usage format should be like:
Lineno | Content
1 | Script description (__doc__)
2 | Usage: {script name} [COMMAND] [ARGUMENTS]
3 | \n
4 | Commands:
5 | cmd1 cmd1 description.
6 | cmd2isverylong cmd2 description, and it is also
7 | long as shit.
7 | cmd3 cmd3 description. | Below is the the instruction that describes the task:
### Input:
Usage format should be like:
Lineno | Content
1 | Script description (__doc__)
2 | Usage: {script name} [COMMAND] [ARGUMENTS]
3 | \n
4 | Commands:
5 | cmd1 cmd1 description.
6 | cmd2isverylong cmd2 description, and it is also
7 | long as shit.
7 | cmd3 cmd3 description.
### Response:
def print_usage(self, hint=None):
"""Usage format should be like:
Lineno | Content
1 | Script description (__doc__)
2 | Usage: {script name} [COMMAND] [ARGUMENTS]
3 | \n
4 | Commands:
5 | cmd1 cmd1 description.
6 | cmd2isverylong cmd2 description, and it is also
7 | long as shit.
7 | cmd3 cmd3 description.
"""
buf = []
# Description
if __doc__:
buf.append(__doc__)
# Usage
script_name = sys.argv[0]
buf.append('Usage: %s [COMMAND] [ARGUMENTS]' % script_name)
buf.append('')
buf.append('Commands:')
# Commands
indent_size = 2
tab_size = 4
doc_width = 50
grid_len = max(len(i) for i in list(self._commands.keys())) + tab_size
for name in self._commands_list:
command = self._commands[name]
line = ' ' * indent_size + name + ' ' * (grid_len - len(name))
doc = command.doc
pieces = [doc[i:i + doc_width] for i in range(0, len(doc), doc_width)]
line += pieces[0]
if len(pieces) > 1:
line += '\n'
line += '\n'.join(' ' * (grid_len + 2) + i for i in pieces[1:])
buf.append(line)
print('\n'.join(buf)) |
def _infer_sig_len(file_name, fmt, n_sig, dir_name, pb_dir=None):
"""
Infer the length of a signal from a dat file.
Parameters
----------
file_name : str
Name of the dat file
fmt : str
WFDB fmt of the dat file
n_sig : int
Number of signals contained in the dat file
Notes
-----
sig_len * n_sig * bytes_per_sample == file_size
"""
if pb_dir is None:
file_size = os.path.getsize(os.path.join(dir_name, file_name))
else:
file_size = download._remote_file_size(file_name=file_name,
pb_dir=pb_dir)
sig_len = int(file_size / (BYTES_PER_SAMPLE[fmt] * n_sig))
return sig_len | Infer the length of a signal from a dat file.
Parameters
----------
file_name : str
Name of the dat file
fmt : str
WFDB fmt of the dat file
n_sig : int
Number of signals contained in the dat file
Notes
-----
sig_len * n_sig * bytes_per_sample == file_size | Below is the instruction that describes the task:
### Input:
Infer the length of a signal from a dat file.
Parameters
----------
file_name : str
Name of the dat file
fmt : str
WFDB fmt of the dat file
n_sig : int
Number of signals contained in the dat file
Notes
-----
sig_len * n_sig * bytes_per_sample == file_size
### Response:
def _infer_sig_len(file_name, fmt, n_sig, dir_name, pb_dir=None):
    """
    Infer the length of a signal from a dat file.

    Parameters
    ----------
    file_name : str
        Name of the dat file
    fmt : str
        WFDB fmt of the dat file
    n_sig : int
        Number of signals contained in the dat file
    dir_name : str
        Local directory containing the dat file.
    pb_dir : str, optional
        PhysioBank directory; when given, the file size is looked up
        remotely instead of on the local filesystem.

    Returns
    -------
    int
        Inferred number of samples per signal.

    Notes
    -----
    sig_len * n_sig * bytes_per_sample == file_size
    """
    if pb_dir is None:
        file_size = os.path.getsize(os.path.join(dir_name, file_name))
    else:
        file_size = download._remote_file_size(file_name=file_name,
                                               pb_dir=pb_dir)
    # int() truncates any trailing partial frame. NOTE(review): assumes
    # BYTES_PER_SAMPLE[fmt] correctly reflects fractional byte widths of
    # packed formats (e.g. 212) -- confirm against its definition.
    sig_len = int(file_size / (BYTES_PER_SAMPLE[fmt] * n_sig))
    return sig_len |
def one_step(
objective_function,
population,
population_values=None,
differential_weight=0.5,
crossover_prob=0.9,
seed=None,
name=None):
"""Performs one step of the differential evolution algorithm.
Args:
objective_function: A Python callable that accepts a batch of possible
solutions and returns the values of the objective function at those
arguments as a rank 1 real `Tensor`. This specifies the function to be
minimized. The input to this callable may be either a single `Tensor`
or a Python `list` of `Tensor`s. The signature must match the format of
the argument `population`. (i.e. objective_function(*population) must
return the value of the function to be minimized).
population: `Tensor` or Python `list` of `Tensor`s representing the
current population vectors. Each `Tensor` must be of the same real dtype.
The first dimension indexes individual population members while the
rest of the dimensions are consumed by the value function. For example,
if the population is a single `Tensor` of shape [n, m1, m2], then `n` is
the population size and the output of `objective_function` applied to the
population is a `Tensor` of shape [n]. If the population is a python
list of `Tensor`s then each `Tensor` in the list should have the first
axis of a common size, say `n` and `objective_function(*population)`
should return a `Tensor of shape [n]. The population must have at least
4 members for the algorithm to work correctly.
population_values: A `Tensor` of rank 1 and real dtype. The result of
applying `objective_function` to the `population`. If not supplied it is
computed using the `objective_function`.
Default value: None.
differential_weight: Real scalar `Tensor`. Must be positive and less than
2.0. The parameter controlling the strength of mutation.
Default value: 0.5
crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The
probability of recombination per site.
Default value: 0.9
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: None.
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'one_step' is
used.
Default value: None
Returns:
A sequence containing the following elements (in order):
next_population: A `Tensor` or Python `list` of `Tensor`s of the same
structure as the input population. The population at the next generation.
next_population_values: A `Tensor` of same shape and dtype as input
`population_values`. The function values for the `next_population`.
"""
with tf.compat.v1.name_scope(
name, 'one_step',
[population, population_values, differential_weight, crossover_prob]):
population, _ = _ensure_list(population)
if population_values is None:
population_values = objective_function(*population)
population_size = tf.shape(input=population[0])[0]
seed_stream = distributions.SeedStream(seed, salt='one_step')
mixing_indices = _get_mixing_indices(population_size, seed=seed_stream())
# Construct the mutated solution vectors. There is one for each member of
# the population.
mutants = _get_mutants(population,
population_size,
mixing_indices,
differential_weight)
# Perform recombination between the parents and the mutants.
candidates = _binary_crossover(population,
population_size,
mutants,
crossover_prob,
seed=seed_stream())
candidate_values = objective_function(*candidates)
if population_values is None:
population_values = objective_function(*population)
infinity = tf.zeros_like(population_values) + np.inf
population_values = tf.where(
tf.math.is_nan(population_values), x=infinity, y=population_values)
to_replace = candidate_values < population_values
next_population = [
tf.where(to_replace, x=candidates_part, y=population_part)
for candidates_part, population_part in zip(candidates, population)
]
next_values = tf.where(to_replace, x=candidate_values, y=population_values)
return next_population, next_values | Performs one step of the differential evolution algorithm.
Args:
objective_function: A Python callable that accepts a batch of possible
solutions and returns the values of the objective function at those
arguments as a rank 1 real `Tensor`. This specifies the function to be
minimized. The input to this callable may be either a single `Tensor`
or a Python `list` of `Tensor`s. The signature must match the format of
the argument `population`. (i.e. objective_function(*population) must
return the value of the function to be minimized).
population: `Tensor` or Python `list` of `Tensor`s representing the
current population vectors. Each `Tensor` must be of the same real dtype.
The first dimension indexes individual population members while the
rest of the dimensions are consumed by the value function. For example,
if the population is a single `Tensor` of shape [n, m1, m2], then `n` is
the population size and the output of `objective_function` applied to the
population is a `Tensor` of shape [n]. If the population is a python
list of `Tensor`s then each `Tensor` in the list should have the first
axis of a common size, say `n` and `objective_function(*population)`
should return a `Tensor of shape [n]. The population must have at least
4 members for the algorithm to work correctly.
population_values: A `Tensor` of rank 1 and real dtype. The result of
applying `objective_function` to the `population`. If not supplied it is
computed using the `objective_function`.
Default value: None.
differential_weight: Real scalar `Tensor`. Must be positive and less than
2.0. The parameter controlling the strength of mutation.
Default value: 0.5
crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The
probability of recombination per site.
Default value: 0.9
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: None.
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'one_step' is
used.
Default value: None
Returns:
A sequence containing the following elements (in order):
next_population: A `Tensor` or Python `list` of `Tensor`s of the same
structure as the input population. The population at the next generation.
next_population_values: A `Tensor` of same shape and dtype as input
`population_values`. The function values for the `next_population`. | Below is the the instruction that describes the task:
### Input:
Performs one step of the differential evolution algorithm.
Args:
objective_function: A Python callable that accepts a batch of possible
solutions and returns the values of the objective function at those
arguments as a rank 1 real `Tensor`. This specifies the function to be
minimized. The input to this callable may be either a single `Tensor`
or a Python `list` of `Tensor`s. The signature must match the format of
the argument `population`. (i.e. objective_function(*population) must
return the value of the function to be minimized).
population: `Tensor` or Python `list` of `Tensor`s representing the
current population vectors. Each `Tensor` must be of the same real dtype.
The first dimension indexes individual population members while the
rest of the dimensions are consumed by the value function. For example,
if the population is a single `Tensor` of shape [n, m1, m2], then `n` is
the population size and the output of `objective_function` applied to the
population is a `Tensor` of shape [n]. If the population is a python
list of `Tensor`s then each `Tensor` in the list should have the first
axis of a common size, say `n` and `objective_function(*population)`
should return a `Tensor of shape [n]. The population must have at least
4 members for the algorithm to work correctly.
population_values: A `Tensor` of rank 1 and real dtype. The result of
applying `objective_function` to the `population`. If not supplied it is
computed using the `objective_function`.
Default value: None.
differential_weight: Real scalar `Tensor`. Must be positive and less than
2.0. The parameter controlling the strength of mutation.
Default value: 0.5
crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The
probability of recombination per site.
Default value: 0.9
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: None.
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'one_step' is
used.
Default value: None
Returns:
A sequence containing the following elements (in order):
next_population: A `Tensor` or Python `list` of `Tensor`s of the same
structure as the input population. The population at the next generation.
next_population_values: A `Tensor` of same shape and dtype as input
`population_values`. The function values for the `next_population`.
### Response:
def one_step(
objective_function,
population,
population_values=None,
differential_weight=0.5,
crossover_prob=0.9,
seed=None,
name=None):
"""Performs one step of the differential evolution algorithm.
Args:
objective_function: A Python callable that accepts a batch of possible
solutions and returns the values of the objective function at those
arguments as a rank 1 real `Tensor`. This specifies the function to be
minimized. The input to this callable may be either a single `Tensor`
or a Python `list` of `Tensor`s. The signature must match the format of
the argument `population`. (i.e. objective_function(*population) must
return the value of the function to be minimized).
population: `Tensor` or Python `list` of `Tensor`s representing the
current population vectors. Each `Tensor` must be of the same real dtype.
The first dimension indexes individual population members while the
rest of the dimensions are consumed by the value function. For example,
if the population is a single `Tensor` of shape [n, m1, m2], then `n` is
the population size and the output of `objective_function` applied to the
population is a `Tensor` of shape [n]. If the population is a python
list of `Tensor`s then each `Tensor` in the list should have the first
axis of a common size, say `n` and `objective_function(*population)`
should return a `Tensor of shape [n]. The population must have at least
4 members for the algorithm to work correctly.
population_values: A `Tensor` of rank 1 and real dtype. The result of
applying `objective_function` to the `population`. If not supplied it is
computed using the `objective_function`.
Default value: None.
differential_weight: Real scalar `Tensor`. Must be positive and less than
2.0. The parameter controlling the strength of mutation.
Default value: 0.5
crossover_prob: Real scalar `Tensor`. Must be between 0 and 1. The
probability of recombination per site.
Default value: 0.9
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: None.
name: (Optional) Python str. The name prefixed to the ops created by this
function. If not supplied, the default name 'one_step' is
used.
Default value: None
Returns:
A sequence containing the following elements (in order):
next_population: A `Tensor` or Python `list` of `Tensor`s of the same
structure as the input population. The population at the next generation.
next_population_values: A `Tensor` of same shape and dtype as input
`population_values`. The function values for the `next_population`.
"""
with tf.compat.v1.name_scope(
name, 'one_step',
[population, population_values, differential_weight, crossover_prob]):
population, _ = _ensure_list(population)
if population_values is None:
population_values = objective_function(*population)
population_size = tf.shape(input=population[0])[0]
seed_stream = distributions.SeedStream(seed, salt='one_step')
mixing_indices = _get_mixing_indices(population_size, seed=seed_stream())
# Construct the mutated solution vectors. There is one for each member of
# the population.
mutants = _get_mutants(population,
population_size,
mixing_indices,
differential_weight)
# Perform recombination between the parents and the mutants.
candidates = _binary_crossover(population,
population_size,
mutants,
crossover_prob,
seed=seed_stream())
candidate_values = objective_function(*candidates)
if population_values is None:
population_values = objective_function(*population)
infinity = tf.zeros_like(population_values) + np.inf
population_values = tf.where(
tf.math.is_nan(population_values), x=infinity, y=population_values)
to_replace = candidate_values < population_values
next_population = [
tf.where(to_replace, x=candidates_part, y=population_part)
for candidates_part, population_part in zip(candidates, population)
]
next_values = tf.where(to_replace, x=candidate_values, y=population_values)
return next_population, next_values |
def create_user(self, user, table_privileges=['ALL PRIVILEGES'],
schema_privileges=['ALL PRIVILEGES'],
row_limit=50000):
con = self.connection or self._connect()
cur = con.cursor()
cur.execute('CREATE SCHEMA {0};'.format(user))
# self._initialize(schema=schema_name)
password = pwgen(8)
cur.execute(
"CREATE USER {user} with PASSWORD '{password}';"
.format(user=user, password=password))
""" Grant SELECT on public schema """
cur.execute('GRANT USAGE ON SCHEMA public TO {user};'
.format(user=user))
cur.execute(
'GRANT SELECT ON ALL TABLES IN SCHEMA public TO {user};'
.format(user=user))
cur.execute(
'ALTER ROLE {user} SET search_path TO {user};'
.format(user=user))
self.stdout.write(
'CREATED USER {user} WITH PASSWORD {password}\n'
.format(user=user, password=password))
""" initialize user-schema """
old_schema = self.schema
self.initialized = False
self.schema = user
self._initialize(con)
""" Privileges on user-schema"""
cur.execute(
'GRANT {privileges} ON SCHEMA {user} TO {user};'
.format(privileges=', '.join(schema_privileges), user=user))
cur.execute(
'GRANT {privileges} ON ALL TABLES IN SCHEMA {user} TO {user};'
.format(privileges=', '.join(table_privileges), user=user))
cur.execute(
'GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA {user} TO {user};'
.format(user=user))
con.commit()
if row_limit:
""" Limit number of rows"""
for table in ['reaction', 'publication', 'systems',
'reaction_system', 'publication_system',
'information']:
table_factor = 1
if table in ['reaction_system', 'publication_system']:
table_factor = 15
elif table == 'publication':
table_factor = 1 / 100
elif table == 'information':
table_factor = 1 / 100
trigger_function = """
CREATE OR REPLACE FUNCTION
check_number_of_rows_{user}_{table}()
RETURNS TRIGGER AS
$BODY$
BEGIN
IF (SELECT count(*) FROM {user}.{table}) > {row_limit}
THEN
RAISE EXCEPTION
'INSERT statement exceeding maximum number of rows';
END IF;
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql""".format(user=user, table=table,
row_limit=row_limit * table_factor)
cur.execute(trigger_function)
trigger = """
DROP TRIGGER IF EXISTS tr_check_number_of_rows_{user}_{table}
on {user}.{table};
CREATE TRIGGER tr_check_number_of_rows_{user}_{table}
BEFORE INSERT ON {user}.systems
FOR EACH ROW EXECUTE PROCEDURE check_number_of_rows_{user}_{table}();
""".format(user=user, table=table)
cur.execute(trigger)
self.schema = old_schema
set_schema = 'ALTER ROLE {user} SET search_path TO {schema};'\
.format(user=self.user, schema=self.schema)
cur.execute(set_schema)
if self.connection is None:
con.commit()
con.close()
return password | Grant SELECT on public schema | Below is the the instruction that describes the task:
### Input:
Grant SELECT on public schema
### Response:
def create_user(self, user, table_privileges=['ALL PRIVILEGES'],
schema_privileges=['ALL PRIVILEGES'],
row_limit=50000):
con = self.connection or self._connect()
cur = con.cursor()
cur.execute('CREATE SCHEMA {0};'.format(user))
# self._initialize(schema=schema_name)
password = pwgen(8)
cur.execute(
"CREATE USER {user} with PASSWORD '{password}';"
.format(user=user, password=password))
""" Grant SELECT on public schema """
cur.execute('GRANT USAGE ON SCHEMA public TO {user};'
.format(user=user))
cur.execute(
'GRANT SELECT ON ALL TABLES IN SCHEMA public TO {user};'
.format(user=user))
cur.execute(
'ALTER ROLE {user} SET search_path TO {user};'
.format(user=user))
self.stdout.write(
'CREATED USER {user} WITH PASSWORD {password}\n'
.format(user=user, password=password))
""" initialize user-schema """
old_schema = self.schema
self.initialized = False
self.schema = user
self._initialize(con)
""" Privileges on user-schema"""
cur.execute(
'GRANT {privileges} ON SCHEMA {user} TO {user};'
.format(privileges=', '.join(schema_privileges), user=user))
cur.execute(
'GRANT {privileges} ON ALL TABLES IN SCHEMA {user} TO {user};'
.format(privileges=', '.join(table_privileges), user=user))
cur.execute(
'GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA {user} TO {user};'
.format(user=user))
con.commit()
if row_limit:
""" Limit number of rows"""
for table in ['reaction', 'publication', 'systems',
'reaction_system', 'publication_system',
'information']:
table_factor = 1
if table in ['reaction_system', 'publication_system']:
table_factor = 15
elif table == 'publication':
table_factor = 1 / 100
elif table == 'information':
table_factor = 1 / 100
trigger_function = """
CREATE OR REPLACE FUNCTION
check_number_of_rows_{user}_{table}()
RETURNS TRIGGER AS
$BODY$
BEGIN
IF (SELECT count(*) FROM {user}.{table}) > {row_limit}
THEN
RAISE EXCEPTION
'INSERT statement exceeding maximum number of rows';
END IF;
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql""".format(user=user, table=table,
row_limit=row_limit * table_factor)
cur.execute(trigger_function)
trigger = """
DROP TRIGGER IF EXISTS tr_check_number_of_rows_{user}_{table}
on {user}.{table};
CREATE TRIGGER tr_check_number_of_rows_{user}_{table}
BEFORE INSERT ON {user}.systems
FOR EACH ROW EXECUTE PROCEDURE check_number_of_rows_{user}_{table}();
""".format(user=user, table=table)
cur.execute(trigger)
self.schema = old_schema
set_schema = 'ALTER ROLE {user} SET search_path TO {schema};'\
.format(user=self.user, schema=self.schema)
cur.execute(set_schema)
if self.connection is None:
con.commit()
con.close()
return password |
def time_choices():
"""Return digital time choices every half hour from 00:00 to 23:30."""
hours = list(range(0, 24))
times = []
for h in hours:
hour = str(h).zfill(2)
times.append(hour+':00')
times.append(hour+':30')
return list(zip(times, times)) | Return digital time choices every half hour from 00:00 to 23:30. | Below is the the instruction that describes the task:
### Input:
Return digital time choices every half hour from 00:00 to 23:30.
### Response:
def time_choices():
    """Return digital time choices every half hour from 00:00 to 23:30.

    Returns:
        list of (value, label) tuples where value == label, e.g.
        ('09:30', '09:30'); shaped like a form/model ``choices`` option.
    """
    hours = list(range(0, 24))
    times = []
    for h in hours:
        # Zero-pad single-digit hours so every entry is 'HH:MM'.
        hour = str(h).zfill(2)
        times.append(hour+':00')
        times.append(hour+':30')
    return list(zip(times, times)) |
def flatten_list(node):
"""
List of expressions may be nested in groups of 32 and 1024
items. flatten that out and return the list
"""
flat_elems = []
for elem in node:
if elem == 'expr1024':
for subelem in elem:
assert subelem == 'expr32'
for subsubelem in subelem:
flat_elems.append(subsubelem)
elif elem == 'expr32':
for subelem in elem:
assert subelem == 'expr'
flat_elems.append(subelem)
else:
flat_elems.append(elem)
pass
pass
return flat_elems | List of expressions may be nested in groups of 32 and 1024
items. flatten that out and return the list | Below is the the instruction that describes the task:
### Input:
List of expressions may be nested in groups of 32 and 1024
items. flatten that out and return the list
### Response:
def flatten_list(node):
    """
    List of expressions may be nested in groups of 32 and 1024
    items. flatten that out and return the list

    :param node: iterable of elements. NOTE(review): elements compare equal
        to strings like 'expr1024' yet are themselves iterated, so they
        appear to be AST-like nodes whose ``==`` matches the node kind --
        confirm against the node class.
    :return: flat list of the leaf 'expr' elements.
    """
    flat_elems = []
    for elem in node:
        if elem == 'expr1024':
            # A 1024-group holds 'expr32' children, each holding 'expr' leaves.
            for subelem in elem:
                assert subelem == 'expr32'
                for subsubelem in subelem:
                    flat_elems.append(subsubelem)
        elif elem == 'expr32':
            for subelem in elem:
                assert subelem == 'expr'
                flat_elems.append(subelem)
        else:
            # Already a plain expression; keep as-is.
            flat_elems.append(elem)
            pass
        pass
    return flat_elems |
def _generate_signature(self, base_url, private_key):
"""
http://code.google.com/apis/maps/documentation/webservices/index.html#PythonSignatureExample
"""
url = urlparse.urlparse(base_url)
url_to_sign = url.path + '?' + url.query
decoded_key = base64.urlsafe_b64decode(private_key)
signature = hmac.new(decoded_key, url_to_sign, hashlib.sha1)
return base64.urlsafe_b64encode(signature.digest()) | http://code.google.com/apis/maps/documentation/webservices/index.html#PythonSignatureExample | Below is the the instruction that describes the task:
### Input:
http://code.google.com/apis/maps/documentation/webservices/index.html#PythonSignatureExample
### Response:
def _generate_signature(self, base_url, private_key):
        """
        Sign the path+query of ``base_url`` with ``private_key`` using
        HMAC-SHA1, per
        http://code.google.com/apis/maps/documentation/webservices/index.html#PythonSignatureExample

        :param base_url: full request URL; only its path and query are signed.
        :param private_key: url-safe base64 encoded secret key.
        :return: url-safe base64 encoded signature.
        """
        # NOTE(review): ``urlparse.urlparse`` and passing a str message to
        # hmac.new are Python 2 idioms; on Python 3 the message must be
        # bytes -- confirm the intended runtime.
        url = urlparse.urlparse(base_url)
        # Only the path and query participate in the signature.
        url_to_sign = url.path + '?' + url.query
        decoded_key = base64.urlsafe_b64decode(private_key)
        signature = hmac.new(decoded_key, url_to_sign, hashlib.sha1)
        return base64.urlsafe_b64encode(signature.digest()) |
def scan(self, M):
        """
        LML, fixed-effect sizes, and scale of the candidate set.

        Parameters
        ----------
        M : array_like
            Fixed-effects set.

        Returns
        -------
        lml : float
            Log of the marginal likelihood.
        effsizes0 : ndarray
            Covariates fixed-effect sizes.
        effsizes0_se : ndarray
            Covariates fixed-effect size standard errors.
        effsizes1 : ndarray
            Candidate set fixed-effect sizes.
        effsizes1_se : ndarray
            Candidate fixed-effect size standard errors.
        scale : ndarray
            Optimal scale.
        """
        from numpy_sugar.linalg import ddot
        from numpy_sugar import is_all_finite
        M = asarray(M, float)
        if M.shape[1] == 0:
            # Empty candidate set: fall back to the null-model statistics.
            return {
                "lml": self.null_lml(),
                "effsizes0": self.null_beta,
                "effsizes0_se": self.null_beta_se,
                "effsizes1": empty((0)),
                "effsizes1_se": empty((0)),
                "scale": self.null_scale,
            }
        if not is_all_finite(M):
            raise ValueError("M parameter has non-finite elements.")
        # Project M onto the eigenvectors of the covariance decomposition.
        # NOTE(review): self._QS looks like an economy eigendecomposition
        # (Q, S) pair -- confirm against the class constructor.
        MTQ = [dot(M.T, Q) for Q in self._QS[0] if Q.size > 0]
        yTBM = [dot(i, j.T) for (i, j) in zip(self._yTQDi, MTQ)]
        XTBM = [dot(i, j.T) for (i, j) in zip(self._XTQDi, MTQ)]
        D = self._D
        # The j.min() > 0 guard skips blocks with zero eigenvalues to avoid
        # division by zero in the 1 / j scaling.
        MTBM = [ddot(i, 1 / j) @ i.T for i, j in zip(MTQ, D) if j.min() > 0]
        return self._multicovariate_set(yTBM, XTBM, MTBM) | LML, fixed-effect sizes, and scale of the candidate set.
Parameters
----------
M : array_like
Fixed-effects set.
Returns
-------
lml : float
Log of the marginal likelihood.
effsizes0 : ndarray
Covariates fixed-effect sizes.
effsizes0_se : ndarray
Covariates fixed-effect size standard errors.
effsizes1 : ndarray
Candidate set fixed-effect sizes.
effsizes1_se : ndarray
Candidate fixed-effect size standard errors.
scale : ndarray
Optimal scale. | Below is the the instruction that describes the task:
### Input:
LML, fixed-effect sizes, and scale of the candidate set.
Parameters
----------
M : array_like
Fixed-effects set.
Returns
-------
lml : float
Log of the marginal likelihood.
effsizes0 : ndarray
Covariates fixed-effect sizes.
effsizes0_se : ndarray
Covariates fixed-effect size standard errors.
effsizes1 : ndarray
Candidate set fixed-effect sizes.
effsizes1_se : ndarray
Candidate fixed-effect size standard errors.
scale : ndarray
Optimal scale.
### Response:
def scan(self, M):
"""
LML, fixed-effect sizes, and scale of the candidate set.
Parameters
----------
M : array_like
Fixed-effects set.
Returns
-------
lml : float
Log of the marginal likelihood.
effsizes0 : ndarray
Covariates fixed-effect sizes.
effsizes0_se : ndarray
Covariates fixed-effect size standard errors.
effsizes1 : ndarray
Candidate set fixed-effect sizes.
effsizes1_se : ndarray
Candidate fixed-effect size standard errors.
scale : ndarray
Optimal scale.
"""
from numpy_sugar.linalg import ddot
from numpy_sugar import is_all_finite
M = asarray(M, float)
if M.shape[1] == 0:
return {
"lml": self.null_lml(),
"effsizes0": self.null_beta,
"effsizes0_se": self.null_beta_se,
"effsizes1": empty((0)),
"effsizes1_se": empty((0)),
"scale": self.null_scale,
}
if not is_all_finite(M):
raise ValueError("M parameter has non-finite elements.")
MTQ = [dot(M.T, Q) for Q in self._QS[0] if Q.size > 0]
yTBM = [dot(i, j.T) for (i, j) in zip(self._yTQDi, MTQ)]
XTBM = [dot(i, j.T) for (i, j) in zip(self._XTQDi, MTQ)]
D = self._D
MTBM = [ddot(i, 1 / j) @ i.T for i, j in zip(MTQ, D) if j.min() > 0]
return self._multicovariate_set(yTBM, XTBM, MTBM) |
def __send_api_file(self, file_name):
"""
Send apidoc files from the apidoc folder to the browser.
This method replaces all absolute urls in the file by
the current url.
:param file_name: the apidoc file.
"""
file_name = join(self.app.static_folder, file_name)
with codecs.open(file_name, 'r', 'utf-8') as file:
data = file.read()
# replaces the hard coded url by the current url.
api_project = self.__read_api_project()
old_url = api_project.get('url')
# replaces the project's url only if it is present in the file.
if old_url:
new_url = request.url_root.strip('/')
data = data.replace(old_url, new_url)
# creates a flask response to send
# the file to the browser
headers = Headers()
headers['Content-Length'] = getsize(file_name)
response = self.app.response_class(data,
mimetype=mimetypes.guess_type(file_name)[0],
headers=headers,
direct_passthrough=True)
response.last_modified = int(getmtime(file_name))
return response | Send apidoc files from the apidoc folder to the browser.
This method replaces all absolute urls in the file by
the current url.
:param file_name: the apidoc file. | Below is the the instruction that describes the task:
### Input:
Send apidoc files from the apidoc folder to the browser.
This method replaces all absolute urls in the file by
the current url.
:param file_name: the apidoc file.
### Response:
def __send_api_file(self, file_name):
"""
Send apidoc files from the apidoc folder to the browser.
This method replaces all absolute urls in the file by
the current url.
:param file_name: the apidoc file.
"""
file_name = join(self.app.static_folder, file_name)
with codecs.open(file_name, 'r', 'utf-8') as file:
data = file.read()
# replaces the hard coded url by the current url.
api_project = self.__read_api_project()
old_url = api_project.get('url')
# replaces the project's url only if it is present in the file.
if old_url:
new_url = request.url_root.strip('/')
data = data.replace(old_url, new_url)
# creates a flask response to send
# the file to the browser
headers = Headers()
headers['Content-Length'] = getsize(file_name)
response = self.app.response_class(data,
mimetype=mimetypes.guess_type(file_name)[0],
headers=headers,
direct_passthrough=True)
response.last_modified = int(getmtime(file_name))
return response |
def get_bval(self, path, **kwargs):
""" Get bval file for passed path. """
result = self.get_nearest(path, extensions='bval', suffix='dwi',
all_=True, **kwargs)
return listify(result)[0] | Get bval file for passed path. | Below is the the instruction that describes the task:
### Input:
Get bval file for passed path.
### Response:
def get_bval(self, path, **kwargs):
""" Get bval file for passed path. """
result = self.get_nearest(path, extensions='bval', suffix='dwi',
all_=True, **kwargs)
return listify(result)[0] |
def object_factory(api, api_version, kind):
"""
Dynamically builds a Python class for the given Kubernetes object in an API.
For example:
api = pykube.HTTPClient(...)
NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy")
This enables construction of any Kubernetes object kind without explicit support
from pykube.
Currently, the HTTPClient passed to this function will not be bound to the returned type.
It is planned to fix this, but in the mean time pass it as you would normally.
"""
resource_list = api.resource_list(api_version)
resource = next((resource for resource in resource_list["resources"] if resource["kind"] == kind), None)
base = NamespacedAPIObject if resource["namespaced"] else APIObject
return type(kind, (base,), {
"version": api_version,
"endpoint": resource["name"],
"kind": kind
}) | Dynamically builds a Python class for the given Kubernetes object in an API.
For example:
api = pykube.HTTPClient(...)
NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy")
This enables construction of any Kubernetes object kind without explicit support
from pykube.
Currently, the HTTPClient passed to this function will not be bound to the returned type.
It is planned to fix this, but in the mean time pass it as you would normally. | Below is the the instruction that describes the task:
### Input:
Dynamically builds a Python class for the given Kubernetes object in an API.
For example:
api = pykube.HTTPClient(...)
NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy")
This enables construction of any Kubernetes object kind without explicit support
from pykube.
Currently, the HTTPClient passed to this function will not be bound to the returned type.
It is planned to fix this, but in the mean time pass it as you would normally.
### Response:
def object_factory(api, api_version, kind):
"""
Dynamically builds a Python class for the given Kubernetes object in an API.
For example:
api = pykube.HTTPClient(...)
NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy")
This enables construction of any Kubernetes object kind without explicit support
from pykube.
Currently, the HTTPClient passed to this function will not be bound to the returned type.
It is planned to fix this, but in the mean time pass it as you would normally.
"""
resource_list = api.resource_list(api_version)
resource = next((resource for resource in resource_list["resources"] if resource["kind"] == kind), None)
base = NamespacedAPIObject if resource["namespaced"] else APIObject
return type(kind, (base,), {
"version": api_version,
"endpoint": resource["name"],
"kind": kind
}) |
def get_summary_dict(self, print_subelectrodes=True):
"""
Args:
print_subelectrodes:
Also print data on all the possible subelectrodes
Returns:
a summary of this electrode"s properties in dictionary format
"""
d = {}
framework_comp = Composition({k: v
for k, v in self._composition.items()
if k.symbol != self.working_ion.symbol})
d["framework"] = framework_comp.to_data_dict
d["framework_pretty"] = framework_comp.reduced_formula
d["average_voltage"] = self.get_average_voltage()
d["max_voltage"] = self.max_voltage
d["min_voltage"] = self.min_voltage
d["max_delta_volume"] = self.max_delta_volume
d["max_instability"] = 0
d["max_voltage_step"] = self.max_voltage_step
d["nsteps"] = self.num_steps
d["capacity_grav"] = self.get_capacity_grav()
d["capacity_vol"] = self.get_capacity_vol()
d["energy_grav"] = self.get_specific_energy()
d["energy_vol"] = self.get_energy_density()
d["working_ion"] = self.working_ion.symbol
d["reactions"] = []
d["reactant_compositions"] = []
comps = []
frac = []
for pair in self._vpairs:
rxn = pair.rxn
frac.append(pair.frac_charge)
frac.append(pair.frac_discharge)
d["reactions"].append(str(rxn))
for i in range(len(rxn.coeffs)):
if abs(rxn.coeffs[i]) > 1e-5 and rxn.all_comp[i] not in comps:
comps.append(rxn.all_comp[i])
if abs(rxn.coeffs[i]) > 1e-5 and \
rxn.all_comp[i].reduced_formula != d["working_ion"]:
reduced_comp = rxn.all_comp[i].reduced_composition
comp_dict = reduced_comp.as_dict()
d["reactant_compositions"].append(comp_dict)
d["fracA_charge"] = min(frac)
d["fracA_discharge"] = max(frac)
d["nsteps"] = self.num_steps
if print_subelectrodes:
f_dict = lambda c: c.get_summary_dict(print_subelectrodes=False)
d["adj_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=True))
d["all_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=False))
return d | Args:
print_subelectrodes:
Also print data on all the possible subelectrodes
Returns:
a summary of this electrode"s properties in dictionary format | Below is the the instruction that describes the task:
### Input:
Args:
print_subelectrodes:
Also print data on all the possible subelectrodes
Returns:
a summary of this electrode"s properties in dictionary format
### Response:
def get_summary_dict(self, print_subelectrodes=True):
"""
Args:
print_subelectrodes:
Also print data on all the possible subelectrodes
Returns:
a summary of this electrode"s properties in dictionary format
"""
d = {}
framework_comp = Composition({k: v
for k, v in self._composition.items()
if k.symbol != self.working_ion.symbol})
d["framework"] = framework_comp.to_data_dict
d["framework_pretty"] = framework_comp.reduced_formula
d["average_voltage"] = self.get_average_voltage()
d["max_voltage"] = self.max_voltage
d["min_voltage"] = self.min_voltage
d["max_delta_volume"] = self.max_delta_volume
d["max_instability"] = 0
d["max_voltage_step"] = self.max_voltage_step
d["nsteps"] = self.num_steps
d["capacity_grav"] = self.get_capacity_grav()
d["capacity_vol"] = self.get_capacity_vol()
d["energy_grav"] = self.get_specific_energy()
d["energy_vol"] = self.get_energy_density()
d["working_ion"] = self.working_ion.symbol
d["reactions"] = []
d["reactant_compositions"] = []
comps = []
frac = []
for pair in self._vpairs:
rxn = pair.rxn
frac.append(pair.frac_charge)
frac.append(pair.frac_discharge)
d["reactions"].append(str(rxn))
for i in range(len(rxn.coeffs)):
if abs(rxn.coeffs[i]) > 1e-5 and rxn.all_comp[i] not in comps:
comps.append(rxn.all_comp[i])
if abs(rxn.coeffs[i]) > 1e-5 and \
rxn.all_comp[i].reduced_formula != d["working_ion"]:
reduced_comp = rxn.all_comp[i].reduced_composition
comp_dict = reduced_comp.as_dict()
d["reactant_compositions"].append(comp_dict)
d["fracA_charge"] = min(frac)
d["fracA_discharge"] = max(frac)
d["nsteps"] = self.num_steps
if print_subelectrodes:
f_dict = lambda c: c.get_summary_dict(print_subelectrodes=False)
d["adj_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=True))
d["all_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=False))
return d |
def GetAttributeContainerByIndex(self, index):
"""Retrieves a specific serialized attribute container from the list.
Args:
index (int): attribute container index.
Returns:
bytes: serialized attribute container data or None if not available.
Raises:
IndexError: if the index is less than zero.
"""
if index < 0:
raise IndexError(
'Unsupported negative index value: {0:d}.'.format(index))
if index < len(self._list):
return self._list[index]
return None | Retrieves a specific serialized attribute container from the list.
Args:
index (int): attribute container index.
Returns:
bytes: serialized attribute container data or None if not available.
Raises:
IndexError: if the index is less than zero. | Below is the the instruction that describes the task:
### Input:
Retrieves a specific serialized attribute container from the list.
Args:
index (int): attribute container index.
Returns:
bytes: serialized attribute container data or None if not available.
Raises:
IndexError: if the index is less than zero.
### Response:
def GetAttributeContainerByIndex(self, index):
"""Retrieves a specific serialized attribute container from the list.
Args:
index (int): attribute container index.
Returns:
bytes: serialized attribute container data or None if not available.
Raises:
IndexError: if the index is less than zero.
"""
if index < 0:
raise IndexError(
'Unsupported negative index value: {0:d}.'.format(index))
if index < len(self._list):
return self._list[index]
return None |
def libvlc_video_set_callbacks(mp, lock, unlock, display, opaque):
'''Set callbacks and private data to render decoded video to a custom area
in memory.
Use L{libvlc_video_set_format}() or L{libvlc_video_set_format_callbacks}()
to configure the decoded format.
@param mp: the media player.
@param lock: callback to lock video memory (must not be NULL).
@param unlock: callback to unlock video memory (or NULL if not needed).
@param display: callback to display video (or NULL if not needed).
@param opaque: private pointer for the three callbacks (as first parameter).
@version: LibVLC 1.1.1 or later.
'''
f = _Cfunctions.get('libvlc_video_set_callbacks', None) or \
_Cfunction('libvlc_video_set_callbacks', ((1,), (1,), (1,), (1,), (1,),), None,
None, MediaPlayer, VideoLockCb, VideoUnlockCb, VideoDisplayCb, ctypes.c_void_p)
return f(mp, lock, unlock, display, opaque) | Set callbacks and private data to render decoded video to a custom area
in memory.
Use L{libvlc_video_set_format}() or L{libvlc_video_set_format_callbacks}()
to configure the decoded format.
@param mp: the media player.
@param lock: callback to lock video memory (must not be NULL).
@param unlock: callback to unlock video memory (or NULL if not needed).
@param display: callback to display video (or NULL if not needed).
@param opaque: private pointer for the three callbacks (as first parameter).
@version: LibVLC 1.1.1 or later. | Below is the the instruction that describes the task:
### Input:
Set callbacks and private data to render decoded video to a custom area
in memory.
Use L{libvlc_video_set_format}() or L{libvlc_video_set_format_callbacks}()
to configure the decoded format.
@param mp: the media player.
@param lock: callback to lock video memory (must not be NULL).
@param unlock: callback to unlock video memory (or NULL if not needed).
@param display: callback to display video (or NULL if not needed).
@param opaque: private pointer for the three callbacks (as first parameter).
@version: LibVLC 1.1.1 or later.
### Response:
def libvlc_video_set_callbacks(mp, lock, unlock, display, opaque):
'''Set callbacks and private data to render decoded video to a custom area
in memory.
Use L{libvlc_video_set_format}() or L{libvlc_video_set_format_callbacks}()
to configure the decoded format.
@param mp: the media player.
@param lock: callback to lock video memory (must not be NULL).
@param unlock: callback to unlock video memory (or NULL if not needed).
@param display: callback to display video (or NULL if not needed).
@param opaque: private pointer for the three callbacks (as first parameter).
@version: LibVLC 1.1.1 or later.
'''
f = _Cfunctions.get('libvlc_video_set_callbacks', None) or \
_Cfunction('libvlc_video_set_callbacks', ((1,), (1,), (1,), (1,), (1,),), None,
None, MediaPlayer, VideoLockCb, VideoUnlockCb, VideoDisplayCb, ctypes.c_void_p)
return f(mp, lock, unlock, display, opaque) |
def rand_ktensor(shape, rank, norm=None, random_state=None):
"""
Generates a random N-way tensor with rank R, where the entries are
drawn from the standard uniform distribution in the interval [0.0,1].
Parameters
----------
shape : tuple
shape of the tensor
rank : integer
rank of the tensor
norm : float or None, optional (defaults: None)
If not None, the factor matrices are rescaled so that the Frobenius
norm of the returned tensor is equal to ``norm``.
ktensor : bool
If true, a KTensor object is returned, i.e., the components are in factored
form ``[U_1, U_2, ... U_N]``; Otherwise an N-way array is returned.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
Returns
-------
X : (I_1, ..., I_N) array_like
N-way tensor with rank R.
Example
-------
>>> # Create a rank-2 tensor of dimension 5x5x5:
>>> import tensortools as tt
>>> X = tt.randn_tensor((5,5,5), rank=2)
"""
# Check input.
rns = _check_random_state(random_state)
# Randomize low-rank factor matrices i.i.d. uniform random elements.
factors = KTensor([rns.uniform(0.0, 1.0, size=(i, rank)) for i in shape])
return _rescale_tensor(factors, norm) | Generates a random N-way tensor with rank R, where the entries are
drawn from the standard uniform distribution in the interval [0.0,1].
Parameters
----------
shape : tuple
shape of the tensor
rank : integer
rank of the tensor
norm : float or None, optional (defaults: None)
If not None, the factor matrices are rescaled so that the Frobenius
norm of the returned tensor is equal to ``norm``.
ktensor : bool
If true, a KTensor object is returned, i.e., the components are in factored
form ``[U_1, U_2, ... U_N]``; Otherwise an N-way array is returned.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
Returns
-------
X : (I_1, ..., I_N) array_like
N-way tensor with rank R.
Example
-------
>>> # Create a rank-2 tensor of dimension 5x5x5:
>>> import tensortools as tt
>>> X = tt.randn_tensor((5,5,5), rank=2) | Below is the the instruction that describes the task:
### Input:
Generates a random N-way tensor with rank R, where the entries are
drawn from the standard uniform distribution in the interval [0.0,1].
Parameters
----------
shape : tuple
shape of the tensor
rank : integer
rank of the tensor
norm : float or None, optional (defaults: None)
If not None, the factor matrices are rescaled so that the Frobenius
norm of the returned tensor is equal to ``norm``.
ktensor : bool
If true, a KTensor object is returned, i.e., the components are in factored
form ``[U_1, U_2, ... U_N]``; Otherwise an N-way array is returned.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
Returns
-------
X : (I_1, ..., I_N) array_like
N-way tensor with rank R.
Example
-------
>>> # Create a rank-2 tensor of dimension 5x5x5:
>>> import tensortools as tt
>>> X = tt.randn_tensor((5,5,5), rank=2)
### Response:
def rand_ktensor(shape, rank, norm=None, random_state=None):
"""
Generates a random N-way tensor with rank R, where the entries are
drawn from the standard uniform distribution in the interval [0.0,1].
Parameters
----------
shape : tuple
shape of the tensor
rank : integer
rank of the tensor
norm : float or None, optional (defaults: None)
If not None, the factor matrices are rescaled so that the Frobenius
norm of the returned tensor is equal to ``norm``.
ktensor : bool
If true, a KTensor object is returned, i.e., the components are in factored
form ``[U_1, U_2, ... U_N]``; Otherwise an N-way array is returned.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
Returns
-------
X : (I_1, ..., I_N) array_like
N-way tensor with rank R.
Example
-------
>>> # Create a rank-2 tensor of dimension 5x5x5:
>>> import tensortools as tt
>>> X = tt.randn_tensor((5,5,5), rank=2)
"""
# Check input.
rns = _check_random_state(random_state)
# Randomize low-rank factor matrices i.i.d. uniform random elements.
factors = KTensor([rns.uniform(0.0, 1.0, size=(i, rank)) for i in shape])
return _rescale_tensor(factors, norm) |
def Tracing_recordClockSyncMarker(self, syncId):
"""
Function path: Tracing.recordClockSyncMarker
Domain: Tracing
Method name: recordClockSyncMarker
Parameters:
Required arguments:
'syncId' (type: string) -> The ID of this clock sync marker
No return value.
Description: Record a clock sync marker in the trace.
"""
assert isinstance(syncId, (str,)
), "Argument 'syncId' must be of type '['str']'. Received type: '%s'" % type(
syncId)
subdom_funcs = self.synchronous_command('Tracing.recordClockSyncMarker',
syncId=syncId)
return subdom_funcs | Function path: Tracing.recordClockSyncMarker
Domain: Tracing
Method name: recordClockSyncMarker
Parameters:
Required arguments:
'syncId' (type: string) -> The ID of this clock sync marker
No return value.
Description: Record a clock sync marker in the trace. | Below is the the instruction that describes the task:
### Input:
Function path: Tracing.recordClockSyncMarker
Domain: Tracing
Method name: recordClockSyncMarker
Parameters:
Required arguments:
'syncId' (type: string) -> The ID of this clock sync marker
No return value.
Description: Record a clock sync marker in the trace.
### Response:
def Tracing_recordClockSyncMarker(self, syncId):
"""
Function path: Tracing.recordClockSyncMarker
Domain: Tracing
Method name: recordClockSyncMarker
Parameters:
Required arguments:
'syncId' (type: string) -> The ID of this clock sync marker
No return value.
Description: Record a clock sync marker in the trace.
"""
assert isinstance(syncId, (str,)
), "Argument 'syncId' must be of type '['str']'. Received type: '%s'" % type(
syncId)
subdom_funcs = self.synchronous_command('Tracing.recordClockSyncMarker',
syncId=syncId)
return subdom_funcs |
def update(self, hook):
"""
Update a hook
:param hook: The data to update. Must include keys:
* id (str)
* name (str)
* triggers (list of str)
* sources (list of str)
* groups (list of str)
* actions (list of str)
:type hook: dict
Example:
.. code-block:: python
Hooks().update(
hook={
'id': 'd9d4596e-49e4-4135-b3b3-847f9e7c1f43',
'name': 'My Sandbox',
'triggers': [
'host = you.example.com'
],
'sources': [
'4d42c719-4005-4929-aa4a-994da4b95040'
],
'groups': [],
'actions': [
'9f6adf69-37b9-4a4b-88fb-c3fc4c781a11',
'ddc36d71-33cb-4f4f-be1b-8591814b1946'
],
}
)
:return:
:rtype: dict
"""
data = {
'id': hook['id'],
'name': hook['name'],
'triggers': hook['triggers'],
'sources': hook['sources'],
'groups': hook['groups'],
'actions': hook['actions'],
}
return self._post(
request=ApiActions.UPDATE.value,
uri=ApiUri.HOOKS.value,
params=data
) | Update a hook
:param hook: The data to update. Must include keys:
* id (str)
* name (str)
* triggers (list of str)
* sources (list of str)
* groups (list of str)
* actions (list of str)
:type hook: dict
Example:
.. code-block:: python
Hooks().update(
hook={
'id': 'd9d4596e-49e4-4135-b3b3-847f9e7c1f43',
'name': 'My Sandbox',
'triggers': [
'host = you.example.com'
],
'sources': [
'4d42c719-4005-4929-aa4a-994da4b95040'
],
'groups': [],
'actions': [
'9f6adf69-37b9-4a4b-88fb-c3fc4c781a11',
'ddc36d71-33cb-4f4f-be1b-8591814b1946'
],
}
)
:return:
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Update a hook
:param hook: The data to update. Must include keys:
* id (str)
* name (str)
* triggers (list of str)
* sources (list of str)
* groups (list of str)
* actions (list of str)
:type hook: dict
Example:
.. code-block:: python
Hooks().update(
hook={
'id': 'd9d4596e-49e4-4135-b3b3-847f9e7c1f43',
'name': 'My Sandbox',
'triggers': [
'host = you.example.com'
],
'sources': [
'4d42c719-4005-4929-aa4a-994da4b95040'
],
'groups': [],
'actions': [
'9f6adf69-37b9-4a4b-88fb-c3fc4c781a11',
'ddc36d71-33cb-4f4f-be1b-8591814b1946'
],
}
)
:return:
:rtype: dict
### Response:
def update(self, hook):
"""
Update a hook
:param hook: The data to update. Must include keys:
* id (str)
* name (str)
* triggers (list of str)
* sources (list of str)
* groups (list of str)
* actions (list of str)
:type hook: dict
Example:
.. code-block:: python
Hooks().update(
hook={
'id': 'd9d4596e-49e4-4135-b3b3-847f9e7c1f43',
'name': 'My Sandbox',
'triggers': [
'host = you.example.com'
],
'sources': [
'4d42c719-4005-4929-aa4a-994da4b95040'
],
'groups': [],
'actions': [
'9f6adf69-37b9-4a4b-88fb-c3fc4c781a11',
'ddc36d71-33cb-4f4f-be1b-8591814b1946'
],
}
)
:return:
:rtype: dict
"""
data = {
'id': hook['id'],
'name': hook['name'],
'triggers': hook['triggers'],
'sources': hook['sources'],
'groups': hook['groups'],
'actions': hook['actions'],
}
return self._post(
request=ApiActions.UPDATE.value,
uri=ApiUri.HOOKS.value,
params=data
) |
def verify(self):
"""
:desc: Verifies the integrity of the database, specifically checks
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool
"""
collections = self.get_data_collections()
allIDs = []
for coll in collections:
IDs = self.noteDB[coll].find({"ID": {"$exists": True}},
{"ID": 1, "_id": 0})
for ID in IDs:
allIDs.append(int(ID["ID"]))
query = {"currentMax": {"$exists": True}}
maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
unusedIDs = [int(ii) for ii in unusedIDs]
unusedIDsMatch = True
for ID in allIDs:
if ID in unusedIDs:
unusedIDsMatch = False
maxIDMatch = True
if maxID is not max(allIDs):
maxIDMatch = False
if maxIDMatch and unusedIDsMatch:
print "Database is valid"
elif not maxIDMatch and not unusedIDsMatch:
print "Database not valid, max ID and unused IDs are incorrent"
elif not maxIDMatch:
print "Database not valid, max ID is incorrent"
elif not unusedIDsMatch:
print "Database not valid, unusedIDs is incorrent" | :desc: Verifies the integrity of the database, specifically checks
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool | Below is the the instruction that describes the task:
### Input:
:desc: Verifies the integrity of the database, specifically checks
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool
### Response:
def verify(self):
"""
:desc: Verifies the integrity of the database, specifically checks
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool
"""
collections = self.get_data_collections()
allIDs = []
for coll in collections:
IDs = self.noteDB[coll].find({"ID": {"$exists": True}},
{"ID": 1, "_id": 0})
for ID in IDs:
allIDs.append(int(ID["ID"]))
query = {"currentMax": {"$exists": True}}
maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
query = {"unusedIDs": {"$exists": True}}
unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
unusedIDs = [int(ii) for ii in unusedIDs]
unusedIDsMatch = True
for ID in allIDs:
if ID in unusedIDs:
unusedIDsMatch = False
maxIDMatch = True
if maxID is not max(allIDs):
maxIDMatch = False
if maxIDMatch and unusedIDsMatch:
print "Database is valid"
elif not maxIDMatch and not unusedIDsMatch:
print "Database not valid, max ID and unused IDs are incorrent"
elif not maxIDMatch:
print "Database not valid, max ID is incorrent"
elif not unusedIDsMatch:
print "Database not valid, unusedIDs is incorrent" |
def _run_tool(cmd, use_container=True, work_dir=None, log_file=None):
"""Run with injection of bcbio path.
Place at end for runs without containers to avoid overriding other
bcbio installations.
"""
if isinstance(cmd, (list, tuple)):
cmd = " ".join([str(x) for x in cmd])
cmd = utils.local_path_export(at_start=use_container) + cmd
if log_file:
cmd += " 2>&1 | tee -a %s" % log_file
try:
print("Running: %s" % cmd)
subprocess.check_call(cmd, shell=True)
finally:
if use_container and work_dir:
_chown_workdir(work_dir) | Run with injection of bcbio path.
Place at end for runs without containers to avoid overriding other
bcbio installations. | Below is the the instruction that describes the task:
### Input:
Run with injection of bcbio path.
Place at end for runs without containers to avoid overriding other
bcbio installations.
### Response:
def _run_tool(cmd, use_container=True, work_dir=None, log_file=None):
"""Run with injection of bcbio path.
Place at end for runs without containers to avoid overriding other
bcbio installations.
"""
if isinstance(cmd, (list, tuple)):
cmd = " ".join([str(x) for x in cmd])
cmd = utils.local_path_export(at_start=use_container) + cmd
if log_file:
cmd += " 2>&1 | tee -a %s" % log_file
try:
print("Running: %s" % cmd)
subprocess.check_call(cmd, shell=True)
finally:
if use_container and work_dir:
_chown_workdir(work_dir) |
def from_pkcs12(cls, key, email, scopes, subject=None,
passphrase=PKCS12_PASSPHRASE):
"""Alternate constructor intended for using .p12 files.
Args:
key (dict) - Parsed JSON with service account credentials.
email (str) - Service account email.
scopes (Union[str, collections.Iterable[str]]) -
List of permissions that the application requests.
subject (str) - The email address of the user for which
the application is requesting delegated access.
passphrase (str) - Passphrase of private key file.
Google generates .p12 files secured with fixed 'notasecret'
passphrase, so if you didn't change it it's fine to omit
this parameter.
Returns:
ServiceAccount
"""
key = OpenSSL.crypto.load_pkcs12(key, passphrase).get_privatekey()
return cls(key=key, email=email, scopes=scopes, subject=subject) | Alternate constructor intended for using .p12 files.
Args:
key (dict) - Parsed JSON with service account credentials.
email (str) - Service account email.
scopes (Union[str, collections.Iterable[str]]) -
List of permissions that the application requests.
subject (str) - The email address of the user for which
the application is requesting delegated access.
passphrase (str) - Passphrase of private key file.
Google generates .p12 files secured with fixed 'notasecret'
passphrase, so if you didn't change it it's fine to omit
this parameter.
Returns:
ServiceAccount | Below is the the instruction that describes the task:
### Input:
Alternate constructor intended for using .p12 files.
Args:
key (dict) - Parsed JSON with service account credentials.
email (str) - Service account email.
scopes (Union[str, collections.Iterable[str]]) -
List of permissions that the application requests.
subject (str) - The email address of the user for which
the application is requesting delegated access.
passphrase (str) - Passphrase of private key file.
Google generates .p12 files secured with fixed 'notasecret'
passphrase, so if you didn't change it it's fine to omit
this parameter.
Returns:
ServiceAccount
### Response:
def from_pkcs12(cls, key, email, scopes, subject=None,
passphrase=PKCS12_PASSPHRASE):
"""Alternate constructor intended for using .p12 files.
Args:
key (dict) - Parsed JSON with service account credentials.
email (str) - Service account email.
scopes (Union[str, collections.Iterable[str]]) -
List of permissions that the application requests.
subject (str) - The email address of the user for which
the application is requesting delegated access.
passphrase (str) - Passphrase of private key file.
Google generates .p12 files secured with fixed 'notasecret'
passphrase, so if you didn't change it it's fine to omit
this parameter.
Returns:
ServiceAccount
"""
key = OpenSSL.crypto.load_pkcs12(key, passphrase).get_privatekey()
return cls(key=key, email=email, scopes=scopes, subject=subject) |
def render_bump_release(self):
"""
If the bump_release plugin is present, configure it
"""
phase = 'prebuild_plugins'
plugin = 'bump_release'
if not self.dj.dock_json_has_plugin_conf(phase, plugin):
return
if self.spec.release.value:
logger.info('removing %s from request as release already specified',
plugin)
self.dj.remove_plugin(phase, plugin)
return
hub = self.spec.kojihub.value
if not hub:
logger.info('removing %s from request as koji hub not specified',
plugin)
self.dj.remove_plugin(phase, plugin)
return
self.dj.dock_json_set_arg(phase, plugin, 'hub', hub)
# For flatpak, we want a name-version-release of
# <name>-<stream>-<module_build_version>.<n>, where the .<n> makes
# sure that the build is unique in Koji
if self.spec.flatpak.value:
self.dj.dock_json_set_arg(phase, plugin, 'append', True) | If the bump_release plugin is present, configure it | Below is the the instruction that describes the task:
### Input:
If the bump_release plugin is present, configure it
### Response:
def render_bump_release(self):
"""
If the bump_release plugin is present, configure it
"""
phase = 'prebuild_plugins'
plugin = 'bump_release'
if not self.dj.dock_json_has_plugin_conf(phase, plugin):
return
if self.spec.release.value:
logger.info('removing %s from request as release already specified',
plugin)
self.dj.remove_plugin(phase, plugin)
return
hub = self.spec.kojihub.value
if not hub:
logger.info('removing %s from request as koji hub not specified',
plugin)
self.dj.remove_plugin(phase, plugin)
return
self.dj.dock_json_set_arg(phase, plugin, 'hub', hub)
# For flatpak, we want a name-version-release of
# <name>-<stream>-<module_build_version>.<n>, where the .<n> makes
# sure that the build is unique in Koji
if self.spec.flatpak.value:
self.dj.dock_json_set_arg(phase, plugin, 'append', True) |
def bytes2unicode(x, encoding='utf-8', errors='strict'):
"""
Convert a C{bytes} to a unicode string.
@param x: a unicode string, of type C{unicode} on Python 2,
or C{str} on Python 3.
@param encoding: an optional codec, default: 'utf-8'
@param errors: error handling scheme, default 'strict'
@return: a unicode string of type C{unicode} on Python 2, or
C{str} on Python 3.
"""
if isinstance(x, (text_type, type(None))):
return x
return text_type(x, encoding, errors) | Convert a C{bytes} to a unicode string.
@param x: a unicode string, of type C{unicode} on Python 2,
or C{str} on Python 3.
@param encoding: an optional codec, default: 'utf-8'
@param errors: error handling scheme, default 'strict'
@return: a unicode string of type C{unicode} on Python 2, or
C{str} on Python 3. | Below is the the instruction that describes the task:
### Input:
Convert a C{bytes} to a unicode string.
@param x: a unicode string, of type C{unicode} on Python 2,
or C{str} on Python 3.
@param encoding: an optional codec, default: 'utf-8'
@param errors: error handling scheme, default 'strict'
@return: a unicode string of type C{unicode} on Python 2, or
C{str} on Python 3.
### Response:
def bytes2unicode(x, encoding='utf-8', errors='strict'):
"""
Convert a C{bytes} to a unicode string.
@param x: a unicode string, of type C{unicode} on Python 2,
or C{str} on Python 3.
@param encoding: an optional codec, default: 'utf-8'
@param errors: error handling scheme, default 'strict'
@return: a unicode string of type C{unicode} on Python 2, or
C{str} on Python 3.
"""
if isinstance(x, (text_type, type(None))):
return x
return text_type(x, encoding, errors) |
def report(self):
"""
Performs rollups, prints report of sockets opened.
"""
aggregations = dict(
(test, Counter().rollup(values))
for test, values in self.socket_warnings.items()
)
total = sum(
len(warnings)
for warnings in self.socket_warnings.values()
)
def format_test_statistics(test, counter):
return "%s:\n%s" % (
test,
'\n'.join(
' - %s: %s' % (socket, count)
for socket, count in counter.items()
)
)
def format_statistics(aggregations):
return '\n'.join(
format_test_statistics(test, counter)
for test, counter in aggregations.items()
)
# Only print the report if there are actually things to report.
if aggregations:
print('=' * 70, file=self.stream)
print(
'NON-WHITELISTED SOCKETS OPENED: %s' % total,
file=self.stream,
)
print('-' * 70, file=self.stream)
print(format_statistics(aggregations), file=self.stream) | Performs rollups, prints report of sockets opened. | Below is the the instruction that describes the task:
### Input:
Performs rollups, prints report of sockets opened.
### Response:
def report(self):
"""
Performs rollups, prints report of sockets opened.
"""
aggregations = dict(
(test, Counter().rollup(values))
for test, values in self.socket_warnings.items()
)
total = sum(
len(warnings)
for warnings in self.socket_warnings.values()
)
def format_test_statistics(test, counter):
return "%s:\n%s" % (
test,
'\n'.join(
' - %s: %s' % (socket, count)
for socket, count in counter.items()
)
)
def format_statistics(aggregations):
return '\n'.join(
format_test_statistics(test, counter)
for test, counter in aggregations.items()
)
# Only print the report if there are actually things to report.
if aggregations:
print('=' * 70, file=self.stream)
print(
'NON-WHITELISTED SOCKETS OPENED: %s' % total,
file=self.stream,
)
print('-' * 70, file=self.stream)
print(format_statistics(aggregations), file=self.stream) |
def measure(self,fromGeometry,toGeometry,measureOperation,
geometryType="esriGeometryPoint",pixelSize=None,mosaicRule=None,
linearUnit=None,angularUnit=None,areaUnit=None):
"""
The measure operation is performed on an image service resource. It
lets a user measure distance, direction, area, perimeter, and height
from an image service. The result of this operation includes the name
of the raster dataset being used, sensor name, and measured values.
The measure operation can be supported by image services from raster
datasets and mosaic datasets. Spatial reference is required to perform
basic measurement (distance, area, and so on). Sensor metadata (geodata
transformation) needs to be present in the data source used by an image
service to enable height measurement (for example, imagery with RPCs).
The mosaic dataset or service needs to include DEM to perform 3D measure.
Users can provide arguments to the measure operation as query parameters.
Inputs:
fromGeometry - A geometry that defines the "from" location of the
measurement. The structure of the geometry is the same as the structure
of the JSON geometry objects returned by the ArcGIS REST API. In addition
to the JSON structures, for points, you can specify the geometry with a
simple comma-separated syntax.
By default, the geometry is assumed to be in the spatial reference of
the image service. You can specify a different spatial reference by
using the JSON structure syntax for geometries.
toGeometry - A geometry that defines the "to" location of the measurement.
The type of geometry must be the same as fromGeometry. The structure of
the geometry is the same as the structure of the JSON geometry objects
returned by the ArcGIS REST API. In addition to the JSON structures, for
points, you can specify the geometry with a simple comma-separated syntax.
By default, the geometry is assumed to be in the spatial reference of
the image service. You can specify a different spatial reference by
using the JSON structure syntax for geometries.
geometryType - The type of geometry specified by the fromGeometry and
toGeometry parameters. The geometry type can be a point, polygon, or
envelope. The default geometry type is point.
Values: esriGeometryPoint | esriGeometryPolygon | esriGeometryEnvelope
measureOperation - Specifies the type of measure being performed.
Values: esriMensurationPoint | esriMensurationDistanceAndAngle |
esriMensurationAreaAndPerimeter | esriMensurationHeightFromBaseAndTop |
esriMensurationHeightFromBaseAndTopShadow |
esriMensurationHeightFromTopAndTopShadow | esriMensurationCentroid |
esriMensurationPoint3D | esriMensurationDistanceAndAngle3D |
esriMensurationAreaAndPerimeter3D | esriMensurationCentroid3D
pixelSize - The pixel level (resolution) being measured. If pixel size
is not specified, pixelSize will default to the base resolution of the
image service. The raster at the specified pixel size in the mosaic
dataset will be used for measurement.
The structure of the pixelSize parameter is the same as the structure
of the point object returned by the ArcGIS REST API. In addition to the
JSON structure, you can specify the pixel size with a simple
comma-separated syntax.
mosaicRule - Specifies the mosaic rule when defining how individual
images should be mosaicked. When a mosaic rule is not specified, the
default mosaic rule of the image service will be used (as advertised
in the root resource: defaultMosaicMethod, mosaicOperator, sortField,
sortValue). The first visible image is used by measure.
linearUnit - The linear unit in which height, length, or perimeters
will be calculated. It can be any of the following esriUnits constant.
If the unit is not specified, the default is esriMeters. The list of
valid esriUnits constants include:
esriInches | esriFeet | esriYards | esriMiles | esriNauticalMiles |
esriMillimeters | esriCentimeters | esriDecimeters | esriMeters |
esriKilometers
angularUnit - The angular unit in which directions of line segments
will be calculated. It can be one of the following esriDirectionUnits
constants: esriDURadians | esriDUDecimalDegrees
If the unit is not specified, the default is esriDUDecimalDegrees.
areaUnit - The area unit in which areas of polygons will be calculated.
It can be any esriAreaUnits constant. If the unit is not specified, the
default is esriSquareMeters. The list of valid esriAreaUnits constants
include:
esriSquareInches | esriSquareFeet | esriSquareYards | esriAcres |
esriSquareMiles | esriSquareMillimeters | esriSquareCentimeters |
esriSquareDecimeters | esriSquareMeters | esriAres | esriHectares |
esriSquareKilometers
"""
url = self._url + "/measure"
params = {
"f" : "json",
"fromGeometry" : fromGeometry,
"toGeometry": toGeometry,
"geometryType": geometryType,
"measureOperation": measureOperation
}
if not pixelSize is None:
params["pixelSize"] = pixelSize
if not mosaicRule is None:
params["mosaicRule"] = mosaicRule
if not linearUnit is None:
params["linearUnit"] = linearUnit
if not angularUnit is None:
params["angularUnit"] = angularUnit
if not areaUnit is None:
params["areaUnit"] = areaUnit
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | The measure operation is performed on an image service resource. It
lets a user measure distance, direction, area, perimeter, and height
from an image service. The result of this operation includes the name
of the raster dataset being used, sensor name, and measured values.
The measure operation can be supported by image services from raster
datasets and mosaic datasets. Spatial reference is required to perform
basic measurement (distance, area, and so on). Sensor metadata (geodata
transformation) needs to be present in the data source used by an image
service to enable height measurement (for example, imagery with RPCs).
The mosaic dataset or service needs to include DEM to perform 3D measure.
Users can provide arguments to the measure operation as query parameters.
Inputs:
fromGeometry - A geometry that defines the "from" location of the
measurement. The structure of the geometry is the same as the structure
of the JSON geometry objects returned by the ArcGIS REST API. In addition
to the JSON structures, for points, you can specify the geometry with a
simple comma-separated syntax.
By default, the geometry is assumed to be in the spatial reference of
the image service. You can specify a different spatial reference by
using the JSON structure syntax for geometries.
toGeometry - A geometry that defines the "to" location of the measurement.
The type of geometry must be the same as fromGeometry. The structure of
the geometry is the same as the structure of the JSON geometry objects
returned by the ArcGIS REST API. In addition to the JSON structures, for
points, you can specify the geometry with a simple comma-separated syntax.
By default, the geometry is assumed to be in the spatial reference of
the image service. You can specify a different spatial reference by
using the JSON structure syntax for geometries.
geometryType - The type of geometry specified by the fromGeometry and
toGeometry parameters. The geometry type can be a point, polygon, or
envelope. The default geometry type is point.
Values: esriGeometryPoint | esriGeometryPolygon | esriGeometryEnvelope
measureOperation - Specifies the type of measure being performed.
Values: esriMensurationPoint | esriMensurationDistanceAndAngle |
esriMensurationAreaAndPerimeter | esriMensurationHeightFromBaseAndTop |
esriMensurationHeightFromBaseAndTopShadow |
esriMensurationHeightFromTopAndTopShadow | esriMensurationCentroid |
esriMensurationPoint3D | esriMensurationDistanceAndAngle3D |
esriMensurationAreaAndPerimeter3D | esriMensurationCentroid3D
pixelSize - The pixel level (resolution) being measured. If pixel size
is not specified, pixelSize will default to the base resolution of the
image service. The raster at the specified pixel size in the mosaic
dataset will be used for measurement.
The structure of the pixelSize parameter is the same as the structure
of the point object returned by the ArcGIS REST API. In addition to the
JSON structure, you can specify the pixel size with a simple
comma-separated syntax.
mosaicRule - Specifies the mosaic rule when defining how individual
images should be mosaicked. When a mosaic rule is not specified, the
default mosaic rule of the image service will be used (as advertised
in the root resource: defaultMosaicMethod, mosaicOperator, sortField,
sortValue). The first visible image is used by measure.
linearUnit - The linear unit in which height, length, or perimeters
will be calculated. It can be any of the following esriUnits constant.
If the unit is not specified, the default is esriMeters. The list of
valid esriUnits constants include:
esriInches | esriFeet | esriYards | esriMiles | esriNauticalMiles |
esriMillimeters | esriCentimeters | esriDecimeters | esriMeters |
esriKilometers
angularUnit - The angular unit in which directions of line segments
will be calculated. It can be one of the following esriDirectionUnits
constants: esriDURadians | esriDUDecimalDegrees
If the unit is not specified, the default is esriDUDecimalDegrees.
areaUnit - The area unit in which areas of polygons will be calculated.
It can be any esriAreaUnits constant. If the unit is not specified, the
default is esriSquareMeters. The list of valid esriAreaUnits constants
include:
esriSquareInches | esriSquareFeet | esriSquareYards | esriAcres |
esriSquareMiles | esriSquareMillimeters | esriSquareCentimeters |
esriSquareDecimeters | esriSquareMeters | esriAres | esriHectares |
esriSquareKilometers | Below is the the instruction that describes the task:
### Input:
The measure operation is performed on an image service resource. It
lets a user measure distance, direction, area, perimeter, and height
from an image service. The result of this operation includes the name
of the raster dataset being used, sensor name, and measured values.
The measure operation can be supported by image services from raster
datasets and mosaic datasets. Spatial reference is required to perform
basic measurement (distance, area, and so on). Sensor metadata (geodata
transformation) needs to be present in the data source used by an image
service to enable height measurement (for example, imagery with RPCs).
The mosaic dataset or service needs to include DEM to perform 3D measure.
Users can provide arguments to the measure operation as query parameters.
Inputs:
fromGeometry - A geometry that defines the "from" location of the
measurement. The structure of the geometry is the same as the structure
of the JSON geometry objects returned by the ArcGIS REST API. In addition
to the JSON structures, for points, you can specify the geometry with a
simple comma-separated syntax.
By default, the geometry is assumed to be in the spatial reference of
the image service. You can specify a different spatial reference by
using the JSON structure syntax for geometries.
toGeometry - A geometry that defines the "to" location of the measurement.
The type of geometry must be the same as fromGeometry. The structure of
the geometry is the same as the structure of the JSON geometry objects
returned by the ArcGIS REST API. In addition to the JSON structures, for
points, you can specify the geometry with a simple comma-separated syntax.
By default, the geometry is assumed to be in the spatial reference of
the image service. You can specify a different spatial reference by
using the JSON structure syntax for geometries.
geometryType - The type of geometry specified by the fromGeometry and
toGeometry parameters. The geometry type can be a point, polygon, or
envelope. The default geometry type is point.
Values: esriGeometryPoint | esriGeometryPolygon | esriGeometryEnvelope
measureOperation - Specifies the type of measure being performed.
Values: esriMensurationPoint | esriMensurationDistanceAndAngle |
esriMensurationAreaAndPerimeter | esriMensurationHeightFromBaseAndTop |
esriMensurationHeightFromBaseAndTopShadow |
esriMensurationHeightFromTopAndTopShadow | esriMensurationCentroid |
esriMensurationPoint3D | esriMensurationDistanceAndAngle3D |
esriMensurationAreaAndPerimeter3D | esriMensurationCentroid3D
pixelSize - The pixel level (resolution) being measured. If pixel size
is not specified, pixelSize will default to the base resolution of the
image service. The raster at the specified pixel size in the mosaic
dataset will be used for measurement.
The structure of the pixelSize parameter is the same as the structure
of the point object returned by the ArcGIS REST API. In addition to the
JSON structure, you can specify the pixel size with a simple
comma-separated syntax.
mosaicRule - Specifies the mosaic rule when defining how individual
images should be mosaicked. When a mosaic rule is not specified, the
default mosaic rule of the image service will be used (as advertised
in the root resource: defaultMosaicMethod, mosaicOperator, sortField,
sortValue). The first visible image is used by measure.
linearUnit - The linear unit in which height, length, or perimeters
will be calculated. It can be any of the following esriUnits constant.
If the unit is not specified, the default is esriMeters. The list of
valid esriUnits constants include:
esriInches | esriFeet | esriYards | esriMiles | esriNauticalMiles |
esriMillimeters | esriCentimeters | esriDecimeters | esriMeters |
esriKilometers
angularUnit - The angular unit in which directions of line segments
will be calculated. It can be one of the following esriDirectionUnits
constants: esriDURadians | esriDUDecimalDegrees
If the unit is not specified, the default is esriDUDecimalDegrees.
areaUnit - The area unit in which areas of polygons will be calculated.
It can be any esriAreaUnits constant. If the unit is not specified, the
default is esriSquareMeters. The list of valid esriAreaUnits constants
include:
esriSquareInches | esriSquareFeet | esriSquareYards | esriAcres |
esriSquareMiles | esriSquareMillimeters | esriSquareCentimeters |
esriSquareDecimeters | esriSquareMeters | esriAres | esriHectares |
esriSquareKilometers
### Response:
def measure(self,fromGeometry,toGeometry,measureOperation,
geometryType="esriGeometryPoint",pixelSize=None,mosaicRule=None,
linearUnit=None,angularUnit=None,areaUnit=None):
"""
The measure operation is performed on an image service resource. It
lets a user measure distance, direction, area, perimeter, and height
from an image service. The result of this operation includes the name
of the raster dataset being used, sensor name, and measured values.
The measure operation can be supported by image services from raster
datasets and mosaic datasets. Spatial reference is required to perform
basic measurement (distance, area, and so on). Sensor metadata (geodata
transformation) needs to be present in the data source used by an image
service to enable height measurement (for example, imagery with RPCs).
The mosaic dataset or service needs to include DEM to perform 3D measure.
Users can provide arguments to the measure operation as query parameters.
Inputs:
fromGeometry - A geometry that defines the "from" location of the
measurement. The structure of the geometry is the same as the structure
of the JSON geometry objects returned by the ArcGIS REST API. In addition
to the JSON structures, for points, you can specify the geometry with a
simple comma-separated syntax.
By default, the geometry is assumed to be in the spatial reference of
the image service. You can specify a different spatial reference by
using the JSON structure syntax for geometries.
toGeometry - A geometry that defines the "to" location of the measurement.
The type of geometry must be the same as fromGeometry. The structure of
the geometry is the same as the structure of the JSON geometry objects
returned by the ArcGIS REST API. In addition to the JSON structures, for
points, you can specify the geometry with a simple comma-separated syntax.
By default, the geometry is assumed to be in the spatial reference of
the image service. You can specify a different spatial reference by
using the JSON structure syntax for geometries.
geometryType - The type of geometry specified by the fromGeometry and
toGeometry parameters. The geometry type can be a point, polygon, or
envelope. The default geometry type is point.
Values: esriGeometryPoint | esriGeometryPolygon | esriGeometryEnvelope
measureOperation - Specifies the type of measure being performed.
Values: esriMensurationPoint | esriMensurationDistanceAndAngle |
esriMensurationAreaAndPerimeter | esriMensurationHeightFromBaseAndTop |
esriMensurationHeightFromBaseAndTopShadow |
esriMensurationHeightFromTopAndTopShadow | esriMensurationCentroid |
esriMensurationPoint3D | esriMensurationDistanceAndAngle3D |
esriMensurationAreaAndPerimeter3D | esriMensurationCentroid3D
pixelSize - The pixel level (resolution) being measured. If pixel size
is not specified, pixelSize will default to the base resolution of the
image service. The raster at the specified pixel size in the mosaic
dataset will be used for measurement.
The structure of the pixelSize parameter is the same as the structure
of the point object returned by the ArcGIS REST API. In addition to the
JSON structure, you can specify the pixel size with a simple
comma-separated syntax.
mosaicRule - Specifies the mosaic rule when defining how individual
images should be mosaicked. When a mosaic rule is not specified, the
default mosaic rule of the image service will be used (as advertised
in the root resource: defaultMosaicMethod, mosaicOperator, sortField,
sortValue). The first visible image is used by measure.
linearUnit - The linear unit in which height, length, or perimeters
will be calculated. It can be any of the following esriUnits constant.
If the unit is not specified, the default is esriMeters. The list of
valid esriUnits constants include:
esriInches | esriFeet | esriYards | esriMiles | esriNauticalMiles |
esriMillimeters | esriCentimeters | esriDecimeters | esriMeters |
esriKilometers
angularUnit - The angular unit in which directions of line segments
will be calculated. It can be one of the following esriDirectionUnits
constants: esriDURadians | esriDUDecimalDegrees
If the unit is not specified, the default is esriDUDecimalDegrees.
areaUnit - The area unit in which areas of polygons will be calculated.
It can be any esriAreaUnits constant. If the unit is not specified, the
default is esriSquareMeters. The list of valid esriAreaUnits constants
include:
esriSquareInches | esriSquareFeet | esriSquareYards | esriAcres |
esriSquareMiles | esriSquareMillimeters | esriSquareCentimeters |
esriSquareDecimeters | esriSquareMeters | esriAres | esriHectares |
esriSquareKilometers
"""
url = self._url + "/measure"
params = {
"f" : "json",
"fromGeometry" : fromGeometry,
"toGeometry": toGeometry,
"geometryType": geometryType,
"measureOperation": measureOperation
}
if not pixelSize is None:
params["pixelSize"] = pixelSize
if not mosaicRule is None:
params["mosaicRule"] = mosaicRule
if not linearUnit is None:
params["linearUnit"] = linearUnit
if not angularUnit is None:
params["angularUnit"] = angularUnit
if not areaUnit is None:
params["areaUnit"] = areaUnit
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) |
def set_conversion(self, idx):
"""
Adds the conversion to the format.
:param idx: The ending index of the conversion name.
"""
# First, determine the name
if self.conv_begin:
name = self.format[self.conv_begin:idx]
else:
name = self.format[idx]
# Next, add the status code modifiers, as needed
if self.codes:
self.modifier.set_codes(self.codes, self.reject)
# Append the conversion to the format
self.fmt.append_conv(self.fmt._get_conversion(name, self.modifier))
# Clear the conversion data
self.param_begin = None
self.conv_begin = None
self.modifier = None
self.codes = []
self.reject = False
self.code_last = False | Adds the conversion to the format.
:param idx: The ending index of the conversion name. | Below is the the instruction that describes the task:
### Input:
Adds the conversion to the format.
:param idx: The ending index of the conversion name.
### Response:
def set_conversion(self, idx):
"""
Adds the conversion to the format.
:param idx: The ending index of the conversion name.
"""
# First, determine the name
if self.conv_begin:
name = self.format[self.conv_begin:idx]
else:
name = self.format[idx]
# Next, add the status code modifiers, as needed
if self.codes:
self.modifier.set_codes(self.codes, self.reject)
# Append the conversion to the format
self.fmt.append_conv(self.fmt._get_conversion(name, self.modifier))
# Clear the conversion data
self.param_begin = None
self.conv_begin = None
self.modifier = None
self.codes = []
self.reject = False
self.code_last = False |
def to_dict(self):
"""
Creates a dictionary representing the state of this position.
Returns a dict object of the form:
"""
return {
'sid': self.asset,
'amount': self.amount,
'cost_basis': self.cost_basis,
'last_sale_price': self.last_sale_price
} | Creates a dictionary representing the state of this position.
Returns a dict object of the form: | Below is the the instruction that describes the task:
### Input:
Creates a dictionary representing the state of this position.
Returns a dict object of the form:
### Response:
def to_dict(self):
"""
Creates a dictionary representing the state of this position.
Returns a dict object of the form:
"""
return {
'sid': self.asset,
'amount': self.amount,
'cost_basis': self.cost_basis,
'last_sale_price': self.last_sale_price
} |
def fsync(self, file_des):
"""Perform fsync for a fake file (in other words, do nothing).
Args:
file_des: The file descriptor of the open file.
Raises:
OSError: file_des is an invalid file descriptor.
TypeError: file_des is not an integer.
"""
# Throw an error if file_des isn't valid
if 0 <= file_des < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
file_object = self.filesystem.get_open_file(file_des)
if self.filesystem.is_windows_fs:
if (not hasattr(file_object, 'allow_update') or
not file_object.allow_update):
self.filesystem.raise_os_error(
errno.EBADF, file_object.file_path) | Perform fsync for a fake file (in other words, do nothing).
Args:
file_des: The file descriptor of the open file.
Raises:
OSError: file_des is an invalid file descriptor.
TypeError: file_des is not an integer. | Below is the the instruction that describes the task:
### Input:
Perform fsync for a fake file (in other words, do nothing).
Args:
file_des: The file descriptor of the open file.
Raises:
OSError: file_des is an invalid file descriptor.
TypeError: file_des is not an integer.
### Response:
def fsync(self, file_des):
"""Perform fsync for a fake file (in other words, do nothing).
Args:
file_des: The file descriptor of the open file.
Raises:
OSError: file_des is an invalid file descriptor.
TypeError: file_des is not an integer.
"""
# Throw an error if file_des isn't valid
if 0 <= file_des < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
file_object = self.filesystem.get_open_file(file_des)
if self.filesystem.is_windows_fs:
if (not hasattr(file_object, 'allow_update') or
not file_object.allow_update):
self.filesystem.raise_os_error(
errno.EBADF, file_object.file_path) |
def validate_account_credentials(deployment, context):
"""Exit if requested deployment account doesn't match credentials."""
boto_args = {'region_name': context.env_vars['AWS_DEFAULT_REGION']}
for i in ['aws_access_key_id', 'aws_secret_access_key',
'aws_session_token']:
if context.env_vars.get(i.upper()):
boto_args[i] = context.env_vars[i.upper()]
if isinstance(deployment.get('account-id'), (int, six.string_types)):
account_id = str(deployment['account-id'])
elif deployment.get('account-id', {}).get(context.env_name):
account_id = str(deployment['account-id'][context.env_name])
else:
account_id = None
if account_id:
validate_account_id(boto3.client('sts', **boto_args), account_id)
if isinstance(deployment.get('account-alias'), six.string_types):
account_alias = deployment['account-alias']
elif deployment.get('account-alias', {}).get(context.env_name):
account_alias = deployment['account-alias'][context.env_name]
else:
account_alias = None
if account_alias:
validate_account_alias(boto3.client('iam', **boto_args),
account_alias) | Exit if requested deployment account doesn't match credentials. | Below is the the instruction that describes the task:
### Input:
Exit if requested deployment account doesn't match credentials.
### Response:
def validate_account_credentials(deployment, context):
"""Exit if requested deployment account doesn't match credentials."""
boto_args = {'region_name': context.env_vars['AWS_DEFAULT_REGION']}
for i in ['aws_access_key_id', 'aws_secret_access_key',
'aws_session_token']:
if context.env_vars.get(i.upper()):
boto_args[i] = context.env_vars[i.upper()]
if isinstance(deployment.get('account-id'), (int, six.string_types)):
account_id = str(deployment['account-id'])
elif deployment.get('account-id', {}).get(context.env_name):
account_id = str(deployment['account-id'][context.env_name])
else:
account_id = None
if account_id:
validate_account_id(boto3.client('sts', **boto_args), account_id)
if isinstance(deployment.get('account-alias'), six.string_types):
account_alias = deployment['account-alias']
elif deployment.get('account-alias', {}).get(context.env_name):
account_alias = deployment['account-alias'][context.env_name]
else:
account_alias = None
if account_alias:
validate_account_alias(boto3.client('iam', **boto_args),
account_alias) |
def remove_whitespace(s):
""" Unsafely attempts to remove HTML whitespace. This is not an HTML parser
which is why its considered 'unsafe', but it should work for most
implementations. Just use on at your own risk.
@s: #str
-> HTML with whitespace removed, ignoring <pre>, script, textarea and code
tags
"""
ignores = {}
for ignore in html_ignore_whitespace_re.finditer(s):
name = "{}{}{}".format(r"{}", uuid.uuid4(), r"{}")
ignores[name] = ignore.group()
s = s.replace(ignore.group(), name)
s = whitespace_re(r' ', s).strip()
for name, val in ignores.items():
s = s.replace(name, val)
return s | Unsafely attempts to remove HTML whitespace. This is not an HTML parser
which is why its considered 'unsafe', but it should work for most
implementations. Just use on at your own risk.
@s: #str
-> HTML with whitespace removed, ignoring <pre>, script, textarea and code
tags | Below is the the instruction that describes the task:
### Input:
Unsafely attempts to remove HTML whitespace. This is not an HTML parser
which is why its considered 'unsafe', but it should work for most
implementations. Just use on at your own risk.
@s: #str
-> HTML with whitespace removed, ignoring <pre>, script, textarea and code
tags
### Response:
def remove_whitespace(s):
""" Unsafely attempts to remove HTML whitespace. This is not an HTML parser
which is why its considered 'unsafe', but it should work for most
implementations. Just use on at your own risk.
@s: #str
-> HTML with whitespace removed, ignoring <pre>, script, textarea and code
tags
"""
ignores = {}
for ignore in html_ignore_whitespace_re.finditer(s):
name = "{}{}{}".format(r"{}", uuid.uuid4(), r"{}")
ignores[name] = ignore.group()
s = s.replace(ignore.group(), name)
s = whitespace_re(r' ', s).strip()
for name, val in ignores.items():
s = s.replace(name, val)
return s |
def _SkipFieldMessage(tokenizer):
"""Skips over a field message.
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
if tokenizer.TryConsume('<'):
delimiter = '>'
else:
tokenizer.Consume('{')
delimiter = '}'
while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'):
_SkipField(tokenizer)
tokenizer.Consume(delimiter) | Skips over a field message.
Args:
tokenizer: A tokenizer to parse the field name and values. | Below is the the instruction that describes the task:
### Input:
Skips over a field message.
Args:
tokenizer: A tokenizer to parse the field name and values.
### Response:
def _SkipFieldMessage(tokenizer):
"""Skips over a field message.
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
if tokenizer.TryConsume('<'):
delimiter = '>'
else:
tokenizer.Consume('{')
delimiter = '}'
while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'):
_SkipField(tokenizer)
tokenizer.Consume(delimiter) |
def get(self, key, bucket):
""" Get a cached item by key
If the cached item isn't found the return None.
"""
try:
return self._cache[bucket][key]
except (KeyError, TypeError):
return None | Get a cached item by key
If the cached item isn't found the return None. | Below is the the instruction that describes the task:
### Input:
Get a cached item by key
If the cached item isn't found the return None.
### Response:
def get(self, key, bucket):
""" Get a cached item by key
If the cached item isn't found the return None.
"""
try:
return self._cache[bucket][key]
except (KeyError, TypeError):
return None |
def str_repl(self, inputstring, **kwargs):
"""Add back strings."""
out = []
comment = None
string = None
for i, c in enumerate(append_it(inputstring, None)):
try:
if comment is not None:
if c is not None and c in nums:
comment += c
elif c == unwrapper and comment:
ref = self.get_ref("comment", comment)
if out and not out[-1].endswith("\n"):
out[-1] = out[-1].rstrip(" ")
if not self.minify:
out[-1] += " " # put two spaces before comment
out.append("#" + ref)
comment = None
else:
raise CoconutInternalException("invalid comment marker in", getline(i, inputstring))
elif string is not None:
if c is not None and c in nums:
string += c
elif c == unwrapper and string:
text, strchar = self.get_ref("str", string)
out.append(strchar + text + strchar)
string = None
else:
raise CoconutInternalException("invalid string marker in", getline(i, inputstring))
elif c is not None:
if c == "#":
comment = ""
elif c == strwrapper:
string = ""
else:
out.append(c)
except CoconutInternalException as err:
complain(err)
if comment is not None:
out.append(comment)
comment = None
if string is not None:
out.append(string)
string = None
out.append(c)
return "".join(out) | Add back strings. | Below is the the instruction that describes the task:
### Input:
Add back strings.
### Response:
def str_repl(self, inputstring, **kwargs):
"""Add back strings."""
out = []
comment = None
string = None
for i, c in enumerate(append_it(inputstring, None)):
try:
if comment is not None:
if c is not None and c in nums:
comment += c
elif c == unwrapper and comment:
ref = self.get_ref("comment", comment)
if out and not out[-1].endswith("\n"):
out[-1] = out[-1].rstrip(" ")
if not self.minify:
out[-1] += " " # put two spaces before comment
out.append("#" + ref)
comment = None
else:
raise CoconutInternalException("invalid comment marker in", getline(i, inputstring))
elif string is not None:
if c is not None and c in nums:
string += c
elif c == unwrapper and string:
text, strchar = self.get_ref("str", string)
out.append(strchar + text + strchar)
string = None
else:
raise CoconutInternalException("invalid string marker in", getline(i, inputstring))
elif c is not None:
if c == "#":
comment = ""
elif c == strwrapper:
string = ""
else:
out.append(c)
except CoconutInternalException as err:
complain(err)
if comment is not None:
out.append(comment)
comment = None
if string is not None:
out.append(string)
string = None
out.append(c)
return "".join(out) |
def infer_enum_class(node):
""" Specific inference for enums. """
for basename in node.basenames:
# TODO: doesn't handle subclasses yet. This implementation
# is a hack to support enums.
if basename not in ENUM_BASE_NAMES:
continue
if node.root().name == "enum":
# Skip if the class is directly from enum module.
break
for local, values in node.locals.items():
if any(not isinstance(value, nodes.AssignName) for value in values):
continue
targets = []
stmt = values[0].statement()
if isinstance(stmt, nodes.Assign):
if isinstance(stmt.targets[0], nodes.Tuple):
targets = stmt.targets[0].itered()
else:
targets = stmt.targets
elif isinstance(stmt, nodes.AnnAssign):
targets = [stmt.target]
inferred_return_value = None
if isinstance(stmt, nodes.Assign):
if isinstance(stmt.value, nodes.Const):
if isinstance(stmt.value.value, str):
inferred_return_value = repr(stmt.value.value)
else:
inferred_return_value = stmt.value.value
else:
inferred_return_value = stmt.value.as_string()
new_targets = []
for target in targets:
# Replace all the assignments with our mocked class.
classdef = dedent(
"""
class {name}({types}):
@property
def value(self):
return {return_value}
@property
def name(self):
return "{name}"
""".format(
name=target.name,
types=", ".join(node.basenames),
return_value=inferred_return_value,
)
)
if "IntFlag" in basename:
# Alright, we need to add some additional methods.
# Unfortunately we still can't infer the resulting objects as
# Enum members, but once we'll be able to do that, the following
# should result in some nice symbolic execution
classdef += INT_FLAG_ADDITION_METHODS.format(name=target.name)
fake = AstroidBuilder(MANAGER).string_build(classdef)[target.name]
fake.parent = target.parent
for method in node.mymethods():
fake.locals[method.name] = [method]
new_targets.append(fake.instantiate_class())
node.locals[local] = new_targets
break
return node | Specific inference for enums. | Below is the the instruction that describes the task:
### Input:
Specific inference for enums.
### Response:
def infer_enum_class(node):
""" Specific inference for enums. """
for basename in node.basenames:
# TODO: doesn't handle subclasses yet. This implementation
# is a hack to support enums.
if basename not in ENUM_BASE_NAMES:
continue
if node.root().name == "enum":
# Skip if the class is directly from enum module.
break
for local, values in node.locals.items():
if any(not isinstance(value, nodes.AssignName) for value in values):
continue
targets = []
stmt = values[0].statement()
if isinstance(stmt, nodes.Assign):
if isinstance(stmt.targets[0], nodes.Tuple):
targets = stmt.targets[0].itered()
else:
targets = stmt.targets
elif isinstance(stmt, nodes.AnnAssign):
targets = [stmt.target]
inferred_return_value = None
if isinstance(stmt, nodes.Assign):
if isinstance(stmt.value, nodes.Const):
if isinstance(stmt.value.value, str):
inferred_return_value = repr(stmt.value.value)
else:
inferred_return_value = stmt.value.value
else:
inferred_return_value = stmt.value.as_string()
new_targets = []
for target in targets:
# Replace all the assignments with our mocked class.
classdef = dedent(
"""
class {name}({types}):
@property
def value(self):
return {return_value}
@property
def name(self):
return "{name}"
""".format(
name=target.name,
types=", ".join(node.basenames),
return_value=inferred_return_value,
)
)
if "IntFlag" in basename:
# Alright, we need to add some additional methods.
# Unfortunately we still can't infer the resulting objects as
# Enum members, but once we'll be able to do that, the following
# should result in some nice symbolic execution
classdef += INT_FLAG_ADDITION_METHODS.format(name=target.name)
fake = AstroidBuilder(MANAGER).string_build(classdef)[target.name]
fake.parent = target.parent
for method in node.mymethods():
fake.locals[method.name] = [method]
new_targets.append(fake.instantiate_class())
node.locals[local] = new_targets
break
return node |
def fmt_partition(partition):
"""Format a |Bipartition|.
The returned string looks like::
0,1 ∅
─── ✕ ───
2 0,1
Args:
partition (Bipartition): The partition in question.
Returns:
str: A human-readable string representation of the partition.
"""
if not partition:
return ''
parts = [fmt_part(part, partition.node_labels).split('\n')
for part in partition]
times = (' ',
' {} '.format(MULTIPLY),
' ')
breaks = ('\n', '\n', '') # No newline at the end of string
between = [times] * (len(parts) - 1) + [breaks]
# Alternate [part, break, part, ..., end]
elements = chain.from_iterable(zip(parts, between))
# Transform vertical stacks into horizontal lines
return ''.join(chain.from_iterable(zip(*elements))) | Format a |Bipartition|.
The returned string looks like::
0,1 ∅
─── ✕ ───
2 0,1
Args:
partition (Bipartition): The partition in question.
Returns:
str: A human-readable string representation of the partition. | Below is the the instruction that describes the task:
### Input:
Format a |Bipartition|.
The returned string looks like::
0,1 ∅
─── ✕ ───
2 0,1
Args:
partition (Bipartition): The partition in question.
Returns:
str: A human-readable string representation of the partition.
### Response:
def fmt_partition(partition):
"""Format a |Bipartition|.
The returned string looks like::
0,1 ∅
─── ✕ ───
2 0,1
Args:
partition (Bipartition): The partition in question.
Returns:
str: A human-readable string representation of the partition.
"""
if not partition:
return ''
parts = [fmt_part(part, partition.node_labels).split('\n')
for part in partition]
times = (' ',
' {} '.format(MULTIPLY),
' ')
breaks = ('\n', '\n', '') # No newline at the end of string
between = [times] * (len(parts) - 1) + [breaks]
# Alternate [part, break, part, ..., end]
elements = chain.from_iterable(zip(parts, between))
# Transform vertical stacks into horizontal lines
return ''.join(chain.from_iterable(zip(*elements))) |
def get_default_api_key(self, email, password):
"""
Get the default API key for a user.
:param email: The email of the user.
:type email: string
:param password: The user's password.
:type password: string
:returns: API key to confirm that it was fetched successfully.
:rtype: string
"""
parameters = dict()
parameters['email'] = email
parameters['password'] = password
response = self.request('midas.user.apikey.default', parameters)
return response['apikey'] | Get the default API key for a user.
:param email: The email of the user.
:type email: string
:param password: The user's password.
:type password: string
:returns: API key to confirm that it was fetched successfully.
:rtype: string | Below is the the instruction that describes the task:
### Input:
Get the default API key for a user.
:param email: The email of the user.
:type email: string
:param password: The user's password.
:type password: string
:returns: API key to confirm that it was fetched successfully.
:rtype: string
### Response:
def get_default_api_key(self, email, password):
"""
Get the default API key for a user.
:param email: The email of the user.
:type email: string
:param password: The user's password.
:type password: string
:returns: API key to confirm that it was fetched successfully.
:rtype: string
"""
parameters = dict()
parameters['email'] = email
parameters['password'] = password
response = self.request('midas.user.apikey.default', parameters)
return response['apikey'] |
def backslashcase(string):
"""Convert string into spinal case.
Join punctuation with backslash.
Args:
string: String to convert.
Returns:
string: Spinal cased string.
"""
str1 = re.sub(r"_", r"\\", snakecase(string))
return str1 | Convert string into spinal case.
Join punctuation with backslash.
Args:
string: String to convert.
Returns:
string: Spinal cased string. | Below is the the instruction that describes the task:
### Input:
Convert string into spinal case.
Join punctuation with backslash.
Args:
string: String to convert.
Returns:
string: Spinal cased string.
### Response:
def backslashcase(string):
"""Convert string into spinal case.
Join punctuation with backslash.
Args:
string: String to convert.
Returns:
string: Spinal cased string.
"""
str1 = re.sub(r"_", r"\\", snakecase(string))
return str1 |
def _bucket_key(self):
""" Returns hash bucket key for the redis key """
return "{}.size.{}".format(
self.prefix, (self._hashed_key//1000)
if self._hashed_key > 1000 else self._hashed_key) | Returns hash bucket key for the redis key | Below is the the instruction that describes the task:
### Input:
Returns hash bucket key for the redis key
### Response:
def _bucket_key(self):
""" Returns hash bucket key for the redis key """
return "{}.size.{}".format(
self.prefix, (self._hashed_key//1000)
if self._hashed_key > 1000 else self._hashed_key) |
def blueprint(self) -> Optional[str]:
"""Returns the blueprint the matched endpoint belongs to.
This can be None if the request has not been matched or the
endpoint is not in a blueprint.
"""
if self.endpoint is not None and '.' in self.endpoint:
return self.endpoint.rsplit('.', 1)[0]
else:
return None | Returns the blueprint the matched endpoint belongs to.
This can be None if the request has not been matched or the
endpoint is not in a blueprint. | Below is the the instruction that describes the task:
### Input:
Returns the blueprint the matched endpoint belongs to.
This can be None if the request has not been matched or the
endpoint is not in a blueprint.
### Response:
def blueprint(self) -> Optional[str]:
"""Returns the blueprint the matched endpoint belongs to.
This can be None if the request has not been matched or the
endpoint is not in a blueprint.
"""
if self.endpoint is not None and '.' in self.endpoint:
return self.endpoint.rsplit('.', 1)[0]
else:
return None |
def _jobresult(self, jobid, json=True, headers=None):
"""Poll the async job result.
To be run via in a Thread, the result is put within
the result list which is a hack.
"""
failures = 0
total_time = self.job_timeout or 2**30
remaining = timedelta(seconds=total_time)
endtime = datetime.now() + remaining
while remaining.total_seconds() > 0:
timeout = max(min(self.timeout, remaining.total_seconds()), 1)
try:
kind, params = self._prepare_request('queryAsyncJobResult',
jobid=jobid)
transform(params)
params['signature'] = self._sign(params)
req = requests.Request(self.method,
self.endpoint,
headers=headers,
**{kind: params})
prepped = req.prepare()
if self.trace:
print(prepped.method, prepped.url, file=sys.stderr)
if prepped.headers:
print(prepped.headers, "\n", file=sys.stderr)
if prepped.body:
print(prepped.body, file=sys.stderr)
else:
print(file=sys.stderr)
with requests.Session() as session:
response = session.send(prepped,
timeout=timeout,
verify=self.verify,
cert=self.cert)
j = self._response_value(response, json)
if self.trace:
print(response.status_code, response.reason,
file=sys.stderr)
headersTrace = "\n".join(
"{}: {}".format(k, v)
for k, v in response.headers.items())
print(headersTrace, "\n", file=sys.stderr)
print(response.text, "\n", file=sys.stderr)
failures = 0
if j['jobstatus'] != PENDING:
if j['jobresultcode'] or j['jobstatus'] != SUCCESS:
raise CloudStackException("Job failure",
response=response)
if 'jobresult' not in j:
raise CloudStackException("Unknown job result",
response=response)
return j['jobresult']
except CloudStackException:
raise
except Exception as e:
failures += 1
if failures > 10:
raise e
time.sleep(self.poll_interval)
remaining = endtime - datetime.now()
if response:
response.status_code = 408
raise CloudStackException("Timeout waiting for async job result",
jobid,
response=response) | Poll the async job result.
To be run via in a Thread, the result is put within
the result list which is a hack. | Below is the the instruction that describes the task:
### Input:
Poll the async job result.
To be run via in a Thread, the result is put within
the result list which is a hack.
### Response:
def _jobresult(self, jobid, json=True, headers=None):
"""Poll the async job result.
To be run via in a Thread, the result is put within
the result list which is a hack.
"""
failures = 0
total_time = self.job_timeout or 2**30
remaining = timedelta(seconds=total_time)
endtime = datetime.now() + remaining
while remaining.total_seconds() > 0:
timeout = max(min(self.timeout, remaining.total_seconds()), 1)
try:
kind, params = self._prepare_request('queryAsyncJobResult',
jobid=jobid)
transform(params)
params['signature'] = self._sign(params)
req = requests.Request(self.method,
self.endpoint,
headers=headers,
**{kind: params})
prepped = req.prepare()
if self.trace:
print(prepped.method, prepped.url, file=sys.stderr)
if prepped.headers:
print(prepped.headers, "\n", file=sys.stderr)
if prepped.body:
print(prepped.body, file=sys.stderr)
else:
print(file=sys.stderr)
with requests.Session() as session:
response = session.send(prepped,
timeout=timeout,
verify=self.verify,
cert=self.cert)
j = self._response_value(response, json)
if self.trace:
print(response.status_code, response.reason,
file=sys.stderr)
headersTrace = "\n".join(
"{}: {}".format(k, v)
for k, v in response.headers.items())
print(headersTrace, "\n", file=sys.stderr)
print(response.text, "\n", file=sys.stderr)
failures = 0
if j['jobstatus'] != PENDING:
if j['jobresultcode'] or j['jobstatus'] != SUCCESS:
raise CloudStackException("Job failure",
response=response)
if 'jobresult' not in j:
raise CloudStackException("Unknown job result",
response=response)
return j['jobresult']
except CloudStackException:
raise
except Exception as e:
failures += 1
if failures > 10:
raise e
time.sleep(self.poll_interval)
remaining = endtime - datetime.now()
if response:
response.status_code = 408
raise CloudStackException("Timeout waiting for async job result",
jobid,
response=response) |
def get_alt_texts_metadata(self):
"""Gets the metadata for all alt_texts.
return: (osid.Metadata) - metadata for the alt_texts
*compliance: mandatory -- This method must be implemented.*
"""
metadata = dict(self._alt_texts_metadata)
metadata.update({'existing_string_values': [t['text'] for t in self.my_osid_object_form._my_map['altTexts']]})
return Metadata(**metadata) | Gets the metadata for all alt_texts.
return: (osid.Metadata) - metadata for the alt_texts
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the metadata for all alt_texts.
return: (osid.Metadata) - metadata for the alt_texts
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_alt_texts_metadata(self):
"""Gets the metadata for all alt_texts.
return: (osid.Metadata) - metadata for the alt_texts
*compliance: mandatory -- This method must be implemented.*
"""
metadata = dict(self._alt_texts_metadata)
metadata.update({'existing_string_values': [t['text'] for t in self.my_osid_object_form._my_map['altTexts']]})
return Metadata(**metadata) |
def read_file(filename):
'''
Reads files
:param filename:
The full path of the file to read
:returns:
The content of the file as string (if `filename` exists)
.. note:: If `filename`'s content is empty, ``None`` will also returned.
To check if a file really exists
use :func:`util.locations.search_location`
'''
if filename and _path.exists(filename):
with open(filename, 'r') as f:
return f.read() | Reads files
:param filename:
The full path of the file to read
:returns:
The content of the file as string (if `filename` exists)
.. note:: If `filename`'s content is empty, ``None`` will also returned.
To check if a file really exists
use :func:`util.locations.search_location` | Below is the the instruction that describes the task:
### Input:
Reads files
:param filename:
The full path of the file to read
:returns:
The content of the file as string (if `filename` exists)
.. note:: If `filename`'s content is empty, ``None`` will also returned.
To check if a file really exists
use :func:`util.locations.search_location`
### Response:
def read_file(filename):
'''
Reads files
:param filename:
The full path of the file to read
:returns:
The content of the file as string (if `filename` exists)
.. note:: If `filename`'s content is empty, ``None`` will also returned.
To check if a file really exists
use :func:`util.locations.search_location`
'''
if filename and _path.exists(filename):
with open(filename, 'r') as f:
return f.read() |
def _split_sequences_singletraj(dtraj, nstates, lag):
""" splits the discrete trajectory into conditional sequences by starting state
Parameters
----------
dtraj : int-iterable
discrete trajectory
nstates : int
total number of discrete states
lag : int
lag time
"""
sall = [[] for _ in range(nstates)]
res_states = []
res_seqs = []
for t in range(len(dtraj)-lag):
sall[dtraj[t]].append(dtraj[t+lag])
for i in range(nstates):
if len(sall[i]) > 0:
res_states.append(i)
res_seqs.append(np.array(sall[i]))
return res_states, res_seqs | splits the discrete trajectory into conditional sequences by starting state
Parameters
----------
dtraj : int-iterable
discrete trajectory
nstates : int
total number of discrete states
lag : int
lag time | Below is the the instruction that describes the task:
### Input:
splits the discrete trajectory into conditional sequences by starting state
Parameters
----------
dtraj : int-iterable
discrete trajectory
nstates : int
total number of discrete states
lag : int
lag time
### Response:
def _split_sequences_singletraj(dtraj, nstates, lag):
""" splits the discrete trajectory into conditional sequences by starting state
Parameters
----------
dtraj : int-iterable
discrete trajectory
nstates : int
total number of discrete states
lag : int
lag time
"""
sall = [[] for _ in range(nstates)]
res_states = []
res_seqs = []
for t in range(len(dtraj)-lag):
sall[dtraj[t]].append(dtraj[t+lag])
for i in range(nstates):
if len(sall[i]) > 0:
res_states.append(i)
res_seqs.append(np.array(sall[i]))
return res_states, res_seqs |
def encode(self, uuid, pad_length=22):
"""
Encodes a UUID into a string (LSB first) according to the alphabet
If leftmost (MSB) bits 0, string might be shorter
"""
return self._num_to_string(uuid.int, pad_to_length=pad_length) | Encodes a UUID into a string (LSB first) according to the alphabet
If leftmost (MSB) bits 0, string might be shorter | Below is the the instruction that describes the task:
### Input:
Encodes a UUID into a string (LSB first) according to the alphabet
If leftmost (MSB) bits 0, string might be shorter
### Response:
def encode(self, uuid, pad_length=22):
"""
Encodes a UUID into a string (LSB first) according to the alphabet
If leftmost (MSB) bits 0, string might be shorter
"""
return self._num_to_string(uuid.int, pad_to_length=pad_length) |
def find_stoichiometrically_balanced_cycles(model):
u"""
Find metabolic reactions in stoichiometrically balanced cycles (SBCs).
Identify forward and reverse cycles by closing all exchanges and using FVA.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
"SBCs are artifacts of metabolic reconstructions due to insufficient
constraints (e.g., thermodynamic constraints and regulatory
constraints) [1]_." They are defined by internal reactions that carry
flux in spite of closed exchange reactions.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203
"""
helpers.close_boundaries_sensibly(model)
fva_result = flux_variability_analysis(model, loopless=False)
return fva_result.index[
(fva_result["minimum"] <= (-1 + TOLERANCE_THRESHOLD)) |
(fva_result["maximum"] >= (1 - TOLERANCE_THRESHOLD))
].tolist() | u"""
Find metabolic reactions in stoichiometrically balanced cycles (SBCs).
Identify forward and reverse cycles by closing all exchanges and using FVA.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
"SBCs are artifacts of metabolic reconstructions due to insufficient
constraints (e.g., thermodynamic constraints and regulatory
constraints) [1]_." They are defined by internal reactions that carry
flux in spite of closed exchange reactions.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203 | Below is the the instruction that describes the task:
### Input:
u"""
Find metabolic reactions in stoichiometrically balanced cycles (SBCs).
Identify forward and reverse cycles by closing all exchanges and using FVA.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
"SBCs are artifacts of metabolic reconstructions due to insufficient
constraints (e.g., thermodynamic constraints and regulatory
constraints) [1]_." They are defined by internal reactions that carry
flux in spite of closed exchange reactions.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203
### Response:
def find_stoichiometrically_balanced_cycles(model):
u"""
Find metabolic reactions in stoichiometrically balanced cycles (SBCs).
Identify forward and reverse cycles by closing all exchanges and using FVA.
Parameters
----------
model : cobra.Model
The metabolic model under investigation.
Notes
-----
"SBCs are artifacts of metabolic reconstructions due to insufficient
constraints (e.g., thermodynamic constraints and regulatory
constraints) [1]_." They are defined by internal reactions that carry
flux in spite of closed exchange reactions.
References
----------
.. [1] Thiele, I., & Palsson, B. Ø. (2010, January). A protocol for
generating a high-quality genome-scale metabolic reconstruction.
Nature protocols. Nature Publishing Group.
http://doi.org/10.1038/nprot.2009.203
"""
helpers.close_boundaries_sensibly(model)
fva_result = flux_variability_analysis(model, loopless=False)
return fva_result.index[
(fva_result["minimum"] <= (-1 + TOLERANCE_THRESHOLD)) |
(fva_result["maximum"] >= (1 - TOLERANCE_THRESHOLD))
].tolist() |
def get_edge_colors_by_attr(G, attr, num_bins=5, cmap='viridis', start=0, stop=1, na_color='none'):
"""
Get a list of edge colors by binning some continuous-variable attribute into
quantiles.
Parameters
----------
G : networkx multidigraph
attr : string
the name of the continuous-variable attribute
num_bins : int
how many quantiles
cmap : string
name of a colormap
start : float
where to start in the colorspace
stop : float
where to end in the colorspace
na_color : string
what color to assign nodes with null attribute values
Returns
-------
list
"""
if num_bins is None:
num_bins=len(G.edges())
bin_labels = range(num_bins)
attr_values = pd.Series([data[attr] for u, v, key, data in G.edges(keys=True, data=True)])
cats = pd.qcut(x=attr_values, q=num_bins, labels=bin_labels)
colors = get_colors(num_bins, cmap, start, stop)
edge_colors = [colors[int(cat)] if pd.notnull(cat) else na_color for cat in cats]
return edge_colors | Get a list of edge colors by binning some continuous-variable attribute into
quantiles.
Parameters
----------
G : networkx multidigraph
attr : string
the name of the continuous-variable attribute
num_bins : int
how many quantiles
cmap : string
name of a colormap
start : float
where to start in the colorspace
stop : float
where to end in the colorspace
na_color : string
what color to assign nodes with null attribute values
Returns
-------
list | Below is the the instruction that describes the task:
### Input:
Get a list of edge colors by binning some continuous-variable attribute into
quantiles.
Parameters
----------
G : networkx multidigraph
attr : string
the name of the continuous-variable attribute
num_bins : int
how many quantiles
cmap : string
name of a colormap
start : float
where to start in the colorspace
stop : float
where to end in the colorspace
na_color : string
what color to assign nodes with null attribute values
Returns
-------
list
### Response:
def get_edge_colors_by_attr(G, attr, num_bins=5, cmap='viridis', start=0, stop=1, na_color='none'):
"""
Get a list of edge colors by binning some continuous-variable attribute into
quantiles.
Parameters
----------
G : networkx multidigraph
attr : string
the name of the continuous-variable attribute
num_bins : int
how many quantiles
cmap : string
name of a colormap
start : float
where to start in the colorspace
stop : float
where to end in the colorspace
na_color : string
what color to assign nodes with null attribute values
Returns
-------
list
"""
if num_bins is None:
num_bins=len(G.edges())
bin_labels = range(num_bins)
attr_values = pd.Series([data[attr] for u, v, key, data in G.edges(keys=True, data=True)])
cats = pd.qcut(x=attr_values, q=num_bins, labels=bin_labels)
colors = get_colors(num_bins, cmap, start, stop)
edge_colors = [colors[int(cat)] if pd.notnull(cat) else na_color for cat in cats]
return edge_colors |
def binary_connect_convolution(inp, outmaps, kernel,
                               pad=None, stride=None, dilation=None, group=1,
                               quantize_zero_to=1.0,
                               w_init=None, wb_init=None, b_init=None,
                               base_axis=1, fix_parameters=False, rng=None,
                               with_bias=True):
    """Binary Connect Convolution, multiplier-less inner-product.

    Binary Connect Convolution is the convolution function,
    except the definition of the inner product is modified.
    The input-output relation of this function is as follows:

    .. math::

        y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}.

    Therefore :math:`sign(w_i)` is either :math:`1` or :math:`-1` and the inner product
    simplifies to addition.

    This function should be used together with BatchNormalization.

    References:
        M. Courbariaux, Y. Bengio, and J.-P. David. "BinaryConnect:
        Training Deep Neural Networks with binary weights during propagations."
        Advances in Neural Information Processing Systems. 2015.

    .. note::

        1) if you would like to share weights between some layers, please
        make sure to share the standard, floating value weights (`weight`)
        and not the binarized weights (`binary_weight`)

        2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
        and not after a call to :func:`~nnabla._variable.Variable.backward`.
        To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
        float weights and the binary weights will not be in sync.

        3) Quantized values are stored as floating point number for `binary_weight`,
        since this function is only for simulation purposes.

    Args:
        inp (~nnabla.Variable): N-D array.
        outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
        kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
        pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
        stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
        dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
        group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
        quantize_zero_to (float): Input value at zero is quantized to this value.
        w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
        wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
        b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
        base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
        fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.

    Returns:
        :class:`~nnabla.Variable`
    """
    # Default initializers: Glorot-uniform for both the float weights and
    # the binarized shadow copy, zeros for the bias.
    if w_init is None:
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
    if wb_init is None:
        wb_init = UniformInitializer(
            calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
    if b_init is None:
        b_init = ConstantInitializer()
    # "W": the float master weights, trainable unless fix_parameters is set.
    w = get_parameter_or_create(
        "W", (outmaps, inp.shape[base_axis]) + tuple(kernel),
        w_init, True, not fix_parameters)
    # "Wb": the binarized copy; the third flag is False here — presumably
    # need_grad, since Wb is derived from W during forward (TODO confirm
    # against get_parameter_or_create's signature).
    wb = get_parameter_or_create(
        "Wb", (outmaps, inp.shape[base_axis]) + tuple(kernel),
        wb_init, False)
    b = None
    if with_bias:
        b = get_parameter_or_create(
            "b", (outmaps,), b_init, True, not fix_parameters)
    return F.binary_connect_convolution(inp, w, wb, b, base_axis, pad, stride, dilation, group, quantize_zero_to) | Binary Connect Convolution, multiplier-less inner-product.
Binary Connect Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}.
Therefore :math:`sign(w_i)` is either :math:`1` or :math:`-1` and the inner product
simplifies to addition.
This function should be used together with BatchNormalization.
References:
M. Courbariaux, Y. Bengio, and J.-P. David. "BinaryConnect:
Training Deep Neural Networks with binary weights during propagations."
Advances in Neural Information Processing Systems. 2015.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the binarized weights (`binary_weight`)
2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the binary weights will not be in sync.
3) Quantized values are stored as floating point number for `binary_weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
quantize_zero_to (float): Input value at zero is quantized to this value.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable` | Below is the instruction that describes the task:
### Input:
Binary Connect Convolution, multiplier-less inner-product.
Binary Connect Convolution is the convolution function,
except the definition of the inner product is modified.
The input-output relation of this function is as follows:
.. math::
y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}.
Therefore :math:`sign(w_i)` is either :math:`1` or :math:`-1` and the inner product
simplifies to addition.
This function should be used together with BatchNormalization.
References:
M. Courbariaux, Y. Bengio, and J.-P. David. "BinaryConnect:
Training Deep Neural Networks with binary weights during propagations."
Advances in Neural Information Processing Systems. 2015.
.. note::
1) if you would like to share weights between some layers, please
make sure to share the standard, floating value weights (`weight`)
and not the binarized weights (`binary_weight`)
2) The weights and the binary weights become synced only after :func:`~nnabla._variable.Variable.forward` is called,
and not after a call to :func:`~nnabla._variable.Variable.backward`.
To access the parameters of the network, remember to call :func:`~nnabla._variable.Variable.forward` once before doing so, otherwise the
float weights and the binary weights will not be in sync.
3) Quantized values are stored as floating point number for `binary_weight`,
since this function is only for simulation purposes.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal to the number of output channels). For example, to apply convolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply convolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
quantize_zero_to (float): Input value at zero is quantized to this value.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
wb_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for binary weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
### Response:
def binary_connect_convolution(inp, outmaps, kernel,
                               pad=None, stride=None, dilation=None, group=1,
                               quantize_zero_to=1.0,
                               w_init=None, wb_init=None, b_init=None,
                               base_axis=1, fix_parameters=False, rng=None,
                               with_bias=True):
    r"""Binary Connect Convolution, multiplier-less inner-product.

    A convolution in which the inner product uses only the sign of each
    weight, so multiply-accumulate reduces to addition/subtraction:

    .. math::

        y_{n, a, b} = \sum_{m} \sum_{i} \sum_{j} sign(w_{n, m, i, j}) x_{m, a + i, b + j}.

    Intended to be used together with BatchNormalization.

    References:
        M. Courbariaux, Y. Bengio, and J.-P. David. "BinaryConnect:
        Training Deep Neural Networks with binary weights during propagations."
        Advances in Neural Information Processing Systems. 2015.

    .. note::
        Share the float weights (``W``), not the binarized ``Wb``, when
        sharing parameters between layers; the two are synced on ``forward``
        only, and ``Wb`` stores quantized values as floats (simulation only).

    Args:
        inp (~nnabla.Variable): N-D array.
        outmaps (int): Number of convolution kernels (output channels).
        kernel (tuple of int): Convolution kernel size, e.g. ``(3, 5)``.
        pad (tuple of int): Padding sizes for dimensions.
        stride (tuple of int): Stride sizes for dimensions.
        dilation (tuple of int): Dilation sizes for dimensions.
        group (int): Number of channel groups.
        quantize_zero_to (float): Input value at zero is quantized to this value.
        w_init: Initializer for the float weight (default: Glorot uniform).
        wb_init: Initializer for the binary weight (default: Glorot uniform).
        b_init: Initializer for the bias (default: zeros).
        base_axis (int): Dimensions up to `base_axis` are sample dimensions.
        fix_parameters (bool): When `True`, weights and biases are not updated.
        rng (numpy.random.RandomState): Random generator for initializers.
        with_bias (bool): Whether to include the bias term.

    Returns:
        :class:`~nnabla.Variable`
    """
    if w_init is None:
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
    if wb_init is None:
        wb_init = UniformInitializer(
            calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
    b_init = b_init if b_init is not None else ConstantInitializer()
    # Both the float master weights and their binarized copy share one shape.
    weight_shape = (outmaps, inp.shape[base_axis]) + tuple(kernel)
    w = get_parameter_or_create("W", weight_shape, w_init, True, not fix_parameters)
    wb = get_parameter_or_create("Wb", weight_shape, wb_init, False)
    b = (get_parameter_or_create("b", (outmaps,), b_init, True, not fix_parameters)
         if with_bias else None)
    return F.binary_connect_convolution(
        inp, w, wb, b, base_axis, pad, stride, dilation, group, quantize_zero_to)
def _lazily_initialize(self):
    """Initialize the graph and session, if this has not yet been done."""
    # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
    import tensorflow.compat.v1 as tf
    with self._initialization_lock:
        # _session doubles as the "already initialized" flag; a competing
        # caller that lost the race simply returns here.
        if self._session:
            return
        graph = tf.Graph()
        with graph.as_default():
            self.initialize_graph()
        # Don't reserve GPU because libpng can't run on GPU.
        config = tf.ConfigProto(device_count={'GPU': 0})
        self._session = tf.Session(graph=graph, config=config) | Initialize the graph and session, if this has not yet been done. | Below is the the instruction that describes the task:
### Input:
Initialize the graph and session, if this has not yet been done.
### Response:
def _lazily_initialize(self):
    """Initialize the graph and session, if this has not yet been done."""
    # TODO(nickfelt): remove on-demand imports once dep situation is fixed.
    import tensorflow.compat.v1 as tf
    with self._initialization_lock:
        if self._session:
            # Somebody (possibly another thread) already did the work.
            return
        new_graph = tf.Graph()
        with new_graph.as_default():
            self.initialize_graph()
        # libpng can't run on GPU, so make sure no GPU is reserved.
        session_config = tf.ConfigProto(device_count={'GPU': 0})
        self._session = tf.Session(graph=new_graph, config=session_config)
def _process_corpus(self, corpus, output_path, processing_func, frame_size=400, hop_size=160, sr=None):
    """ Utility function for processing a corpus with a separate processing function. """
    feat_container = containers.FeatureContainer(output_path)
    feat_container.open()
    # Sampling-rate observed so far; -1 means "none seen yet".
    sampling_rate = -1
    for utterance in corpus.utterances.values():
        utt_sampling_rate = utterance.sampling_rate
        if sr is None:
            # No explicit rate requested: all utterances must agree on one.
            if sampling_rate > 0 and sampling_rate != utt_sampling_rate:
                raise ValueError(
                    'File {} has a different sampling-rate than the previous ones!'.format(utterance.track.idx))
            sampling_rate = utt_sampling_rate
        processing_func(utterance, feat_container, frame_size, hop_size, sr, corpus)
    # Let the concrete processor translate the input frame settings into
    # the frame/hop sizes of the produced features.
    tf_frame_size, tf_hop_size = self.frame_transform(frame_size, hop_size)
    feat_container.frame_size = tf_frame_size
    feat_container.hop_size = tf_hop_size
    feat_container.sampling_rate = sr or sampling_rate
    feat_container.close()
    return feat_container | Utility function for processing a corpus with a separate processing function. | Below is the the instruction that describes the task:
### Input:
Utility function for processing a corpus with a separate processing function.
### Response:
def _process_corpus(self, corpus, output_path, processing_func, frame_size=400, hop_size=160, sr=None):
    """Process every utterance of ``corpus`` with ``processing_func`` into a feature container."""
    container = containers.FeatureContainer(output_path)
    container.open()
    # Rate observed across utterances; -1 until the first one is seen.
    observed_rate = -1
    for utt in corpus.utterances.values():
        current_rate = utt.sampling_rate
        if sr is None:
            # Without an explicit target rate, every utterance must match.
            if observed_rate > 0 and observed_rate != current_rate:
                raise ValueError(
                    'File {} has a different sampling-rate than the previous ones!'.format(utt.track.idx))
            observed_rate = current_rate
        processing_func(utt, container, frame_size, hop_size, sr, corpus)
    # Record the output-side frame geometry and sampling rate.
    container.frame_size, container.hop_size = self.frame_transform(frame_size, hop_size)
    container.sampling_rate = sr or observed_rate
    container.close()
    return container
def issuer_cert_urls(self):
    """
    :return:
        A list of unicode strings that are URLs that should contain either
        an individual DER-encoded X.509 certificate, or a DER-encoded CMS
        message containing multiple certificates
    """
    # Computed lazily and cached on the instance.
    if self._issuer_cert_urls is None:
        self._issuer_cert_urls = []
        if self.authority_information_access_value:
            for entry in self.authority_information_access_value:
                if entry['access_method'].native == 'ca_issuers':
                    location = entry['access_location']
                    if location.name != 'uniform_resource_identifier':
                        continue
                    url = location.native
                    # Only plain-HTTP URLs are collected here.
                    if url.lower()[0:7] == 'http://':
                        self._issuer_cert_urls.append(url)
    return self._issuer_cert_urls | :return:
A list of unicode strings that are URLs that should contain either
an individual DER-encoded X.509 certificate, or a DER-encoded CMS
message containing multiple certificates | Below is the instruction that describes the task:
### Input:
:return:
A list of unicode strings that are URLs that should contain either
an individual DER-encoded X.509 certificate, or a DER-encoded CMS
message containing multiple certificates
### Response:
def issuer_cert_urls(self):
    """
    :return:
        A list of unicode strings that are URLs that should contain either
        an individual DER-encoded X.509 certificate, or a DER-encoded CMS
        message containing multiple certificates
    """
    if self._issuer_cert_urls is None:
        urls = []
        aia = self.authority_information_access_value
        if aia:
            for entry in aia:
                if entry['access_method'].native != 'ca_issuers':
                    continue
                location = entry['access_location']
                if location.name != 'uniform_resource_identifier':
                    continue
                url = location.native
                # Only plain-HTTP URLs are collected, matching the contract.
                if url.lower().startswith('http://'):
                    urls.append(url)
        self._issuer_cert_urls = urls
    return self._issuer_cert_urls
def update(self, stats):
    """Update stats in the CSV output file."""
    # Get the stats
    all_stats = stats.getAllExportsAsDict(plugin_list=self.plugins_to_export())
    # The header row is only built on the very first call.
    if self.first_line:
        csv_header = ['timestamp']
    # Init data with timestamp (issue#708)
    # NOTE(review): csv_data must be rebuilt on *every* call (only the
    # header is first-call-only); scoping it under the guard above would
    # raise NameError on the second update.
    csv_data = [time.strftime('%Y-%m-%d %H:%M:%S')]
    # Loop over plugins to export
    for plugin in self.plugins_to_export():
        if isinstance(all_stats[plugin], list):
            for stat in all_stats[plugin]:
                # First line: header
                if self.first_line:
                    csv_header += ('{}_{}_{}'.format(
                        plugin, self.get_item_key(stat), item) for item in stat)
                # Others lines: stats
                csv_data += itervalues(stat)
        elif isinstance(all_stats[plugin], dict):
            # First line: header
            if self.first_line:
                fieldnames = iterkeys(all_stats[plugin])
                csv_header += ('{}_{}'.format(plugin, fieldname)
                               for fieldname in fieldnames)
            # Others lines: stats
            csv_data += itervalues(all_stats[plugin])
    # Export to CSV
    if self.first_line:
        self.writer.writerow(csv_header)
        self.first_line = False
    self.writer.writerow(csv_data)
    self.csv_file.flush() | Update stats in the CSV output file. | Below is the the instruction that describes the task:
### Input:
Update stats in the CSV output file.
### Response:
def update(self, stats):
    """Update stats in the CSV output file.

    Writes the header row on the first call only, then one data row
    (timestamp followed by all exported plugin values) per call.
    """
    # Get the stats
    all_stats = stats.getAllExportsAsDict(plugin_list=self.plugins_to_export())
    # The header row is only built once, on the first update
    if self.first_line:
        csv_header = ['timestamp']
    # Init data with timestamp (issue#708)
    # csv_data is (re)built on *every* call — keeping it under the
    # first_line guard would raise NameError on the second update.
    csv_data = [time.strftime('%Y-%m-%d %H:%M:%S')]
    # Loop over plugins to export
    for plugin in self.plugins_to_export():
        if isinstance(all_stats[plugin], list):
            for stat in all_stats[plugin]:
                # First line: header
                if self.first_line:
                    csv_header += ('{}_{}_{}'.format(
                        plugin, self.get_item_key(stat), item) for item in stat)
                # Other lines: stats
                csv_data += itervalues(stat)
        elif isinstance(all_stats[plugin], dict):
            # First line: header
            if self.first_line:
                fieldnames = iterkeys(all_stats[plugin])
                csv_header += ('{}_{}'.format(plugin, fieldname)
                               for fieldname in fieldnames)
            # Other lines: stats
            csv_data += itervalues(all_stats[plugin])
    # Export to CSV
    if self.first_line:
        self.writer.writerow(csv_header)
        self.first_line = False
    self.writer.writerow(csv_data)
    self.csv_file.flush()
def _reset_suffix_links(self):
    '''
    Reset all suffix links in all nodes in this trie.
    '''
    # Mark the links as stale so they can be recomputed later.
    self._suffix_links_set = False
    for current, _parent in self.dfs():
        current.suffix = None
        current.dict_suffix = None
        current.longest_prefix = None | Reset all suffix links in all nodes in this trie. | Below is the the instruction that describes the task:
### Input:
Reset all suffix links in all nodes in this trie.
### Response:
def _reset_suffix_links(self):
'''
Reset all suffix links in all nodes in this trie.
'''
self._suffix_links_set = False
for current, _parent in self.dfs():
current.suffix = None
current.dict_suffix = None
current.longest_prefix = None |
def push_external_commands_to_schedulers(self):
    """Push received external commands to the schedulers

    :return: None
    """
    if not self.unprocessed_external_commands:
        return
    # Those are the global external commands
    commands_to_process = self.unprocessed_external_commands
    self.unprocessed_external_commands = []
    logger.debug("Commands: %s", commands_to_process)
    # Now get all external commands and put them into the good schedulers
    logger.debug("Commands to process: %d commands", len(commands_to_process))
    for ext_cmd in commands_to_process:
        cmd = self.external_commands_manager.resolve_command(ext_cmd)
        logger.debug("Resolved command: %s, result: %s", ext_cmd.cmd_line, cmd)
        if cmd and cmd['global']:
            # Send global command to all our schedulers
            for scheduler_link_uuid in self.schedulers:
                self.schedulers[scheduler_link_uuid].pushed_commands.append(ext_cmd)
    # Now for all active schedulers, send the commands
    count_pushed_commands = 0
    count_failed_commands = 0
    for scheduler_link_uuid in self.schedulers:
        link = self.schedulers[scheduler_link_uuid]
        if not link.active:
            logger.debug("The scheduler '%s' is not active, it is not possible to push "
                         "external commands to its connection!", link.name)
            continue
        # If there are some commands for this scheduler...
        commands = [ext_cmd.cmd_line for ext_cmd in link.pushed_commands]
        if not commands:
            logger.debug("The scheduler '%s' has no commands.", link.name)
            continue
        logger.debug("Sending %d commands to scheduler %s", len(commands), link.name)
        sent = []
        try:
            sent = link.push_external_commands(commands)
        except LinkError:
            logger.warning("Scheduler connection failed, I could not push external commands!")
        # Whether we sent the commands or not, clean the scheduler list
        link.pushed_commands = []
        # If we didn't send them, add the commands to the arbiter list
        if sent:
            statsmgr.gauge('external-commands.pushed.%s' % link.name, len(commands))
            count_pushed_commands = count_pushed_commands + len(commands)
        else:
            count_failed_commands = count_failed_commands + len(commands)
            statsmgr.gauge('external-commands.failed.%s' % link.name, len(commands))
            # Keep the unsent commands... for a next try
            self.external_commands.extend(commands)
    statsmgr.gauge('external-commands.pushed.all', count_pushed_commands)
    statsmgr.gauge('external-commands.failed.all', count_failed_commands) | Push received external commands to the schedulers
:return: None | Below is the instruction that describes the task:
### Input:
Push received external commands to the schedulers
:return: None
### Response:
def push_external_commands_to_schedulers(self):
    """Push received external commands to the schedulers

    :return: None
    """
    if not self.unprocessed_external_commands:
        return
    # Grab the pending commands and reset the queue.
    pending = self.unprocessed_external_commands
    self.unprocessed_external_commands = []
    logger.debug("Commands: %s", pending)
    # Resolve each command; global ones are queued for every scheduler.
    logger.debug("Commands to process: %d commands", len(pending))
    for ext_cmd in pending:
        cmd = self.external_commands_manager.resolve_command(ext_cmd)
        logger.debug("Resolved command: %s, result: %s", ext_cmd.cmd_line, cmd)
        if cmd and cmd['global']:
            for link_uuid in self.schedulers:
                self.schedulers[link_uuid].pushed_commands.append(ext_cmd)
    # Send each active scheduler its queued commands.
    pushed_total = 0
    failed_total = 0
    for link_uuid in self.schedulers:
        link = self.schedulers[link_uuid]
        if not link.active:
            logger.debug("The scheduler '%s' is not active, it is not possible to push "
                         "external commands to its connection!", link.name)
            continue
        commands = [ext_cmd.cmd_line for ext_cmd in link.pushed_commands]
        if not commands:
            logger.debug("The scheduler '%s' has no commands.", link.name)
            continue
        logger.debug("Sending %d commands to scheduler %s", len(commands), link.name)
        sent = []
        try:
            sent = link.push_external_commands(commands)
        except LinkError:
            logger.warning("Scheduler connection failed, I could not push external commands!")
        # Sent or not, the scheduler's queue is cleared.
        link.pushed_commands = []
        if sent:
            statsmgr.gauge('external-commands.pushed.%s' % link.name, len(commands))
            pushed_total = pushed_total + len(commands)
        else:
            statsmgr.gauge('external-commands.failed.%s' % link.name, len(commands))
            failed_total = failed_total + len(commands)
            # Keep the unsent commands around for a later retry.
            self.external_commands.extend(commands)
    statsmgr.gauge('external-commands.pushed.all', pushed_total)
    statsmgr.gauge('external-commands.failed.all', failed_total)
def j1_2(a=1):
    r"""Hankel transform pair J1_2 ([Ande75]_)."""
    def lhs(x):
        # Spatial-domain side: exponential decay exp(-a*x).
        return np.exp(-a*x)
    def rhs(b):
        # Closed-form J1 Hankel transform of exp(-a*x).
        return (np.sqrt(b**2 + a**2) - a)/(b*np.sqrt(b**2 + a**2))
    return Ghosh('j1', lhs, rhs) | r"""Hankel transform pair J1_2 ([Ande75]_). | Below is the the instruction that describes the task:
### Input:
r"""Hankel transform pair J1_2 ([Ande75]_).
### Response:
def j1_2(a=1):
    r"""Hankel transform pair J1_2 ([Ande75]_)."""
    lhs = lambda x: np.exp(-a * x)

    def rhs(b):
        # Closed-form J1 Hankel transform of exp(-a*x).
        root = np.sqrt(b**2 + a**2)
        return (root - a) / (b * root)

    return Ghosh('j1', lhs, rhs)
def produce_examples(shard_ids, wikis_dir, refs_dir, urls_dir, vocab_path,
                     out_filepaths):
    """Produce examples from shard_ids to out_filepaths."""
    # * Join the Wikipedia articles with their references
    # * Run Tf-idf to sort reference paragraphs
    # * Encode the Wikipedia and reference text with the vocabulary
    # * Write out TFRecords of tensorflow.Example
    tf.logging.info("Processing %d input shards into %d output files.",
                    len(shard_ids), len(out_filepaths))
    vocab = text_encoder.SubwordTextEncoder(vocab_path)
    eot_ids = vocab.encode(EOT)
    def example_generator():
        """Generate Example dicts."""
        # Bookkeeping counters, dumped as a JSON stats file at the end.
        stats = dict(total_original_wikis=0, total_original_refs=0,
                     total_found_refs=0, ref_lengths=[], wiki_original_refs=[],
                     wiki_found_refs=[], wikis_skipped_no_refs=0,
                     wikis_skipped_short_lead=0, num_wikis_written=0)
        ref_files_by_shard = _references_files_by_shard(refs_dir)
        for shard_id in shard_ids:
            tf.logging.info("Processing shard %d", shard_id)
            wiki_urls = _wiki_urls_for_shard(shard_id, urls_dir)
            tf.logging.info("Loaded wiki URLs for shard")
            refs_content = _references_content(ref_files_by_shard[shard_id])
            tf.logging.info("Loaded reference content for shard")
            for i, wiki in enumerate(_wiki_articles(shard_id, wikis_dir)):
                if not i % 1000:
                    tf.logging.info("Processing wiki index %d for shard %d", i, shard_id)
                stats["total_original_wikis"] += 1
                # Get reference content
                wiki_ref_content = []
                ref_urls = wiki_urls[wiki.url]["refs"]
                stats["total_original_refs"] += len(ref_urls)
                stats_wiki_original_refs = len(ref_urls)
                stats_wiki_found_refs = 0
                for ref_url in ref_urls:
                    ref_content = refs_content.get(ref_url)
                    if not ref_content:
                        continue
                    stats["total_found_refs"] += 1
                    stats["ref_lengths"].append(len(ref_content))
                    stats_wiki_found_refs += 1
                    wiki_ref_content.append(ref_content)
                stats["wiki_original_refs"].append(stats_wiki_original_refs)
                stats["wiki_found_refs"].append(stats_wiki_found_refs)
                if not wiki_ref_content or len(wiki_ref_content) < _MIN_REFS:
                    # No/few refs were found
                    stats["wikis_skipped_no_refs"] += 1
                    continue
                # Rank reference paragraphs with TFIDF
                wiki_title = _normalize_text(wiki.title)
                ranked_paragraphs = rank_reference_paragraphs(wiki_title,
                                                              wiki_ref_content)
                # Construct inputs from Wiki title and references
                inputs = []
                inputs.extend(vocab.encode(wiki_title))
                inputs.extend(eot_ids)
                for paragraph in ranked_paragraphs:
                    # Cap the encoded input length at ~1e6 tokens.
                    if len(inputs) >= 1e6:
                        break
                    paragraph += " "
                    inputs.extend(vocab.encode(paragraph))
                # Construct targets from article sections
                targets, section_boundaries = _encode_wiki_sections(
                    wiki.sections, vocab)
                # Skip if lead section is too short
                if (not section_boundaries or
                        section_boundaries[0] < _MIN_LEADSECTION_TOKENS):
                    stats["wikis_skipped_short_lead"] += 1
                    continue
                inputs.append(text_encoder.EOS_ID)
                targets.append(text_encoder.EOS_ID)
                stats["num_wikis_written"] += 1
                yield {
                    "inputs": inputs,
                    "targets": targets,
                    "section_boundaries": section_boundaries,
                }
        tf.logging.info("Total: %d, Skipped: %d",
                        stats["num_wikis_written"],
                        stats["total_original_wikis"] - stats["num_wikis_written"])
        tf.logging.info("Total refs: %d, Skipped refs: %d",
                        stats["total_found_refs"],
                        stats["total_original_refs"] - stats["total_found_refs"])
        stats_fname = os.path.join(os.path.split(out_filepaths[0])[0],
                                   "stats.%d.json" % shard_ids[0])
        with tf.gfile.Open(stats_fname, "w") as f:
            f.write(json.dumps(stats))
    generator_utils.generate_files(example_generator(), out_filepaths) | Produce examples from shard_ids to out_filepaths. | Below is the the instruction that describes the task:
### Input:
Produce examples from shard_ids to out_filepaths.
### Response:
def produce_examples(shard_ids, wikis_dir, refs_dir, urls_dir, vocab_path,
                     out_filepaths):
  """Produce examples from shard_ids to out_filepaths.

  Args:
    shard_ids: list of int, ids of the input shards to process.
    wikis_dir: str, directory containing the Wikipedia article shards.
    refs_dir: str, directory containing the scraped reference shards.
    urls_dir: str, directory containing the wiki-URL join files.
    vocab_path: str, path to the SubwordTextEncoder vocabulary file.
    out_filepaths: list of str, TFRecord output file paths.
  """
  # * Join the Wikipedia articles with their references
  # * Run Tf-idf to sort reference paragraphs
  # * Encode the Wikipedia and reference text with the vocabulary
  # * Write out TFRecords of tensorflow.Example
  tf.logging.info("Processing %d input shards into %d output files.",
                  len(shard_ids), len(out_filepaths))
  vocab = text_encoder.SubwordTextEncoder(vocab_path)
  eot_ids = vocab.encode(EOT)
  def example_generator():
    """Generate Example dicts."""
    # Aggregated counters/diagnostics; dumped to stats.<shard>.json below.
    stats = dict(total_original_wikis=0, total_original_refs=0,
                 total_found_refs=0, ref_lengths=[], wiki_original_refs=[],
                 wiki_found_refs=[], wikis_skipped_no_refs=0,
                 wikis_skipped_short_lead=0, num_wikis_written=0)
    ref_files_by_shard = _references_files_by_shard(refs_dir)
    for shard_id in shard_ids:
      tf.logging.info("Processing shard %d", shard_id)
      wiki_urls = _wiki_urls_for_shard(shard_id, urls_dir)
      tf.logging.info("Loaded wiki URLs for shard")
      refs_content = _references_content(ref_files_by_shard[shard_id])
      tf.logging.info("Loaded reference content for shard")
      for i, wiki in enumerate(_wiki_articles(shard_id, wikis_dir)):
        # Periodic progress log every 1000 articles.
        if not i % 1000:
          tf.logging.info("Processing wiki index %d for shard %d", i, shard_id)
        stats["total_original_wikis"] += 1
        # Get reference content
        wiki_ref_content = []
        ref_urls = wiki_urls[wiki.url]["refs"]
        stats["total_original_refs"] += len(ref_urls)
        stats_wiki_original_refs = len(ref_urls)
        stats_wiki_found_refs = 0
        for ref_url in ref_urls:
          ref_content = refs_content.get(ref_url)
          if not ref_content:
            continue
          stats["total_found_refs"] += 1
          stats["ref_lengths"].append(len(ref_content))
          stats_wiki_found_refs += 1
          wiki_ref_content.append(ref_content)
        stats["wiki_original_refs"].append(stats_wiki_original_refs)
        stats["wiki_found_refs"].append(stats_wiki_found_refs)
        if not wiki_ref_content or len(wiki_ref_content) < _MIN_REFS:
          # No/few refs were found
          stats["wikis_skipped_no_refs"] += 1
          continue
        # Rank reference paragraphs with TFIDF
        wiki_title = _normalize_text(wiki.title)
        ranked_paragraphs = rank_reference_paragraphs(wiki_title,
                                                      wiki_ref_content)
        # Construct inputs from Wiki title and references
        inputs = []
        inputs.extend(vocab.encode(wiki_title))
        inputs.extend(eot_ids)
        for paragraph in ranked_paragraphs:
          if len(inputs) >= 1e6:
            # Cap the encoded input length at ~1M subword ids.
            break
          paragraph += " "
          inputs.extend(vocab.encode(paragraph))
        # Construct targets from article sections
        targets, section_boundaries = _encode_wiki_sections(
            wiki.sections, vocab)
        # Skip if lead section is too short
        if (not section_boundaries or
            section_boundaries[0] < _MIN_LEADSECTION_TOKENS):
          stats["wikis_skipped_short_lead"] += 1
          continue
        inputs.append(text_encoder.EOS_ID)
        targets.append(text_encoder.EOS_ID)
        stats["num_wikis_written"] += 1
        yield {
            "inputs": inputs,
            "targets": targets,
            "section_boundaries": section_boundaries,
        }
    tf.logging.info("Total: %d, Skipped: %d",
                    stats["num_wikis_written"],
                    stats["total_original_wikis"] - stats["num_wikis_written"])
    tf.logging.info("Total refs: %d, Skipped refs: %d",
                    stats["total_found_refs"],
                    stats["total_original_refs"] - stats["total_found_refs"])
    stats_fname = os.path.join(os.path.split(out_filepaths[0])[0],
                               "stats.%d.json" % shard_ids[0])
    with tf.gfile.Open(stats_fname, "w") as f:
      f.write(json.dumps(stats))
  generator_utils.generate_files(example_generator(), out_filepaths)
def set_event_tags(self, id, **kwargs): # noqa: E501
"""Set all tags associated with a specific event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_event_tags(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body:
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_event_tags_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.set_event_tags_with_http_info(id, **kwargs) # noqa: E501
return data | Set all tags associated with a specific event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_event_tags(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body:
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Set all tags associated with a specific event # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_event_tags(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body:
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
### Response:
def set_event_tags(self, id, **kwargs):  # noqa: E501
    """Set all tags associated with a specific event.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the decoded result.

    >>> thread = api.set_event_tags(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param list[str] body:
    :return: ResponseContainer
        If the method is called asynchronously,
        returns the request thread.
    """
    # Always unwrap the HTTP response down to the payload; in async mode
    # the delegate returns the request thread, otherwise the payload itself.
    kwargs['_return_http_data_only'] = True
    return self.set_event_tags_with_http_info(id, **kwargs)  # noqa: E501
def _candidate_sort_key(self, candidate, ignore_compatibility=True):
# type: (InstallationCandidate, bool) -> CandidateSortingKey
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min(self.valid_tags)
3. source archives
If prefer_binary was set, then all wheels are sorted above sources.
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
support_num = len(self.valid_tags)
build_tag = tuple() # type: BuildTag
binary_preference = 0
if candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported(self.valid_tags) and not ignore_compatibility:
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
if self.prefer_binary:
binary_preference = 1
tags = self.valid_tags if not ignore_compatibility else None
try:
pri = -(wheel.support_index_min(tags=tags))
except TypeError:
pri = -(support_num)
if wheel.build_tag is not None:
match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
build_tag_groups = match.groups()
build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
else: # sdist
pri = -(support_num)
return (binary_preference, candidate.version, build_tag, pri) | Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min(self.valid_tags)
3. source archives
If prefer_binary was set, then all wheels are sorted above sources.
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal | Below is the instruction that describes the task:
### Input:
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min(self.valid_tags)
3. source archives
If prefer_binary was set, then all wheels are sorted above sources.
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
### Response:
def _candidate_sort_key(self, candidate, ignore_compatibility=True):
        # type: (InstallationCandidate, bool) -> CandidateSortingKey
        """
        Function used to generate link sort key for link tuples.
        The greater the return value, the more preferred it is.
        If not finding wheels, then sorted by version only.
        If finding wheels, then the sort order is by version, then:
        1. existing installs
        2. wheels ordered via Wheel.support_index_min(self.valid_tags)
        3. source archives
        If prefer_binary was set, then all wheels are sorted above sources.
        Note: it was considered to embed this logic into the Link
        comparison operators, but then different sdist links
        with the same version, would have to be considered equal
        """
        support_num = len(self.valid_tags)
        build_tag = tuple() # type: BuildTag
        binary_preference = 0
        if candidate.location.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(candidate.location.filename)
            if not wheel.supported(self.valid_tags) and not ignore_compatibility:
                raise UnsupportedWheel(
                    "%s is not a supported wheel for this platform. It "
                    "can't be sorted." % wheel.filename
                )
            if self.prefer_binary:
                binary_preference = 1
            # NOTE(review): with tags=None, support_index_min appears to raise
            # TypeError, which the except below absorbs -- confirm.
            tags = self.valid_tags if not ignore_compatibility else None
            try:
                pri = -(wheel.support_index_min(tags=tags))
            except TypeError:
                pri = -(support_num)
            if wheel.build_tag is not None:
                # NOTE(review): assumes build tags start with digits (PEP 427);
                # a non-matching tag would leave match None and raise
                # AttributeError here -- confirm upstream validation.
                match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
                build_tag_groups = match.groups()
                build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
        else: # sdist
            pri = -(support_num)
        # Tuple compared lexicographically; larger tuples are more preferred.
        return (binary_preference, candidate.version, build_tag, pri)
def type_has_good_TimeMS(self, type):
'''The TimeMS in some messages is not from *our* clock!'''
if type.startswith('ACC'):
return False;
if type.startswith('GYR'):
return False;
    return True | The TimeMS in some messages is not from *our* clock! | Below is the instruction that describes the task:
### Input:
The TimeMS in some messages is not from *our* clock!
### Response:
def type_has_good_TimeMS(self, type):
    '''Return True if this message type's TimeMS comes from *our* clock.

    The TimeMS in some messages is not from *our* clock!  ACC* and GYR*
    records carry sensor-side timestamps, so their TimeMS must not be
    trusted.

    Args:
        type: str, log message type name (e.g. 'ACC1', 'GPS').
              NOTE: parameter name shadows the builtin ``type``; kept
              for API compatibility.

    Returns:
        bool: False for ACC*/GYR* message types, True otherwise.
    '''
    # str.startswith accepts a tuple of prefixes, so one check covers both
    # families (also drops the stray C-style semicolons of the original).
    return not type.startswith(('ACC', 'GYR'))
def copy(configfile='', destpath='', overwrite=False, sub_node=''):
"""Copies the files in the built file tree map
to despath.
:param configfile: string
Path to the FileTreeMap config file
:param destpath: string
Path to the files destination
:param overwrite: bool
Overwrite files if they already exist.
:param sub_node: string
Tree map configuration sub path.
Will copy only the contents within this sub-node
"""
log.info('Running {0} {1} {2}'.format(os.path.basename(__file__),
whoami(),
locals()))
assert(os.path.isfile(configfile))
if os.path.exists(destpath):
if os.listdir(destpath):
raise FolderAlreadyExists('Folder {0} already exists. Please clean '
'it or change destpath.'.format(destpath))
else:
log.info('Creating folder {0}'.format(destpath))
path(destpath).makedirs_p()
from boyle.files.file_tree_map import FileTreeMap
file_map = FileTreeMap()
try:
file_map.from_config_file(configfile)
except Exception as e:
raise FileTreeMapError(str(e))
if sub_node:
sub_map = file_map.get_node(sub_node)
if not sub_map:
raise FileTreeMapError('Could not find sub node '
'{0}'.format(sub_node))
file_map._filetree = {}
file_map._filetree[sub_node] = sub_map
try:
file_map.copy_to(destpath, overwrite=overwrite)
except Exception as e:
raise FileTreeMapError(str(e)) | Copies the files in the built file tree map
to destpath.
:param configfile: string
Path to the FileTreeMap config file
:param destpath: string
Path to the files destination
:param overwrite: bool
Overwrite files if they already exist.
:param sub_node: string
Tree map configuration sub path.
Will copy only the contents within this sub-node | Below is the instruction that describes the task:
### Input:
Copies the files in the built file tree map
to destpath.
:param configfile: string
Path to the FileTreeMap config file
:param destpath: string
Path to the files destination
:param overwrite: bool
Overwrite files if they already exist.
:param sub_node: string
Tree map configuration sub path.
Will copy only the contents within this sub-node
### Response:
def copy(configfile='', destpath='', overwrite=False, sub_node=''):
    """Copies the files in the built file tree map
    to destpath.

    :param configfile: string
        Path to the FileTreeMap config file
    :param destpath: string
        Path to the files destination
    :param overwrite: bool
        Overwrite files if they already exist.
    :param sub_node: string
        Tree map configuration sub path.
        Will copy only the contents within this sub-node
    """
    log.info('Running {0} {1} {2}'.format(os.path.basename(__file__),
                                          whoami(),
                                          locals()))
    # NOTE(review): assert is stripped when Python runs with -O; consider
    # raising an explicit exception for a missing config file instead.
    assert(os.path.isfile(configfile))
    if os.path.exists(destpath):
        if os.listdir(destpath):
            # Refuse to write into a non-empty destination.
            raise FolderAlreadyExists('Folder {0} already exists. Please clean '
                                      'it or change destpath.'.format(destpath))
    else:
        log.info('Creating folder {0}'.format(destpath))
        path(destpath).makedirs_p()
    # Imported here rather than at module level, presumably to avoid a
    # circular import -- confirm.
    from boyle.files.file_tree_map import FileTreeMap
    file_map = FileTreeMap()
    try:
        file_map.from_config_file(configfile)
    except Exception as e:
        raise FileTreeMapError(str(e))
    if sub_node:
        sub_map = file_map.get_node(sub_node)
        if not sub_map:
            raise FileTreeMapError('Could not find sub node '
                                   '{0}'.format(sub_node))
        # Restrict the tree to the requested sub-node only (pokes the
        # private _filetree attribute of FileTreeMap).
        file_map._filetree = {}
        file_map._filetree[sub_node] = sub_map
    try:
        file_map.copy_to(destpath, overwrite=overwrite)
    except Exception as e:
        raise FileTreeMapError(str(e))
raise FileTreeMapError(str(e)) |
def process_shells(self, shells):
"""Processing a list of shells."""
result = {'success': True, 'output': []}
if self.parallel and len(shells) > 1:
result = self.process_shells_parallel(shells)
elif len(shells) > 0:
result = self.process_shells_ordered(shells)
        return result | Processing a list of shells. | Below is the instruction that describes the task:
### Input:
Processing a list of shells.
### Response:
def process_shells(self, shells):
    """Process a list of shells, dispatching on the configured mode.

    Shells run in parallel only when parallel mode is enabled and there
    is more than one shell; otherwise any non-empty list is processed in
    order.

    Args:
        shells: list of shell definitions to process.

    Returns:
        dict: result produced by the chosen processor, or the default
        ``{'success': True, 'output': []}`` when *shells* is empty.
    """
    result = {'success': True, 'output': []}
    if self.parallel and len(shells) > 1:
        result = self.process_shells_parallel(shells)
    elif shells:
        # Idiomatic truthiness check instead of len(shells) > 0.
        result = self.process_shells_ordered(shells)
    return result
def _default_body(self):
""" If the body is not passed in by the user try to create one
using the given data parameters.
"""
if not self.data:
return ""
if self.content_type == 'application/json':
import json
return json.dumps(self.data)
elif self.content_type == 'application/x-www-form-urlencoded':
import urllib
return urllib.urlencode(self.data)
else:
raise NotImplementedError(
"You must manually encode the request "
"body for '{}'".format(self.content_type)
) | If the body is not passed in by the user try to create one
using the given data parameters. | Below is the instruction that describes the task:
### Input:
If the body is not passed in by the user try to create one
using the given data parameters.
### Response:
def _default_body(self):
""" If the body is not passed in by the user try to create one
using the given data parameters.
"""
if not self.data:
return ""
if self.content_type == 'application/json':
import json
return json.dumps(self.data)
elif self.content_type == 'application/x-www-form-urlencoded':
import urllib
return urllib.urlencode(self.data)
else:
raise NotImplementedError(
"You must manually encode the request "
"body for '{}'".format(self.content_type)
) |
def status(id):
"""
View status of all versions in a dataset.
The command also accepts a specific dataset version.
"""
if id:
data_source = get_data_object(id, use_data_config=False)
print_data([data_source] if data_source else [])
else:
data_sources = DataClient().get_all()
print_data(data_sources) | View status of all versions in a dataset.
The command also accepts a specific dataset version. | Below is the instruction that describes the task:
### Input:
View status of all versions in a dataset.
The command also accepts a specific dataset version.
### Response:
def status(id):
    """
    View status of all versions in a dataset.
    The command also accepts a specific dataset version.
    """
    if not id:
        # No specific version requested: list every version.
        print_data(DataClient().get_all())
        return
    record = get_data_object(id, use_data_config=False)
    print_data([record] if record else [])
def post_slack_message(message=None, channel=None, username=None, icon_emoji=None):
"""Format the message and post to the appropriate slack channel.
Args:
message (str): Message to post to slack
channel (str): Desired channel. Must start with #
"""
LOG.debug('Slack Channel: %s\nSlack Message: %s', channel, message)
slack = slacker.Slacker(SLACK_TOKEN)
try:
slack.chat.post_message(channel=channel, text=message, username=username, icon_emoji=icon_emoji)
LOG.info('Message posted to %s', channel)
except slacker.Error:
LOG.info("error posted message to %s", channel) | Format the message and post to the appropriate slack channel.
Args:
message (str): Message to post to slack
channel (str): Desired channel. Must start with # | Below is the instruction that describes the task:
### Input:
Format the message and post to the appropriate slack channel.
Args:
message (str): Message to post to slack
channel (str): Desired channel. Must start with #
### Response:
def post_slack_message(message=None, channel=None, username=None, icon_emoji=None):
    """Format the message and post to the appropriate slack channel.

    Args:
        message (str): Message to post to slack
        channel (str): Desired channel. Must start with #
        username (str): Name to post the message under.
        icon_emoji (str): Emoji used as the posting avatar.
    """
    LOG.debug('Slack Channel: %s\nSlack Message: %s', channel, message)
    client = slacker.Slacker(SLACK_TOKEN)
    try:
        client.chat.post_message(
            channel=channel,
            text=message,
            username=username,
            icon_emoji=icon_emoji)
    except slacker.Error:
        LOG.info("error posted message to %s", channel)
    else:
        LOG.info('Message posted to %s', channel)
def timeseries():
"""Create a timeseries builder.
Returns:
A builder function which, given a class creates a timeseries
relationship for that class.
"""
def method_builder(cls):
method_doc = """Fetch the timeseries for this :class:`{0}`.
Returns:
The :class:`Timeseries` for this :class:`{0}`
Keyword Args:
**kwargs: The :class:`Timeseries` object constructor arguments.
""".format(cls.__name__)
def method(self, **kwargs):
resource_id = None if self.is_singleton() else self.id
return Timeseries(self._session, cls, resource_id, **kwargs)
method.__doc__ = method_doc
setattr(cls, 'timeseries', method)
return cls
return method_builder | Create a timeseries builder.
Returns:
A builder function which, given a class creates a timeseries
relationship for that class. | Below is the instruction that describes the task:
### Input:
Create a timeseries builder.
Returns:
A builder function which, given a class creates a timeseries
relationship for that class.
### Response:
def timeseries():
    """Create a timeseries builder.

    Returns:
        A builder function which, given a class creates a timeseries
        relationship for that class.
    """
    def _attach_timeseries(cls):
        # Generated accessor; its docstring is rendered per-class below.
        def method(self, **kwargs):
            resource_id = None if self.is_singleton() else self.id
            return Timeseries(self._session, cls, resource_id, **kwargs)
        method.__doc__ = """Fetch the timeseries for this :class:`{0}`.
        Returns:
            The :class:`Timeseries` for this :class:`{0}`
        Keyword Args:
            **kwargs: The :class:`Timeseries` object constructor arguments.
        """.format(cls.__name__)
        cls.timeseries = method
        return cls
    return _attach_timeseries
def remove_listener(self, event, listener):
"""Remove a listener from the emitter.
Args:
event (str): The event name on which the listener is bound.
listener: A reference to the same object given to add_listener.
Returns:
bool: True if a listener was removed else False.
This method only removes one listener at a time. If a listener is
attached multiple times then this method must be called repeatedly.
Additionally, this method removes listeners first from the those
registered with 'on' or 'add_listener'. If none are found it continue
to remove afterwards from those added with 'once'.
"""
with contextlib.suppress(ValueError):
self._listeners[event].remove(listener)
return True
with contextlib.suppress(ValueError):
self._once[event].remove(listener)
return True
return False | Remove a listener from the emitter.
Args:
event (str): The event name on which the listener is bound.
listener: A reference to the same object given to add_listener.
Returns:
bool: True if a listener was removed else False.
This method only removes one listener at a time. If a listener is
attached multiple times then this method must be called repeatedly.
Additionally, this method removes listeners first from the those
registered with 'on' or 'add_listener'. If none are found it continue
to remove afterwards from those added with 'once'. | Below is the instruction that describes the task:
### Input:
Remove a listener from the emitter.
Args:
event (str): The event name on which the listener is bound.
listener: A reference to the same object given to add_listener.
Returns:
bool: True if a listener was removed else False.
This method only removes one listener at a time. If a listener is
attached multiple times then this method must be called repeatedly.
Additionally, this method removes listeners first from the those
registered with 'on' or 'add_listener'. If none are found it continue
to remove afterwards from those added with 'once'.
### Response:
def remove_listener(self, event, listener):
    """Remove a listener from the emitter.

    Args:
        event (str): The event name on which the listener is bound.
        listener: A reference to the same object given to add_listener.

    Returns:
        bool: True if a listener was removed else False.

    Only one registration is removed per call, so a listener attached
    multiple times needs repeated calls. Listeners registered with
    'on'/'add_listener' are searched before those registered with 'once'.
    """
    try:
        self._listeners[event].remove(listener)
    except ValueError:
        pass
    else:
        return True
    try:
        self._once[event].remove(listener)
    except ValueError:
        return False
    return True
def find_DQ_extension(self):
""" Return the suffix for the data quality extension and the name of
the file which that DQ extension should be read from.
"""
dqfile = None
# Look for additional file with DQ array, primarily for WFPC2 data
indx = self._filename.find('.fits')
if indx > 3:
suffix = self._filename[indx-4:indx]
dqfile = self._filename.replace(suffix[:3],'_c1')
elif indx < 0 and len(self._filename) > 3 and \
self._filename[-4] == os.extsep and \
self._filename[-1].lower() == 'h':
# assume we've got a GEIS file
dqfile = self._filename[:-2]+'1'+self._filename[-1]
hdulist = readgeis.readgeis(dqfile)
prih = hdulist[0].header
if 'FILETYPE' in prih:
dq_suffix = prih['FILETYPE'].strip().upper()
else:
# assume extension name is 'SDQ' for WFPC2 GEIS files
dq_suffix = 'SDQ'
hdulist.close()
return dqfile,dq_suffix
else:
raise ValueError("Input file {} does not appear to be neither " \
"a FITS file nor a GEIS file.".format(self._filename))
if os.path.exists(dqfile):
dq_suffix = fits.getval(dqfile, "EXTNAME", ext=1, memmap=False)
else:
dq_suffix = "SCI"
return dqfile, dq_suffix | Return the suffix for the data quality extension and the name of
the file which that DQ extension should be read from. | Below is the instruction that describes the task:
### Input:
Return the suffix for the data quality extension and the name of
the file which that DQ extension should be read from.
### Response:
def find_DQ_extension(self):
    """Return the DQ file and extension name for this observation.

    Locates the companion data-quality (DQ) data for ``self._filename``:
    for FITS inputs a ``*_c1*`` sibling file, for GEIS inputs the
    matching group file (primarily WFPC2 data).

    Returns:
        tuple: ``(dqfile, dq_suffix)`` where ``dqfile`` is the path of the
        file holding the DQ array and ``dq_suffix`` is the EXTNAME of the
        DQ extension within it.

    Raises:
        ValueError: if the input filename looks like neither a FITS nor a
            GEIS file.
    """
    dqfile = None
    # Look for additional file with DQ array, primarily for WFPC2 data
    indx = self._filename.find('.fits')
    if indx > 3:
        # FITS input: swap the 3-char suffix prefix (e.g. '_c0') for '_c1'
        # to form the conventional DQ companion file name.
        suffix = self._filename[indx-4:indx]
        dqfile = self._filename.replace(suffix[:3], '_c1')
    elif indx < 0 and len(self._filename) > 3 and \
         self._filename[-4] == os.extsep and \
         self._filename[-1].lower() == 'h':
        # assume we've got a GEIS file ('*.??h' naming pattern)
        dqfile = self._filename[:-2] + '1' + self._filename[-1]
        hdulist = readgeis.readgeis(dqfile)
        prih = hdulist[0].header
        if 'FILETYPE' in prih:
            dq_suffix = prih['FILETYPE'].strip().upper()
        else:
            # assume extension name is 'SDQ' for WFPC2 GEIS files
            dq_suffix = 'SDQ'
        hdulist.close()
        return dqfile, dq_suffix
    else:
        # Bug fix: original message was a double negative ("does not
        # appear to be neither ... nor ...").
        raise ValueError("Input file {} does not appear to be either "
                         "a FITS file or a GEIS file.".format(self._filename))
    if os.path.exists(dqfile):
        dq_suffix = fits.getval(dqfile, "EXTNAME", ext=1, memmap=False)
    else:
        # DQ companion file absent; fall back to the science extension.
        dq_suffix = "SCI"
    return dqfile, dq_suffix
def check_job_collection_name(self, cloud_service_id, job_collection_id):
'''
The Check Name Availability operation checks if a new job collection with
the given name may be created, or if it is unavailable. The result of the
operation is a Boolean true or false.
cloud_service_id:
The cloud service id
job_collection_id:
The name of the job_collection_id.
'''
_validate_not_none('cloud_service_id', cloud_service_id)
_validate_not_none('job_collection_id', job_collection_id)
path = self._get_cloud_services_path(
cloud_service_id, "scheduler", "jobCollections")
path += "?op=checknameavailability&resourceName=" + job_collection_id
return self._perform_post(path, None, AvailabilityResponse) | The Check Name Availability operation checks if a new job collection with
the given name may be created, or if it is unavailable. The result of the
operation is a Boolean true or false.
cloud_service_id:
The cloud service id
job_collection_id:
The name of the job_collection_id. | Below is the instruction that describes the task:
### Input:
The Check Name Availability operation checks if a new job collection with
the given name may be created, or if it is unavailable. The result of the
operation is a Boolean true or false.
cloud_service_id:
The cloud service id
job_collection_id:
The name of the job_collection_id.
### Response:
def check_job_collection_name(self, cloud_service_id, job_collection_id):
    '''
    The Check Name Availability operation checks if a new job collection with
    the given name may be created, or if it is unavailable. The result of the
    operation is a Boolean true or false.

    cloud_service_id:
        The cloud service id
    job_collection_id:
        The name of the job_collection_id.
    '''
    # Validate both required arguments in order.
    for arg_name, arg_value in (('cloud_service_id', cloud_service_id),
                                ('job_collection_id', job_collection_id)):
        _validate_not_none(arg_name, arg_value)
    base = self._get_cloud_services_path(
        cloud_service_id, "scheduler", "jobCollections")
    query = "?op=checknameavailability&resourceName=" + job_collection_id
    return self._perform_post(base + query, None, AvailabilityResponse)
return self._perform_post(path, None, AvailabilityResponse) |
def rekey(self,
uuid=None,
offset=None,
template_attribute=None,
credential=None):
"""
Check object usage according to specific constraints.
Args:
uuid (string): The unique identifier of a managed cryptographic
object that should be checked. Optional, defaults to None.
offset (int): An integer specifying, in seconds, the difference
between the rekeyed objects initialization date and activation
date. Optional, defaults to None.
template_attribute (TemplateAttribute): A TemplateAttribute struct
containing the attributes to set on the newly rekeyed object.
Optional, defaults to None.
credential (Credential): A Credential struct containing a set of
authorization parameters for the operation. Optional, defaults
to None.
Returns:
dict: The results of the check operation, containing the following
key/value pairs:
Key | Value
---------------------------|-----------------------------------
'unique_identifier' | (string) The unique ID of the
| checked cryptographic object.
'template_attribute' | (TemplateAttribute) A struct
| containing attribute set by the
| server. Optional.
'result_status' | (ResultStatus) An enumeration
| indicating the status of the
| operation result.
'result_reason' | (ResultReason) An enumeration
| providing context for the result
| status.
'result_message' | (string) A message providing
| additional context for the
| operation result.
"""
operation = Operation(OperationEnum.REKEY)
request_payload = payloads.RekeyRequestPayload(
unique_identifier=uuid,
offset=offset,
template_attribute=template_attribute
)
batch_item = messages.RequestBatchItem(
operation=operation,
request_payload=request_payload
)
request = self._build_request_message(credential, [batch_item])
response = self._send_and_receive_message(request)
batch_item = response.batch_items[0]
payload = batch_item.response_payload
result = {}
if payload:
result['unique_identifier'] = payload.unique_identifier
if payload.template_attribute is not None:
result['template_attribute'] = payload.template_attribute
result['result_status'] = batch_item.result_status.value
try:
result['result_reason'] = batch_item.result_reason.value
except Exception:
result['result_reason'] = batch_item.result_reason
try:
result['result_message'] = batch_item.result_message.value
except Exception:
result['result_message'] = batch_item.result_message
return result | Check object usage according to specific constraints.
Args:
uuid (string): The unique identifier of a managed cryptographic
object that should be checked. Optional, defaults to None.
offset (int): An integer specifying, in seconds, the difference
between the rekeyed objects initialization date and activation
date. Optional, defaults to None.
template_attribute (TemplateAttribute): A TemplateAttribute struct
containing the attributes to set on the newly rekeyed object.
Optional, defaults to None.
credential (Credential): A Credential struct containing a set of
authorization parameters for the operation. Optional, defaults
to None.
Returns:
dict: The results of the check operation, containing the following
key/value pairs:
Key | Value
---------------------------|-----------------------------------
'unique_identifier' | (string) The unique ID of the
| checked cryptographic object.
'template_attribute' | (TemplateAttribute) A struct
| containing attribute set by the
| server. Optional.
'result_status' | (ResultStatus) An enumeration
| indicating the status of the
| operation result.
'result_reason' | (ResultReason) An enumeration
| providing context for the result
| status.
'result_message' | (string) A message providing
| additional context for the
| operation result. | Below is the instruction that describes the task:
### Input:
Check object usage according to specific constraints.
Args:
uuid (string): The unique identifier of a managed cryptographic
object that should be checked. Optional, defaults to None.
offset (int): An integer specifying, in seconds, the difference
between the rekeyed objects initialization date and activation
date. Optional, defaults to None.
template_attribute (TemplateAttribute): A TemplateAttribute struct
containing the attributes to set on the newly rekeyed object.
Optional, defaults to None.
credential (Credential): A Credential struct containing a set of
authorization parameters for the operation. Optional, defaults
to None.
Returns:
dict: The results of the check operation, containing the following
key/value pairs:
Key | Value
---------------------------|-----------------------------------
'unique_identifier' | (string) The unique ID of the
| checked cryptographic object.
'template_attribute' | (TemplateAttribute) A struct
| containing attribute set by the
| server. Optional.
'result_status' | (ResultStatus) An enumeration
| indicating the status of the
| operation result.
'result_reason' | (ResultReason) An enumeration
| providing context for the result
| status.
'result_message' | (string) A message providing
| additional context for the
| operation result.
### Response:
def rekey(self,
          uuid=None,
          offset=None,
          template_attribute=None,
          credential=None):
        """
        Rekey a managed cryptographic object, requesting its replacement.

        Args:
            uuid (string): The unique identifier of the managed cryptographic
                object to be rekeyed. Optional, defaults to None.
            offset (int): An integer specifying, in seconds, the difference
                between the rekeyed objects initialization date and activation
                date. Optional, defaults to None.
            template_attribute (TemplateAttribute): A TemplateAttribute struct
                containing the attributes to set on the newly rekeyed object.
                Optional, defaults to None.
            credential (Credential): A Credential struct containing a set of
                authorization parameters for the operation. Optional, defaults
                to None.

        Returns:
            dict: The results of the rekey operation, containing the following
                key/value pairs:

                Key                  | Value
                ---------------------|---------------------------------------
                'unique_identifier'  | (string) The unique ID of the rekeyed
                                     | cryptographic object.
                'template_attribute' | (TemplateAttribute) A struct containing
                                     | attributes set by the server. Optional.
                'result_status'      | (ResultStatus) An enumeration indicating
                                     | the status of the operation result.
                'result_reason'      | (ResultReason) An enumeration providing
                                     | context for the result status.
                'result_message'     | (string) A message providing additional
                                     | context for the operation result.
        """
        operation = Operation(OperationEnum.REKEY)
        request_payload = payloads.RekeyRequestPayload(
            unique_identifier=uuid,
            offset=offset,
            template_attribute=template_attribute
        )
        batch_item = messages.RequestBatchItem(
            operation=operation,
            request_payload=request_payload
        )
        # Single-item batch; the response is expected to mirror it.
        request = self._build_request_message(credential, [batch_item])
        response = self._send_and_receive_message(request)
        batch_item = response.batch_items[0]
        payload = batch_item.response_payload
        result = {}
        if payload:
            result['unique_identifier'] = payload.unique_identifier
            if payload.template_attribute is not None:
                result['template_attribute'] = payload.template_attribute
        result['result_status'] = batch_item.result_status.value
        # result_reason/result_message may come back as bare values rather
        # than enumerations; fall back to the raw attribute in that case.
        try:
            result['result_reason'] = batch_item.result_reason.value
        except Exception:
            result['result_reason'] = batch_item.result_reason
        try:
            result['result_message'] = batch_item.result_message.value
        except Exception:
            result['result_message'] = batch_item.result_message
        return result
return result |
def _set_extent(ax, projection, extent, extrema):
"""
Sets the plot extent.
Parameters
----------
ax : cartopy.GeoAxesSubplot instance
The axis whose boundaries are being tweaked.
projection : None or geoplot.crs instance
The projection, if one is being used.
extent : None or (xmin, xmax, ymin, ymax) tuple
A copy of the ``extent`` top-level parameter, if the user choses to specify their own extent. These values
will be used if ``extent`` is non-``None``.
extrema : None or (xmin, xmax, ymin, ymax) tuple
Plot-calculated extrema. These values, which are calculated in the plot above and passed to this function
(different plots require different calculations), will be used if a user-provided ``extent`` is not provided.
Returns
-------
None
"""
if extent:
xmin, xmax, ymin, ymax = extent
xmin, xmax, ymin, ymax = max(xmin, -180), min(xmax, 180), max(ymin, -90), min(ymax, 90)
if projection: # Input ``extent`` into set_extent().
ax.set_extent((xmin, xmax, ymin, ymax), crs=ccrs.PlateCarree())
else: # Input ``extent`` into set_ylim, set_xlim.
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
else:
xmin, xmax, ymin, ymax = extrema
xmin, xmax, ymin, ymax = max(xmin, -180), min(xmax, 180), max(ymin, -90), min(ymax, 90)
if projection: # Input ``extrema`` into set_extent.
ax.set_extent((xmin, xmax, ymin, ymax), crs=ccrs.PlateCarree())
else: # Input ``extrema`` into set_ylim, set_xlim.
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax)) | Sets the plot extent.
Parameters
----------
ax : cartopy.GeoAxesSubplot instance
The axis whose boundaries are being tweaked.
projection : None or geoplot.crs instance
The projection, if one is being used.
extent : None or (xmin, xmax, ymin, ymax) tuple
A copy of the ``extent`` top-level parameter, if the user choses to specify their own extent. These values
will be used if ``extent`` is non-``None``.
extrema : None or (xmin, xmax, ymin, ymax) tuple
Plot-calculated extrema. These values, which are calculated in the plot above and passed to this function
(different plots require different calculations), will be used if a user-provided ``extent`` is not provided.
Returns
-------
None | Below is the the instruction that describes the task:
### Input:
Sets the plot extent.
Parameters
----------
ax : cartopy.GeoAxesSubplot instance
The axis whose boundaries are being tweaked.
projection : None or geoplot.crs instance
The projection, if one is being used.
extent : None or (xmin, xmax, ymin, ymax) tuple
A copy of the ``extent`` top-level parameter, if the user choses to specify their own extent. These values
will be used if ``extent`` is non-``None``.
extrema : None or (xmin, xmax, ymin, ymax) tuple
Plot-calculated extrema. These values, which are calculated in the plot above and passed to this function
(different plots require different calculations), will be used if a user-provided ``extent`` is not provided.
Returns
-------
None
### Response:
def _set_extent(ax, projection, extent, extrema):
"""
Sets the plot extent.
Parameters
----------
ax : cartopy.GeoAxesSubplot instance
The axis whose boundaries are being tweaked.
projection : None or geoplot.crs instance
The projection, if one is being used.
extent : None or (xmin, xmax, ymin, ymax) tuple
A copy of the ``extent`` top-level parameter, if the user choses to specify their own extent. These values
will be used if ``extent`` is non-``None``.
extrema : None or (xmin, xmax, ymin, ymax) tuple
Plot-calculated extrema. These values, which are calculated in the plot above and passed to this function
(different plots require different calculations), will be used if a user-provided ``extent`` is not provided.
Returns
-------
None
"""
if extent:
xmin, xmax, ymin, ymax = extent
xmin, xmax, ymin, ymax = max(xmin, -180), min(xmax, 180), max(ymin, -90), min(ymax, 90)
if projection: # Input ``extent`` into set_extent().
ax.set_extent((xmin, xmax, ymin, ymax), crs=ccrs.PlateCarree())
else: # Input ``extent`` into set_ylim, set_xlim.
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
else:
xmin, xmax, ymin, ymax = extrema
xmin, xmax, ymin, ymax = max(xmin, -180), min(xmax, 180), max(ymin, -90), min(ymax, 90)
if projection: # Input ``extrema`` into set_extent.
ax.set_extent((xmin, xmax, ymin, ymax), crs=ccrs.PlateCarree())
else: # Input ``extrema`` into set_ylim, set_xlim.
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax)) |
def distance(self, channel=1):
"""
Returns distance (0, 100) to the beacon on the given channel.
Returns None when beacon is not found.
"""
self._ensure_mode(self.MODE_IR_SEEK)
channel = self._normalize_channel(channel)
ret_value = self.value((channel * 2) + 1)
# The value will be -128 if no beacon is found, return None instead
return None if ret_value == -128 else ret_value | Returns distance (0, 100) to the beacon on the given channel.
Returns None when beacon is not found. | Below is the the instruction that describes the task:
### Input:
Returns distance (0, 100) to the beacon on the given channel.
Returns None when beacon is not found.
### Response:
def distance(self, channel=1):
"""
Returns distance (0, 100) to the beacon on the given channel.
Returns None when beacon is not found.
"""
self._ensure_mode(self.MODE_IR_SEEK)
channel = self._normalize_channel(channel)
ret_value = self.value((channel * 2) + 1)
# The value will be -128 if no beacon is found, return None instead
return None if ret_value == -128 else ret_value |
def advance(self, blocksize):
"""Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
"""
ts = self._read_frame(blocksize)
self.raw_buffer.roll(-len(ts))
self.raw_buffer[-len(ts):] = ts[:]
self.read_pos += blocksize
self.raw_buffer.start_time += blocksize
return ts | Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel | Below is the the instruction that describes the task:
### Input:
Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
### Response:
def advance(self, blocksize):
"""Add blocksize seconds more to the buffer, push blocksize seconds
from the beginning.
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
"""
ts = self._read_frame(blocksize)
self.raw_buffer.roll(-len(ts))
self.raw_buffer[-len(ts):] = ts[:]
self.read_pos += blocksize
self.raw_buffer.start_time += blocksize
return ts |
def firmware_download_input_protocol_type_sftp_protocol_sftp_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware_download = ET.Element("firmware_download")
config = firmware_download
input = ET.SubElement(firmware_download, "input")
protocol_type = ET.SubElement(input, "protocol-type")
sftp_protocol = ET.SubElement(protocol_type, "sftp-protocol")
sftp = ET.SubElement(sftp_protocol, "sftp")
password = ET.SubElement(sftp, "password")
password.text = kwargs.pop('password')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def firmware_download_input_protocol_type_sftp_protocol_sftp_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
firmware_download = ET.Element("firmware_download")
config = firmware_download
input = ET.SubElement(firmware_download, "input")
protocol_type = ET.SubElement(input, "protocol-type")
sftp_protocol = ET.SubElement(protocol_type, "sftp-protocol")
sftp = ET.SubElement(sftp_protocol, "sftp")
password = ET.SubElement(sftp, "password")
password.text = kwargs.pop('password')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line) | Retrieve all lines compatible with a given line. | Below is the the instruction that describes the task:
### Input:
Retrieve all lines compatible with a given line.
### Response:
def get_line(self, section, line):
"""Retrieve all lines compatible with a given line."""
try:
section = self._get_section(section, create=False)
except KeyError:
return []
return section.find_lines(line) |
def fetchcbatch(self):
'''Return a CBatch object of any data currently in the buffer or
if no data currently in buffer then fetch a batch'''
if not self._last_operation.is_columnar:
raise NotSupportedError("Server does not support columnar "
"fetching")
if not self.has_result_set:
raise ProgrammingError(
"Trying to fetch results on an operation with no results.")
if len(self._buffer) > 0:
log.debug('fetchcbatch: buffer has data in. Returning it and wiping buffer')
batch = self._buffer
self._buffer = Batch()
return batch
elif self._last_operation_active:
log.debug('fetchcbatch: buffer empty and op is active => fetching '
'more data')
batch = (self._last_operation.fetch(
self.description,
self.buffersize,
convert_types=self.convert_types))
if len(batch) == 0:
return None
return batch
else:
return None | Return a CBatch object of any data currently in the buffer or
if no data currently in buffer then fetch a batch | Below is the the instruction that describes the task:
### Input:
Return a CBatch object of any data currently in the buffer or
if no data currently in buffer then fetch a batch
### Response:
def fetchcbatch(self):
'''Return a CBatch object of any data currently in the buffer or
if no data currently in buffer then fetch a batch'''
if not self._last_operation.is_columnar:
raise NotSupportedError("Server does not support columnar "
"fetching")
if not self.has_result_set:
raise ProgrammingError(
"Trying to fetch results on an operation with no results.")
if len(self._buffer) > 0:
log.debug('fetchcbatch: buffer has data in. Returning it and wiping buffer')
batch = self._buffer
self._buffer = Batch()
return batch
elif self._last_operation_active:
log.debug('fetchcbatch: buffer empty and op is active => fetching '
'more data')
batch = (self._last_operation.fetch(
self.description,
self.buffersize,
convert_types=self.convert_types))
if len(batch) == 0:
return None
return batch
else:
return None |
def get_file_to_path(self, share_name, directory_name, file_name, file_path,
open_mode='wb', start_range=None, end_range=None,
validate_content=False, progress_callback=None,
max_connections=2, timeout=None):
'''
Downloads a file to a file path, with automatic chunking and progress
notifications. Returns an instance of File with properties and metadata.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param str file_path:
Path of file to write to.
:param str open_mode:
Mode to use when opening the file. Note that specifying append only
open_mode prevents parallel download. So, max_connections must be set
to 1 if this open_mode is used.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If set to true, validates an MD5 hash for each retrieved portion of
the file. This is primarily valuable for detecting bitflips on the wire
if using http instead of https as https (the default) will already
validate. Note that the service will only return transactional MD5s
for chunks 4MB or less so the first get request will be of size
self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be
thrown. As computing the MD5 takes processing time and more requests
will need to be done due to the reduced chunk size there may be some
increase in latency.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transfered so far, and total is
the size of the file if known.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
If set to 2 or greater, an initial get will be done for the first
self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file,
the method returns at this point. If it is not, it will download the
remaining data parallel using the number of threads equal to
max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
If set to 1, a single large get request will be done. This is not
generally recommended but available if very few threads should be
used, network requests are very expensive, or a non-seekable stream
prevents parallel download. This may also be valuable if the file is
being concurrently modified to enforce atomicity or if many files are
expected to be empty as an extra request is required for empty files
if max_connections is greater than 1.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A File with properties and metadata.
:rtype: :class:`~azure.storage.file.models.File`
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('file_path', file_path)
_validate_not_none('open_mode', open_mode)
if max_connections > 1 and 'a' in open_mode:
raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
with open(file_path, open_mode) as stream:
file = self.get_file_to_stream(
share_name, directory_name, file_name, stream,
start_range, end_range, validate_content,
progress_callback, max_connections, timeout)
return file | Downloads a file to a file path, with automatic chunking and progress
notifications. Returns an instance of File with properties and metadata.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param str file_path:
Path of file to write to.
:param str open_mode:
Mode to use when opening the file. Note that specifying append only
open_mode prevents parallel download. So, max_connections must be set
to 1 if this open_mode is used.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If set to true, validates an MD5 hash for each retrieved portion of
the file. This is primarily valuable for detecting bitflips on the wire
if using http instead of https as https (the default) will already
validate. Note that the service will only return transactional MD5s
for chunks 4MB or less so the first get request will be of size
self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be
thrown. As computing the MD5 takes processing time and more requests
will need to be done due to the reduced chunk size there may be some
increase in latency.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transfered so far, and total is
the size of the file if known.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
If set to 2 or greater, an initial get will be done for the first
self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file,
the method returns at this point. If it is not, it will download the
remaining data parallel using the number of threads equal to
max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
If set to 1, a single large get request will be done. This is not
generally recommended but available if very few threads should be
used, network requests are very expensive, or a non-seekable stream
prevents parallel download. This may also be valuable if the file is
being concurrently modified to enforce atomicity or if many files are
expected to be empty as an extra request is required for empty files
if max_connections is greater than 1.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A File with properties and metadata.
:rtype: :class:`~azure.storage.file.models.File` | Below is the the instruction that describes the task:
### Input:
Downloads a file to a file path, with automatic chunking and progress
notifications. Returns an instance of File with properties and metadata.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param str file_path:
Path of file to write to.
:param str open_mode:
Mode to use when opening the file. Note that specifying append only
open_mode prevents parallel download. So, max_connections must be set
to 1 if this open_mode is used.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If set to true, validates an MD5 hash for each retrieved portion of
the file. This is primarily valuable for detecting bitflips on the wire
if using http instead of https as https (the default) will already
validate. Note that the service will only return transactional MD5s
for chunks 4MB or less so the first get request will be of size
self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be
thrown. As computing the MD5 takes processing time and more requests
will need to be done due to the reduced chunk size there may be some
increase in latency.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transfered so far, and total is
the size of the file if known.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
If set to 2 or greater, an initial get will be done for the first
self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file,
the method returns at this point. If it is not, it will download the
remaining data parallel using the number of threads equal to
max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
If set to 1, a single large get request will be done. This is not
generally recommended but available if very few threads should be
used, network requests are very expensive, or a non-seekable stream
prevents parallel download. This may also be valuable if the file is
being concurrently modified to enforce atomicity or if many files are
expected to be empty as an extra request is required for empty files
if max_connections is greater than 1.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A File with properties and metadata.
:rtype: :class:`~azure.storage.file.models.File`
### Response:
def get_file_to_path(self, share_name, directory_name, file_name, file_path,
open_mode='wb', start_range=None, end_range=None,
validate_content=False, progress_callback=None,
max_connections=2, timeout=None):
'''
Downloads a file to a file path, with automatic chunking and progress
notifications. Returns an instance of File with properties and metadata.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param str file_path:
Path of file to write to.
:param str open_mode:
Mode to use when opening the file. Note that specifying append only
open_mode prevents parallel download. So, max_connections must be set
to 1 if this open_mode is used.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If set to true, validates an MD5 hash for each retrieved portion of
the file. This is primarily valuable for detecting bitflips on the wire
if using http instead of https as https (the default) will already
validate. Note that the service will only return transactional MD5s
for chunks 4MB or less so the first get request will be of size
self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If
self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be
thrown. As computing the MD5 takes processing time and more requests
will need to be done due to the reduced chunk size there may be some
increase in latency.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transfered so far, and total is
the size of the file if known.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
If set to 2 or greater, an initial get will be done for the first
self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file,
the method returns at this point. If it is not, it will download the
remaining data parallel using the number of threads equal to
max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE.
If set to 1, a single large get request will be done. This is not
generally recommended but available if very few threads should be
used, network requests are very expensive, or a non-seekable stream
prevents parallel download. This may also be valuable if the file is
being concurrently modified to enforce atomicity or if many files are
expected to be empty as an extra request is required for empty files
if max_connections is greater than 1.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A File with properties and metadata.
:rtype: :class:`~azure.storage.file.models.File`
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('file_path', file_path)
_validate_not_none('open_mode', open_mode)
if max_connections > 1 and 'a' in open_mode:
raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE)
with open(file_path, open_mode) as stream:
file = self.get_file_to_stream(
share_name, directory_name, file_name, stream,
start_range, end_range, validate_content,
progress_callback, max_connections, timeout)
return file |
def queryset(self, request, queryset):
"""Filter based on whether an update (of any sort) is available."""
if self.value() == '-1':
return queryset.filter(latest_version__isnull=True)
elif self.value() == '0':
return (
queryset
.filter(
current_version__isnull=False,
latest_version__isnull=False,
latest_version=F('current_version')
)
)
elif self.value() == '1':
return (
queryset
.filter(
current_version__isnull=False,
latest_version__isnull=False
).exclude(
latest_version=F('current_version')
)
)
else:
return queryset | Filter based on whether an update (of any sort) is available. | Below is the the instruction that describes the task:
### Input:
Filter based on whether an update (of any sort) is available.
### Response:
def queryset(self, request, queryset):
"""Filter based on whether an update (of any sort) is available."""
if self.value() == '-1':
return queryset.filter(latest_version__isnull=True)
elif self.value() == '0':
return (
queryset
.filter(
current_version__isnull=False,
latest_version__isnull=False,
latest_version=F('current_version')
)
)
elif self.value() == '1':
return (
queryset
.filter(
current_version__isnull=False,
latest_version__isnull=False
).exclude(
latest_version=F('current_version')
)
)
else:
return queryset |
def init_library(database_dsn, accounts_password, limited_run = False):
"""Child initializer, setup in Library.process_pool"""
import os
import signal
# Have the child processes ignore the keyboard interrupt, and other signals. Instead, the parent will
# catch these, and clean up the children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
#signal.signal(signal.SIGTERM, sigterm_handler)
os.environ['AMBRY_DB'] = database_dsn
if accounts_password:
os.environ['AMBRY_PASSWORD'] = accounts_password
os.environ['AMBRY_LIMITED_RUN'] = '1' if limited_run else '0' | Child initializer, setup in Library.process_pool | Below is the the instruction that describes the task:
### Input:
Child initializer, setup in Library.process_pool
### Response:
def init_library(database_dsn, accounts_password, limited_run = False):
"""Child initializer, setup in Library.process_pool"""
import os
import signal
# Have the child processes ignore the keyboard interrupt, and other signals. Instead, the parent will
# catch these, and clean up the children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
#signal.signal(signal.SIGTERM, sigterm_handler)
os.environ['AMBRY_DB'] = database_dsn
if accounts_password:
os.environ['AMBRY_PASSWORD'] = accounts_password
os.environ['AMBRY_LIMITED_RUN'] = '1' if limited_run else '0' |
async def mute(gc: GroupControl, mute):
"""(Un)mute group."""
click.echo("Muting group: %s" % mute)
click.echo(await gc.set_mute(mute)) | (Un)mute group. | Below is the the instruction that describes the task:
### Input:
(Un)mute group.
### Response:
async def mute(gc: GroupControl, mute):
"""(Un)mute group."""
click.echo("Muting group: %s" % mute)
click.echo(await gc.set_mute(mute)) |
def p_enumItems(self, p):
"""enumItems : enumItems ',' enumItem
| enumItem"""
n = len(p)
if n == 4:
p[0] = p[1] + [p[3]]
elif n == 2:
p[0] = [p[1]] | enumItems : enumItems ',' enumItem
| enumItem | Below is the the instruction that describes the task:
### Input:
enumItems : enumItems ',' enumItem
| enumItem
### Response:
def p_enumItems(self, p):
"""enumItems : enumItems ',' enumItem
| enumItem"""
n = len(p)
if n == 4:
p[0] = p[1] + [p[3]]
elif n == 2:
p[0] = [p[1]] |
def rsr(obsvalues, # type: Union[numpy.ndarray, List[Union[float, int]]]
simvalues # type: Union[numpy.ndarray, List[Union[float, int]]]
):
# type: (...) -> Union[float, numpy.ScalarType]
"""Calculate RSR (RMSE-to-SD Ratio).
Programmed according to equation (3) in
Moriasi et al. 2007. Model evalutaion guidelines for systematic quantification of accuracy
in watershed simulations. Transactions of the ASABE 50(3): 885-900.
Args:
obsvalues: observe values array
simvalues: simulate values array
Examples:
>>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\
4.00, 2.24, 29.28, 5.88, 0.86, 13.21]
>>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\
2.78, 2.76, 13.40, 2.70, 2.09, 1.62]
>>> MathClass.rsr(obs, sim) # doctest: +ELLIPSIS
0.7404026155824978...
Returns:
RSR value, or raise exception
"""
if len(obsvalues) != len(simvalues):
raise ValueError("The size of observed and simulated values must be"
" the same for RSR calculation!")
mean_obs = sum(obsvalues) / len(obsvalues)
return sqrt(sum(map(lambda x, y: (x - y) ** 2, obsvalues, simvalues))) / \
sqrt(sum(map(lambda x, y: (x - y) ** 2, obsvalues, [mean_obs] * len(obsvalues)))) | Calculate RSR (RMSE-to-SD Ratio).
Programmed according to equation (3) in
Moriasi et al. 2007. Model evalutaion guidelines for systematic quantification of accuracy
in watershed simulations. Transactions of the ASABE 50(3): 885-900.
Args:
obsvalues: observe values array
simvalues: simulate values array
Examples:
>>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\
4.00, 2.24, 29.28, 5.88, 0.86, 13.21]
>>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\
2.78, 2.76, 13.40, 2.70, 2.09, 1.62]
>>> MathClass.rsr(obs, sim) # doctest: +ELLIPSIS
0.7404026155824978...
Returns:
RSR value, or raise exception | Below is the the instruction that describes the task:
### Input:
Calculate RSR (RMSE-to-SD Ratio).
Programmed according to equation (3) in
Moriasi et al. 2007. Model evalutaion guidelines for systematic quantification of accuracy
in watershed simulations. Transactions of the ASABE 50(3): 885-900.
Args:
obsvalues: observe values array
simvalues: simulate values array
Examples:
>>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\
4.00, 2.24, 29.28, 5.88, 0.86, 13.21]
>>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\
2.78, 2.76, 13.40, 2.70, 2.09, 1.62]
>>> MathClass.rsr(obs, sim) # doctest: +ELLIPSIS
0.7404026155824978...
Returns:
RSR value, or raise exception
### Response:
def rsr(obsvalues, # type: Union[numpy.ndarray, List[Union[float, int]]]
        simvalues # type: Union[numpy.ndarray, List[Union[float, int]]]
        ):
        # type: (...) -> Union[float, numpy.ScalarType]
        """Calculate RSR (RMSE-to-SD Ratio).

        Programmed according to equation (3) in
        Moriasi et al. 2007. Model evalutaion guidelines for systematic quantification of accuracy
        in watershed simulations. Transactions of the ASABE 50(3): 885-900.

        Args:
            obsvalues: observe values array
            simvalues: simulate values array

        Examples:
            >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\
                       4.00, 2.24, 29.28, 5.88, 0.86, 13.21]
            >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\
                       2.78, 2.76, 13.40, 2.70, 2.09, 1.62]
            >>> MathClass.rsr(obs, sim)  # doctest: +ELLIPSIS
            0.7404026155824978...

        Returns:
            RSR value, or raise exception
        """
        if len(obsvalues) != len(simvalues):
            raise ValueError("The size of observed and simulated values must be"
                             " the same for RSR calculation!")
        mean_obs = sum(obsvalues) / len(obsvalues)
        # Numerator: root of the summed squared residuals (the RMSE part).
        rmse_part = sqrt(sum((obs - sim) ** 2 for obs, sim in zip(obsvalues, simvalues)))
        # Denominator: root of the summed squared deviations from the mean (the SD part).
        sd_part = sqrt(sum((obs - mean_obs) ** 2 for obs in obsvalues))
        return rmse_part / sd_part
def build(self, bug: Bug):
"""
Instructs the server to build the Docker image associated with a given
bug.
"""
r = self.__api.post('bugs/{}/build'.format(bug.name))
if r.status_code == 204:
return
if r.status_code == 200:
raise Exception("bug already built: {}".format(bug.name))
# TODO: implement ImageBuildFailed.from_dict
if r.status_code == 400:
raise Exception("build failure")
if r.status_code == 404:
raise KeyError("no bug found with given name: {}".format(bug.name))
self.__api.handle_erroneous_response(r) | Instructs the server to build the Docker image associated with a given
bug. | Below is the instruction that describes the task:
### Input:
Instructs the server to build the Docker image associated with a given
bug.
### Response:
def build(self, bug: Bug):
        """
        Asks the server to build the Docker image associated with a given
        bug.

        Raises a generic exception when the image was already built or the
        build failed, and ``KeyError`` when the server does not know the bug.
        """
        r = self.__api.post('bugs/{}/build'.format(bug.name))
        status = r.status_code
        if status == 204:
            # Image was built successfully; nothing more to do.
            return
        if status == 200:
            raise Exception("bug already built: {}".format(bug.name))
        # TODO: implement ImageBuildFailed.from_dict
        if status == 400:
            raise Exception("build failure")
        if status == 404:
            raise KeyError("no bug found with given name: {}".format(bug.name))
self.__api.handle_erroneous_response(r) |
def do_run(self, count=1):
'''Roll count dice, store results. Does all stats so might be slower
than specific doFoo methods. But, it is proly faster than running
each of those seperately to get same stats.
Sets the following properties:
- stats.bucket
- stats.sum
- stats.avr
:param count: Number of rolls to make.
'''
if not self.roll.summable:
raise Exception('Roll is not summable')
h = dict()
total = 0
for roll in self.roll.x_rolls(count):
total += roll
h[roll] = h.get(roll, 0) + 1
self._bucket = h
self.sum = total
self.avr = total / count | Roll count dice, store results. Does all stats so might be slower
than specific doFoo methods. But, it is proly faster than running
each of those seperately to get same stats.
Sets the following properties:
- stats.bucket
- stats.sum
- stats.avr
:param count: Number of rolls to make. | Below is the instruction that describes the task:
### Input:
Roll count dice, store results. Does all stats so might be slower
than specific doFoo methods. But, it is proly faster than running
each of those seperately to get same stats.
Sets the following properties:
- stats.bucket
- stats.sum
- stats.avr
:param count: Number of rolls to make.
### Response:
def do_run(self, count=1):
        '''Roll count dice and record the outcome statistics.

        Gathers every statistic in a single pass over the rolls, which is
        usually cheaper than computing each one separately.

        Sets the following properties:

        - stats.bucket
        - stats.sum
        - stats.avr

        :param count: Number of rolls to make.
        '''
        if not self.roll.summable:
            raise Exception('Roll is not summable')
        histogram = {}
        running_total = 0
        for outcome in self.roll.x_rolls(count):
            running_total += outcome
            histogram[outcome] = histogram.get(outcome, 0) + 1
        self._bucket = histogram
        self.sum = running_total
        self.avr = running_total / count
def all_conditional_solidity_variables_read(self, include_loop=True):
        """
        Return the Solidity variables directly used in a condition
        Use of the IR to filter index access
        Assumption: the solidity vars are used directly in the conditional node
        It won't work if the variable is assigned to a temp variable

        :param include_loop: if True, conditions that guard loops are
            considered as well
        """
        # The two variants are memoized in separate attributes, so both can
        # be cached at the same time without interfering with each other.
        if include_loop:
            if self._all_conditional_solidity_variables_read_with_loop is None:
                self._all_conditional_solidity_variables_read_with_loop = self._explore_functions(
                    lambda x: self._explore_func_conditional(x,
                                                             self._solidity_variable_in_binary,
                                                             include_loop))
            return self._all_conditional_solidity_variables_read_with_loop
        else:
            if self._all_conditional_solidity_variables_read is None:
                self._all_conditional_solidity_variables_read = self._explore_functions(
                    lambda x: self._explore_func_conditional(x,
                                                             self._solidity_variable_in_binary,
                                                             include_loop))
return self._all_conditional_solidity_variables_read | Return the Soldiity variables directly used in a condtion
Use of the IR to filter index access
Assumption: the solidity vars are used directly in the conditional node
It won't work if the variable is assigned to a temp variable | Below is the instruction that describes the task:
### Input:
Return the Soldiity variables directly used in a condtion
Use of the IR to filter index access
Assumption: the solidity vars are used directly in the conditional node
It won't work if the variable is assigned to a temp variable
### Response:
def all_conditional_solidity_variables_read(self, include_loop=True):
"""
Return the Soldiity variables directly used in a condtion
Use of the IR to filter index access
Assumption: the solidity vars are used directly in the conditional node
It won't work if the variable is assigned to a temp variable
"""
if include_loop:
if self._all_conditional_solidity_variables_read_with_loop is None:
self._all_conditional_solidity_variables_read_with_loop = self._explore_functions(
lambda x: self._explore_func_conditional(x,
self._solidity_variable_in_binary,
include_loop))
return self._all_conditional_solidity_variables_read_with_loop
else:
if self._all_conditional_solidity_variables_read is None:
self._all_conditional_solidity_variables_read = self._explore_functions(
lambda x: self._explore_func_conditional(x,
self._solidity_variable_in_binary,
include_loop))
return self._all_conditional_solidity_variables_read |
def execute_command_by_uuid(self, tab_uuid, command):
# TODO DBUS_ONLY
"""Execute the `command' in the tab whose terminal has the `tab_uuid' uuid
"""
if command[-1] != '\n':
command += '\n'
try:
tab_uuid = uuid.UUID(tab_uuid)
page_index, = (
index for index, t in enumerate(self.get_notebook().iter_terminals())
if t.get_uuid() == tab_uuid
)
except ValueError:
pass
else:
terminals = self.get_notebook().get_terminals_for_page(page_index)
for current_vte in terminals:
                current_vte.feed_child(command) | Execute the `command' in the tab whose terminal has the `tab_uuid' uuid | Below is the instruction that describes the task:
### Input:
Execute the `command' in the tab whose terminal has the `tab_uuid' uuid
### Response:
def execute_command_by_uuid(self, tab_uuid, command):
        # TODO DBUS_ONLY
        """Execute the `command' in the tab whose terminal has the `tab_uuid' uuid
        """
        if command[-1] != '\n':
            command += '\n'
        try:
            wanted = uuid.UUID(tab_uuid)
            # Exactly one terminal must match: the single-element unpacking
            # below raises ValueError otherwise, just like an unparsable
            # uuid string does above.
            matches = [
                index for index, term in enumerate(self.get_notebook().iter_terminals())
                if term.get_uuid() == wanted
            ]
            page_index, = matches
        except ValueError:
            pass
        else:
            for current_vte in self.get_notebook().get_terminals_for_page(page_index):
                current_vte.feed_child(command)
def addPlugin(self, plugin, call):
"""Add plugin to my list of plugins to call, if it has the attribute
I'm bound to.
"""
meth = getattr(plugin, call, None)
if meth is not None:
if call == 'loadTestsFromModule' and \
len(inspect.getargspec(meth)[0]) == 2:
orig_meth = meth
meth = lambda module, path, **kwargs: orig_meth(module)
self.plugins.append((plugin, meth)) | Add plugin to my list of plugins to call, if it has the attribute
I'm bound to. | Below is the instruction that describes the task:
### Input:
Add plugin to my list of plugins to call, if it has the attribute
I'm bound to.
### Response:
def addPlugin(self, plugin, call):
        """Add plugin to my list of plugins to call, if it has the attribute
        I'm bound to.

        Plugins implementing the legacy two-argument
        ``loadTestsFromModule(self, module)`` signature are wrapped in an
        adapter that discards the extra ``path`` and keyword arguments
        before dispatching.
        """
        meth = getattr(plugin, call, None)
        if meth is not None:
            # inspect.getargspec was removed in Python 3.11;
            # getfullargspec reports the same positional argument list.
            if call == 'loadTestsFromModule' and \
                    len(inspect.getfullargspec(meth)[0]) == 2:
                orig_meth = meth
                meth = lambda module, path, **kwargs: orig_meth(module)
            self.plugins.append((plugin, meth))
def cast_out(self, klass):
        """Interpret the content as a particular class.

        Decode the buffered ``tagList`` as an instance of *klass* and
        return the resulting value: sequence/list classes return the
        helper's decoded value, array classes drop the leading length
        element, atomic classes decode exactly one tag, and any other
        class is decoded as a constructed element.

        :param klass: the class used to interpret the buffered tags
        :raises DecodingError: if tags are left over after decoding, or if
            an atomic cast has zero or more than one tag
        """
        if _debug: Any._debug("cast_out %r", klass)
        global _sequence_of_classes, _list_of_classes
        # check for a sequence element
        if (klass in _sequence_of_classes) or (klass in _list_of_classes):
            # build a sequence helper
            helper = klass()
            # make a copy of the tag list
            t = TagList(self.tagList[:])
            # let it decode itself
            helper.decode(t)
            # make sure everything was consumed
            if len(t) != 0:
                raise DecodingError("incomplete cast")
            # return what was built
            return helper.value
        # check for an array element
        elif klass in _array_of_classes:
            # build a sequence helper
            helper = klass()
            # make a copy of the tag list
            t = TagList(self.tagList[:])
            # let it decode itself
            helper.decode(t)
            # make sure everything was consumed
            if len(t) != 0:
                raise DecodingError("incomplete cast")
            # return what was built with Python list semantics
            return helper.value[1:]
        elif issubclass(klass, (Atomic, AnyAtomic)):
            # make sure there's only one piece
            if len(self.tagList) == 0:
                raise DecodingError("missing cast component")
            if len(self.tagList) > 1:
                raise DecodingError("too many cast components")
            if _debug: Any._debug("    - building helper: %r", klass)
            # a helper cooperates between the atomic value and the tag
            helper = klass(self.tagList[0])
            # return the value
            return helper.value
        else:
            if _debug: Any._debug("    - building value: %r", klass)
            # build an element
            value = klass()
            # make a copy of the tag list
            t = TagList(self.tagList[:])
            # let it decode itself
            value.decode(t)
            # make sure everything was consumed
            if len(t) != 0:
                raise DecodingError("incomplete cast")
            # return what was built
            return value | Interpret the content as a particular class. | Below is the instruction that describes the task:
### Input:
Interpret the content as a particular class.
### Response:
def cast_out(self, klass):
"""Interpret the content as a particular class."""
if _debug: Any._debug("cast_out %r", klass)
global _sequence_of_classes, _list_of_classes
# check for a sequence element
if (klass in _sequence_of_classes) or (klass in _list_of_classes):
# build a sequence helper
helper = klass()
# make a copy of the tag list
t = TagList(self.tagList[:])
# let it decode itself
helper.decode(t)
# make sure everything was consumed
if len(t) != 0:
raise DecodingError("incomplete cast")
# return what was built
return helper.value
# check for an array element
elif klass in _array_of_classes:
# build a sequence helper
helper = klass()
# make a copy of the tag list
t = TagList(self.tagList[:])
# let it decode itself
helper.decode(t)
# make sure everything was consumed
if len(t) != 0:
raise DecodingError("incomplete cast")
# return what was built with Python list semantics
return helper.value[1:]
elif issubclass(klass, (Atomic, AnyAtomic)):
# make sure there's only one piece
if len(self.tagList) == 0:
raise DecodingError("missing cast component")
if len(self.tagList) > 1:
raise DecodingError("too many cast components")
if _debug: Any._debug(" - building helper: %r", klass)
# a helper cooperates between the atomic value and the tag
helper = klass(self.tagList[0])
# return the value
return helper.value
else:
if _debug: Any._debug(" - building value: %r", klass)
# build an element
value = klass()
# make a copy of the tag list
t = TagList(self.tagList[:])
# let it decode itself
value.decode(t)
# make sure everything was consumed
if len(t) != 0:
raise DecodingError("incomplete cast")
# return what was built
return value |
def default_encoder(obj):
"""
Default encoder for JSON datatypes, which returns UTF-8 encoded
json instead of the default bloated backslash u XXXX escaped ASCII strings.
"""
if isinstance(obj, bytes):
return json.dumps(bytes_to_str(obj),
ensure_ascii=False).encode("utf-8")
else:
return json.dumps(obj, ensure_ascii=False).encode("utf-8") | Default encoder for JSON datatypes, which returns UTF-8 encoded
json instead of the default bloated backslash u XXXX escaped ASCII strings. | Below is the instruction that describes the task:
### Input:
Default encoder for JSON datatypes, which returns UTF-8 encoded
json instead of the default bloated backslash u XXXX escaped ASCII strings.
### Response:
def default_encoder(obj):
    """
    Default encoder for JSON datatypes, which returns UTF-8 encoded
    json instead of the default bloated backslash u XXXX escaped ASCII strings.
    """
    # Bytes are first converted to text; everything else is dumped as-is.
    value = bytes_to_str(obj) if isinstance(obj, bytes) else obj
    return json.dumps(value, ensure_ascii=False).encode("utf-8")
def convert_table(self, block):
        """Convert a pipe-table block to grid table format.

        Returns the converted table as a list of lines; blocks that do not
        look like tables are returned unchanged, split into lines.
        """
        lines_orig = block.split('\n')
        lines_orig.pop() # Remove extra newline at end of block
        widest_cell = [] # Will hold the width of the widest cell for each column
        widest_word = [] # Will hold the width of the widest word for each column
        widths = [] # Will hold the computed widths of grid table columns
        rows = [] # Will hold table cells during processing
        lines = [] # Will hold the finished table
        has_border = False # Will be set to True if this is a bordered table
        width_unit = 0.0 # This number is used to divide up self.width according
                         # to the following formula:
                         #
                         # self.width = width_unit * maxwidth
                         #
                         # Where maxwidth is the sum over all elements of
                         # widest_cell.
        # Only process tables, leave everything else untouched
        if not self.test(None, block):
            return lines_orig
        if lines_orig[0].startswith('|'):
            has_border = True
        # Initialize width arrays
        for i in range(0, len(self._split_row(lines_orig[0], has_border))):
            widest_cell.append(0)
            widest_word.append(0)
            widths.append(0)
        # Parse lines into array of cells and record width of widest cell/word
        for line in lines_orig:
            row = self._split_row(line, has_border)
            # pad widest_cell to account for under length first row
            for i in range(0, len(row) - len(widest_cell)):
                widest_cell.append(0)
                widest_word.append(0)
                widths.append(0)
            for i in range(0, len(row)):
                # Record cell width
                if len(row[i]) > widest_cell[i]:
                    widest_cell[i] = len(row[i])
                # Record longest word
                words = row[i].split()
                for word in words:
                    # Keep URLs from throwing the word length count off too badly.
                    match = re.match(r'\[(.*?)\]\(.*?\)', word)
                    if match:
                        word = match.group(1)
                    if len(word) > widest_word[i]:
                        widest_word[i] = len(word)
            rows.append(row)
        # Remove table header divider line from rows
        rows.pop(1)
        # Compute first approximation of column widths based on maximum cell width
        for width in widest_cell:
            width_unit += float(width)
        width_unit = self.width / width_unit
        for i in range(0, len(widest_cell)):
            widths[i] = int(widest_cell[i] * width_unit)
        # Add rounding errors to narrowest column
        if sum(widths) < self.width:
            widths[widths.index(min(widths))] += self.width - sum(widths)
        # Attempt to correct first approximation of column widths based on
        # words that fail to fit their cell's width (if this fails textwrap
        # will break up long words but since it does not add hyphens this
        # should be avoided)
        for i in range(0, len(widths)):
            if widths[i] < widest_word[i]:
                offset = widest_word[i] - widths[i]
                for j in range(0, len(widths)):
                    if widths[j] - widest_word[j] >= offset:
                        widths[j] -= offset
                        widths[i] += offset
                        offset = 0
        lines.append(self.ruler_line(widths, linetype='-'))
        # Only add header row if it contains more than just whitespace
        if ''.join(rows[0]).strip() != '':
            lines.extend(self.wrap_row(widths, rows[0]))
            lines.append(self.ruler_line(widths, linetype='='))
        for row in rows[1:]:
            # Skip empty rows
            if ''.join(row).strip() == '':
                continue
            lines.extend(self.wrap_row(widths, row))
            lines.append(self.ruler_line(widths, linetype='-'))
        # Append empty line after table
        lines.append('')
        return lines | Converts a table to grid table format | Below is the instruction that describes the task:
### Input:
Converts a table to grid table format
### Response:
def convert_table(self, block):
""""Converts a table to grid table format"""
lines_orig = block.split('\n')
lines_orig.pop() # Remove extra newline at end of block
widest_cell = [] # Will hold the width of the widest cell for each column
widest_word = [] # Will hold the width of the widest word for each column
widths = [] # Will hold the computed widths of grid table columns
rows = [] # Will hold table cells during processing
lines = [] # Will hold the finished table
has_border = False # Will be set to True if this is a bordered table
width_unit = 0.0 # This number is used to divide up self.width according
# to the following formula:
#
# self.width = width_unit * maxwidth
#
# Where maxwidth is the sum over all elements of
# widest_cell.
# Only process tables, leave everything else untouched
if not self.test(None, block):
return lines_orig
if lines_orig[0].startswith('|'):
has_border = True
# Initialize width arrays
for i in range(0, len(self._split_row(lines_orig[0], has_border))):
widest_cell.append(0)
widest_word.append(0)
widths.append(0)
# Parse lines into array of cells and record width of widest cell/word
for line in lines_orig:
row = self._split_row(line, has_border)
# pad widest_cell to account for under length first row
for i in range(0, len(row) - len(widest_cell)):
widest_cell.append(0)
widest_word.append(0)
widths.append(0)
for i in range(0, len(row)):
# Record cell width
if len(row[i]) > widest_cell[i]:
widest_cell[i] = len(row[i])
# Record longest word
words = row[i].split()
for word in words:
# Keep URLs from throwing the word length count off too badly.
match = re.match(r'\[(.*?)\]\(.*?\)', word)
if match:
word = match.group(1)
if len(word) > widest_word[i]:
widest_word[i] = len(word)
rows.append(row)
# Remove table header divider line from rows
rows.pop(1)
# Compute first approximation of column widths based on maximum cell width
for width in widest_cell:
width_unit += float(width)
width_unit = self.width / width_unit
for i in range(0, len(widest_cell)):
widths[i] = int(widest_cell[i] * width_unit)
# Add rounding errors to narrowest column
if sum(widths) < self.width:
widths[widths.index(min(widths))] += self.width - sum(widths)
# Attempt to correct first approximation of column widths based on
# words that fail to fit their cell's width (if this fails textwrap
# will break up long words but since it does not add hyphens this
# should be avoided)
for i in range(0, len(widths)):
if widths[i] < widest_word[i]:
offset = widest_word[i] - widths[i]
for j in range(0, len(widths)):
if widths[j] - widest_word[j] >= offset:
widths[j] -= offset
widths[i] += offset
offset = 0
lines.append(self.ruler_line(widths, linetype='-'))
# Only add header row if it contains more than just whitespace
if ''.join(rows[0]).strip() != '':
lines.extend(self.wrap_row(widths, rows[0]))
lines.append(self.ruler_line(widths, linetype='='))
for row in rows[1:]:
# Skip empty rows
if ''.join(row).strip() == '':
continue
lines.extend(self.wrap_row(widths, row))
lines.append(self.ruler_line(widths, linetype='-'))
# Append empty line after table
lines.append('')
return lines |
def encrypt(self, plaintext):
"""Return ciphertext for given plaintext."""
# String to bytes.
plainbytes = plaintext.encode('utf8')
# Compress plaintext bytes.
compressed = zlib.compress(plainbytes)
# Construct AES-GCM cipher, with 96-bit nonce.
cipher = AES.new(self.cipher_key, AES.MODE_GCM, nonce=random_bytes(12))
# Encrypt and digest.
encrypted, tag = cipher.encrypt_and_digest(compressed)
# Combine with nonce.
combined = cipher.nonce + tag + encrypted
# Encode as Base64.
cipherbytes = base64.b64encode(combined)
# Bytes to string.
ciphertext = cipherbytes.decode('utf8')
# Return ciphertext.
        return ciphertext | Return ciphertext for given plaintext. | Below is the instruction that describes the task:
### Input:
Return ciphertext for given plaintext.
### Response:
def encrypt(self, plaintext):
        """Return ciphertext for given plaintext."""
        # Shrink the UTF-8 encoded text before encrypting it.
        compressed = zlib.compress(plaintext.encode('utf8'))
        # AES-GCM with a fresh 96-bit random nonce for every message.
        cipher = AES.new(self.cipher_key, AES.MODE_GCM, nonce=random_bytes(12))
        encrypted, tag = cipher.encrypt_and_digest(compressed)
        # Pack as nonce || tag || ciphertext, then Base64 for safe transport.
        cipherbytes = base64.b64encode(cipher.nonce + tag + encrypted)
        ciphertext = cipherbytes.decode('utf8')
        # Hand the Base64 text back to the caller.
return ciphertext |
def sigmaR2(self,R,z,nsigma=None,mc=False,nmc=10000,
                gl=True,ngl=_DEFAULTNGL,**kwargs):
        """
        NAME:
           sigmaR2
        PURPOSE:
           calculate sigma_R^2 by marginalizing over velocity
        INPUT:
           R - radius at which to calculate this (can be Quantity)
           z - height at which to calculate this (can be Quantity)
        OPTIONAL INPUT:
           nsigma - number of sigma to integrate the velocities over
           scipy.integrate.tplquad kwargs epsabs and epsrel
           mc= if True, calculate using Monte Carlo integration
           nmc= if mc, use nmc samples
           gl= if True, calculate using Gauss-Legendre integration
           ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
        OUTPUT:
           sigma_R^2
        HISTORY:
           2012-07-30 - Written - Bovy (IAS@MPIA)
        """
        if mc:
            # Monte Carlo: draw the samples once for the zeroth moment (the
            # density) and reuse them for the second radial moment.
            surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
                                                          nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
                                                          **kwargs)
            return self._vmomentdensity(R,z,2.,0.,0.,
                                        nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
                                        _vrs=vrs,_vts=vts,_vzs=vzs,
                                        **kwargs)/surfmass
        elif gl:
            # Gauss-Legendre: evaluate the quadrature grid once and reuse it
            # for the second moment.
            surfmass, glqeval= self._vmomentdensity(R,z,0.,0.,0.,
                                                    gl=gl,ngl=ngl,
                                                    _returngl=True,
                                                    **kwargs)
            return self._vmomentdensity(R,z,2.,0.,0.,
                                        ngl=ngl,gl=gl,
                                        _glqeval=glqeval,
                                        **kwargs)/surfmass
        else: #pragma: no cover because this is too slow; a warning is shown
            # Direct numerical integration of both moments.
            return (self._vmomentdensity(R,z,2.,0.,0.,
                                         nsigma=nsigma,mc=mc,nmc=nmc,
                                         **kwargs)/
                    self._vmomentdensity(R,z,0.,0.,0.,
                                         nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)) | NAME:
sigmaR2
PURPOSE:
calculate sigma_R^2 by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
sigma_R^2
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA) | Below is the instruction that describes the task:
### Input:
NAME:
sigmaR2
PURPOSE:
calculate sigma_R^2 by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
sigma_R^2
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
### Response:
def sigmaR2(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
sigmaR2
PURPOSE:
calculate sigma_R^2 by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
sigma_R^2
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self._vmomentdensity(R,z,2.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self._vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self._vmomentdensity(R,z,2.,0.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self._vmomentdensity(R,z,2.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)) |
def spawn(self, generations):
"""Grow this Pantheon by multiplying Gods."""
egg_donors = [god for god in self.gods.values() if god.chromosomes == 'XX']
sperm_donors = [god for god in self.gods.values() if god.chromosomes == 'XY']
for i in range(generations):
print("\nGENERATION %d\n" % (i+1))
gen_xx = []
gen_xy = []
for egg_donor in egg_donors:
sperm_donor = random.choice(sperm_donors)
brood = self.breed(egg_donor, sperm_donor)
for child in brood:
if child.divinity > human:
# divine offspring join the Pantheon
self.add_god(child)
if child.chromosomes == 'XX':
gen_xx.append(child)
else:
gen_xy.append(child)
# elder gods leave the breeding pool
egg_donors = [ed for ed in egg_donors if ed.generation > (i-2)]
sperm_donors = [sd for sd in sperm_donors if sd.generation > (i-3)]
# mature offspring join the breeding pool
egg_donors += gen_xx
            sperm_donors += gen_xy | Grow this Pantheon by multiplying Gods. | Below is the instruction that describes the task:
### Input:
Grow this Pantheon by multiplying Gods.
### Response:
def spawn(self, generations):
        """Multiply the Gods of this Pantheon over a number of generations."""
        egg_donors = [g for g in self.gods.values() if g.chromosomes == 'XX']
        sperm_donors = [g for g in self.gods.values() if g.chromosomes == 'XY']
        for era in range(generations):
            print("\nGENERATION %d\n" % (era+1))
            gen_xx = []
            gen_xy = []
            for egg_donor in egg_donors:
                sperm_donor = random.choice(sperm_donors)
                for child in self.breed(egg_donor, sperm_donor):
                    if child.divinity <= human:
                        continue
                    # divine offspring join the Pantheon
                    self.add_god(child)
                    (gen_xx if child.chromosomes == 'XX' else gen_xy).append(child)
            # elder gods leave the breeding pool
            egg_donors = [ed for ed in egg_donors if ed.generation > (era-2)]
            sperm_donors = [sd for sd in sperm_donors if sd.generation > (era-3)]
            # mature offspring join the breeding pool
            egg_donors += gen_xx
sperm_donors += gen_xy |
def _call(self, x, out=None):
"""Return the constant vector or assign it to ``out``."""
if out is None:
return self.range.element(copy(self.constant))
else:
            out.assign(self.constant) | Return the constant vector or assign it to ``out``. | Below is the instruction that describes the task:
### Input:
Return the constant vector or assign it to ``out``.
### Response:
def _call(self, x, out=None):
"""Return the constant vector or assign it to ``out``."""
if out is None:
return self.range.element(copy(self.constant))
else:
out.assign(self.constant) |
def encode_network(root):
    """Build the flat object table for an object network.

    Walks the graph rooted at ``root``, replacing every nested
    fixed-format object with a :class:`Ref` placeholder and collecting
    each unique object into a table, which is returned as a list of
    ref-containing entries.
    """
    def fix_values(obj):
        # Replace each nested value of ``obj`` with its Ref (or inline value).
        if isinstance(obj, Container):
            obj.update((k, get_ref(v)) for (k, v) in obj.items()
                       if k != 'class_name')
            fixed_obj = obj
        elif isinstance(obj, Dictionary):
            fixed_obj = obj.__class__(dict(
                (get_ref(field), get_ref(value))
                for (field, value) in obj.value.items()
            ))
        elif isinstance(obj, dict):
            fixed_obj = dict(
                (get_ref(field), get_ref(value))
                for (field, value) in obj.items()
            )
        elif isinstance(obj, list):
            fixed_obj = [get_ref(field) for field in obj]
        elif isinstance(obj, Form):
            fixed_obj = obj.__class__(**dict(
                (field, get_ref(value))
                for (field, value) in obj.value.items()
            ))
        elif isinstance(obj, ContainsRefs):
            fixed_obj = obj.__class__([get_ref(field)
                                       for field in obj.value])
        else:
            return obj
        fixed_obj._made_from = obj
        return fixed_obj
    objects = []
    def get_ref(obj, objects=objects):
        # Return a Ref to ``obj``'s table slot, appending a new slot on
        # first sight; plain values are returned unchanged (stored inline).
        obj = PythonicAdapter(Pass)._encode(obj, None)
        if isinstance(obj, (FixedObject, Container)):
            if getattr(obj, '_index', None):
                index = obj._index
            else:
                objects.append(None)
                obj._index = index = len(objects)
            objects[index - 1] = fix_values(obj)
            return Ref(index)
        else:
            return obj # Inline value
    get_ref(root)
    # Drop the temporary bookkeeping attribute before handing the table back.
    for obj in objects:
        if getattr(obj, '_index', None):
            del obj._index
    return objects | Yield ref-containing obj table entries from object network | Below is the instruction that describes the task:
### Input:
Yield ref-containing obj table entries from object network
### Response:
def encode_network(root):
"""Yield ref-containing obj table entries from object network"""
def fix_values(obj):
if isinstance(obj, Container):
obj.update((k, get_ref(v)) for (k, v) in obj.items()
if k != 'class_name')
fixed_obj = obj
elif isinstance(obj, Dictionary):
fixed_obj = obj.__class__(dict(
(get_ref(field), get_ref(value))
for (field, value) in obj.value.items()
))
elif isinstance(obj, dict):
fixed_obj = dict(
(get_ref(field), get_ref(value))
for (field, value) in obj.items()
)
elif isinstance(obj, list):
fixed_obj = [get_ref(field) for field in obj]
elif isinstance(obj, Form):
fixed_obj = obj.__class__(**dict(
(field, get_ref(value))
for (field, value) in obj.value.items()
))
elif isinstance(obj, ContainsRefs):
fixed_obj = obj.__class__([get_ref(field)
for field in obj.value])
else:
return obj
fixed_obj._made_from = obj
return fixed_obj
objects = []
def get_ref(obj, objects=objects):
obj = PythonicAdapter(Pass)._encode(obj, None)
if isinstance(obj, (FixedObject, Container)):
if getattr(obj, '_index', None):
index = obj._index
else:
objects.append(None)
obj._index = index = len(objects)
objects[index - 1] = fix_values(obj)
return Ref(index)
else:
return obj # Inline value
get_ref(root)
for obj in objects:
if getattr(obj, '_index', None):
del obj._index
return objects |
def validate_wrap(self, value):
''' Checks that ``value`` is an instance of ``DocumentField.type``.
if it is, then validation on its fields has already been done and
no further validation is needed.
'''
if not isinstance(value, self.type):
self._fail_validation_type(value, self.type) | Checks that ``value`` is an instance of ``DocumentField.type``.
if it is, then validation on its fields has already been done and
no further validation is needed. | Below is the instruction that describes the task:
### Input:
Checks that ``value`` is an instance of ``DocumentField.type``.
if it is, then validation on its fields has already been done and
no further validation is needed.
### Response:
def validate_wrap(self, value):
        '''Check that ``value`` is an instance of ``DocumentField.type``.

        When it is, validation of its fields has already been done and no
        further validation is needed; otherwise a type failure is reported.
        '''
        if isinstance(value, self.type):
            return
        self._fail_validation_type(value, self.type)
def cli(ctx, board, serial_port, ftdi_id, sram, project_dir,
verbose, verbose_yosys, verbose_arachne):
"""Upload the bitstream to the FPGA."""
drivers = Drivers()
drivers.pre_upload()
# Run scons
exit_code = SCons(project_dir).upload({
'board': board,
'verbose': {
'all': verbose,
'yosys': verbose_yosys,
'arachne': verbose_arachne
}
}, serial_port, ftdi_id, sram)
drivers.post_upload()
    ctx.exit(exit_code) | Upload the bitstream to the FPGA. | Below is the instruction that describes the task:
### Input:
Upload the bitstream to the FPGA.
### Response:
def cli(ctx, board, serial_port, ftdi_id, sram, project_dir,
verbose, verbose_yosys, verbose_arachne):
"""Upload the bitstream to the FPGA."""
drivers = Drivers()
drivers.pre_upload()
# Run scons
exit_code = SCons(project_dir).upload({
'board': board,
'verbose': {
'all': verbose,
'yosys': verbose_yosys,
'arachne': verbose_arachne
}
}, serial_port, ftdi_id, sram)
drivers.post_upload()
ctx.exit(exit_code) |
def dynamic_import_class(name):
"""Import a class from a module string, e.g. ``my.module.ClassName``."""
import importlib
module_name, class_name = name.rsplit(".", 1)
try:
module = importlib.import_module(module_name)
except Exception as e:
_logger.exception("Dynamic import of {!r} failed: {}".format(name, e))
raise
the_class = getattr(module, class_name)
return the_class | Import a class from a module string, e.g. ``my.module.ClassName``. | Below is the the instruction that describes the task:
### Input:
Import a class from a module string, e.g. ``my.module.ClassName``.
### Response:
def dynamic_import_class(name):
"""Import a class from a module string, e.g. ``my.module.ClassName``."""
import importlib
module_name, class_name = name.rsplit(".", 1)
try:
module = importlib.import_module(module_name)
except Exception as e:
_logger.exception("Dynamic import of {!r} failed: {}".format(name, e))
raise
the_class = getattr(module, class_name)
return the_class |
def get_pointing_chains(docgraph, layer=None):
"""
returns a list of chained pointing relations (e.g. coreference chains)
found in the given document graph.
Parameters
----------
docgraph : DiscourseDocumentGraph
a text with annotations, represented by a document graph
layer : str or None
If layer is specifid, this function will only return pointing relations
belonging to that layer.
"""
pointing_relations = select_edges_by(docgraph, layer=layer,
edge_type=EdgeTypes.pointing_relation)
# a markable can point to more than one antecedent, cf. Issue #40
rel_dict = defaultdict(set)
for src_id, target_id in pointing_relations:
rel_dict[src_id].add(target_id)
all_chains = [__walk_chain(rel_dict, src_id)
for src_id in rel_dict.iterkeys()]
# don't return partial chains, i.e. instead of returning [a,b], [b,c] and
# [a,b,c,d], just return [a,b,c,d]
unique_chains = []
for i, src_id_chains in enumerate(all_chains):
# there will be at least one chain in this list and
# its first element is the from ID
src_id = src_id_chains[0][0]
# chain lists not starting with src_id
other_chainlists = all_chains[:i] + all_chains[i+1:]
if not any((src_id in chain
for chain_list in other_chainlists
for chain in chain_list)):
unique_chains.extend(src_id_chains)
return unique_chains | returns a list of chained pointing relations (e.g. coreference chains)
found in the given document graph.
Parameters
----------
docgraph : DiscourseDocumentGraph
a text with annotations, represented by a document graph
layer : str or None
If layer is specifid, this function will only return pointing relations
belonging to that layer. | Below is the the instruction that describes the task:
### Input:
returns a list of chained pointing relations (e.g. coreference chains)
found in the given document graph.
Parameters
----------
docgraph : DiscourseDocumentGraph
a text with annotations, represented by a document graph
layer : str or None
If layer is specifid, this function will only return pointing relations
belonging to that layer.
### Response:
def get_pointing_chains(docgraph, layer=None):
"""
returns a list of chained pointing relations (e.g. coreference chains)
found in the given document graph.
Parameters
----------
docgraph : DiscourseDocumentGraph
a text with annotations, represented by a document graph
layer : str or None
If layer is specifid, this function will only return pointing relations
belonging to that layer.
"""
pointing_relations = select_edges_by(docgraph, layer=layer,
edge_type=EdgeTypes.pointing_relation)
# a markable can point to more than one antecedent, cf. Issue #40
rel_dict = defaultdict(set)
for src_id, target_id in pointing_relations:
rel_dict[src_id].add(target_id)
all_chains = [__walk_chain(rel_dict, src_id)
for src_id in rel_dict.iterkeys()]
# don't return partial chains, i.e. instead of returning [a,b], [b,c] and
# [a,b,c,d], just return [a,b,c,d]
unique_chains = []
for i, src_id_chains in enumerate(all_chains):
# there will be at least one chain in this list and
# its first element is the from ID
src_id = src_id_chains[0][0]
# chain lists not starting with src_id
other_chainlists = all_chains[:i] + all_chains[i+1:]
if not any((src_id in chain
for chain_list in other_chainlists
for chain in chain_list)):
unique_chains.extend(src_id_chains)
return unique_chains |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.