code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def reciprocal_remove(self):
"""Removes results rows for which the n-gram is not present in
at least one text in each labelled set of texts."""
self._logger.info(
'Removing n-grams that are not attested in all labels')
self._matches = self._reciprocal_remove(self._matches) | Removes results rows for which the n-gram is not present in
at least one text in each labelled set of texts. | Below is the instruction that describes the task:
### Input:
Removes results rows for which the n-gram is not present in
at least one text in each labelled set of texts.
### Response:
def reciprocal_remove(self):
"""Removes results rows for which the n-gram is not present in
at least one text in each labelled set of texts."""
self._logger.info(
'Removing n-grams that are not attested in all labels')
self._matches = self._reciprocal_remove(self._matches) |
def sharey(axes):
"""
Shared axes limits without shared locators, ticks, etc.
By Joe Kington
"""
linker = Linker(axes)
for ax in axes:
ax._linker = linker | Shared axes limits without shared locators, ticks, etc.
By Joe Kington | Below is the instruction that describes the task:
### Input:
Shared axes limits without shared locators, ticks, etc.
By Joe Kington
### Response:
def sharey(axes):
"""
Shared axes limits without shared locators, ticks, etc.
By Joe Kington
"""
linker = Linker(axes)
for ax in axes:
ax._linker = linker |
def log_post(self, url=None, credentials=None, do_verify_certificate=True):
"""
Write to a remote host via HTTP POST
"""
if url is None:
url = self.url
if credentials is None:
credentials = self.credentials
if do_verify_certificate is None:
do_verify_certificate = self.do_verify_certificate
if credentials and "base64" in credentials:
headers = {"Content-Type": "application/json", \
'Authorization': 'Basic %s' % credentials["base64"]}
else:
headers = {"Content-Type": "application/json"}
try:
request = requests.post(url, headers=headers, \
data=self.store.get_json(), verify=do_verify_certificate)
except httplib.IncompleteRead as e:
request = e.partial | Write to a remote host via HTTP POST | Below is the instruction that describes the task:
### Input:
Write to a remote host via HTTP POST
### Response:
def log_post(self, url=None, credentials=None, do_verify_certificate=True):
"""
Write to a remote host via HTTP POST
"""
if url is None:
url = self.url
if credentials is None:
credentials = self.credentials
if do_verify_certificate is None:
do_verify_certificate = self.do_verify_certificate
if credentials and "base64" in credentials:
headers = {"Content-Type": "application/json", \
'Authorization': 'Basic %s' % credentials["base64"]}
else:
headers = {"Content-Type": "application/json"}
try:
request = requests.post(url, headers=headers, \
data=self.store.get_json(), verify=do_verify_certificate)
except httplib.IncompleteRead as e:
request = e.partial |
def add_coconut_to_path():
"""Adds coconut to sys.path if it isn't there already."""
try:
import coconut # NOQA
except ImportError:
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | Adds coconut to sys.path if it isn't there already. | Below is the instruction that describes the task:
### Input:
Adds coconut to sys.path if it isn't there already.
### Response:
def add_coconut_to_path():
"""Adds coconut to sys.path if it isn't there already."""
try:
import coconut # NOQA
except ImportError:
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) |
def audio_bottom(x, model_hparams, vocab_size):
"""Transform input from data space to model space.
Args:
x: A Tensor with shape [batch, ...]
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
Returns:
body_input: A Tensor with shape [batch, ?, ?,
model_hparams.hidden_size].
"""
del vocab_size # unused arg
inputs = x
with tf.variable_scope("audio_modality"):
# TODO(aidangomez): Will need to sort out a better audio pipeline
def xnet_resblock(x, filters, res_relu, name):
"""Xception block."""
with tf.variable_scope(name):
# Typically audio samples are >100k samples in length and have a width
# of 2 or 4. Mono audio has a single channel while stereo has 2.
y = common_layers.separable_conv_block(
x,
filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
first_relu=True,
padding="SAME",
force2d=True,
name="sep_conv_block")
y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2))
return y + common_layers.conv_block(
x,
filters, [((1, 1), (1, 1))],
padding="SAME",
strides=(2, 2),
first_relu=res_relu,
force2d=True,
name="res_conv0")
x = tf.to_float(inputs) / 255.
x.set_shape([None, None, None, 1])
for i in range(model_hparams.audio_compression):
x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i)
return xnet_resblock(x,
model_hparams.hidden_size,
False,
"compress_block_final") | Transform input from data space to model space.
Args:
x: A Tensor with shape [batch, ...]
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
Returns:
body_input: A Tensor with shape [batch, ?, ?,
model_hparams.hidden_size]. | Below is the instruction that describes the task:
### Input:
Transform input from data space to model space.
Args:
x: A Tensor with shape [batch, ...]
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
Returns:
body_input: A Tensor with shape [batch, ?, ?,
model_hparams.hidden_size].
### Response:
def audio_bottom(x, model_hparams, vocab_size):
"""Transform input from data space to model space.
Args:
x: A Tensor with shape [batch, ...]
model_hparams: HParams, model hyperparmeters.
vocab_size: int, vocabulary size.
Returns:
body_input: A Tensor with shape [batch, ?, ?,
model_hparams.hidden_size].
"""
del vocab_size # unused arg
inputs = x
with tf.variable_scope("audio_modality"):
# TODO(aidangomez): Will need to sort out a better audio pipeline
def xnet_resblock(x, filters, res_relu, name):
"""Xception block."""
with tf.variable_scope(name):
# Typically audio samples are >100k samples in length and have a width
# of 2 or 4. Mono audio has a single channel while stereo has 2.
y = common_layers.separable_conv_block(
x,
filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
first_relu=True,
padding="SAME",
force2d=True,
name="sep_conv_block")
y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2))
return y + common_layers.conv_block(
x,
filters, [((1, 1), (1, 1))],
padding="SAME",
strides=(2, 2),
first_relu=res_relu,
force2d=True,
name="res_conv0")
x = tf.to_float(inputs) / 255.
x.set_shape([None, None, None, 1])
for i in range(model_hparams.audio_compression):
x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i)
return xnet_resblock(x,
model_hparams.hidden_size,
False,
"compress_block_final") |
def _rollback(self):
"""Roll back the transaction.
Raises:
ValueError: If no transaction is in progress.
"""
if not self.in_progress:
raise ValueError(_CANT_ROLLBACK)
try:
# NOTE: The response is just ``google.protobuf.Empty``.
self._client._firestore_api.rollback(
self._client._database_string,
self._id,
metadata=self._client._rpc_metadata,
)
finally:
self._clean_up() | Roll back the transaction.
Raises:
ValueError: If no transaction is in progress. | Below is the instruction that describes the task:
### Input:
Roll back the transaction.
Raises:
ValueError: If no transaction is in progress.
### Response:
def _rollback(self):
"""Roll back the transaction.
Raises:
ValueError: If no transaction is in progress.
"""
if not self.in_progress:
raise ValueError(_CANT_ROLLBACK)
try:
# NOTE: The response is just ``google.protobuf.Empty``.
self._client._firestore_api.rollback(
self._client._database_string,
self._id,
metadata=self._client._rpc_metadata,
)
finally:
self._clean_up() |
def create_connection(address):
"""
Wrapper for socket.create_connection() function.
If *address* (a 2-tuple ``(host, port)``) contains a valid IPv4/v6
address, passes *address* to socket.create_connection().
If *host* is valid path to Unix Domain socket, tries to connect to
the server listening on the given socket.
:param address: IP address or path to Unix Domain socket.
:return: Socket instance.
"""
host, _port = address
if ip.valid_ipv4(host) or ip.valid_ipv6(host):
return socket.create_connection(address)
elif os.path.exists(host):
sock = None
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(host)
except socket.error as e:
if sock is not None:
sock.close()
raise e
return sock
else:
raise ValueError('Invalid IP address or Unix Socket: %s' % host) | Wrapper for socket.create_connection() function.
If *address* (a 2-tuple ``(host, port)``) contains a valid IPv4/v6
address, passes *address* to socket.create_connection().
If *host* is valid path to Unix Domain socket, tries to connect to
the server listening on the given socket.
:param address: IP address or path to Unix Domain socket.
:return: Socket instance. | Below is the instruction that describes the task:
### Input:
Wrapper for socket.create_connection() function.
If *address* (a 2-tuple ``(host, port)``) contains a valid IPv4/v6
address, passes *address* to socket.create_connection().
If *host* is valid path to Unix Domain socket, tries to connect to
the server listening on the given socket.
:param address: IP address or path to Unix Domain socket.
:return: Socket instance.
### Response:
def create_connection(address):
"""
Wrapper for socket.create_connection() function.
If *address* (a 2-tuple ``(host, port)``) contains a valid IPv4/v6
address, passes *address* to socket.create_connection().
If *host* is valid path to Unix Domain socket, tries to connect to
the server listening on the given socket.
:param address: IP address or path to Unix Domain socket.
:return: Socket instance.
"""
host, _port = address
if ip.valid_ipv4(host) or ip.valid_ipv6(host):
return socket.create_connection(address)
elif os.path.exists(host):
sock = None
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(host)
except socket.error as e:
if sock is not None:
sock.close()
raise e
return sock
else:
raise ValueError('Invalid IP address or Unix Socket: %s' % host) |
def telephone(self, mask: str = '', placeholder: str = '#') -> str:
"""Generate a random phone number.
:param mask: Mask for formatting number.
:param placeholder: A placeholder for a mask (default is #).
:return: Phone number.
:Example:
+7-(963)-409-11-22.
"""
if not mask:
code = self.random.choice(CALLING_CODES)
default = '{}-(###)-###-####'.format(code)
masks = self._data.get('telephone_fmt', [default])
mask = self.random.choice(masks)
return self.random.custom_code(mask=mask, digit=placeholder) | Generate a random phone number.
:param mask: Mask for formatting number.
:param placeholder: A placeholder for a mask (default is #).
:return: Phone number.
:Example:
+7-(963)-409-11-22. | Below is the instruction that describes the task:
### Input:
Generate a random phone number.
:param mask: Mask for formatting number.
:param placeholder: A placeholder for a mask (default is #).
:return: Phone number.
:Example:
+7-(963)-409-11-22.
### Response:
def telephone(self, mask: str = '', placeholder: str = '#') -> str:
"""Generate a random phone number.
:param mask: Mask for formatting number.
:param placeholder: A placeholder for a mask (default is #).
:return: Phone number.
:Example:
+7-(963)-409-11-22.
"""
if not mask:
code = self.random.choice(CALLING_CODES)
default = '{}-(###)-###-####'.format(code)
masks = self._data.get('telephone_fmt', [default])
mask = self.random.choice(masks)
return self.random.custom_code(mask=mask, digit=placeholder) |
def data_filename(fname, pkgdir=""):
"""Return the path to a data file of ours.
The file is searched for on `STATIC_PATH`, and the first place it's found,
is returned.
Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
is provided, at that subdirectory.
"""
for static_dir in STATIC_PATH:
static_filename = os.path.join(static_dir, fname)
if os.path.exists(static_filename):
return static_filename
if pkgdir:
static_filename = os.path.join(static_dir, pkgdir, fname)
if os.path.exists(static_filename):
return static_filename
raise CoverageException("Couldn't find static file %r" % fname) | Return the path to a data file of ours.
The file is searched for on `STATIC_PATH`, and the first place it's found,
is returned.
Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
is provided, at that subdirectory. | Below is the instruction that describes the task:
### Input:
Return the path to a data file of ours.
The file is searched for on `STATIC_PATH`, and the first place it's found,
is returned.
Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
is provided, at that subdirectory.
### Response:
def data_filename(fname, pkgdir=""):
"""Return the path to a data file of ours.
The file is searched for on `STATIC_PATH`, and the first place it's found,
is returned.
Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
is provided, at that subdirectory.
"""
for static_dir in STATIC_PATH:
static_filename = os.path.join(static_dir, fname)
if os.path.exists(static_filename):
return static_filename
if pkgdir:
static_filename = os.path.join(static_dir, pkgdir, fname)
if os.path.exists(static_filename):
return static_filename
raise CoverageException("Couldn't find static file %r" % fname) |
def get_prtflds_default(self):
"""Get default fields."""
return self._fldsdefprt[:-1] + \
["p_{M}".format(M=m.fieldname) for m in self.method_flds] + \
[self._fldsdefprt[-1]] | Get default fields. | Below is the instruction that describes the task:
### Input:
Get default fields.
### Response:
def get_prtflds_default(self):
"""Get default fields."""
return self._fldsdefprt[:-1] + \
["p_{M}".format(M=m.fieldname) for m in self.method_flds] + \
[self._fldsdefprt[-1]] |
def axesfontsize(ax, fontsize):
"""
Change the font size for the title, x and y labels, and x and y tick labels for axis *ax* to *fontsize*.
"""
items = ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels())
for item in items:
item.set_fontsize(fontsize) | Change the font size for the title, x and y labels, and x and y tick labels for axis *ax* to *fontsize*. | Below is the instruction that describes the task:
### Input:
Change the font size for the title, x and y labels, and x and y tick labels for axis *ax* to *fontsize*.
### Response:
def axesfontsize(ax, fontsize):
"""
Change the font size for the title, x and y labels, and x and y tick labels for axis *ax* to *fontsize*.
"""
items = ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels())
for item in items:
item.set_fontsize(fontsize) |
def verify_file_private(filename):
"""
Raises ValueError the file permissions allow group/other
On windows this never raises due to the implementation of stat.
"""
if platform.system().upper() != 'WINDOWS':
filename = os.path.expanduser(filename)
if os.path.exists(filename):
file_stat = os.stat(filename)
if mode_allows_group_or_other(file_stat.st_mode):
raise ValueError(CONFIG_FILE_PERMISSIONS_ERROR) | Raises ValueError the file permissions allow group/other
On windows this never raises due to the implementation of stat. | Below is the instruction that describes the task:
### Input:
Raises ValueError the file permissions allow group/other
On windows this never raises due to the implementation of stat.
### Response:
def verify_file_private(filename):
"""
Raises ValueError the file permissions allow group/other
On windows this never raises due to the implementation of stat.
"""
if platform.system().upper() != 'WINDOWS':
filename = os.path.expanduser(filename)
if os.path.exists(filename):
file_stat = os.stat(filename)
if mode_allows_group_or_other(file_stat.st_mode):
raise ValueError(CONFIG_FILE_PERMISSIONS_ERROR) |
def description(self):
"""A user-friendly description of the handler.
Returns:
:py:class:`str`: The handler's description.
"""
if self._description is None:
text = '\n'.join(self.__doc__.splitlines()[1:]).strip()
lines = []
for line in map(str.strip, text.splitlines()):
if line and lines:
lines[-1] = ' '.join((lines[-1], line))
elif line:
lines.append(line)
else:
lines.append('')
self._description = '\n'.join(lines)
return self._description | A user-friendly description of the handler.
Returns:
:py:class:`str`: The handler's description. | Below is the instruction that describes the task:
### Input:
A user-friendly description of the handler.
Returns:
:py:class:`str`: The handler's description.
### Response:
def description(self):
"""A user-friendly description of the handler.
Returns:
:py:class:`str`: The handler's description.
"""
if self._description is None:
text = '\n'.join(self.__doc__.splitlines()[1:]).strip()
lines = []
for line in map(str.strip, text.splitlines()):
if line and lines:
lines[-1] = ' '.join((lines[-1], line))
elif line:
lines.append(line)
else:
lines.append('')
self._description = '\n'.join(lines)
return self._description |
def find_cookies_for_class(cookies_file, class_name):
"""
Return a RequestsCookieJar containing the cookies for
.coursera.org and class.coursera.org found in the given cookies_file.
"""
path = "/" + class_name
def cookies_filter(c):
return c.domain == ".coursera.org" \
or (c.domain == "class.coursera.org" and c.path == path)
cj = get_cookie_jar(cookies_file)
new_cj = requests.cookies.RequestsCookieJar()
for c in filter(cookies_filter, cj):
new_cj.set_cookie(c)
return new_cj | Return a RequestsCookieJar containing the cookies for
.coursera.org and class.coursera.org found in the given cookies_file. | Below is the instruction that describes the task:
### Input:
Return a RequestsCookieJar containing the cookies for
.coursera.org and class.coursera.org found in the given cookies_file.
### Response:
def find_cookies_for_class(cookies_file, class_name):
"""
Return a RequestsCookieJar containing the cookies for
.coursera.org and class.coursera.org found in the given cookies_file.
"""
path = "/" + class_name
def cookies_filter(c):
return c.domain == ".coursera.org" \
or (c.domain == "class.coursera.org" and c.path == path)
cj = get_cookie_jar(cookies_file)
new_cj = requests.cookies.RequestsCookieJar()
for c in filter(cookies_filter, cj):
new_cj.set_cookie(c)
return new_cj |
def capsnet(batch_size, n_class, num_routing, recon_loss_weight):
"""Create CapsNet"""
# data.shape = [batch_size, 1, 28, 28]
data = mx.sym.Variable('data')
input_shape = (1, 28, 28)
# Conv2D layer
# net.shape = [batch_size, 256, 20, 20]
conv1 = mx.sym.Convolution(data=data,
num_filter=256,
kernel=(9, 9),
layout='NCHW',
name='conv1')
conv1 = mx.sym.Activation(data=conv1, act_type='relu', name='conv1_act')
# net.shape = [batch_size, 256, 6, 6]
primarycaps = primary_caps(data=conv1,
dim_vector=8,
n_channels=32,
kernel=(9, 9),
strides=[2, 2],
name='primarycaps')
primarycaps.infer_shape(data=(batch_size, 1, 28, 28))
# CapsuleLayer
kernel_initializer = mx.init.Xavier(rnd_type='uniform', factor_type='avg', magnitude=3)
bias_initializer = mx.init.Zero()
digitcaps = CapsuleLayer(num_capsule=10,
dim_vector=16,
batch_size=batch_size,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
num_routing=num_routing)(primarycaps)
# out_caps : (batch_size, 10)
out_caps = mx.sym.sqrt(data=mx.sym.sum(mx.sym.square(digitcaps), 2))
out_caps.infer_shape(data=(batch_size, 1, 28, 28))
y = mx.sym.Variable('softmax_label', shape=(batch_size,))
y_onehot = mx.sym.one_hot(y, n_class)
y_reshaped = mx.sym.Reshape(data=y_onehot, shape=(batch_size, -4, n_class, -1))
y_reshaped.infer_shape(softmax_label=(batch_size,))
# inputs_masked : (batch_size, 16)
inputs_masked = mx.sym.linalg_gemm2(y_reshaped, digitcaps, transpose_a=True)
inputs_masked = mx.sym.Reshape(data=inputs_masked, shape=(-3, 0))
x_recon = mx.sym.FullyConnected(data=inputs_masked, num_hidden=512, name='x_recon')
x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act')
x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=1024, name='x_recon2')
x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act2')
x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=np.prod(input_shape), name='x_recon3')
x_recon = mx.sym.Activation(data=x_recon, act_type='sigmoid', name='x_recon_act3')
data_flatten = mx.sym.flatten(data=data)
squared_error = mx.sym.square(x_recon-data_flatten)
recon_error = mx.sym.mean(squared_error)
recon_error_stopped = recon_error
recon_error_stopped = mx.sym.BlockGrad(recon_error_stopped)
loss = mx.symbol.MakeLoss((1-recon_loss_weight)*margin_loss(y_onehot, out_caps)+recon_loss_weight*recon_error)
out_caps_blocked = out_caps
out_caps_blocked = mx.sym.BlockGrad(out_caps_blocked)
return mx.sym.Group([out_caps_blocked, loss, recon_error_stopped]) | Create CapsNet | Below is the instruction that describes the task:
### Input:
Create CapsNet
### Response:
def capsnet(batch_size, n_class, num_routing, recon_loss_weight):
"""Create CapsNet"""
# data.shape = [batch_size, 1, 28, 28]
data = mx.sym.Variable('data')
input_shape = (1, 28, 28)
# Conv2D layer
# net.shape = [batch_size, 256, 20, 20]
conv1 = mx.sym.Convolution(data=data,
num_filter=256,
kernel=(9, 9),
layout='NCHW',
name='conv1')
conv1 = mx.sym.Activation(data=conv1, act_type='relu', name='conv1_act')
# net.shape = [batch_size, 256, 6, 6]
primarycaps = primary_caps(data=conv1,
dim_vector=8,
n_channels=32,
kernel=(9, 9),
strides=[2, 2],
name='primarycaps')
primarycaps.infer_shape(data=(batch_size, 1, 28, 28))
# CapsuleLayer
kernel_initializer = mx.init.Xavier(rnd_type='uniform', factor_type='avg', magnitude=3)
bias_initializer = mx.init.Zero()
digitcaps = CapsuleLayer(num_capsule=10,
dim_vector=16,
batch_size=batch_size,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
num_routing=num_routing)(primarycaps)
# out_caps : (batch_size, 10)
out_caps = mx.sym.sqrt(data=mx.sym.sum(mx.sym.square(digitcaps), 2))
out_caps.infer_shape(data=(batch_size, 1, 28, 28))
y = mx.sym.Variable('softmax_label', shape=(batch_size,))
y_onehot = mx.sym.one_hot(y, n_class)
y_reshaped = mx.sym.Reshape(data=y_onehot, shape=(batch_size, -4, n_class, -1))
y_reshaped.infer_shape(softmax_label=(batch_size,))
# inputs_masked : (batch_size, 16)
inputs_masked = mx.sym.linalg_gemm2(y_reshaped, digitcaps, transpose_a=True)
inputs_masked = mx.sym.Reshape(data=inputs_masked, shape=(-3, 0))
x_recon = mx.sym.FullyConnected(data=inputs_masked, num_hidden=512, name='x_recon')
x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act')
x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=1024, name='x_recon2')
x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act2')
x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=np.prod(input_shape), name='x_recon3')
x_recon = mx.sym.Activation(data=x_recon, act_type='sigmoid', name='x_recon_act3')
data_flatten = mx.sym.flatten(data=data)
squared_error = mx.sym.square(x_recon-data_flatten)
recon_error = mx.sym.mean(squared_error)
recon_error_stopped = recon_error
recon_error_stopped = mx.sym.BlockGrad(recon_error_stopped)
loss = mx.symbol.MakeLoss((1-recon_loss_weight)*margin_loss(y_onehot, out_caps)+recon_loss_weight*recon_error)
out_caps_blocked = out_caps
out_caps_blocked = mx.sym.BlockGrad(out_caps_blocked)
return mx.sym.Group([out_caps_blocked, loss, recon_error_stopped]) |
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : boolean, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize) | Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : boolean, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query | Below is the instruction that describes the task:
### Input:
Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : boolean, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
### Response:
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : boolean, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize) |
def as_live(self):
"""
Return this call as if it were being assigned in a pyconfig namespace,
but load the actual value currently available in pyconfig.
"""
key = self.get_key()
default = pyconfig.get(key)
if default:
default = repr(default)
else:
default = self._default() or NotSet()
return "%s = %s" % (key, default) | Return this call as if it were being assigned in a pyconfig namespace,
but load the actual value currently available in pyconfig. | Below is the instruction that describes the task:
### Input:
Return this call as if it were being assigned in a pyconfig namespace,
but load the actual value currently available in pyconfig.
### Response:
def as_live(self):
"""
Return this call as if it were being assigned in a pyconfig namespace,
but load the actual value currently available in pyconfig.
"""
key = self.get_key()
default = pyconfig.get(key)
if default:
default = repr(default)
else:
default = self._default() or NotSet()
return "%s = %s" % (key, default) |
def _init_hex(self, hexval: str) -> None:
""" Initialize from a hex value string. """
self.hexval = hex2termhex(fix_hex(hexval))
self.code = hex2term(self.hexval)
self.rgb = hex2rgb(self.hexval) | Initialize from a hex value string. | Below is the instruction that describes the task:
### Input:
Initialize from a hex value string.
### Response:
def _init_hex(self, hexval: str) -> None:
""" Initialize from a hex value string. """
self.hexval = hex2termhex(fix_hex(hexval))
self.code = hex2term(self.hexval)
self.rgb = hex2rgb(self.hexval) |
def main(input_filename, format):
"""
Calculate the fingerprint hashses of the referenced audio file and save
to disk as a pickle file
"""
# open the file & convert to wav
song_data = AudioSegment.from_file(input_filename, format=format)
song_data = song_data.set_channels(1) # convert to mono
wav_tmp = song_data.export(format="wav") # write to a tmp file buffer
wav_tmp.seek(0)
rate, wav_data = wavfile.read(wav_tmp)
rows_per_second = (1 + (rate - WIDTH)) // FRAME_STRIDE
# Calculate a coarser window for matching
window_size = (rows_per_second // TIME_STRIDE, (WIDTH // 2) // FREQ_STRIDE)
peaks = resound.get_peaks(np.array(wav_data), window_size=window_size)
# half width (nyquist freq) & half size (window is +/- around the middle)
f_width = WIDTH // (2 * FREQ_STRIDE) * 2
t_gap = 1 * rows_per_second
t_width = 2 * rows_per_second
fingerprints = resound.hashes(peaks, f_width=f_width, t_gap=t_gap, t_width=t_width) # hash, offset pairs
return fingerprints | Calculate the fingerprint hashses of the referenced audio file and save
to disk as a pickle file | Below is the instruction that describes the task:
### Input:
Calculate the fingerprint hashses of the referenced audio file and save
to disk as a pickle file
### Response:
def main(input_filename, format):
"""
Calculate the fingerprint hashses of the referenced audio file and save
to disk as a pickle file
"""
# open the file & convert to wav
song_data = AudioSegment.from_file(input_filename, format=format)
song_data = song_data.set_channels(1) # convert to mono
wav_tmp = song_data.export(format="wav") # write to a tmp file buffer
wav_tmp.seek(0)
rate, wav_data = wavfile.read(wav_tmp)
rows_per_second = (1 + (rate - WIDTH)) // FRAME_STRIDE
# Calculate a coarser window for matching
window_size = (rows_per_second // TIME_STRIDE, (WIDTH // 2) // FREQ_STRIDE)
peaks = resound.get_peaks(np.array(wav_data), window_size=window_size)
# half width (nyquist freq) & half size (window is +/- around the middle)
f_width = WIDTH // (2 * FREQ_STRIDE) * 2
t_gap = 1 * rows_per_second
t_width = 2 * rows_per_second
fingerprints = resound.hashes(peaks, f_width=f_width, t_gap=t_gap, t_width=t_width) # hash, offset pairs
return fingerprints |
def get_wrong_answer_ids(self):
"""provide this method to return only wrong answer ids"""
id_list = []
for answer in self.get_wrong_answers():
id_list.append(answer.get_id())
return IdList(id_list) | provide this method to return only wrong answer ids | Below is the the instruction that describes the task:
### Input:
provide this method to return only wrong answer ids
### Response:
def get_wrong_answer_ids(self):
"""provide this method to return only wrong answer ids"""
id_list = []
for answer in self.get_wrong_answers():
id_list.append(answer.get_id())
return IdList(id_list) |
def is_monotonic(a, increasing=True):
"""
Tests whether a vector a has monotonicity.
Parameters
----------
a : array-like
Array that should be tested for monotonicity
increasing : bool, default: True
Test if the array is montonically increasing, otherwise test if the
array is montonically decreasing.
"""
a = np.asarray(a) # ensure a is array-like
if a.ndim > 1:
raise ValueError("not supported for multi-dimensonal arrays")
if len(a) <= 1:
return True
if increasing:
return np.all(a[1:] >= a[:-1], axis=0)
return np.all(a[1:] <= a[:-1], axis=0) | Tests whether a vector a has monotonicity.
Parameters
----------
a : array-like
Array that should be tested for monotonicity
increasing : bool, default: True
Test if the array is montonically increasing, otherwise test if the
array is montonically decreasing. | Below is the the instruction that describes the task:
### Input:
Tests whether a vector a has monotonicity.
Parameters
----------
a : array-like
Array that should be tested for monotonicity
increasing : bool, default: True
Test if the array is montonically increasing, otherwise test if the
array is montonically decreasing.
### Response:
def is_monotonic(a, increasing=True):
"""
Tests whether a vector a has monotonicity.
Parameters
----------
a : array-like
Array that should be tested for monotonicity
increasing : bool, default: True
Test if the array is montonically increasing, otherwise test if the
array is montonically decreasing.
"""
a = np.asarray(a) # ensure a is array-like
if a.ndim > 1:
raise ValueError("not supported for multi-dimensonal arrays")
if len(a) <= 1:
return True
if increasing:
return np.all(a[1:] >= a[:-1], axis=0)
return np.all(a[1:] <= a[:-1], axis=0) |
def init_epoch(self):
"""Set up the batch generator for a new epoch."""
if self._restored_from_state:
self.random_shuffler.random_state = self._random_state_this_epoch
else:
self._random_state_this_epoch = self.random_shuffler.random_state
self.create_batches()
if self._restored_from_state:
self._restored_from_state = False
else:
self._iterations_this_epoch = 0
if not self.repeat:
self.iterations = 0 | Set up the batch generator for a new epoch. | Below is the the instruction that describes the task:
### Input:
Set up the batch generator for a new epoch.
### Response:
def init_epoch(self):
"""Set up the batch generator for a new epoch."""
if self._restored_from_state:
self.random_shuffler.random_state = self._random_state_this_epoch
else:
self._random_state_this_epoch = self.random_shuffler.random_state
self.create_batches()
if self._restored_from_state:
self._restored_from_state = False
else:
self._iterations_this_epoch = 0
if not self.repeat:
self.iterations = 0 |
def hidden_constraints(self):
"""
Constraints applied to the population, but temporarily removed.
"""
try:
return self._hidden_constraints
except AttributeError:
self._hidden_constraints = ConstraintDict()
return self._hidden_constraints | Constraints applied to the population, but temporarily removed. | Below is the the instruction that describes the task:
### Input:
Constraints applied to the population, but temporarily removed.
### Response:
def hidden_constraints(self):
"""
Constraints applied to the population, but temporarily removed.
"""
try:
return self._hidden_constraints
except AttributeError:
self._hidden_constraints = ConstraintDict()
return self._hidden_constraints |
def _Reg2Py(data, size, data_type):
"""Converts a Windows Registry value to the corresponding Python data type."""
if data_type == winreg.REG_DWORD:
if size == 0:
return 0
# DWORD is an unsigned 32-bit integer, see:
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-dtyp/262627d8-3418-4627-9218-4ffe110850b2
return ctypes.cast(data, ctypes.POINTER(ctypes.c_uint32)).contents.value
elif data_type == winreg.REG_SZ or data_type == winreg.REG_EXPAND_SZ:
return ctypes.wstring_at(data, size // 2).rstrip(u"\x00")
elif data_type == winreg.REG_MULTI_SZ:
return ctypes.wstring_at(data, size // 2).rstrip(u"\x00").split(u"\x00")
else:
if size == 0:
return None
return ctypes.string_at(data, size) | Converts a Windows Registry value to the corresponding Python data type. | Below is the the instruction that describes the task:
### Input:
Converts a Windows Registry value to the corresponding Python data type.
### Response:
def _Reg2Py(data, size, data_type):
"""Converts a Windows Registry value to the corresponding Python data type."""
if data_type == winreg.REG_DWORD:
if size == 0:
return 0
# DWORD is an unsigned 32-bit integer, see:
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-dtyp/262627d8-3418-4627-9218-4ffe110850b2
return ctypes.cast(data, ctypes.POINTER(ctypes.c_uint32)).contents.value
elif data_type == winreg.REG_SZ or data_type == winreg.REG_EXPAND_SZ:
return ctypes.wstring_at(data, size // 2).rstrip(u"\x00")
elif data_type == winreg.REG_MULTI_SZ:
return ctypes.wstring_at(data, size // 2).rstrip(u"\x00").split(u"\x00")
else:
if size == 0:
return None
return ctypes.string_at(data, size) |
def lookupjoin(left, right, key=None, lkey=None, rkey=None, missing=None,
presorted=False, buffersize=None, tempdir=None, cache=True,
lprefix=None, rprefix=None):
"""
Perform a left join, but where the key is not unique in the right-hand
table, arbitrarily choose the first row and ignore others. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'color', 'cost'],
... [1, 'blue', 12],
... [2, 'red', 8],
... [3, 'purple', 4]]
>>> table2 = [['id', 'shape', 'size'],
... [1, 'circle', 'big'],
... [1, 'circle', 'small'],
... [2, 'square', 'tiny'],
... [2, 'square', 'big'],
... [3, 'ellipse', 'small'],
... [3, 'ellipse', 'tiny']]
>>> table3 = etl.lookupjoin(table1, table2, key='id')
>>> table3
+----+----------+------+-----------+---------+
| id | color | cost | shape | size |
+====+==========+======+===========+=========+
| 1 | 'blue' | 12 | 'circle' | 'big' |
+----+----------+------+-----------+---------+
| 2 | 'red' | 8 | 'square' | 'tiny' |
+----+----------+------+-----------+---------+
| 3 | 'purple' | 4 | 'ellipse' | 'small' |
+----+----------+------+-----------+---------+
See also :func:`petl.transform.joins.leftjoin`.
"""
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return LookupJoinView(left, right, lkey, rkey, presorted=presorted,
missing=missing, buffersize=buffersize,
tempdir=tempdir, cache=cache,
lprefix=lprefix, rprefix=rprefix) | Perform a left join, but where the key is not unique in the right-hand
table, arbitrarily choose the first row and ignore others. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'color', 'cost'],
... [1, 'blue', 12],
... [2, 'red', 8],
... [3, 'purple', 4]]
>>> table2 = [['id', 'shape', 'size'],
... [1, 'circle', 'big'],
... [1, 'circle', 'small'],
... [2, 'square', 'tiny'],
... [2, 'square', 'big'],
... [3, 'ellipse', 'small'],
... [3, 'ellipse', 'tiny']]
>>> table3 = etl.lookupjoin(table1, table2, key='id')
>>> table3
+----+----------+------+-----------+---------+
| id | color | cost | shape | size |
+====+==========+======+===========+=========+
| 1 | 'blue' | 12 | 'circle' | 'big' |
+----+----------+------+-----------+---------+
| 2 | 'red' | 8 | 'square' | 'tiny' |
+----+----------+------+-----------+---------+
| 3 | 'purple' | 4 | 'ellipse' | 'small' |
+----+----------+------+-----------+---------+
See also :func:`petl.transform.joins.leftjoin`. | Below is the the instruction that describes the task:
### Input:
Perform a left join, but where the key is not unique in the right-hand
table, arbitrarily choose the first row and ignore others. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'color', 'cost'],
... [1, 'blue', 12],
... [2, 'red', 8],
... [3, 'purple', 4]]
>>> table2 = [['id', 'shape', 'size'],
... [1, 'circle', 'big'],
... [1, 'circle', 'small'],
... [2, 'square', 'tiny'],
... [2, 'square', 'big'],
... [3, 'ellipse', 'small'],
... [3, 'ellipse', 'tiny']]
>>> table3 = etl.lookupjoin(table1, table2, key='id')
>>> table3
+----+----------+------+-----------+---------+
| id | color | cost | shape | size |
+====+==========+======+===========+=========+
| 1 | 'blue' | 12 | 'circle' | 'big' |
+----+----------+------+-----------+---------+
| 2 | 'red' | 8 | 'square' | 'tiny' |
+----+----------+------+-----------+---------+
| 3 | 'purple' | 4 | 'ellipse' | 'small' |
+----+----------+------+-----------+---------+
See also :func:`petl.transform.joins.leftjoin`.
### Response:
def lookupjoin(left, right, key=None, lkey=None, rkey=None, missing=None,
presorted=False, buffersize=None, tempdir=None, cache=True,
lprefix=None, rprefix=None):
"""
Perform a left join, but where the key is not unique in the right-hand
table, arbitrarily choose the first row and ignore others. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'color', 'cost'],
... [1, 'blue', 12],
... [2, 'red', 8],
... [3, 'purple', 4]]
>>> table2 = [['id', 'shape', 'size'],
... [1, 'circle', 'big'],
... [1, 'circle', 'small'],
... [2, 'square', 'tiny'],
... [2, 'square', 'big'],
... [3, 'ellipse', 'small'],
... [3, 'ellipse', 'tiny']]
>>> table3 = etl.lookupjoin(table1, table2, key='id')
>>> table3
+----+----------+------+-----------+---------+
| id | color | cost | shape | size |
+====+==========+======+===========+=========+
| 1 | 'blue' | 12 | 'circle' | 'big' |
+----+----------+------+-----------+---------+
| 2 | 'red' | 8 | 'square' | 'tiny' |
+----+----------+------+-----------+---------+
| 3 | 'purple' | 4 | 'ellipse' | 'small' |
+----+----------+------+-----------+---------+
See also :func:`petl.transform.joins.leftjoin`.
"""
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return LookupJoinView(left, right, lkey, rkey, presorted=presorted,
missing=missing, buffersize=buffersize,
tempdir=tempdir, cache=cache,
lprefix=lprefix, rprefix=rprefix) |
def _format_final_exc_line(etype, value):
"""Return a list of a single line -- normal case for format_exception_only"""
valuestr = _some_str(value)
if value is None or not valuestr:
line = "%s\n" % etype
else:
line = "%s: %s\n" % (etype, valuestr)
return line | Return a list of a single line -- normal case for format_exception_only | Below is the the instruction that describes the task:
### Input:
Return a list of a single line -- normal case for format_exception_only
### Response:
def _format_final_exc_line(etype, value):
"""Return a list of a single line -- normal case for format_exception_only"""
valuestr = _some_str(value)
if value is None or not valuestr:
line = "%s\n" % etype
else:
line = "%s: %s\n" % (etype, valuestr)
return line |
def async_handler(handler):
"""
This decorator allows for use of async handlers by automatically running
them in an event loop. The loop is added to the context object for if the
handler needs it.
Usage::
>>> from lambda_decorators import async_handler
>>> async def foobar():
... return 'foobar'
>>> @async_handler
... async def handler(event, context):
... return await foobar()
>>> class Context:
... pass
>>> handler({}, Context())
'foobar'
*NOTE: Python 3 only*
"""
@wraps(handler)
def wrapper(event, context):
context.loop = asyncio.get_event_loop()
return context.loop.run_until_complete(handler(event, context))
return wrapper | This decorator allows for use of async handlers by automatically running
them in an event loop. The loop is added to the context object for if the
handler needs it.
Usage::
>>> from lambda_decorators import async_handler
>>> async def foobar():
... return 'foobar'
>>> @async_handler
... async def handler(event, context):
... return await foobar()
>>> class Context:
... pass
>>> handler({}, Context())
'foobar'
*NOTE: Python 3 only* | Below is the the instruction that describes the task:
### Input:
This decorator allows for use of async handlers by automatically running
them in an event loop. The loop is added to the context object for if the
handler needs it.
Usage::
>>> from lambda_decorators import async_handler
>>> async def foobar():
... return 'foobar'
>>> @async_handler
... async def handler(event, context):
... return await foobar()
>>> class Context:
... pass
>>> handler({}, Context())
'foobar'
*NOTE: Python 3 only*
### Response:
def async_handler(handler):
"""
This decorator allows for use of async handlers by automatically running
them in an event loop. The loop is added to the context object for if the
handler needs it.
Usage::
>>> from lambda_decorators import async_handler
>>> async def foobar():
... return 'foobar'
>>> @async_handler
... async def handler(event, context):
... return await foobar()
>>> class Context:
... pass
>>> handler({}, Context())
'foobar'
*NOTE: Python 3 only*
"""
@wraps(handler)
def wrapper(event, context):
context.loop = asyncio.get_event_loop()
return context.loop.run_until_complete(handler(event, context))
return wrapper |
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result | Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False. | Below is the the instruction that describes the task:
### Input:
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
### Response:
def matches_requirement(self, req):
"""
Say if this instance matches (fulfills) a requirement.
:param req: The requirement to match.
:rtype req: str
:return: True if it matches, else False.
"""
# Requirement may contain extras - parse to lose those
# from what's passed to the matcher
r = parse_requirement(req)
scheme = get_scheme(self.metadata.scheme)
try:
matcher = scheme.matcher(r.requirement)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
logger.warning('could not read version %r - using name only',
req)
name = req.split()[0]
matcher = scheme.matcher(name)
name = matcher.key # case-insensitive
result = False
for p in self.provides:
p_name, p_ver = parse_name_and_version(p)
if p_name != name:
continue
try:
result = matcher.match(p_ver)
break
except UnsupportedVersionError:
pass
return result |
def allow_client_outgoing(self, application_sid, **kwargs):
"""
Allow the user of this token to make outgoing connections. Keyword arguments are passed
to the application.
:param str application_sid: Application to contact
"""
scope = ScopeURI('client', 'outgoing', {'appSid': application_sid})
if kwargs:
scope.add_param('appParams', urlencode(kwargs, doseq=True))
self.capabilities['outgoing'] = scope | Allow the user of this token to make outgoing connections. Keyword arguments are passed
to the application.
:param str application_sid: Application to contact | Below is the the instruction that describes the task:
### Input:
Allow the user of this token to make outgoing connections. Keyword arguments are passed
to the application.
:param str application_sid: Application to contact
### Response:
def allow_client_outgoing(self, application_sid, **kwargs):
"""
Allow the user of this token to make outgoing connections. Keyword arguments are passed
to the application.
:param str application_sid: Application to contact
"""
scope = ScopeURI('client', 'outgoing', {'appSid': application_sid})
if kwargs:
scope.add_param('appParams', urlencode(kwargs, doseq=True))
self.capabilities['outgoing'] = scope |
def make_preprocessing_fn(output_dir, features, keep_target):
"""Makes a preprocessing function.
Args:
output_dir: folder path that contains the vocab and stats files.
features: the features dict
Returns:
a function that takes a dict of input tensors
"""
def preprocessing_fn(inputs):
"""Preprocessing function.
Args:
inputs: dictionary of raw input tensors
Returns:
A dictionary of transformed tensors
"""
stats = json.loads(
file_io.read_file_to_string(
os.path.join(output_dir, STATS_FILE)).decode())
result = {}
for name, transform in six.iteritems(features):
transform_name = transform['transform']
source_column = transform['source_column']
if transform_name == TARGET_TRANSFORM:
if not keep_target:
continue
if file_io.file_exists(os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column)):
transform_name = 'one_hot'
else:
transform_name = 'identity'
if transform_name == 'identity':
result[name] = inputs[source_column]
elif transform_name == 'scale':
result[name] = _scale(
inputs[name],
min_x_value=stats['column_stats'][source_column]['min'],
max_x_value=stats['column_stats'][source_column]['max'],
output_min=transform.get('value', 1) * (-1),
output_max=transform.get('value', 1))
elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
vocab, ex_count = read_vocab_file(
os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column))
if transform_name == MULTI_HOT_TRANSFORM:
separator = transform.get('separator', ' ')
tokens = tf.string_split(inputs[source_column], separator)
result[name] = _string_to_int(tokens, vocab)
else:
result[name] = _string_to_int(inputs[source_column], vocab)
elif transform_name == IMAGE_TRANSFORM:
make_image_to_vec_fn = _make_image_to_vec_tito(
name, checkpoint=transform.get('checkpoint', None))
result[name] = make_image_to_vec_fn(inputs[source_column])
else:
raise ValueError('unknown transform %s' % transform_name)
return result
return preprocessing_fn | Makes a preprocessing function.
Args:
output_dir: folder path that contains the vocab and stats files.
features: the features dict
Returns:
a function that takes a dict of input tensors | Below is the the instruction that describes the task:
### Input:
Makes a preprocessing function.
Args:
output_dir: folder path that contains the vocab and stats files.
features: the features dict
Returns:
a function that takes a dict of input tensors
### Response:
def make_preprocessing_fn(output_dir, features, keep_target):
"""Makes a preprocessing function.
Args:
output_dir: folder path that contains the vocab and stats files.
features: the features dict
Returns:
a function that takes a dict of input tensors
"""
def preprocessing_fn(inputs):
"""Preprocessing function.
Args:
inputs: dictionary of raw input tensors
Returns:
A dictionary of transformed tensors
"""
stats = json.loads(
file_io.read_file_to_string(
os.path.join(output_dir, STATS_FILE)).decode())
result = {}
for name, transform in six.iteritems(features):
transform_name = transform['transform']
source_column = transform['source_column']
if transform_name == TARGET_TRANSFORM:
if not keep_target:
continue
if file_io.file_exists(os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column)):
transform_name = 'one_hot'
else:
transform_name = 'identity'
if transform_name == 'identity':
result[name] = inputs[source_column]
elif transform_name == 'scale':
result[name] = _scale(
inputs[name],
min_x_value=stats['column_stats'][source_column]['min'],
max_x_value=stats['column_stats'][source_column]['max'],
output_min=transform.get('value', 1) * (-1),
output_max=transform.get('value', 1))
elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
vocab, ex_count = read_vocab_file(
os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column))
if transform_name == MULTI_HOT_TRANSFORM:
separator = transform.get('separator', ' ')
tokens = tf.string_split(inputs[source_column], separator)
result[name] = _string_to_int(tokens, vocab)
else:
result[name] = _string_to_int(inputs[source_column], vocab)
elif transform_name == IMAGE_TRANSFORM:
make_image_to_vec_fn = _make_image_to_vec_tito(
name, checkpoint=transform.get('checkpoint', None))
result[name] = make_image_to_vec_fn(inputs[source_column])
else:
raise ValueError('unknown transform %s' % transform_name)
return result
return preprocessing_fn |
def getPattern(self, idx, sparseBinaryForm=False, cat=None):
"""Gets a training pattern either by index or category number.
:param idx: Index of the training pattern
:param sparseBinaryForm: If true, returns a list of the indices of the
non-zero bits in the training pattern
:param cat: If not None, get the first pattern belonging to category cat. If
this is specified, idx must be None.
:returns: The training pattern with specified index
"""
if cat is not None:
assert idx is None
idx = self._categoryList.index(cat)
if not self.useSparseMemory:
pattern = self._Memory[idx]
if sparseBinaryForm:
pattern = pattern.nonzero()[0]
else:
(nz, values) = self._Memory.rowNonZeros(idx)
if not sparseBinaryForm:
pattern = numpy.zeros(self._Memory.nCols())
numpy.put(pattern, nz, 1)
else:
pattern = nz
return pattern | Gets a training pattern either by index or category number.
:param idx: Index of the training pattern
:param sparseBinaryForm: If true, returns a list of the indices of the
non-zero bits in the training pattern
:param cat: If not None, get the first pattern belonging to category cat. If
this is specified, idx must be None.
:returns: The training pattern with specified index | Below is the the instruction that describes the task:
### Input:
Gets a training pattern either by index or category number.
:param idx: Index of the training pattern
:param sparseBinaryForm: If true, returns a list of the indices of the
non-zero bits in the training pattern
:param cat: If not None, get the first pattern belonging to category cat. If
this is specified, idx must be None.
:returns: The training pattern with specified index
### Response:
def getPattern(self, idx, sparseBinaryForm=False, cat=None):
"""Gets a training pattern either by index or category number.
:param idx: Index of the training pattern
:param sparseBinaryForm: If true, returns a list of the indices of the
non-zero bits in the training pattern
:param cat: If not None, get the first pattern belonging to category cat. If
this is specified, idx must be None.
:returns: The training pattern with specified index
"""
if cat is not None:
assert idx is None
idx = self._categoryList.index(cat)
if not self.useSparseMemory:
pattern = self._Memory[idx]
if sparseBinaryForm:
pattern = pattern.nonzero()[0]
else:
(nz, values) = self._Memory.rowNonZeros(idx)
if not sparseBinaryForm:
pattern = numpy.zeros(self._Memory.nCols())
numpy.put(pattern, nz, 1)
else:
pattern = nz
return pattern |
def _init_itemid2name(self):
"""Print gene symbols instead of gene IDs, if provided."""
if not hasattr(self.args, 'id2sym'):
return None
fin_id2sym = self.args.id2sym
if fin_id2sym is not None and os.path.exists(fin_id2sym):
id2sym = {}
cmpl = re.compile(r'^\s*(\S+)[\s,;]+(\S+)')
with open(fin_id2sym) as ifstrm:
for line in ifstrm:
mtch = cmpl.search(line)
if mtch:
id2sym[mtch.group(1)] = mtch.group(2)
return id2sym | Print gene symbols instead of gene IDs, if provided. | Below is the the instruction that describes the task:
### Input:
Print gene symbols instead of gene IDs, if provided.
### Response:
def _init_itemid2name(self):
"""Print gene symbols instead of gene IDs, if provided."""
if not hasattr(self.args, 'id2sym'):
return None
fin_id2sym = self.args.id2sym
if fin_id2sym is not None and os.path.exists(fin_id2sym):
id2sym = {}
cmpl = re.compile(r'^\s*(\S+)[\s,;]+(\S+)')
with open(fin_id2sym) as ifstrm:
for line in ifstrm:
mtch = cmpl.search(line)
if mtch:
id2sym[mtch.group(1)] = mtch.group(2)
return id2sym |
def normalize_path(path, filetype=None, has_filetype=True):
""" Convert dot-separated paths to directory paths
Allows non-python files to be placed in the PYTHONPATH and be referenced
using dot-notation instead of absolute or relative file-system paths.
If a text file, named test.txt was placed in a python repo named myprog in
the module named util, then:
normalize_path('myprog.util.test.txt')
would return the file-system's path to the file 'test.txt'.
Parameters:
path - path to convert
filetype - don't include as part of path if present as last token
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. Paths are relative to PYTHONPATH.
2. If the specified path is not a string, it is returned without
change.
3. If the specified path contains os-specific path separator
characters, the path is returned without change.
4. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
"""
if not isinstance(path, str):
return path
if '.' in path and os.path.sep not in path: # path is dot separated
parts = path.split('.')
extension = ''
if len(parts) > 1:
if filetype and has_filetype:
has_filetype = False # filetype is more specific
if (filetype and parts[-1] == filetype) or has_filetype:
extension = '.' + parts[-1]
parts = parts[:-1]
if len(parts) > 1:
if PY3:
spec = importlib.util.find_spec(parts[0])
path = list(spec.submodule_search_locations)[0]
else:
_, path, _ = imp.find_module(parts[0])
path = os.path.join(path, *parts[1:]) + extension
return path | Convert dot-separated paths to directory paths
Allows non-python files to be placed in the PYTHONPATH and be referenced
using dot-notation instead of absolute or relative file-system paths.
If a text file, named test.txt was placed in a python repo named myprog in
the module named util, then:
normalize_path('myprog.util.test.txt')
would return the file-system's path to the file 'test.txt'.
Parameters:
path - path to convert
filetype - don't include as part of path if present as last token
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. Paths are relative to PYTHONPATH.
2. If the specified path is not a string, it is returned without
change.
3. If the specified path contains os-specific path separator
characters, the path is returned without change.
4. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly. | Below is the the instruction that describes the task:
### Input:
Convert dot-separated paths to directory paths
Allows non-python files to be placed in the PYTHONPATH and be referenced
using dot-notation instead of absolute or relative file-system paths.
If a text file, named test.txt was placed in a python repo named myprog in
the module named util, then:
normalize_path('myprog.util.test.txt')
would return the file-system's path to the file 'test.txt'.
Parameters:
path - path to convert
filetype - don't include as part of path if present as last token
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. Paths are relative to PYTHONPATH.
2. If the specified path is not a string, it is returned without
change.
3. If the specified path contains os-specific path separator
characters, the path is returned without change.
4. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
### Response:
def normalize_path(path, filetype=None, has_filetype=True):
""" Convert dot-separated paths to directory paths
Allows non-python files to be placed in the PYTHONPATH and be referenced
using dot-notation instead of absolute or relative file-system paths.
If a text file, named test.txt was placed in a python repo named myprog in
the module named util, then:
normalize_path('myprog.util.test.txt')
would return the file-system's path to the file 'test.txt'.
Parameters:
path - path to convert
filetype - don't include as part of path if present as last token
has_filetype - if True, treat last dot-delimited token as filetype
Notes:
1. Paths are relative to PYTHONPATH.
2. If the specified path is not a string, it is returned without
change.
3. If the specified path contains os-specific path separator
characters, the path is returned without change.
4. If has_filetype is True, filetype does not have to be specified.
If filetype is specified, has_filetype is ignored, and filetype
must match the last dot-delimited token exactly.
"""
if not isinstance(path, str):
return path
if '.' in path and os.path.sep not in path: # path is dot separated
parts = path.split('.')
extension = ''
if len(parts) > 1:
if filetype and has_filetype:
has_filetype = False # filetype is more specific
if (filetype and parts[-1] == filetype) or has_filetype:
extension = '.' + parts[-1]
parts = parts[:-1]
if len(parts) > 1:
if PY3:
spec = importlib.util.find_spec(parts[0])
path = list(spec.submodule_search_locations)[0]
else:
_, path, _ = imp.find_module(parts[0])
path = os.path.join(path, *parts[1:]) + extension
return path |
def physical_cpus():
"""Get cpus identifiers, for instance set(["0", "1", "2", "3"])
:return Number of physical CPUs available
:rtype: int
"""
if platform.system() == 'Darwin':
ncores = subprocess.check_output(
['/usr/sbin/sysctl', '-n', 'hw.ncpu'], shell=False
)
return int(ncores.strip())
sockets = set()
with open('/proc/cpuinfo') as istr:
for line in istr:
if line.startswith('physical id'):
sockets.add(line.split(':')[-1].strip())
return len(sockets) | Get cpus identifiers, for instance set(["0", "1", "2", "3"])
:return Number of physical CPUs available
:rtype: int | Below is the the instruction that describes the task:
### Input:
Get cpus identifiers, for instance set(["0", "1", "2", "3"])
:return Number of physical CPUs available
:rtype: int
### Response:
def physical_cpus():
"""Get cpus identifiers, for instance set(["0", "1", "2", "3"])
:return Number of physical CPUs available
:rtype: int
"""
if platform.system() == 'Darwin':
ncores = subprocess.check_output(
['/usr/sbin/sysctl', '-n', 'hw.ncpu'], shell=False
)
return int(ncores.strip())
sockets = set()
with open('/proc/cpuinfo') as istr:
for line in istr:
if line.startswith('physical id'):
sockets.add(line.split(':')[-1].strip())
return len(sockets) |
def min_validator(min_value):
"""Return validator function that ensures lower bound of a number.
Result validation function will validate the internal value of resource
instance field with the ``value >= min_value`` check
Args:
min_value: minimal value for new validator
"""
def validator(value):
if value < min_value:
raise ValidationError("{} is not >= {}".format(value, min_value))
return validator | Return validator function that ensures lower bound of a number.
Result validation function will validate the internal value of resource
instance field with the ``value >= min_value`` check
Args:
min_value: minimal value for new validator | Below is the the instruction that describes the task:
### Input:
Return validator function that ensures lower bound of a number.
Result validation function will validate the internal value of resource
instance field with the ``value >= min_value`` check
Args:
min_value: minimal value for new validator
### Response:
def min_validator(min_value):
"""Return validator function that ensures lower bound of a number.
Result validation function will validate the internal value of resource
instance field with the ``value >= min_value`` check
Args:
min_value: minimal value for new validator
"""
def validator(value):
if value < min_value:
raise ValidationError("{} is not >= {}".format(value, min_value))
return validator |
def line_intersects_itself(lons, lats, closed_shape=False):
"""
Return ``True`` if line of points intersects itself.
Line with the last point repeating the first one considered
intersecting itself.
The line is defined by lists (or numpy arrays) of points'
longitudes and latitudes (depth is not taken into account).
:param closed_shape:
If ``True`` the line will be checked twice: first time with
its original shape and second time with the points sequence
being shifted by one point (the last point becomes first,
the first turns second and so on). This is useful for
checking that the sequence of points defines a valid
:class:`~openquake.hazardlib.geo.polygon.Polygon`.
"""
assert len(lons) == len(lats)
if len(lons) <= 3:
# line can not intersect itself unless there are
# at least four points
return False
west, east, north, south = get_spherical_bounding_box(lons, lats)
proj = OrthographicProjection(west, east, north, south)
xx, yy = proj(lons, lats)
if not shapely.geometry.LineString(list(zip(xx, yy))).is_simple:
return True
if closed_shape:
xx, yy = proj(numpy.roll(lons, 1), numpy.roll(lats, 1))
if not shapely.geometry.LineString(list(zip(xx, yy))).is_simple:
return True
return False | Return ``True`` if line of points intersects itself.
Line with the last point repeating the first one considered
intersecting itself.
The line is defined by lists (or numpy arrays) of points'
longitudes and latitudes (depth is not taken into account).
:param closed_shape:
If ``True`` the line will be checked twice: first time with
its original shape and second time with the points sequence
being shifted by one point (the last point becomes first,
the first turns second and so on). This is useful for
checking that the sequence of points defines a valid
:class:`~openquake.hazardlib.geo.polygon.Polygon`. | Below is the the instruction that describes the task:
### Input:
Return ``True`` if line of points intersects itself.
Line with the last point repeating the first one considered
intersecting itself.
The line is defined by lists (or numpy arrays) of points'
longitudes and latitudes (depth is not taken into account).
:param closed_shape:
If ``True`` the line will be checked twice: first time with
its original shape and second time with the points sequence
being shifted by one point (the last point becomes first,
the first turns second and so on). This is useful for
checking that the sequence of points defines a valid
:class:`~openquake.hazardlib.geo.polygon.Polygon`.
### Response:
def line_intersects_itself(lons, lats, closed_shape=False):
"""
Return ``True`` if line of points intersects itself.
Line with the last point repeating the first one considered
intersecting itself.
The line is defined by lists (or numpy arrays) of points'
longitudes and latitudes (depth is not taken into account).
:param closed_shape:
If ``True`` the line will be checked twice: first time with
its original shape and second time with the points sequence
being shifted by one point (the last point becomes first,
the first turns second and so on). This is useful for
checking that the sequence of points defines a valid
:class:`~openquake.hazardlib.geo.polygon.Polygon`.
"""
assert len(lons) == len(lats)
if len(lons) <= 3:
# line can not intersect itself unless there are
# at least four points
return False
west, east, north, south = get_spherical_bounding_box(lons, lats)
proj = OrthographicProjection(west, east, north, south)
xx, yy = proj(lons, lats)
if not shapely.geometry.LineString(list(zip(xx, yy))).is_simple:
return True
if closed_shape:
xx, yy = proj(numpy.roll(lons, 1), numpy.roll(lats, 1))
if not shapely.geometry.LineString(list(zip(xx, yy))).is_simple:
return True
return False |
def show(self, value=True, modal=None):
"Display or hide the window, optionally disabling all other windows"
self.wx_obj.Show(value)
if modal:
# disable all top level windows of this application (MakeModal)
disabler = wx.WindowDisabler(self.wx_obj)
# create an event loop to stop execution
eventloop = wx.EventLoop()
def on_close_modal(evt):
evt.Skip()
eventloop.Exit()
self.wx_obj.Bind(wx.EVT_CLOSE, on_close_modal)
# start the event loop to wait user interaction
eventloop.Run()
# reenable the windows disabled and return control to the caller
del disabler | Display or hide the window, optionally disabling all other windows | Below is the the instruction that describes the task:
### Input:
Display or hide the window, optionally disabling all other windows
### Response:
def show(self, value=True, modal=None):
"Display or hide the window, optionally disabling all other windows"
self.wx_obj.Show(value)
if modal:
# disable all top level windows of this application (MakeModal)
disabler = wx.WindowDisabler(self.wx_obj)
# create an event loop to stop execution
eventloop = wx.EventLoop()
def on_close_modal(evt):
evt.Skip()
eventloop.Exit()
self.wx_obj.Bind(wx.EVT_CLOSE, on_close_modal)
# start the event loop to wait user interaction
eventloop.Run()
# reenable the windows disabled and return control to the caller
del disabler |
def nontruncating_zip(*seqs):
"""Return a list of tuples, where each tuple contains the i-th
element from each of the argument sequences.
The returned list is as long as the longest argument sequence.
Shorter argument sequences will be represented in the output as
None padding elements:
nontruncating_zip([1, 2, 3], ['a', 'b'])
-> [(1, 'a'), (2, 'b'), (3, None)]
"""
n_seqs = len(seqs)
tups = []
idx = 0
while True:
empties = 0
tup = []
for seq in seqs:
try:
tup.append(seq[idx])
except IndexError:
empties += 1
tup.append(None)
if empties == n_seqs:
break
tup = tuple(tup)
tups.append(tup)
idx += 1
return tups | Return a list of tuples, where each tuple contains the i-th
element from each of the argument sequences.
The returned list is as long as the longest argument sequence.
Shorter argument sequences will be represented in the output as
None padding elements:
nontruncating_zip([1, 2, 3], ['a', 'b'])
-> [(1, 'a'), (2, 'b'), (3, None)] | Below is the the instruction that describes the task:
### Input:
Return a list of tuples, where each tuple contains the i-th
element from each of the argument sequences.
The returned list is as long as the longest argument sequence.
Shorter argument sequences will be represented in the output as
None padding elements:
nontruncating_zip([1, 2, 3], ['a', 'b'])
-> [(1, 'a'), (2, 'b'), (3, None)]
### Response:
def nontruncating_zip(*seqs):
"""Return a list of tuples, where each tuple contains the i-th
element from each of the argument sequences.
The returned list is as long as the longest argument sequence.
Shorter argument sequences will be represented in the output as
None padding elements:
nontruncating_zip([1, 2, 3], ['a', 'b'])
-> [(1, 'a'), (2, 'b'), (3, None)]
"""
n_seqs = len(seqs)
tups = []
idx = 0
while True:
empties = 0
tup = []
for seq in seqs:
try:
tup.append(seq[idx])
except IndexError:
empties += 1
tup.append(None)
if empties == n_seqs:
break
tup = tuple(tup)
tups.append(tup)
idx += 1
return tups |
def parse(self, filelike, filename):
"""Parse the given file-like object and return its Module object."""
self.log = log
self.source = filelike.readlines()
src = "".join(self.source)
# This may raise a SyntaxError:
compile(src, filename, "exec")
self.stream = TokenStream(StringIO(src))
self.filename = filename
self.all = None
self.future_imports = set()
self._accumulated_decorators = []
return self.parse_module() | Parse the given file-like object and return its Module object. | Below is the the instruction that describes the task:
### Input:
Parse the given file-like object and return its Module object.
### Response:
def parse(self, filelike, filename):
"""Parse the given file-like object and return its Module object."""
self.log = log
self.source = filelike.readlines()
src = "".join(self.source)
# This may raise a SyntaxError:
compile(src, filename, "exec")
self.stream = TokenStream(StringIO(src))
self.filename = filename
self.all = None
self.future_imports = set()
self._accumulated_decorators = []
return self.parse_module() |
def do_asg(self,args):
"""Go to the specified auto scaling group. asg -h for detailed help"""
parser = CommandArgumentParser("asg")
parser.add_argument(dest='asg',help='asg index or name');
args = vars(parser.parse_args(args))
print "loading auto scaling group {}".format(args['asg'])
try:
index = int(args['asg'])
asgSummary = self.wrappedStack['resourcesByTypeIndex']['AWS::AutoScaling::AutoScalingGroup'][index]
except:
asgSummary = self.wrappedStack['resourcesByTypeName']['AWS::AutoScaling::AutoScalingGroup'][args['asg']]
self.stackResource(asgSummary.stack_name,asgSummary.logical_id) | Go to the specified auto scaling group. asg -h for detailed help | Below is the the instruction that describes the task:
### Input:
Go to the specified auto scaling group. asg -h for detailed help
### Response:
def do_asg(self,args):
"""Go to the specified auto scaling group. asg -h for detailed help"""
parser = CommandArgumentParser("asg")
parser.add_argument(dest='asg',help='asg index or name');
args = vars(parser.parse_args(args))
print "loading auto scaling group {}".format(args['asg'])
try:
index = int(args['asg'])
asgSummary = self.wrappedStack['resourcesByTypeIndex']['AWS::AutoScaling::AutoScalingGroup'][index]
except:
asgSummary = self.wrappedStack['resourcesByTypeName']['AWS::AutoScaling::AutoScalingGroup'][args['asg']]
self.stackResource(asgSummary.stack_name,asgSummary.logical_id) |
def histogram_cluster_table(analyzed_data_file, output_file, chunk_size=10000000):
'''Reads in the cluster info table in chunks and histograms the seed pixels into one occupancy array.
The 3rd dimension of the occupancy array is the number of different scan parameters used
Parameters
----------
analyzed_data_file : string
HDF5 filename of the file containing the cluster table. If a scan parameter is given in the meta data, the occupancy histogramming is done per scan parameter step.
Returns
-------
occupancy_array: numpy.array with dimensions (col, row, #scan_parameter)
'''
with tb.open_file(analyzed_data_file, mode="r") as in_file_h5:
with tb.open_file(output_file, mode="w") as out_file_h5:
histogram = PyDataHistograming()
histogram.create_occupancy_hist(True)
scan_parameters = None
event_number_indices = None
scan_parameter_indices = None
try:
meta_data = in_file_h5.root.meta_data[:]
scan_parameters = analysis_utils.get_unique_scan_parameter_combinations(meta_data)
if scan_parameters is not None:
scan_parameter_indices = np.array(range(0, len(scan_parameters)), dtype='u4')
event_number_indices = np.ascontiguousarray(scan_parameters['event_number']).astype(np.uint64)
histogram.add_meta_event_index(event_number_indices, array_length=len(scan_parameters['event_number']))
histogram.add_scan_parameter(scan_parameter_indices)
logging.info("Add %d different scan parameter(s) for analysis", len(scan_parameters))
else:
logging.info("No scan parameter data provided")
histogram.set_no_scan_parameter()
except tb.exceptions.NoSuchNodeError:
logging.info("No meta data provided, use no scan parameter")
histogram.set_no_scan_parameter()
logging.info('Histogram cluster seeds...')
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=in_file_h5.root.Cluster.shape[0], term_width=80)
progress_bar.start()
total_cluster = 0 # to check analysis
for cluster, index in analysis_utils.data_aligned_at_events(in_file_h5.root.Cluster, chunk_size=chunk_size):
total_cluster += len(cluster)
histogram.add_cluster_seed_hits(cluster, len(cluster))
progress_bar.update(index)
progress_bar.finish()
filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) # compression of the written data
occupancy_array = histogram.get_occupancy().T
occupancy_array_table = out_file_h5.create_carray(out_file_h5.root, name='HistOcc', title='Occupancy Histogram', atom=tb.Atom.from_dtype(occupancy_array.dtype), shape=occupancy_array.shape, filters=filter_table)
occupancy_array_table[:] = occupancy_array
if total_cluster != np.sum(occupancy_array):
logging.warning('Analysis shows inconsistent number of cluster used. Check needed!')
in_file_h5.root.meta_data.copy(out_file_h5.root) | Reads in the cluster info table in chunks and histograms the seed pixels into one occupancy array.
The 3rd dimension of the occupancy array is the number of different scan parameters used
Parameters
----------
analyzed_data_file : string
HDF5 filename of the file containing the cluster table. If a scan parameter is given in the meta data, the occupancy histogramming is done per scan parameter step.
Returns
-------
occupancy_array: numpy.array with dimensions (col, row, #scan_parameter) | Below is the the instruction that describes the task:
### Input:
Reads in the cluster info table in chunks and histograms the seed pixels into one occupancy array.
The 3rd dimension of the occupancy array is the number of different scan parameters used
Parameters
----------
analyzed_data_file : string
HDF5 filename of the file containing the cluster table. If a scan parameter is given in the meta data, the occupancy histogramming is done per scan parameter step.
Returns
-------
occupancy_array: numpy.array with dimensions (col, row, #scan_parameter)
### Response:
def histogram_cluster_table(analyzed_data_file, output_file, chunk_size=10000000):
'''Reads in the cluster info table in chunks and histograms the seed pixels into one occupancy array.
The 3rd dimension of the occupancy array is the number of different scan parameters used
Parameters
----------
analyzed_data_file : string
HDF5 filename of the file containing the cluster table. If a scan parameter is given in the meta data, the occupancy histogramming is done per scan parameter step.
Returns
-------
occupancy_array: numpy.array with dimensions (col, row, #scan_parameter)
'''
with tb.open_file(analyzed_data_file, mode="r") as in_file_h5:
with tb.open_file(output_file, mode="w") as out_file_h5:
histogram = PyDataHistograming()
histogram.create_occupancy_hist(True)
scan_parameters = None
event_number_indices = None
scan_parameter_indices = None
try:
meta_data = in_file_h5.root.meta_data[:]
scan_parameters = analysis_utils.get_unique_scan_parameter_combinations(meta_data)
if scan_parameters is not None:
scan_parameter_indices = np.array(range(0, len(scan_parameters)), dtype='u4')
event_number_indices = np.ascontiguousarray(scan_parameters['event_number']).astype(np.uint64)
histogram.add_meta_event_index(event_number_indices, array_length=len(scan_parameters['event_number']))
histogram.add_scan_parameter(scan_parameter_indices)
logging.info("Add %d different scan parameter(s) for analysis", len(scan_parameters))
else:
logging.info("No scan parameter data provided")
histogram.set_no_scan_parameter()
except tb.exceptions.NoSuchNodeError:
logging.info("No meta data provided, use no scan parameter")
histogram.set_no_scan_parameter()
logging.info('Histogram cluster seeds...')
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=in_file_h5.root.Cluster.shape[0], term_width=80)
progress_bar.start()
total_cluster = 0 # to check analysis
for cluster, index in analysis_utils.data_aligned_at_events(in_file_h5.root.Cluster, chunk_size=chunk_size):
total_cluster += len(cluster)
histogram.add_cluster_seed_hits(cluster, len(cluster))
progress_bar.update(index)
progress_bar.finish()
filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False) # compression of the written data
occupancy_array = histogram.get_occupancy().T
occupancy_array_table = out_file_h5.create_carray(out_file_h5.root, name='HistOcc', title='Occupancy Histogram', atom=tb.Atom.from_dtype(occupancy_array.dtype), shape=occupancy_array.shape, filters=filter_table)
occupancy_array_table[:] = occupancy_array
if total_cluster != np.sum(occupancy_array):
logging.warning('Analysis shows inconsistent number of cluster used. Check needed!')
in_file_h5.root.meta_data.copy(out_file_h5.root) |
def zharkov_panh(v, temp, v0, a0, m, n, z, t_ref=300.,
three_r=3. * constants.R):
"""
calculate pressure from anharmonicity for Zharkov equation
the equation is from Dorogokupets 2015
:param v: unit-cell volume in A^3
:param temp: temperature in K
:param v0: unit-cell volume in A^3 at 1 bar
:param a0: parameter in K-1 for the Zharkov equation
:param m: parameter for the Zharkov equation
:param n: number of elements in a chemical formula
:param z: number of formula unit in a unit cell
:param three_r: 3 times gas constant
:return: anharmonic contribution for pressure in GPa
"""
v_mol = vol_uc2mol(v, z)
x = v / v0
a = a0 * np.power(x, m)
def f(t):
return three_r * n / 2. * a * m / v_mol * np.power(t, 2.) * 1.e-9
return f(temp) - f(t_ref) | calculate pressure from anharmonicity for Zharkov equation
the equation is from Dorogokupets 2015
:param v: unit-cell volume in A^3
:param temp: temperature in K
:param v0: unit-cell volume in A^3 at 1 bar
:param a0: parameter in K-1 for the Zharkov equation
:param m: parameter for the Zharkov equation
:param n: number of elements in a chemical formula
:param z: number of formula unit in a unit cell
:param three_r: 3 times gas constant
:return: anharmonic contribution for pressure in GPa | Below is the the instruction that describes the task:
### Input:
calculate pressure from anharmonicity for Zharkov equation
the equation is from Dorogokupets 2015
:param v: unit-cell volume in A^3
:param temp: temperature in K
:param v0: unit-cell volume in A^3 at 1 bar
:param a0: parameter in K-1 for the Zharkov equation
:param m: parameter for the Zharkov equation
:param n: number of elements in a chemical formula
:param z: number of formula unit in a unit cell
:param three_r: 3 times gas constant
:return: anharmonic contribution for pressure in GPa
### Response:
def zharkov_panh(v, temp, v0, a0, m, n, z, t_ref=300.,
three_r=3. * constants.R):
"""
calculate pressure from anharmonicity for Zharkov equation
the equation is from Dorogokupets 2015
:param v: unit-cell volume in A^3
:param temp: temperature in K
:param v0: unit-cell volume in A^3 at 1 bar
:param a0: parameter in K-1 for the Zharkov equation
:param m: parameter for the Zharkov equation
:param n: number of elements in a chemical formula
:param z: number of formula unit in a unit cell
:param three_r: 3 times gas constant
:return: anharmonic contribution for pressure in GPa
"""
v_mol = vol_uc2mol(v, z)
x = v / v0
a = a0 * np.power(x, m)
def f(t):
return three_r * n / 2. * a * m / v_mol * np.power(t, 2.) * 1.e-9
return f(temp) - f(t_ref) |
def set_interval(self, start, end, value, compact=False):
"""Set the value for the time series on an interval. If compact is
True, only set the value if it's different from what it would
be anyway.
"""
# for each interval to render
for i, (s, e, v) in enumerate(self.iterperiods(start, end)):
# look at all intervals included in the current interval
# (always at least 1)
if i == 0:
# if the first, set initial value to new value of range
self.set(s, value, compact)
else:
# otherwise, remove intermediate key
del self[s]
# finish by setting the end of the interval to the previous value
self.set(end, v, compact) | Set the value for the time series on an interval. If compact is
True, only set the value if it's different from what it would
be anyway. | Below is the the instruction that describes the task:
### Input:
Set the value for the time series on an interval. If compact is
True, only set the value if it's different from what it would
be anyway.
### Response:
def set_interval(self, start, end, value, compact=False):
"""Set the value for the time series on an interval. If compact is
True, only set the value if it's different from what it would
be anyway.
"""
# for each interval to render
for i, (s, e, v) in enumerate(self.iterperiods(start, end)):
# look at all intervals included in the current interval
# (always at least 1)
if i == 0:
# if the first, set initial value to new value of range
self.set(s, value, compact)
else:
# otherwise, remove intermediate key
del self[s]
# finish by setting the end of the interval to the previous value
self.set(end, v, compact) |
def parse_volumes_output(out):
"""
Parses the output of the Docker CLI 'docker volume ls' and returns it in the format similar to the Docker API.
:param out: CLI output.
:type out: unicode | str
:return: Parsed result.
:rtype: list[dict]
"""
if not out:
return []
line_iter = islice(out.splitlines(), 1, None) # Skip header
return list(map(_volume_info, line_iter)) | Parses the output of the Docker CLI 'docker volume ls' and returns it in the format similar to the Docker API.
:param out: CLI output.
:type out: unicode | str
:return: Parsed result.
:rtype: list[dict] | Below is the the instruction that describes the task:
### Input:
Parses the output of the Docker CLI 'docker volume ls' and returns it in the format similar to the Docker API.
:param out: CLI output.
:type out: unicode | str
:return: Parsed result.
:rtype: list[dict]
### Response:
def parse_volumes_output(out):
"""
Parses the output of the Docker CLI 'docker volume ls' and returns it in the format similar to the Docker API.
:param out: CLI output.
:type out: unicode | str
:return: Parsed result.
:rtype: list[dict]
"""
if not out:
return []
line_iter = islice(out.splitlines(), 1, None) # Skip header
return list(map(_volume_info, line_iter)) |
def verify_tops(self, tops):
'''
Verify the contents of the top file data
'''
errors = []
if not isinstance(tops, dict):
errors.append('Top data was not formed as a dict')
# No further checks will work, bail out
return errors
for saltenv, matches in six.iteritems(tops):
if saltenv == 'include':
continue
if not isinstance(saltenv, six.string_types):
errors.append(
'Environment {0} in top file is not formed as a '
'string'.format(saltenv)
)
if saltenv == '':
errors.append('Empty saltenv statement in top file')
if not isinstance(matches, dict):
errors.append(
'The top file matches for saltenv {0} are not '
'formatted as a dict'.format(saltenv)
)
for slsmods in six.itervalues(matches):
if not isinstance(slsmods, list):
errors.append('Malformed topfile (state declarations not '
'formed as a list)')
continue
for slsmod in slsmods:
if isinstance(slsmod, dict):
# This value is a match option
for val in six.itervalues(slsmod):
if not val:
errors.append(
'Improperly formatted top file matcher '
'in saltenv {0}: {1} file'.format(
slsmod,
val
)
)
elif isinstance(slsmod, six.string_types):
# This is a sls module
if not slsmod:
errors.append(
'Environment {0} contains an empty sls '
'index'.format(saltenv)
)
return errors | Verify the contents of the top file data | Below is the the instruction that describes the task:
### Input:
Verify the contents of the top file data
### Response:
def verify_tops(self, tops):
'''
Verify the contents of the top file data
'''
errors = []
if not isinstance(tops, dict):
errors.append('Top data was not formed as a dict')
# No further checks will work, bail out
return errors
for saltenv, matches in six.iteritems(tops):
if saltenv == 'include':
continue
if not isinstance(saltenv, six.string_types):
errors.append(
'Environment {0} in top file is not formed as a '
'string'.format(saltenv)
)
if saltenv == '':
errors.append('Empty saltenv statement in top file')
if not isinstance(matches, dict):
errors.append(
'The top file matches for saltenv {0} are not '
'formatted as a dict'.format(saltenv)
)
for slsmods in six.itervalues(matches):
if not isinstance(slsmods, list):
errors.append('Malformed topfile (state declarations not '
'formed as a list)')
continue
for slsmod in slsmods:
if isinstance(slsmod, dict):
# This value is a match option
for val in six.itervalues(slsmod):
if not val:
errors.append(
'Improperly formatted top file matcher '
'in saltenv {0}: {1} file'.format(
slsmod,
val
)
)
elif isinstance(slsmod, six.string_types):
# This is a sls module
if not slsmod:
errors.append(
'Environment {0} contains an empty sls '
'index'.format(saltenv)
)
return errors |
def __partial_trace_vec(vec, trace_systems, dimensions, reverse=True):
"""
Partial trace over subsystems of multi-partite vector.
Args:
vec (vector_like): complex vector N
trace_systems (list(int)): a list of subsystems (starting from 0) to
trace over.
dimensions (list(int)): a list of the dimensions of the subsystems.
If this is not set it will assume all
subsystems are qubits.
reverse (bool): ordering of systems in operator.
If True system-0 is the right most system in tensor product.
If False system-0 is the left most system in tensor product.
Returns:
ndarray: A density matrix with the appropriate subsystems traced over.
"""
# trace sys positions
if reverse:
dimensions = dimensions[::-1]
trace_systems = len(dimensions) - 1 - np.array(trace_systems)
rho = vec.reshape(dimensions)
rho = np.tensordot(rho, rho.conj(), axes=(trace_systems, trace_systems))
d = int(np.sqrt(np.product(rho.shape)))
return rho.reshape(d, d) | Partial trace over subsystems of multi-partite vector.
Args:
vec (vector_like): complex vector N
trace_systems (list(int)): a list of subsystems (starting from 0) to
trace over.
dimensions (list(int)): a list of the dimensions of the subsystems.
If this is not set it will assume all
subsystems are qubits.
reverse (bool): ordering of systems in operator.
If True system-0 is the right most system in tensor product.
If False system-0 is the left most system in tensor product.
Returns:
ndarray: A density matrix with the appropriate subsystems traced over. | Below is the the instruction that describes the task:
### Input:
Partial trace over subsystems of multi-partite vector.
Args:
vec (vector_like): complex vector N
trace_systems (list(int)): a list of subsystems (starting from 0) to
trace over.
dimensions (list(int)): a list of the dimensions of the subsystems.
If this is not set it will assume all
subsystems are qubits.
reverse (bool): ordering of systems in operator.
If True system-0 is the right most system in tensor product.
If False system-0 is the left most system in tensor product.
Returns:
ndarray: A density matrix with the appropriate subsystems traced over.
### Response:
def __partial_trace_vec(vec, trace_systems, dimensions, reverse=True):
"""
Partial trace over subsystems of multi-partite vector.
Args:
vec (vector_like): complex vector N
trace_systems (list(int)): a list of subsystems (starting from 0) to
trace over.
dimensions (list(int)): a list of the dimensions of the subsystems.
If this is not set it will assume all
subsystems are qubits.
reverse (bool): ordering of systems in operator.
If True system-0 is the right most system in tensor product.
If False system-0 is the left most system in tensor product.
Returns:
ndarray: A density matrix with the appropriate subsystems traced over.
"""
# trace sys positions
if reverse:
dimensions = dimensions[::-1]
trace_systems = len(dimensions) - 1 - np.array(trace_systems)
rho = vec.reshape(dimensions)
rho = np.tensordot(rho, rho.conj(), axes=(trace_systems, trace_systems))
d = int(np.sqrt(np.product(rho.shape)))
return rho.reshape(d, d) |
def get_by_username(cls, username):
"""
Return a User by email address
"""
return cls.query().filter(cls.username == username).first() | Return a User by email address | Below is the the instruction that describes the task:
### Input:
Return a User by email address
### Response:
def get_by_username(cls, username):
"""
Return a User by email address
"""
return cls.query().filter(cls.username == username).first() |
def from_points(cls, lons, lats, depths=None, sitemodel=None,
req_site_params=()):
"""
Build the site collection from
:param lons:
a sequence of longitudes
:param lats:
a sequence of latitudes
:param depths:
a sequence of depths (or None)
:param sitemodel:
None or an object containing site parameters as attributes
:param req_site_params:
a sequence of required site parameters, possibly empty
"""
assert len(lons) < U32LIMIT, len(lons)
if depths is None:
depths = numpy.zeros(len(lons))
assert len(lons) == len(lats) == len(depths), (len(lons), len(lats),
len(depths))
self = object.__new__(cls)
self.complete = self
req = ['sids', 'lon', 'lat', 'depth'] + sorted(
par for par in req_site_params if par not in ('lon', 'lat'))
if 'vs30' in req and 'vs30measured' not in req:
req.append('vs30measured')
self.dtype = numpy.dtype([(p, site_param_dt[p]) for p in req])
self.array = arr = numpy.zeros(len(lons), self.dtype)
arr['sids'] = numpy.arange(len(lons), dtype=numpy.uint32)
arr['lon'] = fix_lon(numpy.array(lons))
arr['lat'] = numpy.array(lats)
arr['depth'] = numpy.array(depths)
if sitemodel is None:
pass
elif hasattr(sitemodel, 'reference_vs30_value'):
# sitemodel is actually an OqParam instance
self._set('vs30', sitemodel.reference_vs30_value)
self._set('vs30measured',
sitemodel.reference_vs30_type == 'measured')
self._set('z1pt0', sitemodel.reference_depth_to_1pt0km_per_sec)
self._set('z2pt5', sitemodel.reference_depth_to_2pt5km_per_sec)
self._set('siteclass', sitemodel.reference_siteclass)
else:
for name in sitemodel.dtype.names:
if name not in ('lon', 'lat'):
self._set(name, sitemodel[name])
return self | Build the site collection from
:param lons:
a sequence of longitudes
:param lats:
a sequence of latitudes
:param depths:
a sequence of depths (or None)
:param sitemodel:
None or an object containing site parameters as attributes
:param req_site_params:
a sequence of required site parameters, possibly empty | Below is the the instruction that describes the task:
### Input:
Build the site collection from
:param lons:
a sequence of longitudes
:param lats:
a sequence of latitudes
:param depths:
a sequence of depths (or None)
:param sitemodel:
None or an object containing site parameters as attributes
:param req_site_params:
a sequence of required site parameters, possibly empty
### Response:
def from_points(cls, lons, lats, depths=None, sitemodel=None,
req_site_params=()):
"""
Build the site collection from
:param lons:
a sequence of longitudes
:param lats:
a sequence of latitudes
:param depths:
a sequence of depths (or None)
:param sitemodel:
None or an object containing site parameters as attributes
:param req_site_params:
a sequence of required site parameters, possibly empty
"""
assert len(lons) < U32LIMIT, len(lons)
if depths is None:
depths = numpy.zeros(len(lons))
assert len(lons) == len(lats) == len(depths), (len(lons), len(lats),
len(depths))
self = object.__new__(cls)
self.complete = self
req = ['sids', 'lon', 'lat', 'depth'] + sorted(
par for par in req_site_params if par not in ('lon', 'lat'))
if 'vs30' in req and 'vs30measured' not in req:
req.append('vs30measured')
self.dtype = numpy.dtype([(p, site_param_dt[p]) for p in req])
self.array = arr = numpy.zeros(len(lons), self.dtype)
arr['sids'] = numpy.arange(len(lons), dtype=numpy.uint32)
arr['lon'] = fix_lon(numpy.array(lons))
arr['lat'] = numpy.array(lats)
arr['depth'] = numpy.array(depths)
if sitemodel is None:
pass
elif hasattr(sitemodel, 'reference_vs30_value'):
# sitemodel is actually an OqParam instance
self._set('vs30', sitemodel.reference_vs30_value)
self._set('vs30measured',
sitemodel.reference_vs30_type == 'measured')
self._set('z1pt0', sitemodel.reference_depth_to_1pt0km_per_sec)
self._set('z2pt5', sitemodel.reference_depth_to_2pt5km_per_sec)
self._set('siteclass', sitemodel.reference_siteclass)
else:
for name in sitemodel.dtype.names:
if name not in ('lon', 'lat'):
self._set(name, sitemodel[name])
return self |
def next_frame_savp_gan():
"""SAVP - GAN only model."""
hparams = next_frame_savp()
hparams.use_gan = True
hparams.use_vae = False
hparams.gan_loss_multiplier = 0.001
hparams.optimizer_adam_beta1 = 0.5
hparams.learning_rate_constant = 2e-4
hparams.gan_loss = "cross_entropy"
hparams.learning_rate_decay_steps = 100000
hparams.learning_rate_schedule = "constant*linear_decay"
return hparams | SAVP - GAN only model. | Below is the the instruction that describes the task:
### Input:
SAVP - GAN only model.
### Response:
def next_frame_savp_gan():
"""SAVP - GAN only model."""
hparams = next_frame_savp()
hparams.use_gan = True
hparams.use_vae = False
hparams.gan_loss_multiplier = 0.001
hparams.optimizer_adam_beta1 = 0.5
hparams.learning_rate_constant = 2e-4
hparams.gan_loss = "cross_entropy"
hparams.learning_rate_decay_steps = 100000
hparams.learning_rate_schedule = "constant*linear_decay"
return hparams |
def pypirc_temp(index_url):
""" Create a temporary pypirc file for interaction with twine """
pypirc_file = tempfile.NamedTemporaryFile(suffix='.pypirc', delete=False)
print(pypirc_file.name)
with open(pypirc_file.name, 'w') as fh:
fh.write(PYPIRC_TEMPLATE.format(index_name=PYPIRC_TEMP_INDEX_NAME, index_url=index_url))
return pypirc_file.name | Create a temporary pypirc file for interaction with twine | Below is the the instruction that describes the task:
### Input:
Create a temporary pypirc file for interaction with twine
### Response:
def pypirc_temp(index_url):
""" Create a temporary pypirc file for interaction with twine """
pypirc_file = tempfile.NamedTemporaryFile(suffix='.pypirc', delete=False)
print(pypirc_file.name)
with open(pypirc_file.name, 'w') as fh:
fh.write(PYPIRC_TEMPLATE.format(index_name=PYPIRC_TEMP_INDEX_NAME, index_url=index_url))
return pypirc_file.name |
def _chart(self, x, y, chart_type, opts, style, label, options, **kwargs):
"""
Initialize chart options
"""
if opts is not None:
self.chart_opts = opts
if style is not None:
self.chart_style = style
if label is not None:
self.label = label
self.x = x
self.y = y
if chart_type is None:
return
try:
chart_obj = self._get_chart(chart_type, x, y, style=style,
opts=opts, label=label,
options=options, **kwargs)
return chart_obj
except Exception as e:
self.err(e) | Initialize chart options | Below is the the instruction that describes the task:
### Input:
Initialize chart options
### Response:
def _chart(self, x, y, chart_type, opts, style, label, options, **kwargs):
"""
Initialize chart options
"""
if opts is not None:
self.chart_opts = opts
if style is not None:
self.chart_style = style
if label is not None:
self.label = label
self.x = x
self.y = y
if chart_type is None:
return
try:
chart_obj = self._get_chart(chart_type, x, y, style=style,
opts=opts, label=label,
options=options, **kwargs)
return chart_obj
except Exception as e:
self.err(e) |
def u2opener(self):
"""
Create a urllib opener.
@return: An opener.
@rtype: I{OpenerDirector}
"""
if self.urlopener is None:
return urllib2.build_opener(*self.u2handlers())
return self.urlopener | Create a urllib opener.
@return: An opener.
@rtype: I{OpenerDirector} | Below is the the instruction that describes the task:
### Input:
Create a urllib opener.
@return: An opener.
@rtype: I{OpenerDirector}
### Response:
def u2opener(self):
"""
Create a urllib opener.
@return: An opener.
@rtype: I{OpenerDirector}
"""
if self.urlopener is None:
return urllib2.build_opener(*self.u2handlers())
return self.urlopener |
def _expand_formatting_rule(rule, national_prefix):
"""Formatting rules can include terms "$NP" and "$FG",
These get replaced with:
"$NP" => the national prefix
"$FG" => the first group, i.e. "$1"
"""
if rule is None:
return None
if national_prefix is None:
national_prefix = u("")
rule = re.sub(u(r"\$NP"), national_prefix, rule)
rule = re.sub(u(r"\$FG"), u("$1"), rule)
return rule | Formatting rules can include terms "$NP" and "$FG",
These get replaced with:
"$NP" => the national prefix
"$FG" => the first group, i.e. "$1" | Below is the the instruction that describes the task:
### Input:
Formatting rules can include terms "$NP" and "$FG",
These get replaced with:
"$NP" => the national prefix
"$FG" => the first group, i.e. "$1"
### Response:
def _expand_formatting_rule(rule, national_prefix):
"""Formatting rules can include terms "$NP" and "$FG",
These get replaced with:
"$NP" => the national prefix
"$FG" => the first group, i.e. "$1"
"""
if rule is None:
return None
if national_prefix is None:
national_prefix = u("")
rule = re.sub(u(r"\$NP"), national_prefix, rule)
rule = re.sub(u(r"\$FG"), u("$1"), rule)
return rule |
def global_fixes():
"""Yield multiple (code, function) tuples."""
for function in list(globals().values()):
if inspect.isfunction(function):
arguments = _get_parameters(function)
if arguments[:1] != ['source']:
continue
code = extract_code_from_function(function)
if code:
yield (code, function) | Yield multiple (code, function) tuples. | Below is the the instruction that describes the task:
### Input:
Yield multiple (code, function) tuples.
### Response:
def global_fixes():
"""Yield multiple (code, function) tuples."""
for function in list(globals().values()):
if inspect.isfunction(function):
arguments = _get_parameters(function)
if arguments[:1] != ['source']:
continue
code = extract_code_from_function(function)
if code:
yield (code, function) |
def _remove_vars(self):
"""
Remove hosts/host_vars/group_vars and returns None.
:returns: None
"""
for name in ("hosts", "group_vars", "host_vars"):
d = os.path.join(self.inventory_directory, name)
if os.path.islink(d) or os.path.isfile(d):
os.unlink(d)
elif os.path.isdir(d):
shutil.rmtree(d) | Remove hosts/host_vars/group_vars and returns None.
:returns: None | Below is the the instruction that describes the task:
### Input:
Remove hosts/host_vars/group_vars and returns None.
:returns: None
### Response:
def _remove_vars(self):
"""
Remove hosts/host_vars/group_vars and returns None.
:returns: None
"""
for name in ("hosts", "group_vars", "host_vars"):
d = os.path.join(self.inventory_directory, name)
if os.path.islink(d) or os.path.isfile(d):
os.unlink(d)
elif os.path.isdir(d):
shutil.rmtree(d) |
def _on_del_route(self, msg):
"""
Respond to :data:`mitogen.core.DEL_ROUTE` by validating the source of
the message, updating the local table, propagating the message
upwards, and downwards towards any stream that every had a message
forwarded from it towards the disconnecting context.
"""
if msg.is_dead:
return
target_id = int(msg.data)
registered_stream = self.router.stream_by_id(target_id)
if registered_stream is None:
return
stream = self.router.stream_by_id(msg.auth_id)
if registered_stream != stream:
LOG.error('%r: received DEL_ROUTE for %d from %r, expected %r',
self, target_id, stream, registered_stream)
return
context = self.router.context_by_id(target_id, create=False)
if context:
LOG.debug('%r: firing local disconnect for %r', self, context)
mitogen.core.fire(context, 'disconnect')
LOG.debug('%r: deleting route to %d via %r', self, target_id, stream)
routes = self._routes_by_stream.get(stream)
if routes:
routes.discard(target_id)
self.router.del_route(target_id)
if stream.remote_id != mitogen.parent_id:
self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
self._propagate_down(mitogen.core.DEL_ROUTE, target_id) | Respond to :data:`mitogen.core.DEL_ROUTE` by validating the source of
the message, updating the local table, propagating the message
upwards, and downwards towards any stream that every had a message
forwarded from it towards the disconnecting context. | Below is the the instruction that describes the task:
### Input:
Respond to :data:`mitogen.core.DEL_ROUTE` by validating the source of
the message, updating the local table, propagating the message
upwards, and downwards towards any stream that every had a message
forwarded from it towards the disconnecting context.
### Response:
def _on_del_route(self, msg):
"""
Respond to :data:`mitogen.core.DEL_ROUTE` by validating the source of
the message, updating the local table, propagating the message
upwards, and downwards towards any stream that every had a message
forwarded from it towards the disconnecting context.
"""
if msg.is_dead:
return
target_id = int(msg.data)
registered_stream = self.router.stream_by_id(target_id)
if registered_stream is None:
return
stream = self.router.stream_by_id(msg.auth_id)
if registered_stream != stream:
LOG.error('%r: received DEL_ROUTE for %d from %r, expected %r',
self, target_id, stream, registered_stream)
return
context = self.router.context_by_id(target_id, create=False)
if context:
LOG.debug('%r: firing local disconnect for %r', self, context)
mitogen.core.fire(context, 'disconnect')
LOG.debug('%r: deleting route to %d via %r', self, target_id, stream)
routes = self._routes_by_stream.get(stream)
if routes:
routes.discard(target_id)
self.router.del_route(target_id)
if stream.remote_id != mitogen.parent_id:
self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
self._propagate_down(mitogen.core.DEL_ROUTE, target_id) |
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._range_offset < 0 or self._range_size < 0:
raise IOError('Invalid data range.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if self._current_offset >= self._range_size:
return b''
if size is None:
size = self._range_size
if self._current_offset + size > self._range_size:
size = self._range_size - self._current_offset
self._file_object.seek(
self._range_offset + self._current_offset, os.SEEK_SET)
data = self._file_object.read(size)
self._current_offset += len(data)
return data | Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed. | Below is the the instruction that describes the task:
### Input:
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
### Response:
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
"""
if not self._is_open:
raise IOError('Not opened.')
if self._range_offset < 0 or self._range_size < 0:
raise IOError('Invalid data range.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if self._current_offset >= self._range_size:
return b''
if size is None:
size = self._range_size
if self._current_offset + size > self._range_size:
size = self._range_size - self._current_offset
self._file_object.seek(
self._range_offset + self._current_offset, os.SEEK_SET)
data = self._file_object.read(size)
self._current_offset += len(data)
return data |
def __merge_json_values(current, previous):
"""Merges the values between the current and previous run of the script."""
for value in current:
name = value['name']
# Find the previous value
previous_value = __find_and_remove_value(previous, value)
if previous_value is not None:
flags = value['flags']
previous_flags = previous_value['flags']
if flags != previous_flags:
logging.warning(
'Flags for %s are different. Using previous value.', name)
value['flags'] = previous_flags
else:
logging.warning('Value %s is a new value', name)
for value in previous:
name = value['name']
logging.warning(
'Value %s not present in current run. Appending value.', name)
current.append(value) | Merges the values between the current and previous run of the script. | Below is the the instruction that describes the task:
### Input:
Merges the values between the current and previous run of the script.
### Response:
def __merge_json_values(current, previous):
"""Merges the values between the current and previous run of the script."""
for value in current:
name = value['name']
# Find the previous value
previous_value = __find_and_remove_value(previous, value)
if previous_value is not None:
flags = value['flags']
previous_flags = previous_value['flags']
if flags != previous_flags:
logging.warning(
'Flags for %s are different. Using previous value.', name)
value['flags'] = previous_flags
else:
logging.warning('Value %s is a new value', name)
for value in previous:
name = value['name']
logging.warning(
'Value %s not present in current run. Appending value.', name)
current.append(value) |
def reproduce(self, config, species, pop_size, generation):
"""
Handles creation of genomes, either from scratch or by sexual or
asexual reproduction from parents.
"""
# TODO: I don't like this modification of the species and stagnation objects,
# because it requires internal knowledge of the objects.
# Filter out stagnated species, collect the set of non-stagnated
# species members, and compute their average adjusted fitness.
# The average adjusted fitness scheme (normalized to the interval
# [0, 1]) allows the use of negative fitness values without
# interfering with the shared fitness scheme.
all_fitnesses = []
remaining_species = []
for stag_sid, stag_s, stagnant in self.stagnation.update(species, generation):
if stagnant:
self.reporters.species_stagnant(stag_sid, stag_s)
else:
all_fitnesses.extend(m.fitness for m in itervalues(stag_s.members))
remaining_species.append(stag_s)
# The above comment was not quite what was happening - now getting fitnesses
# only from members of non-stagnated species.
# No species left.
if not remaining_species:
species.species = {}
return {} # was []
# Find minimum/maximum fitness across the entire population, for use in
# species adjusted fitness computation.
min_fitness = min(all_fitnesses)
max_fitness = max(all_fitnesses)
# Do not allow the fitness range to be zero, as we divide by it below.
# TODO: The ``1.0`` below is rather arbitrary, and should be configurable.
fitness_range = max(1.0, max_fitness - min_fitness)
for afs in remaining_species:
# Compute adjusted fitness.
msf = mean([m.fitness for m in itervalues(afs.members)])
af = (msf - min_fitness) / fitness_range
afs.adjusted_fitness = af
adjusted_fitnesses = [s.adjusted_fitness for s in remaining_species]
avg_adjusted_fitness = mean(adjusted_fitnesses) # type: float
self.reporters.info("Average adjusted fitness: {:.3f}".format(avg_adjusted_fitness))
# Compute the number of new members for each species in the new generation.
previous_sizes = [len(s.members) for s in remaining_species]
min_species_size = self.reproduction_config.min_species_size
# Isn't the effective min_species_size going to be max(min_species_size,
# self.reproduction_config.elitism)? That would probably produce more accurate tracking
# of population sizes and relative fitnesses... doing. TODO: document.
min_species_size = max(min_species_size,self.reproduction_config.elitism)
spawn_amounts = self.compute_spawn(adjusted_fitnesses, previous_sizes,
pop_size, min_species_size)
new_population = {}
species.species = {}
for spawn, s in zip(spawn_amounts, remaining_species):
# If elitism is enabled, each species always at least gets to retain its elites.
spawn = max(spawn, self.reproduction_config.elitism)
assert spawn > 0
# The species has at least one member for the next generation, so retain it.
old_members = list(iteritems(s.members))
s.members = {}
species.species[s.key] = s
# Sort members in order of descending fitness.
old_members.sort(reverse=True, key=lambda x: x[1].fitness)
# Transfer elites to new generation.
if self.reproduction_config.elitism > 0:
for i, m in old_members[:self.reproduction_config.elitism]:
new_population[i] = m
spawn -= 1
if spawn <= 0:
continue
# Only use the survival threshold fraction to use as parents for the next generation.
repro_cutoff = int(math.ceil(self.reproduction_config.survival_threshold *
len(old_members)))
# Use at least two parents no matter what the threshold fraction result is.
repro_cutoff = max(repro_cutoff, 2)
old_members = old_members[:repro_cutoff]
# Randomly choose parents and produce the number of offspring allotted to the species.
while spawn > 0:
spawn -= 1
parent1_id, parent1 = random.choice(old_members)
parent2_id, parent2 = random.choice(old_members)
# Note that if the parents are not distinct, crossover will produce a
# genetically identical clone of the parent (but with a different ID).
gid = next(self.genome_indexer)
child = config.genome_type(gid)
child.configure_crossover(parent1, parent2, config.genome_config)
child.mutate(config.genome_config)
new_population[gid] = child
self.ancestors[gid] = (parent1_id, parent2_id)
return new_population | Handles creation of genomes, either from scratch or by sexual or
asexual reproduction from parents. | Below is the the instruction that describes the task:
### Input:
Handles creation of genomes, either from scratch or by sexual or
asexual reproduction from parents.
### Response:
def reproduce(self, config, species, pop_size, generation):
"""
Handles creation of genomes, either from scratch or by sexual or
asexual reproduction from parents.
"""
# TODO: I don't like this modification of the species and stagnation objects,
# because it requires internal knowledge of the objects.
# Filter out stagnated species, collect the set of non-stagnated
# species members, and compute their average adjusted fitness.
# The average adjusted fitness scheme (normalized to the interval
# [0, 1]) allows the use of negative fitness values without
# interfering with the shared fitness scheme.
all_fitnesses = []
remaining_species = []
for stag_sid, stag_s, stagnant in self.stagnation.update(species, generation):
if stagnant:
self.reporters.species_stagnant(stag_sid, stag_s)
else:
all_fitnesses.extend(m.fitness for m in itervalues(stag_s.members))
remaining_species.append(stag_s)
# The above comment was not quite what was happening - now getting fitnesses
# only from members of non-stagnated species.
# No species left.
if not remaining_species:
species.species = {}
return {} # was []
# Find minimum/maximum fitness across the entire population, for use in
# species adjusted fitness computation.
min_fitness = min(all_fitnesses)
max_fitness = max(all_fitnesses)
# Do not allow the fitness range to be zero, as we divide by it below.
# TODO: The ``1.0`` below is rather arbitrary, and should be configurable.
fitness_range = max(1.0, max_fitness - min_fitness)
for afs in remaining_species:
# Compute adjusted fitness.
msf = mean([m.fitness for m in itervalues(afs.members)])
af = (msf - min_fitness) / fitness_range
afs.adjusted_fitness = af
adjusted_fitnesses = [s.adjusted_fitness for s in remaining_species]
avg_adjusted_fitness = mean(adjusted_fitnesses) # type: float
self.reporters.info("Average adjusted fitness: {:.3f}".format(avg_adjusted_fitness))
# Compute the number of new members for each species in the new generation.
previous_sizes = [len(s.members) for s in remaining_species]
min_species_size = self.reproduction_config.min_species_size
# Isn't the effective min_species_size going to be max(min_species_size,
# self.reproduction_config.elitism)? That would probably produce more accurate tracking
# of population sizes and relative fitnesses... doing. TODO: document.
min_species_size = max(min_species_size,self.reproduction_config.elitism)
spawn_amounts = self.compute_spawn(adjusted_fitnesses, previous_sizes,
pop_size, min_species_size)
new_population = {}
species.species = {}
for spawn, s in zip(spawn_amounts, remaining_species):
# If elitism is enabled, each species always at least gets to retain its elites.
spawn = max(spawn, self.reproduction_config.elitism)
assert spawn > 0
# The species has at least one member for the next generation, so retain it.
old_members = list(iteritems(s.members))
s.members = {}
species.species[s.key] = s
# Sort members in order of descending fitness.
old_members.sort(reverse=True, key=lambda x: x[1].fitness)
# Transfer elites to new generation.
if self.reproduction_config.elitism > 0:
for i, m in old_members[:self.reproduction_config.elitism]:
new_population[i] = m
spawn -= 1
if spawn <= 0:
continue
# Only use the survival threshold fraction to use as parents for the next generation.
repro_cutoff = int(math.ceil(self.reproduction_config.survival_threshold *
len(old_members)))
# Use at least two parents no matter what the threshold fraction result is.
repro_cutoff = max(repro_cutoff, 2)
old_members = old_members[:repro_cutoff]
# Randomly choose parents and produce the number of offspring allotted to the species.
while spawn > 0:
spawn -= 1
parent1_id, parent1 = random.choice(old_members)
parent2_id, parent2 = random.choice(old_members)
# Note that if the parents are not distinct, crossover will produce a
# genetically identical clone of the parent (but with a different ID).
gid = next(self.genome_indexer)
child = config.genome_type(gid)
child.configure_crossover(parent1, parent2, config.genome_config)
child.mutate(config.genome_config)
new_population[gid] = child
self.ancestors[gid] = (parent1_id, parent2_id)
return new_population |
def parse(contents, tokens=None):
"""Parse a string called contents for an AST and return it."""
# Shortcut for users who are interested in tokens
if tokens is None:
tokens = [t for t in tokenize(contents)]
token_index, body = _ast_worker(tokens, len(tokens), 0, None)
assert token_index == len(tokens)
assert body.arguments == []
return ToplevelBody(statements=body.statements) | Parse a string called contents for an AST and return it. | Below is the the instruction that describes the task:
### Input:
Parse a string called contents for an AST and return it.
### Response:
def parse(contents, tokens=None):
"""Parse a string called contents for an AST and return it."""
# Shortcut for users who are interested in tokens
if tokens is None:
tokens = [t for t in tokenize(contents)]
token_index, body = _ast_worker(tokens, len(tokens), 0, None)
assert token_index == len(tokens)
assert body.arguments == []
return ToplevelBody(statements=body.statements) |
def build(package, path=None, dry_run=False, env='default', force=False, build_file=False):
"""
Compile a Quilt data package, either from a build file or an existing package node.
:param package: short package specifier, i.e. 'team:user/pkg'
:param path: file path, git url, or existing package node
"""
# TODO: rename 'path' param to 'target'?
team, _, _, subpath = parse_package(package, allow_subpath=True)
_check_team_id(team)
logged_in_team = _find_logged_in_team()
if logged_in_team is not None and team is None and force is False:
answer = input("You're logged in as a team member, but you aren't specifying "
"a team for the package you're currently building. Maybe you meant:\n"
"quilt build {team}:{package}\n"
"Are you sure you want to continue? (y/N) ".format(
team=logged_in_team, package=package))
if answer.lower() != 'y':
return
# Backward compatibility: if there's no subpath, we're building a top-level package,
# so treat `path` as a build file, not as a data node.
if not subpath:
build_file = True
package_hash = hashlib.md5(package.encode('utf-8')).hexdigest()
try:
_build_internal(package, path, dry_run, env, build_file)
except Exception as ex:
_log(team, type='build', package=package_hash, dry_run=dry_run, env=env, error=str(ex))
raise
_log(team, type='build', package=package_hash, dry_run=dry_run, env=env) | Compile a Quilt data package, either from a build file or an existing package node.
:param package: short package specifier, i.e. 'team:user/pkg'
:param path: file path, git url, or existing package node | Below is the the instruction that describes the task:
### Input:
Compile a Quilt data package, either from a build file or an existing package node.
:param package: short package specifier, i.e. 'team:user/pkg'
:param path: file path, git url, or existing package node
### Response:
def build(package, path=None, dry_run=False, env='default', force=False, build_file=False):
"""
Compile a Quilt data package, either from a build file or an existing package node.
:param package: short package specifier, i.e. 'team:user/pkg'
:param path: file path, git url, or existing package node
"""
# TODO: rename 'path' param to 'target'?
team, _, _, subpath = parse_package(package, allow_subpath=True)
_check_team_id(team)
logged_in_team = _find_logged_in_team()
if logged_in_team is not None and team is None and force is False:
answer = input("You're logged in as a team member, but you aren't specifying "
"a team for the package you're currently building. Maybe you meant:\n"
"quilt build {team}:{package}\n"
"Are you sure you want to continue? (y/N) ".format(
team=logged_in_team, package=package))
if answer.lower() != 'y':
return
# Backward compatibility: if there's no subpath, we're building a top-level package,
# so treat `path` as a build file, not as a data node.
if not subpath:
build_file = True
package_hash = hashlib.md5(package.encode('utf-8')).hexdigest()
try:
_build_internal(package, path, dry_run, env, build_file)
except Exception as ex:
_log(team, type='build', package=package_hash, dry_run=dry_run, env=env, error=str(ex))
raise
_log(team, type='build', package=package_hash, dry_run=dry_run, env=env) |
def json(self, var, default=NOTSET):
"""
:returns: Json parsed
"""
return self.get_value(var, cast=json.loads, default=default) | :returns: Json parsed | Below is the the instruction that describes the task:
### Input:
:returns: Json parsed
### Response:
def json(self, var, default=NOTSET):
"""
:returns: Json parsed
"""
return self.get_value(var, cast=json.loads, default=default) |
def ionic_strength(mis, zis):
r'''Calculate the ionic strength of a solution in one of two ways,
depending on the inputs only. For Pitzer and Bromley models,
`mis` should be molalities of each component. For eNRTL models,
`mis` should be mole fractions of each electrolyte in the solution.
This will sum to be much less than 1.
.. math::
I = \frac{1}{2} \sum M_i z_i^2
I = \frac{1}{2} \sum x_i z_i^2
Parameters
----------
mis : list
Molalities of each ion, or mole fractions of each ion [mol/kg or -]
zis : list
Charges of each ion [-]
Returns
-------
I : float
ionic strength, [?]
Examples
--------
>>> ionic_strength([0.1393, 0.1393], [1, -1])
0.1393
References
----------
.. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. "Local
Composition Model for Excess Gibbs Energy of Electrolyte Systems.
Part I: Single Solvent, Single Completely Dissociated Electrolyte
Systems." AIChE Journal 28, no. 4 (July 1, 1982): 588-96.
doi:10.1002/aic.690280410
.. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
'''
return 0.5*sum([mi*zi*zi for mi, zi in zip(mis, zis)]) | r'''Calculate the ionic strength of a solution in one of two ways,
depending on the inputs only. For Pitzer and Bromley models,
`mis` should be molalities of each component. For eNRTL models,
`mis` should be mole fractions of each electrolyte in the solution.
This will sum to be much less than 1.
.. math::
I = \frac{1}{2} \sum M_i z_i^2
I = \frac{1}{2} \sum x_i z_i^2
Parameters
----------
mis : list
Molalities of each ion, or mole fractions of each ion [mol/kg or -]
zis : list
Charges of each ion [-]
Returns
-------
I : float
ionic strength, [?]
Examples
--------
>>> ionic_strength([0.1393, 0.1393], [1, -1])
0.1393
References
----------
.. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. "Local
Composition Model for Excess Gibbs Energy of Electrolyte Systems.
Part I: Single Solvent, Single Completely Dissociated Electrolyte
Systems." AIChE Journal 28, no. 4 (July 1, 1982): 588-96.
doi:10.1002/aic.690280410
.. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012. | Below is the the instruction that describes the task:
### Input:
r'''Calculate the ionic strength of a solution in one of two ways,
depending on the inputs only. For Pitzer and Bromley models,
`mis` should be molalities of each component. For eNRTL models,
`mis` should be mole fractions of each electrolyte in the solution.
This will sum to be much less than 1.
.. math::
I = \frac{1}{2} \sum M_i z_i^2
I = \frac{1}{2} \sum x_i z_i^2
Parameters
----------
mis : list
Molalities of each ion, or mole fractions of each ion [mol/kg or -]
zis : list
Charges of each ion [-]
Returns
-------
I : float
ionic strength, [?]
Examples
--------
>>> ionic_strength([0.1393, 0.1393], [1, -1])
0.1393
References
----------
.. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. "Local
Composition Model for Excess Gibbs Energy of Electrolyte Systems.
Part I: Single Solvent, Single Completely Dissociated Electrolyte
Systems." AIChE Journal 28, no. 4 (July 1, 1982): 588-96.
doi:10.1002/aic.690280410
.. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
### Response:
def ionic_strength(mis, zis):
r'''Calculate the ionic strength of a solution in one of two ways,
depending on the inputs only. For Pitzer and Bromley models,
`mis` should be molalities of each component. For eNRTL models,
`mis` should be mole fractions of each electrolyte in the solution.
This will sum to be much less than 1.
.. math::
I = \frac{1}{2} \sum M_i z_i^2
I = \frac{1}{2} \sum x_i z_i^2
Parameters
----------
mis : list
Molalities of each ion, or mole fractions of each ion [mol/kg or -]
zis : list
Charges of each ion [-]
Returns
-------
I : float
ionic strength, [?]
Examples
--------
>>> ionic_strength([0.1393, 0.1393], [1, -1])
0.1393
References
----------
.. [1] Chen, Chau-Chyun, H. I. Britt, J. F. Boston, and L. B. Evans. "Local
Composition Model for Excess Gibbs Energy of Electrolyte Systems.
Part I: Single Solvent, Single Completely Dissociated Electrolyte
Systems." AIChE Journal 28, no. 4 (July 1, 1982): 588-96.
doi:10.1002/aic.690280410
.. [2] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
Weinheim, Germany: Wiley-VCH, 2012.
'''
return 0.5*sum([mi*zi*zi for mi, zi in zip(mis, zis)]) |
def unpack_4to8(data):
""" Promote 2-bit unisgned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
# The process is this:
# ABCDEFGH [Bits of one 4+4-bit value]
# 00000000ABCDEFGH [astype(uint16)]
# 0000ABCDEFGH0000 [<< 4]
# 0000ABCDXXXXEFGH [bitwise 'or' of previous two lines]
# 0000111100001111 [0x0F0F]
# 0000ABCD0000EFGH [bitwise 'and' of previous two lines]
# ABCD0000EFGH0000 [<< 4]
# which effectively pads the two 4-bit values with zeros on the right
# Note: This technique assumes LSB-first ordering
"""
tmpdata = data.astype(np.int16) # np.empty(upshape, dtype=np.int16)
tmpdata = (tmpdata | (tmpdata << 4)) & 0x0F0F
# tmpdata = tmpdata << 4 # Shift into high bits to avoid needing to sign extend
updata = tmpdata.byteswap()
return updata.view(data.dtype) | Promote 2-bit unisgned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
# The process is this:
# ABCDEFGH [Bits of one 4+4-bit value]
# 00000000ABCDEFGH [astype(uint16)]
# 0000ABCDEFGH0000 [<< 4]
# 0000ABCDXXXXEFGH [bitwise 'or' of previous two lines]
# 0000111100001111 [0x0F0F]
# 0000ABCD0000EFGH [bitwise 'and' of previous two lines]
# ABCD0000EFGH0000 [<< 4]
# which effectively pads the two 4-bit values with zeros on the right
# Note: This technique assumes LSB-first ordering | Below is the the instruction that describes the task:
### Input:
Promote 2-bit unisgned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
# The process is this:
# ABCDEFGH [Bits of one 4+4-bit value]
# 00000000ABCDEFGH [astype(uint16)]
# 0000ABCDEFGH0000 [<< 4]
# 0000ABCDXXXXEFGH [bitwise 'or' of previous two lines]
# 0000111100001111 [0x0F0F]
# 0000ABCD0000EFGH [bitwise 'and' of previous two lines]
# ABCD0000EFGH0000 [<< 4]
# which effectively pads the two 4-bit values with zeros on the right
# Note: This technique assumes LSB-first ordering
### Response:
def unpack_4to8(data):
""" Promote 2-bit unisgned data into 8-bit unsigned data.
Args:
data: Numpy array with dtype == uint8
Notes:
# The process is this:
# ABCDEFGH [Bits of one 4+4-bit value]
# 00000000ABCDEFGH [astype(uint16)]
# 0000ABCDEFGH0000 [<< 4]
# 0000ABCDXXXXEFGH [bitwise 'or' of previous two lines]
# 0000111100001111 [0x0F0F]
# 0000ABCD0000EFGH [bitwise 'and' of previous two lines]
# ABCD0000EFGH0000 [<< 4]
# which effectively pads the two 4-bit values with zeros on the right
# Note: This technique assumes LSB-first ordering
"""
tmpdata = data.astype(np.int16) # np.empty(upshape, dtype=np.int16)
tmpdata = (tmpdata | (tmpdata << 4)) & 0x0F0F
# tmpdata = tmpdata << 4 # Shift into high bits to avoid needing to sign extend
updata = tmpdata.byteswap()
return updata.view(data.dtype) |
def update_k8s_model(target, changes, logger=None, target_name=None, changes_name=None):
"""
Takes a model instance such as V1PodSpec() and updates it with another
model, which is allowed to be a dict or another model instance of the same
type. The logger is used to warn if any truthy value in the target is is
overridden. The target_name parameter can for example be "pod.spec", and
changes_name parameter could be "extra_pod_config". These parameters allows
the logger to write out something more meaningful to the user whenever
something is about to become overridden.
"""
model_type = type(target)
# Validate both operands up front so a bad call fails fast with a clear message.
if not hasattr(target, 'attribute_map'):
raise AttributeError("Attribute 'target' ({}) must be an object (such as 'V1PodSpec') with an attribute 'attribute_map'.".format(model_type.__name__))
if not isinstance(changes, model_type) and not isinstance(changes, dict):
raise AttributeError("Attribute 'changes' ({}) must be an object of the same type as 'target' ({}) or a 'dict'.".format(type(changes).__name__, model_type.__name__))
# Normalize the changes into a plain dict keyed by the model's attribute names.
changes_dict = _get_k8s_model_dict(model_type, changes)
for key, value in changes_dict.items():
if key not in target.attribute_map:
raise ValueError("The attribute 'changes' ({}) contained '{}' not modeled by '{}'.".format(type(changes).__name__, key, model_type.__name__))
# If changes are passed as a dict, they will only have a few keys/value
# pairs representing the specific changes. If the changes parameter is a
# model instance on the other hand, the changes parameter will have a
# lot of default values as well. These default values, which are also
# falsy, should not use to override the target's values.
if isinstance(changes, dict) or value:
if getattr(target, key):
# Only emit the override warning when both sides can be named meaningfully.
if logger and changes_name:
warning = "'{}.{}' current value: '{}' is overridden with '{}', which is the value of '{}.{}'.".format(
target_name,
key,
getattr(target, key),
value,
changes_name,
key
)
logger.warning(warning)
setattr(target, key, value)
return target | Takes a model instance such as V1PodSpec() and updates it with another
model, which is allowed to be a dict or another model instance of the same
type. The logger is used to warn if any truthy value in the target is is
overridden. The target_name parameter can for example be "pod.spec", and
changes_name parameter could be "extra_pod_config". These parameters allows
the logger to write out something more meaningful to the user whenever
something is about to become overridden. | Below is the the instruction that describes the task:
### Input:
Takes a model instance such as V1PodSpec() and updates it with another
model, which is allowed to be a dict or another model instance of the same
type. The logger is used to warn if any truthy value in the target is is
overridden. The target_name parameter can for example be "pod.spec", and
changes_name parameter could be "extra_pod_config". These parameters allows
the logger to write out something more meaningful to the user whenever
something is about to become overridden.
### Response:
def update_k8s_model(target, changes, logger=None, target_name=None, changes_name=None):
"""
Takes a model instance such as V1PodSpec() and updates it with another
model, which is allowed to be a dict or another model instance of the same
type. The logger is used to warn if any truthy value in the target is is
overridden. The target_name parameter can for example be "pod.spec", and
changes_name parameter could be "extra_pod_config". These parameters allows
the logger to write out something more meaningful to the user whenever
something is about to become overridden.
"""
model_type = type(target)
if not hasattr(target, 'attribute_map'):
raise AttributeError("Attribute 'target' ({}) must be an object (such as 'V1PodSpec') with an attribute 'attribute_map'.".format(model_type.__name__))
if not isinstance(changes, model_type) and not isinstance(changes, dict):
raise AttributeError("Attribute 'changes' ({}) must be an object of the same type as 'target' ({}) or a 'dict'.".format(type(changes).__name__, model_type.__name__))
changes_dict = _get_k8s_model_dict(model_type, changes)
for key, value in changes_dict.items():
if key not in target.attribute_map:
raise ValueError("The attribute 'changes' ({}) contained '{}' not modeled by '{}'.".format(type(changes).__name__, key, model_type.__name__))
# If changes are passed as a dict, they will only have a few keys/value
# pairs representing the specific changes. If the changes parameter is a
# model instance on the other hand, the changes parameter will have a
# lot of default values as well. These default values, which are also
# falsy, should not use to override the target's values.
if isinstance(changes, dict) or value:
if getattr(target, key):
if logger and changes_name:
warning = "'{}.{}' current value: '{}' is overridden with '{}', which is the value of '{}.{}'.".format(
target_name,
key,
getattr(target, key),
value,
changes_name,
key
)
logger.warning(warning)
setattr(target, key, value)
return target |
def post(self, request):
"""Respond to POSTed username/password with token."""
serializer = AuthTokenSerializer(data=request.data)
if serializer.is_valid():
# Reuse an existing token when present; get_or_create avoids duplicates.
token, _ = ExpiringToken.objects.get_or_create(
user=serializer.validated_data['user']
)
if token.expired():
# If the token is expired, generate a new one.
token.delete()
token = ExpiringToken.objects.create(
user=serializer.validated_data['user']
)
data = {'token': token.key}
return Response(data)
# Invalid credentials: surface the serializer's errors with HTTP 400.
return Response(serializer.errors, status=HTTP_400_BAD_REQUEST) | Respond to POSTed username/password with token.
### Input:
Respond to POSTed username/password with token.
### Response:
def post(self, request):
"""Respond to POSTed username/password with token."""
serializer = AuthTokenSerializer(data=request.data)
if serializer.is_valid():
token, _ = ExpiringToken.objects.get_or_create(
user=serializer.validated_data['user']
)
if token.expired():
# If the token is expired, generate a new one.
token.delete()
token = ExpiringToken.objects.create(
user=serializer.validated_data['user']
)
data = {'token': token.key}
return Response(data)
return Response(serializer.errors, status=HTTP_400_BAD_REQUEST) |
def sendDocument(self, chat_id, document,
thumb=None,
caption=None,
parse_mode=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
"""
# Collect the call's keyword parameters, excluding 'document' which is passed
# separately so the file payload can be handled as an upload.
p = _strip(locals(), more=['document'])
return self._api_request_with_file('sendDocument', _rectify(p), 'document', document) | See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto` | Below is the the instruction that describes the task:
### Input:
See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
### Response:
def sendDocument(self, chat_id, document,
thumb=None,
caption=None,
parse_mode=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['document'])
return self._api_request_with_file('sendDocument', _rectify(p), 'document', document) |
def delete_global_cache(appname='default'):
""" Reads cache files to a safe place in each operating system """
# NOTE(review): docstring looks copy-pasted from a sibling function -- this
# function deletes the global shelf cache file for `appname`, it reads nothing.
#close_global_shelf(appname)
shelf_fpath = get_global_shelf_fpath(appname)
util_path.remove_file(shelf_fpath, verbose=True, dryrun=False) | Reads cache files to a safe place in each operating system
### Input:
Reads cache files to a safe place in each operating system
### Response:
def delete_global_cache(appname='default'):
""" Reads cache files to a safe place in each operating system """
#close_global_shelf(appname)
shelf_fpath = get_global_shelf_fpath(appname)
util_path.remove_file(shelf_fpath, verbose=True, dryrun=False) |
def delete_all_but_self(self):
"""
DELETE ALL INDEXES WITH GIVEN PREFIX, EXCEPT name
"""
prefix = self.settings.alias
name = self.settings.index
if prefix == name:
# The loop below also guards with `a.index != name`, so `name` is never deleted.
Log.note("{{index_name}} will not be deleted", index_name= prefix)
for a in self.cluster.get_aliases():
# MATCH <prefix>YYMMDD_HHMMSS FORMAT
if re.match(re.escape(prefix) + "\\d{8}_\\d{6}", a.index) and a.index != name:
self.cluster.delete_index(a.index) | DELETE ALL INDEXES WITH GIVEN PREFIX, EXCEPT name
### Input:
DELETE ALL INDEXES WITH GIVEN PREFIX, EXCEPT name
### Response:
def delete_all_but_self(self):
"""
DELETE ALL INDEXES WITH GIVEN PREFIX, EXCEPT name
"""
prefix = self.settings.alias
name = self.settings.index
if prefix == name:
Log.note("{{index_name}} will not be deleted", index_name= prefix)
for a in self.cluster.get_aliases():
# MATCH <prefix>YYMMDD_HHMMSS FORMAT
if re.match(re.escape(prefix) + "\\d{8}_\\d{6}", a.index) and a.index != name:
self.cluster.delete_index(a.index) |
def get_datastreams(self):
"""
To get list of Datastream
"""
datastreams = []
response = self.http.get('/Datastream')
# Wrap each raw record returned by the API in a Schemas.Datastream model.
for datastream in response:
datastreams.append(Schemas.Datastream(datastream=datastream))
return datastreams | To get list of Datastream
### Input:
To get list of Datastream
### Response:
def get_datastreams(self):
"""
To get list of Datastream
"""
datastreams = []
response = self.http.get('/Datastream')
for datastream in response:
datastreams.append(Schemas.Datastream(datastream=datastream))
return datastreams |
def _radix_sort(L, i=0):
"""
Most significant char radix sort
"""
if len(L) <= 1:
return L
done_bucket = []
# NOTE(review): 255 buckets only covers ord values 0..254; a character with
# code point 255 or above raises IndexError in `buckets[ord(s[i])]` below.
# range(256) would be needed even just for full byte coverage -- confirm.
buckets = [ [] for x in range(255) ]
for s in L:
if i >= len(s):
# Strings exhausted at this depth sort before all longer strings.
done_bucket.append(s)
else:
buckets[ ord(s[i]) ].append(s)
# Recurse on the next character position within each bucket, then flatten.
buckets = [ _radix_sort(b, i + 1) for b in buckets ]
return done_bucket + [ b for blist in buckets for b in blist ] | Most significant char radix sort
### Input:
Most significant char radix sort
### Response:
def _radix_sort(L, i=0):
"""
Most significant char radix sort
"""
if len(L) <= 1:
return L
done_bucket = []
buckets = [ [] for x in range(255) ]
for s in L:
if i >= len(s):
done_bucket.append(s)
else:
buckets[ ord(s[i]) ].append(s)
buckets = [ _radix_sort(b, i + 1) for b in buckets ]
return done_bucket + [ b for blist in buckets for b in blist ] |
def stop(self):
"""
Stops this VirtualBox VM.
"""
self._hw_virtualization = False
yield from self._stop_ubridge()
yield from self._stop_remote_console()
vm_state = yield from self._get_vm_state()
if vm_state == "running" or vm_state == "paused" or vm_state == "stuck":
if self.acpi_shutdown:
# use ACPI to shutdown the VM
result = yield from self._control_vm("acpipowerbutton")
trial = 0
# Poll for up to ~120 seconds for the guest to honor the ACPI power
# button; fall back to a hard poweroff if it never reaches "poweroff".
while True:
vm_state = yield from self._get_vm_state()
if vm_state == "poweroff":
break
yield from asyncio.sleep(1)
trial += 1
if trial >= 120:
yield from self._control_vm("poweroff")
break
self.status = "stopped"
log.debug("ACPI shutdown result: {}".format(result))
else:
# power off the VM
result = yield from self._control_vm("poweroff")
self.status = "stopped"
log.debug("Stop result: {}".format(result))
log.info("VirtualBox VM '{name}' [{id}] stopped".format(name=self.name, id=self.id))
yield from asyncio.sleep(0.5) # give some time for VirtualBox to unlock the VM
try:
# deactivate the first serial port
yield from self._modify_vm("--uart1 off")
except VirtualBoxError as e:
# Best-effort cleanup: failure to disable the serial port is non-fatal.
log.warn("Could not deactivate the first serial port: {}".format(e))
# Detach all network adapters so the VM leaves no stale NIC configuration.
for adapter_number in range(0, self._adapters):
nio = self._ethernet_adapters[adapter_number].get_nio(0)
if nio:
yield from self._modify_vm("--nictrace{} off".format(adapter_number + 1))
yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
yield from self._modify_vm("--nic{} null".format(adapter_number + 1))
yield from super().stop() | Stops this VirtualBox VM.
### Input:
Stops this VirtualBox VM.
### Response:
def stop(self):
"""
Stops this VirtualBox VM.
"""
self._hw_virtualization = False
yield from self._stop_ubridge()
yield from self._stop_remote_console()
vm_state = yield from self._get_vm_state()
if vm_state == "running" or vm_state == "paused" or vm_state == "stuck":
if self.acpi_shutdown:
# use ACPI to shutdown the VM
result = yield from self._control_vm("acpipowerbutton")
trial = 0
while True:
vm_state = yield from self._get_vm_state()
if vm_state == "poweroff":
break
yield from asyncio.sleep(1)
trial += 1
if trial >= 120:
yield from self._control_vm("poweroff")
break
self.status = "stopped"
log.debug("ACPI shutdown result: {}".format(result))
else:
# power off the VM
result = yield from self._control_vm("poweroff")
self.status = "stopped"
log.debug("Stop result: {}".format(result))
log.info("VirtualBox VM '{name}' [{id}] stopped".format(name=self.name, id=self.id))
yield from asyncio.sleep(0.5) # give some time for VirtualBox to unlock the VM
try:
# deactivate the first serial port
yield from self._modify_vm("--uart1 off")
except VirtualBoxError as e:
log.warn("Could not deactivate the first serial port: {}".format(e))
for adapter_number in range(0, self._adapters):
nio = self._ethernet_adapters[adapter_number].get_nio(0)
if nio:
yield from self._modify_vm("--nictrace{} off".format(adapter_number + 1))
yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
yield from self._modify_vm("--nic{} null".format(adapter_number + 1))
yield from super().stop() |
def sendResult(self, future):
"""Send a terminated future back to its parent."""
# Work on a shallow copy so the caller's future object is left untouched.
future = copy.copy(future)
# Remove the (now) extraneous elements from future class
future.callable = future.args = future.kargs = future.greenlet = None
if not future.sendResultBack:
# Don't reply back the result if it isn't asked
future.resultValue = None
self._sendReply(
future.id.worker,
pickle.dumps(
future,
pickle.HIGHEST_PROTOCOL,
),
) | Send a terminated future back to its parent.
### Input:
Send a terminated future back to its parent.
### Response:
def sendResult(self, future):
"""Send a terminated future back to its parent."""
future = copy.copy(future)
# Remove the (now) extraneous elements from future class
future.callable = future.args = future.kargs = future.greenlet = None
if not future.sendResultBack:
# Don't reply back the result if it isn't asked
future.resultValue = None
self._sendReply(
future.id.worker,
pickle.dumps(
future,
pickle.HIGHEST_PROTOCOL,
),
) |
def gen(function, io_loop=None):
"""Allows using a generator to chain together reversible actions.
This function is very similar to :py:func:`reversible.gen` except that it
may be used with actions whose ``forwards`` and/or ``backwards`` methods
are couroutines. Specifically, if either of those methods return futures
the generated action will stop execution until the result of the future is
available.
.. code-block:: python
@reversible.tornado.gen
@tornado.gen.coroutine
def save_comment(ctx, comment):
ctx['comment_id'] = yield async_http_client.fetch(
# ...
)
raise tornado.gen.Return(ctx['comment_id'])
@save_comment.backwards
def delete_comment(ctx, comment):
# returns a Future
return async_http_client.fetch(...)
@reversible.tornado.gen
def post_comment(post, comment, client):
try:
comment_id = yield save_comment(comment)
except CommentStoreException:
# Exceptions thrown by actions may be caught by the
# action.
yield queue_save_comment_request(comment)
else:
yield update_comment_count(post)
update_cache()
:param function:
The generator function. This generator must yield action objects. The
``forwards`` and/or ``backwards`` methods on the action may be
asynchronous operations returning coroutines.
:param io_loop:
IOLoop used to execute asynchronous operations. Defaults to the
current IOLoop if omitted.
:returns:
An action executable via :py:func:`reversible.tornado.execute` and
yieldable in other instances of :py:func:`reversible.tornado.gen`.
"""
@functools.wraps(function) # TODO: use wrapt instead?
def new_function(*args, **kwargs):
try:
value = function(*args, **kwargs)
except _RETURNS as result:
# The wrapped function signaled a return value by raising; wrap it in
# a trivial reversible action that just yields that value back.
return SimpleAction(
lambda ctx: ctx.value,
lambda _: None,
result,
)
else:
if isinstance(value, types.GeneratorType):
# A generator of actions becomes an async, step-by-step action.
return _TornadoGeneratorAction(value, io_loop)
else:
# A plain (non-generator) result is wrapped as a no-op reversible
# action whose forwards step simply returns that value.
return SimpleAction(
lambda _: value,
lambda _: None,
None,
)
return new_function | Allows using a generator to chain together reversible actions.
This function is very similar to :py:func:`reversible.gen` except that it
may be used with actions whose ``forwards`` and/or ``backwards`` methods
are couroutines. Specifically, if either of those methods return futures
the generated action will stop execution until the result of the future is
available.
.. code-block:: python
@reversible.tornado.gen
@tornado.gen.coroutine
def save_comment(ctx, comment):
ctx['comment_id'] = yield async_http_client.fetch(
# ...
)
raise tornado.gen.Return(ctx['comment_id'])
@save_comment.backwards
def delete_comment(ctx, comment):
# returns a Future
return async_http_client.fetch(...)
@reversible.tornado.gen
def post_comment(post, comment, client):
try:
comment_id = yield save_comment(comment)
except CommentStoreException:
# Exceptions thrown by actions may be caught by the
# action.
yield queue_save_comment_request(comment)
else:
yield update_comment_count(post)
update_cache()
:param function:
The generator function. This generator must yield action objects. The
``forwards`` and/or ``backwards`` methods on the action may be
asynchronous operations returning coroutines.
:param io_loop:
IOLoop used to execute asynchronous operations. Defaults to the
current IOLoop if omitted.
:returns:
An action executable via :py:func:`reversible.tornado.execute` and
yieldable in other instances of :py:func:`reversible.tornado.gen`. | Below is the the instruction that describes the task:
### Input:
Allows using a generator to chain together reversible actions.
This function is very similar to :py:func:`reversible.gen` except that it
may be used with actions whose ``forwards`` and/or ``backwards`` methods
are couroutines. Specifically, if either of those methods return futures
the generated action will stop execution until the result of the future is
available.
.. code-block:: python
@reversible.tornado.gen
@tornado.gen.coroutine
def save_comment(ctx, comment):
ctx['comment_id'] = yield async_http_client.fetch(
# ...
)
raise tornado.gen.Return(ctx['comment_id'])
@save_comment.backwards
def delete_comment(ctx, comment):
# returns a Future
return async_http_client.fetch(...)
@reversible.tornado.gen
def post_comment(post, comment, client):
try:
comment_id = yield save_comment(comment)
except CommentStoreException:
# Exceptions thrown by actions may be caught by the
# action.
yield queue_save_comment_request(comment)
else:
yield update_comment_count(post)
update_cache()
:param function:
The generator function. This generator must yield action objects. The
``forwards`` and/or ``backwards`` methods on the action may be
asynchronous operations returning coroutines.
:param io_loop:
IOLoop used to execute asynchronous operations. Defaults to the
current IOLoop if omitted.
:returns:
An action executable via :py:func:`reversible.tornado.execute` and
yieldable in other instances of :py:func:`reversible.tornado.gen`.
### Response:
def gen(function, io_loop=None):
"""Allows using a generator to chain together reversible actions.
This function is very similar to :py:func:`reversible.gen` except that it
may be used with actions whose ``forwards`` and/or ``backwards`` methods
are couroutines. Specifically, if either of those methods return futures
the generated action will stop execution until the result of the future is
available.
.. code-block:: python
@reversible.tornado.gen
@tornado.gen.coroutine
def save_comment(ctx, comment):
ctx['comment_id'] = yield async_http_client.fetch(
# ...
)
raise tornado.gen.Return(ctx['comment_id'])
@save_comment.backwards
def delete_comment(ctx, comment):
# returns a Future
return async_http_client.fetch(...)
@reversible.tornado.gen
def post_comment(post, comment, client):
try:
comment_id = yield save_comment(comment)
except CommentStoreException:
# Exceptions thrown by actions may be caught by the
# action.
yield queue_save_comment_request(comment)
else:
yield update_comment_count(post)
update_cache()
:param function:
The generator function. This generator must yield action objects. The
``forwards`` and/or ``backwards`` methods on the action may be
asynchronous operations returning coroutines.
:param io_loop:
IOLoop used to execute asynchronous operations. Defaults to the
current IOLoop if omitted.
:returns:
An action executable via :py:func:`reversible.tornado.execute` and
yieldable in other instances of :py:func:`reversible.tornado.gen`.
"""
@functools.wraps(function) # TODO: use wrapt instead?
def new_function(*args, **kwargs):
try:
value = function(*args, **kwargs)
except _RETURNS as result:
return SimpleAction(
lambda ctx: ctx.value,
lambda _: None,
result,
)
else:
if isinstance(value, types.GeneratorType):
return _TornadoGeneratorAction(value, io_loop)
else:
return SimpleAction(
lambda _: value,
lambda _: None,
None,
)
return new_function |
def _retrieve_indices(cols):
'''
Retrieve a list of indices corresponding to the provided column specification.
'''
if isinstance(cols, int):
return [cols]
elif isinstance(cols, slice):
# NOTE(review): a slice with stop=None propagates None into range() and
# raises TypeError -- confirm callers always pass a bounded slice.
start = cols.start if cols.start else 0
stop = cols.stop
step = cols.step if cols.step else 1
return list(range(start, stop, step))
elif isinstance(cols, list) and cols:
# bool must be tested before int: bool is a subclass of int, so a boolean
# mask would otherwise be misread as a list of plain indices.
if isinstance(cols[0], bool):
return np.flatnonzero(np.asarray(cols))
elif isinstance(cols[0], int):
return cols
else:
# NOTE(review): an empty list and other unmatched inputs fall through to
# an implicit `return None` rather than reaching this raise -- confirm intent.
raise TypeError('No valid column specifier. Only a scalar, list or slice of all'
'integers or a boolean mask are allowed.') | Retrieve a list of indices corresponding to the provided column specification.
### Input:
Retrieve a list of indices corresponding to the provided column specification.
### Response:
def _retrieve_indices(cols):
'''
Retrieve a list of indices corresponding to the provided column specification.
'''
if isinstance(cols, int):
return [cols]
elif isinstance(cols, slice):
start = cols.start if cols.start else 0
stop = cols.stop
step = cols.step if cols.step else 1
return list(range(start, stop, step))
elif isinstance(cols, list) and cols:
if isinstance(cols[0], bool):
return np.flatnonzero(np.asarray(cols))
elif isinstance(cols[0], int):
return cols
else:
raise TypeError('No valid column specifier. Only a scalar, list or slice of all'
'integers or a boolean mask are allowed.') |
def _set_index(self, key, index):
"""Set a new index array for this series
"""
# key is e.g. "xindex"; derive the origin ("x0") and step ("dx") attribute names.
axis = key[0]
origin = "{}0".format(axis)
delta = "d{}".format(axis)
if index is None:
# Clearing the index removes the cached attribute entirely.
return delattr(self, key)
if not isinstance(index, Index):
try:
unit = index.unit
except AttributeError:
# Fall back to the class default unit for this axis.
unit = getattr(self, "_default_{}unit".format(axis))
index = Index(index, unit=unit, copy=False)
setattr(self, origin, index[0])
if index.regular:
# Evenly-spaced index: store the step so it can be regenerated lazily.
setattr(self, delta, index[1] - index[0])
else:
delattr(self, delta)
setattr(self, "_{}".format(key), index) | Set a new index array for this series
### Input:
Set a new index array for this series
### Response:
def _set_index(self, key, index):
"""Set a new index array for this series
"""
axis = key[0]
origin = "{}0".format(axis)
delta = "d{}".format(axis)
if index is None:
return delattr(self, key)
if not isinstance(index, Index):
try:
unit = index.unit
except AttributeError:
unit = getattr(self, "_default_{}unit".format(axis))
index = Index(index, unit=unit, copy=False)
setattr(self, origin, index[0])
if index.regular:
setattr(self, delta, index[1] - index[0])
else:
delattr(self, delta)
setattr(self, "_{}".format(key), index) |
def _get_function_ptr(self, name):
"""Get or create a function pointer of the given name."""
func = _make_function_ptr_instance
# NOTE(review): func(self, name) is evaluated on every call even when `name`
# is already cached -- setdefault just discards the fresh instance. If
# construction is expensive, an explicit `if name not in ...` would avoid it.
self._function_ptrs.setdefault(name, func(self, name))
return self._function_ptrs[name] | Get or create a function pointer of the given name.
### Input:
Get or create a function pointer of the given name.
### Response:
def _get_function_ptr(self, name):
"""Get or create a function pointer of the given name."""
func = _make_function_ptr_instance
self._function_ptrs.setdefault(name, func(self, name))
return self._function_ptrs[name] |
def register_as(self, klass, name, callback):
"""
Register a class with a function.
:param klass: The class
:type klass: class
:param callback: The callable
:type callback: callable
:param name: The short name
:type name: str
"""
# Thin convenience wrapper: same as register(), with the name supplied last.
return self.register(klass, callback, name) | Register a class with a function.
:param klass: The class
:type klass: class
:param callback: The callable
:type callback: callable
:param name: The short name
:type name: str | Below is the the instruction that describes the task:
### Input:
Register a class with a function.
:param klass: The class
:type klass: class
:param callback: The callable
:type callback: callable
:param name: The short name
:type name: str
### Response:
def register_as(self, klass, name, callback):
"""
Register a class with a function.
:param klass: The class
:type klass: class
:param callback: The callable
:type callback: callable
:param name: The short name
:type name: str
"""
return self.register(klass, callback, name) |
def aggregate(input, **params):
"""
Returns aggregate
:param input:
:param params:
:return:
"""
PARAM_CFG_EXTRACT = 'extract'
PARAM_CFG_SUBSTITUTE = 'substitute'
PARAM_CFG_AGGREGATE = 'aggregate'
AGGR_FIELD = 'field'
AGGR_FUNC = 'func'
# NOTE(review): a missing 'extract' key makes extract_params None and the
# .update() below raises AttributeError -- confirm 'extract' is mandatory.
extract_params = params.get(PARAM_CFG_EXTRACT)
# Force multi-record extraction: aggregation always operates on a collection.
extract_params.update({AccessParams.KEY_TYPE: AccessParams.TYPE_MULTI})
dataset = __extract(input, extract_params)
if PARAM_CFG_SUBSTITUTE in params:
dataset = __substitute(input, dataset, params.get(PARAM_CFG_SUBSTITUTE))
cfg = params.get(PARAM_CFG_AGGREGATE)
res = Aggregator.agg_single_func(dataset, cfg[AGGR_FIELD], cfg[AGGR_FUNC])
return res | Returns aggregate
:param input:
:param params:
:return: | Below is the the instruction that describes the task:
### Input:
Returns aggregate
:param input:
:param params:
:return:
### Response:
def aggregate(input, **params):
"""
Returns aggregate
:param input:
:param params:
:return:
"""
PARAM_CFG_EXTRACT = 'extract'
PARAM_CFG_SUBSTITUTE = 'substitute'
PARAM_CFG_AGGREGATE = 'aggregate'
AGGR_FIELD = 'field'
AGGR_FUNC = 'func'
extract_params = params.get(PARAM_CFG_EXTRACT)
extract_params.update({AccessParams.KEY_TYPE: AccessParams.TYPE_MULTI})
dataset = __extract(input, extract_params)
if PARAM_CFG_SUBSTITUTE in params:
dataset = __substitute(input, dataset, params.get(PARAM_CFG_SUBSTITUTE))
cfg = params.get(PARAM_CFG_AGGREGATE)
res = Aggregator.agg_single_func(dataset, cfg[AGGR_FIELD], cfg[AGGR_FUNC])
return res |
def from_web_element(self, web_element):
"""
Store reference to a WebElement instance representing the element on the DOM.
Use it when an instance of WebElement has already been created (e.g. as the result of find_element) and
you want to create a UIComponent out of it without evaluating it from the locator again.
Returns an instance of the class.
"""
if isinstance(web_element, WebElement) is not True:
raise TypeError("web_element parameter is not of type WebElement.")
self._web_element = web_element
# Return self to allow fluent chaining after construction.
return self | Store reference to a WebElement instance representing the element on the DOM.
Use it when an instance of WebElement has already been created (e.g. as the result of find_element) and
you want to create a UIComponent out of it without evaluating it from the locator again.
Returns an instance of the class. | Below is the the instruction that describes the task:
### Input:
Store reference to a WebElement instance representing the element on the DOM.
Use it when an instance of WebElement has already been created (e.g. as the result of find_element) and
you want to create a UIComponent out of it without evaluating it from the locator again.
Returns an instance of the class.
### Response:
def from_web_element(self, web_element):
"""
Store reference to a WebElement instance representing the element on the DOM.
Use it when an instance of WebElement has already been created (e.g. as the result of find_element) and
you want to create a UIComponent out of it without evaluating it from the locator again.
Returns an instance of the class.
"""
if isinstance(web_element, WebElement) is not True:
raise TypeError("web_element parameter is not of type WebElement.")
self._web_element = web_element
return self |
def get_modifications_indirect(self):
"""Extract indirect Modification INDRA Statements."""
# Get all the specific mod types
mod_event_types = list(ont_to_mod_type.keys())
# Add ONT::PTMs as a special case
mod_event_types += ['ONT::PTM']
# Helper: ONT::INCREASE events whose AFFECTED argument is a modification event.
def get_increase_events(mod_event_types):
mod_events = []
events = self.tree.findall("EVENT/[type='ONT::INCREASE']")
for event in events:
affected = event.find(".//*[@role=':AFFECTED']")
if affected is None:
continue
affected_id = affected.attrib.get('id')
if not affected_id:
continue
pattern = "EVENT/[@id='%s']" % affected_id
affected_event = self.tree.find(pattern)
if affected_event is not None:
affected_type = affected_event.find('type')
if affected_type is not None and \
affected_type.text in mod_event_types:
mod_events.append(event)
return mod_events
# Helper: ONT::CAUSE causal-connective elements whose OUTCOME is a modification event.
def get_cause_events(mod_event_types):
mod_events = []
ccs = self.tree.findall("CC/[type='ONT::CAUSE']")
for cc in ccs:
outcome = cc.find(".//*[@role=':OUTCOME']")
if outcome is None:
continue
outcome_id = outcome.attrib.get('id')
if not outcome_id:
continue
pattern = "EVENT/[@id='%s']" % outcome_id
outcome_event = self.tree.find(pattern)
if outcome_event is not None:
outcome_type = outcome_event.find('type')
if outcome_type is not None and \
outcome_type.text in mod_event_types:
mod_events.append(cc)
return mod_events
mod_events = get_increase_events(mod_event_types)
mod_events += get_cause_events(mod_event_types)
# Iterate over all modification events
for event in mod_events:
event_id = event.attrib['id']
if event_id in self._static_events:
continue
event_type = _get_type(event)
# Get enzyme Agent
enzyme = event.find(".//*[@role=':AGENT']")
if enzyme is None:
enzyme = event.find(".//*[@role=':FACTOR']")
if enzyme is None:
# NOTE(review): this bare `return` (and the ones below) aborts the whole
# extraction loop on the first incomplete event; `continue` to skip just
# this event looks like the intended behavior -- confirm.
return
enzyme_id = enzyme.attrib.get('id')
if enzyme_id is None:
continue
enzyme_agent = self._get_agent_by_id(enzyme_id, event_id)
affected_event_tag = event.find(".//*[@role=':AFFECTED']")
if affected_event_tag is None:
affected_event_tag = event.find(".//*[@role=':OUTCOME']")
if affected_event_tag is None:
return
affected_id = affected_event_tag.attrib.get('id')
if not affected_id:
return
affected_event = self.tree.find("EVENT/[@id='%s']" % affected_id)
if affected_event is None:
return
# Iterate over all enzyme agents if there are multiple ones
for enz_t in _agent_list_product((enzyme_agent, )):
# enz_t comes out as a tuple so we need to take the first
# element here
enz = enz_t[0]
# Note that we re-run the extraction code here potentially
# multiple times. This is mainly to make sure each Statement
# object created here is independent (i.e. has different UUIDs)
# without having to manipulate it after creation.
stmts = self._get_modification_event(affected_event)
stmts_to_make = []
if stmts:
for stmt in stmts:
# The affected event should have no enzyme but should
# have a substrate
if stmt.enz is None and stmt.sub is not None:
stmts_to_make.append(stmt)
# Attach the enzyme and mark the evidence as indirect before recording.
for stmt in stmts_to_make:
stmt.enz = enz
for ev in stmt.evidence:
ev.epistemics['direct'] = False
self.statements.append(stmt)
self._add_extracted(event_type, event.attrib['id'])
self._add_extracted(affected_event.find('type').text, affected_id) | Extract indirect Modification INDRA Statements.
### Input:
Extract indirect Modification INDRA Statements.
### Response:
def get_modifications_indirect(self):
    """Extract indirect Modification INDRA Statements.

    Scans the EKB XML tree for ONT::INCREASE events and ONT::CAUSE
    causal-connective (CC) elements whose affected/outcome argument points
    at a modification event, attaches the increase/cause agent as the
    enzyme, and marks the resulting Statements' evidence as indirect
    (epistemics['direct'] = False). Statements are appended to
    self.statements and the processed events recorded via _add_extracted.
    """
    # Get all the specific mod types
    mod_event_types = list(ont_to_mod_type.keys())
    # Add ONT::PTMs as a special case
    mod_event_types += ['ONT::PTM']
    def get_increase_events(mod_event_types):
        # Collect ONT::INCREASE events whose :AFFECTED argument resolves
        # to a modification event elsewhere in the tree.
        mod_events = []
        events = self.tree.findall("EVENT/[type='ONT::INCREASE']")
        for event in events:
            affected = event.find(".//*[@role=':AFFECTED']")
            if affected is None:
                continue
            affected_id = affected.attrib.get('id')
            if not affected_id:
                continue
            pattern = "EVENT/[@id='%s']" % affected_id
            affected_event = self.tree.find(pattern)
            if affected_event is not None:
                affected_type = affected_event.find('type')
                if affected_type is not None and \
                        affected_type.text in mod_event_types:
                    mod_events.append(event)
        return mod_events
    def get_cause_events(mod_event_types):
        # Collect ONT::CAUSE CC elements whose :OUTCOME argument resolves
        # to a modification event elsewhere in the tree.
        mod_events = []
        ccs = self.tree.findall("CC/[type='ONT::CAUSE']")
        for cc in ccs:
            outcome = cc.find(".//*[@role=':OUTCOME']")
            if outcome is None:
                continue
            outcome_id = outcome.attrib.get('id')
            if not outcome_id:
                continue
            pattern = "EVENT/[@id='%s']" % outcome_id
            outcome_event = self.tree.find(pattern)
            if outcome_event is not None:
                outcome_type = outcome_event.find('type')
                if outcome_type is not None and \
                        outcome_type.text in mod_event_types:
                    mod_events.append(cc)
        return mod_events
    mod_events = get_increase_events(mod_event_types)
    mod_events += get_cause_events(mod_event_types)
    # Iterate over all modification events
    for event in mod_events:
        event_id = event.attrib['id']
        if event_id in self._static_events:
            continue
        event_type = _get_type(event)
        # Get enzyme Agent
        enzyme = event.find(".//*[@role=':AGENT']")
        if enzyme is None:
            enzyme = event.find(".//*[@role=':FACTOR']")
            if enzyme is None:
                # NOTE(review): this `return` aborts the scan of ALL
                # remaining events rather than skipping just this one
                # (contrast the `continue` used for a missing enzyme id
                # just below) -- confirm this asymmetry is intended.
                return
        enzyme_id = enzyme.attrib.get('id')
        if enzyme_id is None:
            continue
        enzyme_agent = self._get_agent_by_id(enzyme_id, event_id)
        affected_event_tag = event.find(".//*[@role=':AFFECTED']")
        if affected_event_tag is None:
            affected_event_tag = event.find(".//*[@role=':OUTCOME']")
            if affected_event_tag is None:
                # NOTE(review): same early-return pattern as above.
                return
        affected_id = affected_event_tag.attrib.get('id')
        if not affected_id:
            return
        affected_event = self.tree.find("EVENT/[@id='%s']" % affected_id)
        if affected_event is None:
            return
        # Iterate over all enzyme agents if there are multiple ones
        for enz_t in _agent_list_product((enzyme_agent, )):
            # enz_t comes out as a tuple so we need to take the first
            # element here
            enz = enz_t[0]
            # Note that we re-run the extraction code here potentially
            # multiple times. This is mainly to make sure each Statement
            # object created here is independent (i.e. has different UUIDs)
            # without having to manipulate it after creation.
            stmts = self._get_modification_event(affected_event)
            stmts_to_make = []
            if stmts:
                for stmt in stmts:
                    # The affected event should have no enzyme but should
                    # have a substrate
                    if stmt.enz is None and stmt.sub is not None:
                        stmts_to_make.append(stmt)
            for stmt in stmts_to_make:
                stmt.enz = enz
                for ev in stmt.evidence:
                    # The enzyme acts via an INCREASE/CAUSE relation
                    # rather than directly, so mark evidence indirect.
                    ev.epistemics['direct'] = False
                self.statements.append(stmt)
        self._add_extracted(event_type, event.attrib['id'])
        self._add_extracted(affected_event.find('type').text, affected_id)
def set_color(
fg=Color.normal,
bg=Color.normal,
fg_dark=False,
bg_dark=False,
underlined=False,
):
"""Set the console color.
>>> set_color(Color.red, Color.blue)
>>> set_color('red', 'blue')
>>> set_color() # returns back to normal
"""
_set_color(fg, bg, fg_dark, bg_dark, underlined) | Set the console color.
>>> set_color(Color.red, Color.blue)
>>> set_color('red', 'blue')
>>> set_color() # returns back to normal | Below is the the instruction that describes the task:
### Input:
Set the console color.
>>> set_color(Color.red, Color.blue)
>>> set_color('red', 'blue')
>>> set_color() # returns back to normal
### Response:
def set_color(
    fg=Color.normal,
    bg=Color.normal,
    fg_dark=False,
    bg_dark=False,
    underlined=False,
):
    """Set the console color.
    >>> set_color(Color.red, Color.blue)
    >>> set_color('red', 'blue')
    >>> set_color() # returns back to normal

    Args:
        fg: Foreground color; a ``Color`` member or its string name.
        bg: Background color; a ``Color`` member or its string name.
        fg_dark: Use the dark variant of the foreground color.
        bg_dark: Use the dark variant of the background color.
        underlined: Underline subsequently printed text.
    """
    # Delegate to the module-private implementation.
    _set_color(fg, bg, fg_dark, bg_dark, underlined)
def compare_md5(self):
"""Compare md5 of file on network device to md5 of local file."""
if self.direction == "put":
remote_md5 = self.remote_md5()
return self.source_md5 == remote_md5
elif self.direction == "get":
local_md5 = self.file_md5(self.dest_file)
return self.source_md5 == local_md5 | Compare md5 of file on network device to md5 of local file. | Below is the the instruction that describes the task:
### Input:
Compare md5 of file on network device to md5 of local file.
### Response:
def compare_md5(self):
    """Check that the transferred file matches its source by MD5.

    Returns True when the source digest equals the digest of the copy
    on the other side of the transfer, False when it does not, and
    None for an unrecognized transfer direction.
    """
    if self.direction == "put":
        # File was pushed to the device: hash the remote copy.
        return self.source_md5 == self.remote_md5()
    if self.direction == "get":
        # File was pulled from the device: hash the local destination.
        return self.source_md5 == self.file_md5(self.dest_file)
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() | Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError: | Below is the the instruction that describes the task:
### Input:
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
### Response:
def is_int(value):
    """
    Check if value is an int
    :type value: int, str, bytes, float, Decimal
    >>> is_int(123), is_int('123'), is_int(Decimal('10'))
    (True, True, True)
    >>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
    (False, False, False)
    >>> is_int(object)
    Traceback (most recent call last):
    TypeError:
    """
    ensure_instance(value, (int, str, bytes, float, Decimal))
    # float is tested before int; the two types are disjoint in Python,
    # so the ordering does not change the result.
    if isinstance(value, float):
        return False
    if isinstance(value, int):
        # bool is an int subclass and is treated as integral here.
        return True
    if isinstance(value, (str, bytes)):
        return value.isdigit()
    if isinstance(value, Decimal):
        # A Decimal is integral when its string form is all digits.
        return str(value).isdigit()
    raise ValueError()
def translate_paths (properties, path):
""" Interpret all path properties in 'properties' as relative to 'path'
The property values are assumed to be in system-specific form, and
will be translated into normalized form.
"""
assert is_iterable_typed(properties, Property)
result = []
for p in properties:
if p.feature.path:
values = __re_two_ampersands.split(p.value)
new_value = "&&".join(os.path.normpath(os.path.join(path, v)) for v in values)
if new_value != p.value:
result.append(Property(p.feature, new_value, p.condition))
else:
result.append(p)
else:
result.append (p)
return result | Interpret all path properties in 'properties' as relative to 'path'
The property values are assumed to be in system-specific form, and
will be translated into normalized form. | Below is the the instruction that describes the task:
### Input:
Interpret all path properties in 'properties' as relative to 'path'
The property values are assumed to be in system-specific form, and
will be translated into normalized form.
### Response:
def translate_paths (properties, path):
    """ Interpret all path properties in 'properties' as relative to 'path'
        The property values are assumed to be in system-specific form, and
        will be translated into normalized form.
    """
    assert is_iterable_typed(properties, Property)
    translated = []
    for prop in properties:
        if not prop.feature.path:
            # Non-path properties pass through untouched.
            translated.append(prop)
            continue
        # A value may hold several paths joined by '&&'; normalize each
        # one relative to 'path' and rejoin.
        pieces = [os.path.normpath(os.path.join(path, v))
                  for v in __re_two_ampersands.split(prop.value)]
        rejoined = "&&".join(pieces)
        if rejoined != prop.value:
            translated.append(Property(prop.feature, rejoined, prop.condition))
        else:
            # Unchanged value: keep the original Property object.
            translated.append(prop)
    return translated
def _gwf_channel_segments(path, channel, warn=True):
"""Yields the segments containing data for ``channel`` in this GWF path
"""
stream = open_gwf(path)
# get segments for frames
toc = stream.GetTOC()
secs = toc.GetGTimeS()
nano = toc.GetGTimeN()
dur = toc.GetDt()
readers = [getattr(stream, 'ReadFr{0}Data'.format(type_.title())) for
type_ in ("proc", "sim", "adc")]
# for each segment, try and read the data for this channel
for i, (s, ns, dt) in enumerate(zip(secs, nano, dur)):
for read in readers:
try:
read(i, channel)
except (IndexError, ValueError):
continue
readers = [read] # use this one from now on
epoch = LIGOTimeGPS(s, ns)
yield Segment(epoch, epoch + dt)
break
else: # none of the readers worked for this channel, warn
if warn:
warnings.warn(
"{0!r} not found in frame {1} of {2}".format(
channel, i, path),
) | Yields the segments containing data for ``channel`` in this GWF path | Below is the the instruction that describes the task:
### Input:
Yields the segments containing data for ``channel`` in this GWF path
### Response:
def _gwf_channel_segments(path, channel, warn=True):
    """Yields the segments containing data for ``channel`` in this GWF path

    Parameters
    ----------
    path : `str`
        path of the GWF file to scan
    channel : `str`
        name of the data channel to look for
    warn : `bool`, optional
        if `True` (default), emit a warning for each frame in which the
        channel is not found

    Yields
    ------
    segment : `Segment`
        one ``[epoch, epoch + dt)`` segment per frame in which the
        channel has data
    """
    stream = open_gwf(path)
    # get segments for frames
    toc = stream.GetTOC()
    secs = toc.GetGTimeS()
    nano = toc.GetGTimeN()
    dur = toc.GetDt()
    # A channel is stored as exactly one of the proc/sim/adc frame data
    # types; try each reader until one succeeds.
    readers = [getattr(stream, 'ReadFr{0}Data'.format(type_.title())) for
               type_ in ("proc", "sim", "adc")]
    # for each segment, try and read the data for this channel
    for i, (s, ns, dt) in enumerate(zip(secs, nano, dur)):
        for read in readers:
            try:
                read(i, channel)
            except (IndexError, ValueError):
                # this reader doesn't have the channel in this frame
                continue
            readers = [read]  # use this one from now on
            epoch = LIGOTimeGPS(s, ns)
            yield Segment(epoch, epoch + dt)
            break
        else:  # none of the readers worked for this channel, warn
            if warn:
                warnings.warn(
                    "{0!r} not found in frame {1} of {2}".format(
                        channel, i, path),
                )
) |
def fix_even_row_data_fc(fdata):
"""When the number of rows in fdata is even, there is a subtlety that must
be taken care of if fdata is to satisfy the symmetry required for further
processing. For an array length of 6,the data is align as [0 1 2 -3 -2 -1]
this routine simply sets the row corresponding to the -3 index equal to
zero. It is an unfortunate subtlety, but not taking care of this has
resulted in answers that are not double precision. This operation should
be applied before any other operators are applied to fdata."""
L = fdata.shape[0]
if np.mod(L, 2) == 0:
fdata[int(L / 2), :] = 0 | When the number of rows in fdata is even, there is a subtlety that must
be taken care of if fdata is to satisfy the symmetry required for further
processing. For an array length of 6,the data is align as [0 1 2 -3 -2 -1]
this routine simply sets the row corresponding to the -3 index equal to
zero. It is an unfortunate subtlety, but not taking care of this has
resulted in answers that are not double precision. This operation should
be applied before any other operators are applied to fdata. | Below is the the instruction that describes the task:
### Input:
When the number of rows in fdata is even, there is a subtlety that must
be taken care of if fdata is to satisfy the symmetry required for further
processing. For an array length of 6,the data is align as [0 1 2 -3 -2 -1]
this routine simply sets the row corresponding to the -3 index equal to
zero. It is an unfortunate subtlety, but not taking care of this has
resulted in answers that are not double precision. This operation should
be applied before any other operators are applied to fdata.
### Response:
def fix_even_row_data_fc(fdata):
    """Zero the unpaired Nyquist row of an even-length Fourier array.

    When the number of rows in fdata is even, there is a subtlety that must
    be taken care of if fdata is to satisfy the symmetry required for further
    processing. For an array length of 6, the data is aligned as
    [0 1 2 -3 -2 -1]: the -3 row has no positive-frequency partner, so this
    routine sets that row to zero. Not taking care of this has resulted in
    answers that are not double precision. This operation should be applied
    before any other operators are applied to fdata.

    The array is modified in place; odd-length arrays are left untouched.
    """
    n_rows = fdata.shape[0]
    # Only even lengths have an unpaired Nyquist row, at index n_rows // 2.
    # (Idiomatic `%` and `//` replace np.mod / int(L / 2).)
    if n_rows % 2 == 0:
        fdata[n_rows // 2, :] = 0
fdata[int(L / 2), :] = 0 |
def visualRegionForSelection(self, selection):
"""Gets the region of all of the components in selection
:param selection: a selection model for this view
:type selection: :qtdoc:`QItemSelectionModel`
:returns: :qtdoc:`QRegion` -- union of rects of the selected components
"""
region = QtGui.QRegion()
for index in selection.indexes():
region = region.united(self._rects[index.row()][index.column()])
return region | Gets the region of all of the components in selection
:param selection: a selection model for this view
:type selection: :qtdoc:`QItemSelectionModel`
:returns: :qtdoc:`QRegion` -- union of rects of the selected components | Below is the the instruction that describes the task:
### Input:
Gets the region of all of the components in selection
:param selection: a selection model for this view
:type selection: :qtdoc:`QItemSelectionModel`
:returns: :qtdoc:`QRegion` -- union of rects of the selected components
### Response:
def visualRegionForSelection(self, selection):
    """Gets the region of all of the components in selection

    :param selection: a selection model for this view
    :type selection: :qtdoc:`QItemSelectionModel`
    :returns: :qtdoc:`QRegion` -- union of rects of the selected components
    """
    combined = QtGui.QRegion()
    for idx in selection.indexes():
        # Fold each selected cell's cached rect into the union.
        combined = combined.united(self._rects[idx.row()][idx.column()])
    return combined
def add_to_emails(self, *emails):
"""
:calls: `POST /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None
"""
assert all(isinstance(element, (str, unicode)) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/emails",
input=post_parameters
) | :calls: `POST /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None | Below is the the instruction that describes the task:
### Input:
:calls: `POST /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None
### Response:
def add_to_emails(self, *emails):
    """
    :calls: `POST /user/emails <http://developer.github.com/v3/users/emails>`_
    :param email: string
    :rtype: None
    """
    # Every address must be a string (str, or unicode on Python 2).
    assert all(isinstance(element, (str, unicode)) for element in emails), emails
    post_parameters = emails
    # The API takes the raw sequence of addresses as the JSON request body;
    # the parsed response is not returned to the caller.
    headers, data = self._requester.requestJsonAndCheck(
        "POST",
        "/user/emails",
        input=post_parameters
    )
def vert_attr2tex_2_meshes(script, source_mesh=0, target_mesh=1, attribute=0,
max_distance=0.5, tex_name='TEMP3D_texture.png',
tex_width=1024, tex_height=1024,
overwrite_tex=True, assign_tex=False,
fill_tex=True):
"""Transfer Vertex Attributes to Texture (between 2 meshes)
Args:
script: the FilterScript object or script filename to write
the filter to.
source_mesh (int): The mesh that contains the source data that we want to transfer
target_mesh (int): The mesh whose texture will be filled according to source mesh data
attribute (int): Choose what attribute has to be transferred onto the target texture. You can choose between Per vertex attributes (color, normal, quality) or to transfer color information from source mesh texture
max_distance (float): Sample points for which we do not find anything within this distance are rejected and not considered for recovering data
tex_name (str): The texture file to be created
tex_width (int): The texture width
tex_height (int): The texture height
overwrite_tex (bool): If target mesh has a texture will be overwritten (with provided texture dimension)
assign_tex (bool): Assign the newly created texture to target mesh
fill_tex (bool): If enabled the unmapped texture space is colored using a pull push filling algorithm, if false is set to black
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
if script.ml_version == '1.3.4BETA':
filter_name = 'Transfer Vertex Attributes to Texture (between 2 meshes)'
else:
filter_name = 'Transfer: Vertex Attributes to Texture (1 or 2 meshes)'
filter_xml = ''.join([
' <filter name="{}">\n'.format(filter_name),
' <Param name="sourceMesh" ',
'value="%d" ' % source_mesh,
'description="Source Mesh" ',
'type="RichMesh" ',
'/>\n',
' <Param name="targetMesh" ',
'value="%d" ' % target_mesh,
'description="Target Mesh" ',
'type="RichMesh" ',
'/>\n',
' <Param name="AttributeEnum" ',
'value="%d" ' % attribute,
'description="Color Data Source" ',
'enum_val0="Vertex Color" ',
'enum_val1="Vertex Normal" ',
'enum_val2="Vertex Quality" ',
'enum_val3="Texture Color" ',
'enum_cardinality="4" ',
'type="RichEnum" ',
'/>\n',
' <Param name="upperBound" ',
'value="%s" ' % max_distance,
'description="Max Dist Search" ',
'min="0" ',
'max="100" ',
'type="RichAbsPerc" ',
'/>\n',
' <Param name="textName" ',
'value="%s" ' % tex_name,
'description="Texture file" ',
'type="RichString" ',
'/>\n',
' <Param name="textW" ',
'value="%d" ' % tex_width,
'description="Texture width (px)" ',
'type="RichInt" ',
'/>\n',
' <Param name="textH" ',
'value="%d" ' % tex_height,
'description="Texture height (px)" ',
'type="RichInt" ',
'/>\n',
' <Param name="overwrite" ',
'value="%s" ' % str(overwrite_tex).lower(),
'description="Overwrite Target Mesh Texture" ',
'type="RichBool" ',
'/>\n',
' <Param name="assign" ',
'value="%s" ' % str(assign_tex).lower(),
'description="Assign Texture" ',
'type="RichBool" ',
'/>\n',
' <Param name="pullpush" ',
'value="%s" ' % str(fill_tex).lower(),
'description="Fill texture" ',
'type="RichBool" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None | Transfer Vertex Attributes to Texture (between 2 meshes)
Args:
script: the FilterScript object or script filename to write
the filter to.
source_mesh (int): The mesh that contains the source data that we want to transfer
target_mesh (int): The mesh whose texture will be filled according to source mesh data
attribute (int): Choose what attribute has to be transferred onto the target texture. You can choose between Per vertex attributes (color, normal, quality) or to transfer color information from source mesh texture
max_distance (float): Sample points for which we do not find anything within this distance are rejected and not considered for recovering data
tex_name (str): The texture file to be created
tex_width (int): The texture width
tex_height (int): The texture height
overwrite_tex (bool): If target mesh has a texture will be overwritten (with provided texture dimension)
assign_tex (bool): Assign the newly created texture to target mesh
fill_tex (bool): If enabled the unmapped texture space is colored using a pull push filling algorithm, if false is set to black
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA | Below is the the instruction that describes the task:
### Input:
Transfer Vertex Attributes to Texture (between 2 meshes)
Args:
script: the FilterScript object or script filename to write
the filter to.
source_mesh (int): The mesh that contains the source data that we want to transfer
target_mesh (int): The mesh whose texture will be filled according to source mesh data
attribute (int): Choose what attribute has to be transferred onto the target texture. You can choose between Per vertex attributes (color, normal, quality) or to transfer color information from source mesh texture
max_distance (float): Sample points for which we do not find anything within this distance are rejected and not considered for recovering data
tex_name (str): The texture file to be created
tex_width (int): The texture width
tex_height (int): The texture height
overwrite_tex (bool): If target mesh has a texture will be overwritten (with provided texture dimension)
assign_tex (bool): Assign the newly created texture to target mesh
fill_tex (bool): If enabled the unmapped texture space is colored using a pull push filling algorithm, if false is set to black
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
### Response:
def vert_attr2tex_2_meshes(script, source_mesh=0, target_mesh=1, attribute=0,
                           max_distance=0.5, tex_name='TEMP3D_texture.png',
                           tex_width=1024, tex_height=1024,
                           overwrite_tex=True, assign_tex=False,
                           fill_tex=True):
    """Transfer Vertex Attributes to Texture (between 2 meshes)
    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        source_mesh (int): The mesh that contains the source data that we want to transfer
        target_mesh (int): The mesh whose texture will be filled according to source mesh data
        attribute (int): Choose what attribute has to be transferred onto the target texture. You can choose between Per vertex attributes (color, normal, quality) or to transfer color information from source mesh texture
        max_distance (float): Sample points for which we do not find anything within this distance are rejected and not considered for recovering data
        tex_name (str): The texture file to be created
        tex_width (int): The texture width
        tex_height (int): The texture height
        overwrite_tex (bool): If target mesh has a texture will be overwritten (with provided texture dimension)
        assign_tex (bool): Assign the newly created texture to target mesh
        fill_tex (bool): If enabled the unmapped texture space is colored using a pull push filling algorithm, if false is set to black
    Layer stack:
        No impacts
    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # The filter was renamed between MeshLab 1.3.4BETA and 2016.12; pick
    # the name matching the script's target version.
    if script.ml_version == '1.3.4BETA':
        filter_name = 'Transfer Vertex Attributes to Texture (between 2 meshes)'
    else:
        filter_name = 'Transfer: Vertex Attributes to Texture (1 or 2 meshes)'
    # Assemble the <filter> XML element one parameter at a time; booleans
    # are lower-cased to match the .mlx file format.
    filter_xml = ''.join([
        '  <filter name="{}">\n'.format(filter_name),
        '    <Param name="sourceMesh" ',
        'value="%d" ' % source_mesh,
        'description="Source Mesh" ',
        'type="RichMesh" ',
        '/>\n',
        '    <Param name="targetMesh" ',
        'value="%d" ' % target_mesh,
        'description="Target Mesh" ',
        'type="RichMesh" ',
        '/>\n',
        '    <Param name="AttributeEnum" ',
        'value="%d" ' % attribute,
        'description="Color Data Source" ',
        'enum_val0="Vertex Color" ',
        'enum_val1="Vertex Normal" ',
        'enum_val2="Vertex Quality" ',
        'enum_val3="Texture Color" ',
        'enum_cardinality="4" ',
        'type="RichEnum" ',
        '/>\n',
        '    <Param name="upperBound" ',
        'value="%s" ' % max_distance,
        'description="Max Dist Search" ',
        'min="0" ',
        'max="100" ',
        'type="RichAbsPerc" ',
        '/>\n',
        '    <Param name="textName" ',
        'value="%s" ' % tex_name,
        'description="Texture file" ',
        'type="RichString" ',
        '/>\n',
        '    <Param name="textW" ',
        'value="%d" ' % tex_width,
        'description="Texture width (px)" ',
        'type="RichInt" ',
        '/>\n',
        '    <Param name="textH" ',
        'value="%d" ' % tex_height,
        'description="Texture height (px)" ',
        'type="RichInt" ',
        '/>\n',
        '    <Param name="overwrite" ',
        'value="%s" ' % str(overwrite_tex).lower(),
        'description="Overwrite Target Mesh Texture" ',
        'type="RichBool" ',
        '/>\n',
        '    <Param name="assign" ',
        'value="%s" ' % str(assign_tex).lower(),
        'description="Assign Texture" ',
        'type="RichBool" ',
        '/>\n',
        '    <Param name="pullpush" ',
        'value="%s" ' % str(fill_tex).lower(),
        'description="Fill texture" ',
        'type="RichBool" ',
        '/>\n',
        '  </filter>\n'])
    # Append the assembled filter to the script (file or FilterScript).
    util.write_filter(script, filter_xml)
    return None
def norm_nuclear(X):
r"""Compute the nuclear norm
.. math::
\| X \|_* = \sum_i \sigma_i
where :math:`\sigma_i` are the singular values of matrix :math:`X`.
Parameters
----------
X : array_like
Input array :math:`X`
Returns
-------
nncl : float
Nuclear norm of `X`
"""
return np.sum(np.linalg.svd(sl.promote16(X), compute_uv=False)) | r"""Compute the nuclear norm
.. math::
\| X \|_* = \sum_i \sigma_i
where :math:`\sigma_i` are the singular values of matrix :math:`X`.
Parameters
----------
X : array_like
Input array :math:`X`
Returns
-------
nncl : float
Nuclear norm of `X` | Below is the the instruction that describes the task:
### Input:
r"""Compute the nuclear norm
.. math::
\| X \|_* = \sum_i \sigma_i
where :math:`\sigma_i` are the singular values of matrix :math:`X`.
Parameters
----------
X : array_like
Input array :math:`X`
Returns
-------
nncl : float
Nuclear norm of `X`
### Response:
def norm_nuclear(X):
    r"""Compute the nuclear norm

    .. math::
     \| X \|_* = \sum_i \sigma_i

    where :math:`\sigma_i` are the singular values of matrix :math:`X`.

    Parameters
    ----------
    X : array_like
      Input array :math:`X`

    Returns
    -------
    nncl : float
      Nuclear norm of `X`
    """
    # The nuclear norm is the sum of the singular values; singular
    # vectors are not needed, so skip their computation.
    singular_values = np.linalg.svd(sl.promote16(X), compute_uv=False)
    return np.sum(singular_values)
def Page_deleteCookie(self, cookieName, url):
"""
Function path: Page.deleteCookie
Domain: Page
Method name: deleteCookie
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'cookieName' (type: string) -> Name of the cookie to remove.
'url' (type: string) -> URL to match cooke domain and path.
No return value.
Description: Deletes browser cookie with given name, domain and path.
"""
assert isinstance(cookieName, (str,)
), "Argument 'cookieName' must be of type '['str']'. Received type: '%s'" % type(
cookieName)
assert isinstance(url, (str,)
), "Argument 'url' must be of type '['str']'. Received type: '%s'" % type(
url)
subdom_funcs = self.synchronous_command('Page.deleteCookie', cookieName=
cookieName, url=url)
return subdom_funcs | Function path: Page.deleteCookie
Domain: Page
Method name: deleteCookie
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'cookieName' (type: string) -> Name of the cookie to remove.
'url' (type: string) -> URL to match cooke domain and path.
No return value.
Description: Deletes browser cookie with given name, domain and path. | Below is the the instruction that describes the task:
### Input:
Function path: Page.deleteCookie
Domain: Page
Method name: deleteCookie
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'cookieName' (type: string) -> Name of the cookie to remove.
'url' (type: string) -> URL to match cooke domain and path.
No return value.
Description: Deletes browser cookie with given name, domain and path.
### Response:
def Page_deleteCookie(self, cookieName, url):
    """
    Function path: Page.deleteCookie
            Domain: Page
            Method name: deleteCookie

            WARNING: This function is marked 'Experimental'!

            Parameters:
                    Required arguments:
                            'cookieName' (type: string) -> Name of the cookie to remove.
                            'url' (type: string) -> URL to match cookie domain and path.
            No return value.

            Description: Deletes browser cookie with given name, domain and path.
    """
    # Validate argument types up front so protocol errors surface early
    # with a clear message rather than a remote-end failure.
    assert isinstance(cookieName, (str,)
        ), "Argument 'cookieName' must be of type '['str']'. Received type: '%s'" % type(
        cookieName)
    assert isinstance(url, (str,)
        ), "Argument 'url' must be of type '['str']'. Received type: '%s'" % type(
        url)
    # Issue the blocking CDP command and return the subdomain accessors.
    subdom_funcs = self.synchronous_command('Page.deleteCookie', cookieName=
        cookieName, url=url)
    return subdom_funcs
def lal(self):
""" Returns a LAL Object that contains this data """
lal_data = None
if self._data.dtype == float32:
lal_data = _lal.CreateREAL4Vector(len(self))
elif self._data.dtype == float64:
lal_data = _lal.CreateREAL8Vector(len(self))
elif self._data.dtype == complex64:
lal_data = _lal.CreateCOMPLEX8Vector(len(self))
elif self._data.dtype == complex128:
lal_data = _lal.CreateCOMPLEX16Vector(len(self))
lal_data.data[:] = self.numpy()
return lal_data | Returns a LAL Object that contains this data | Below is the the instruction that describes the task:
### Input:
Returns a LAL Object that contains this data
### Response:
def lal(self):
    """ Returns a LAL Object that contains this data """
    lal_vec = None
    size = len(self)
    dtype = self._data.dtype
    # Allocate the LAL vector type matching this array's dtype.
    if dtype == float32:
        lal_vec = _lal.CreateREAL4Vector(size)
    elif dtype == float64:
        lal_vec = _lal.CreateREAL8Vector(size)
    elif dtype == complex64:
        lal_vec = _lal.CreateCOMPLEX8Vector(size)
    elif dtype == complex128:
        lal_vec = _lal.CreateCOMPLEX16Vector(size)
    # Copy the contents into the freshly allocated LAL vector.
    lal_vec.data[:] = self.numpy()
    return lal_vec
def _parse_metadata(self, metadata):
"""
Transforms raw HADS metadata into a dictionary (station code -> props)
"""
retval = {}
# these are the first keys, afterwards follows a var-len list of variables/props
# first key always blank so skip it
field_keys = [
"nesdis_id",
"nwsli",
"location_text",
"latitude",
"longitude",
"hsa",
"state",
"owner",
"manufacturer",
"channel",
"init_transmit", # HHMM
"trans_interval",
] # min
# repeat in blocks of 7 after field_keys
var_keys = [
"pe_code",
"data_interval", # min
"coefficient",
"constant",
"time_offset", # min
"base_elevation", # ft
"gauge_correction",
] # ft
lines = metadata.splitlines()
for line in lines:
if len(line) == 0:
continue
raw_fields = line.split("|")
fields = dict(zip(field_keys, raw_fields[1 : len(field_keys)]))
# how many blocks of var_keys after initial fields
var_offset = len(field_keys) + 1
var_blocks = (len(raw_fields) - var_offset) // len(
var_keys
) # how many variables
vars_only = raw_fields[var_offset:]
variables = {}
for offset in range(var_blocks):
var_dict = dict(
zip(
var_keys,
vars_only[
offset
* len(var_keys) : (offset + 1)
* len(var_keys)
],
)
)
variables[var_dict["pe_code"]] = var_dict
var_dict["base_elevation"] = float(var_dict["base_elevation"])
var_dict["gauge_correction"] = float(
var_dict["gauge_correction"]
)
del var_dict["pe_code"] # no need to duplicate
line_val = {"variables": variables}
line_val.update(fields)
# conversions
def dms_to_dd(dms):
parts = dms.split(" ")
sec = int(parts[1]) * 60 + int(parts[2])
return float(parts[0]) + (
sec / 3600.0
) # negative already in first portion
line_val["latitude"] = dms_to_dd(line_val["latitude"])
line_val["longitude"] = dms_to_dd(line_val["longitude"])
retval[line_val["nesdis_id"]] = line_val
return retval | Transforms raw HADS metadata into a dictionary (station code -> props) | Below is the the instruction that describes the task:
### Input:
Transforms raw HADS metadata into a dictionary (station code -> props)
### Response:
def _parse_metadata(self, metadata):
"""
Transforms raw HADS metadata into a dictionary (station code -> props)
"""
retval = {}
# these are the first keys, afterwards follows a var-len list of variables/props
# first key always blank so skip it
field_keys = [
"nesdis_id",
"nwsli",
"location_text",
"latitude",
"longitude",
"hsa",
"state",
"owner",
"manufacturer",
"channel",
"init_transmit", # HHMM
"trans_interval",
] # min
# repeat in blocks of 7 after field_keys
var_keys = [
"pe_code",
"data_interval", # min
"coefficient",
"constant",
"time_offset", # min
"base_elevation", # ft
"gauge_correction",
] # ft
lines = metadata.splitlines()
for line in lines:
if len(line) == 0:
continue
raw_fields = line.split("|")
fields = dict(zip(field_keys, raw_fields[1 : len(field_keys)]))
# how many blocks of var_keys after initial fields
var_offset = len(field_keys) + 1
var_blocks = (len(raw_fields) - var_offset) // len(
var_keys
) # how many variables
vars_only = raw_fields[var_offset:]
variables = {}
for offset in range(var_blocks):
var_dict = dict(
zip(
var_keys,
vars_only[
offset
* len(var_keys) : (offset + 1)
* len(var_keys)
],
)
)
variables[var_dict["pe_code"]] = var_dict
var_dict["base_elevation"] = float(var_dict["base_elevation"])
var_dict["gauge_correction"] = float(
var_dict["gauge_correction"]
)
del var_dict["pe_code"] # no need to duplicate
line_val = {"variables": variables}
line_val.update(fields)
# conversions
def dms_to_dd(dms):
parts = dms.split(" ")
sec = int(parts[1]) * 60 + int(parts[2])
return float(parts[0]) + (
sec / 3600.0
) # negative already in first portion
line_val["latitude"] = dms_to_dd(line_val["latitude"])
line_val["longitude"] = dms_to_dd(line_val["longitude"])
retval[line_val["nesdis_id"]] = line_val
return retval |
async def update_firmware(self,
filename: str,
loop: asyncio.AbstractEventLoop = None,
explicit_modeset: bool = True) -> str:
"""
Program the smoothie board with a given hex file.
If explicit_modeset is True (default), explicitly place the smoothie in
programming mode.
If explicit_modeset is False, assume the smoothie is already in
programming mode.
"""
# ensure there is a reference to the port
if self.simulating:
return 'Did nothing (simulating)'
smoothie_update._ensure_programmer_executable()
if not self.is_connected():
self._connect_to_port()
# get port name
port = self._connection.port
if explicit_modeset:
# set smoothieware into programming mode
self._smoothie_programming_mode()
# close the port so other application can access it
self._connection.close()
# run lpc21isp, THIS WILL TAKE AROUND 1 MINUTE TO COMPLETE
update_cmd = 'lpc21isp -wipe -donotstart {0} {1} {2} 12000'.format(
filename, port, self._config.serial_speed)
kwargs: Dict[str, Any] = {'stdout': asyncio.subprocess.PIPE}
if loop:
kwargs['loop'] = loop
proc = await asyncio.create_subprocess_shell(
update_cmd, **kwargs)
rd: bytes = await proc.stdout.read() # type: ignore
res = rd.decode().strip()
await proc.communicate()
# re-open the port
self._connection.open()
# reset smoothieware
self._smoothie_reset()
# run setup gcodes
self._setup()
return res | Program the smoothie board with a given hex file.
If explicit_modeset is True (default), explicitly place the smoothie in
programming mode.
If explicit_modeset is False, assume the smoothie is already in
programming mode. | Below is the the instruction that describes the task:
### Input:
Program the smoothie board with a given hex file.
If explicit_modeset is True (default), explicitly place the smoothie in
programming mode.
If explicit_modeset is False, assume the smoothie is already in
programming mode.
### Response:
async def update_firmware(self,
filename: str,
loop: asyncio.AbstractEventLoop = None,
explicit_modeset: bool = True) -> str:
"""
Program the smoothie board with a given hex file.
If explicit_modeset is True (default), explicitly place the smoothie in
programming mode.
If explicit_modeset is False, assume the smoothie is already in
programming mode.
"""
# ensure there is a reference to the port
if self.simulating:
return 'Did nothing (simulating)'
smoothie_update._ensure_programmer_executable()
if not self.is_connected():
self._connect_to_port()
# get port name
port = self._connection.port
if explicit_modeset:
# set smoothieware into programming mode
self._smoothie_programming_mode()
# close the port so other application can access it
self._connection.close()
# run lpc21isp, THIS WILL TAKE AROUND 1 MINUTE TO COMPLETE
update_cmd = 'lpc21isp -wipe -donotstart {0} {1} {2} 12000'.format(
filename, port, self._config.serial_speed)
kwargs: Dict[str, Any] = {'stdout': asyncio.subprocess.PIPE}
if loop:
kwargs['loop'] = loop
proc = await asyncio.create_subprocess_shell(
update_cmd, **kwargs)
rd: bytes = await proc.stdout.read() # type: ignore
res = rd.decode().strip()
await proc.communicate()
# re-open the port
self._connection.open()
# reset smoothieware
self._smoothie_reset()
# run setup gcodes
self._setup()
return res |
def open(self, path, mode='r', *args, **kwargs):
"""Proxy to function `open` with path to the current file."""
return open(os.path.join(os.path.dirname(self.path), path),
mode=mode, *args, **kwargs) | Proxy to function `open` with path to the current file. | Below is the the instruction that describes the task:
### Input:
Proxy to function `open` with path to the current file.
### Response:
def open(self, path, mode='r', *args, **kwargs):
"""Proxy to function `open` with path to the current file."""
return open(os.path.join(os.path.dirname(self.path), path),
mode=mode, *args, **kwargs) |
def axes(self):
'''A list of axes of rotation for this joint.'''
return [np.array(self.ode_obj.getAxis1()),
np.array(self.ode_obj.getAxis2())] | A list of axes of rotation for this joint. | Below is the the instruction that describes the task:
### Input:
A list of axes of rotation for this joint.
### Response:
def axes(self):
'''A list of axes of rotation for this joint.'''
return [np.array(self.ode_obj.getAxis1()),
np.array(self.ode_obj.getAxis2())] |
def optimized(fn):
"""Decorator that will call the optimized c++ version
of a pycast function if available rather than theo
original pycast function
:param function fn: original pycast function
:return: return the wrapped function
:rtype: function
"""
def _optimized(self, *args, **kwargs):
""" This method calls the pycastC function if
optimization is enabled and the pycastC function
is available.
:param: PyCastObject self: reference to the calling object.
Needs to be passed to the pycastC function,
so that all uts members are available.
:param: list *args: list of arguments the function is called with.
:param: dict **kwargs: dictionary of parameter names and values the function has been called with.
:return result of the function call either from pycast or pycastC module.
:rtype: function
"""
if self.optimizationEnabled:
class_name = self.__class__.__name__
module = self.__module__.replace("pycast", "pycastC")
try:
imported = __import__("%s.%s" % (module, class_name), globals(), locals(), [fn.__name__])
function = getattr(imported, fn.__name__)
return function(self, *args, **kwargs)
except ImportError:
print "[WARNING] Could not enable optimization for %s, %s" % (fn.__name__, self)
return fn(self, *args, **kwargs)
else:
return fn(self, *args, **kwargs)
setattr(_optimized, "__name__", fn.__name__)
setattr(_optimized, "__repr__", fn.__repr__)
setattr(_optimized, "__str__", fn.__str__)
setattr(_optimized, "__doc__", fn.__doc__)
return _optimized | Decorator that will call the optimized c++ version
of a pycast function if available rather than theo
original pycast function
:param function fn: original pycast function
:return: return the wrapped function
:rtype: function | Below is the the instruction that describes the task:
### Input:
Decorator that will call the optimized c++ version
of a pycast function if available rather than theo
original pycast function
:param function fn: original pycast function
:return: return the wrapped function
:rtype: function
### Response:
def optimized(fn):
"""Decorator that will call the optimized c++ version
of a pycast function if available rather than theo
original pycast function
:param function fn: original pycast function
:return: return the wrapped function
:rtype: function
"""
def _optimized(self, *args, **kwargs):
""" This method calls the pycastC function if
optimization is enabled and the pycastC function
is available.
:param: PyCastObject self: reference to the calling object.
Needs to be passed to the pycastC function,
so that all uts members are available.
:param: list *args: list of arguments the function is called with.
:param: dict **kwargs: dictionary of parameter names and values the function has been called with.
:return result of the function call either from pycast or pycastC module.
:rtype: function
"""
if self.optimizationEnabled:
class_name = self.__class__.__name__
module = self.__module__.replace("pycast", "pycastC")
try:
imported = __import__("%s.%s" % (module, class_name), globals(), locals(), [fn.__name__])
function = getattr(imported, fn.__name__)
return function(self, *args, **kwargs)
except ImportError:
print "[WARNING] Could not enable optimization for %s, %s" % (fn.__name__, self)
return fn(self, *args, **kwargs)
else:
return fn(self, *args, **kwargs)
setattr(_optimized, "__name__", fn.__name__)
setattr(_optimized, "__repr__", fn.__repr__)
setattr(_optimized, "__str__", fn.__str__)
setattr(_optimized, "__doc__", fn.__doc__)
return _optimized |
def log_interp1d(self, xx, yy, kind='linear'):
"""
Performs a log space 1d interpolation.
:param xx: the x values.
:param yy: the y values.
:param kind: the type of interpolation to apply (as per scipy interp1d)
:return: the interpolation function.
"""
logx = np.log10(xx)
logy = np.log10(yy)
lin_interp = interp1d(logx, logy, kind=kind)
log_interp = lambda zz: np.power(10.0, lin_interp(np.log10(zz)))
return log_interp | Performs a log space 1d interpolation.
:param xx: the x values.
:param yy: the y values.
:param kind: the type of interpolation to apply (as per scipy interp1d)
:return: the interpolation function. | Below is the the instruction that describes the task:
### Input:
Performs a log space 1d interpolation.
:param xx: the x values.
:param yy: the y values.
:param kind: the type of interpolation to apply (as per scipy interp1d)
:return: the interpolation function.
### Response:
def log_interp1d(self, xx, yy, kind='linear'):
"""
Performs a log space 1d interpolation.
:param xx: the x values.
:param yy: the y values.
:param kind: the type of interpolation to apply (as per scipy interp1d)
:return: the interpolation function.
"""
logx = np.log10(xx)
logy = np.log10(yy)
lin_interp = interp1d(logx, logy, kind=kind)
log_interp = lambda zz: np.power(10.0, lin_interp(np.log10(zz)))
return log_interp |
def load_class(path):
"""
dynamically load a class given a string of the format
package.Class
"""
package, klass = path.rsplit('.', 1)
module = import_module(package)
return getattr(module, klass) | dynamically load a class given a string of the format
package.Class | Below is the the instruction that describes the task:
### Input:
dynamically load a class given a string of the format
package.Class
### Response:
def load_class(path):
"""
dynamically load a class given a string of the format
package.Class
"""
package, klass = path.rsplit('.', 1)
module = import_module(package)
return getattr(module, klass) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.