code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def on_new_line(self):
"""On new input line"""
self.set_cursor_position('eof')
self.current_prompt_pos = self.get_position('cursor')
self.new_input_line = False | On new input line | Below is the the instruction that describes the task:
### Input:
On new input line
### Response:
def on_new_line(self):
"""On new input line"""
self.set_cursor_position('eof')
self.current_prompt_pos = self.get_position('cursor')
self.new_input_line = False |
def compute_rewards(self, scores):
"""
Compute the "velocity" of (average distance between) the k+1 best
scores. Return a list with those k velocities padded out with zeros so
that the count remains the same.
"""
# get the k + 1 best scores in descending order
best_scores = sorted(scores, reverse=True)[:self.k + 1]
velocities = [best_scores[i] - best_scores[i + 1]
for i in range(len(best_scores) - 1)]
# pad the list out with zeros to maintain the length of the list
zeros = (len(scores) - self.k) * [0]
return velocities + zeros | Compute the "velocity" of (average distance between) the k+1 best
scores. Return a list with those k velocities padded out with zeros so
that the count remains the same. | Below is the the instruction that describes the task:
### Input:
Compute the "velocity" of (average distance between) the k+1 best
scores. Return a list with those k velocities padded out with zeros so
that the count remains the same.
### Response:
def compute_rewards(self, scores):
"""
Compute the "velocity" of (average distance between) the k+1 best
scores. Return a list with those k velocities padded out with zeros so
that the count remains the same.
"""
# get the k + 1 best scores in descending order
best_scores = sorted(scores, reverse=True)[:self.k + 1]
velocities = [best_scores[i] - best_scores[i + 1]
for i in range(len(best_scores) - 1)]
# pad the list out with zeros to maintain the length of the list
zeros = (len(scores) - self.k) * [0]
return velocities + zeros |
def return_hdr(self):
"""Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
Notes
-----
As far as I can, BCI2000 doesn't have channel labels, so we use dummies
starting at chan001 (more consistent with Matlab 1-base indexing...)
"""
orig = {}
orig = _read_header(self.filename)
nchan = int(orig['SourceCh'])
chan_name = ['ch{:03d}'.format(i + 1) for i in range(nchan)]
chan_dtype = dtype(orig['DataFormat'])
self.statevector_len = int(orig['StatevectorLen'])
s_freq = orig['Parameter']['SamplingRate']
if s_freq.endswith('Hz'):
s_freq = s_freq.replace('Hz', '')
s_freq = int(s_freq.strip())
self.s_freq = s_freq
storagetime = orig['Parameter']['StorageTime'].replace('%20', ' ')
try: # newer version
start_time = datetime.strptime(storagetime, '%a %b %d %H:%M:%S %Y')
except:
start_time = datetime.strptime(storagetime, '%Y-%m-%dT%H:%M:%S')
subj_id = orig['Parameter']['SubjectName']
self.dtype = dtype([(chan, chan_dtype) for chan in chan_name] +
[('statevector', 'S', self.statevector_len)])
# compute n_samples based on file size - header
with open(self.filename, 'rb') as f:
f.seek(0, SEEK_END)
EOData = f.tell()
n_samples = int((EOData - int(orig['HeaderLen'])) / self.dtype.itemsize)
self.s_freq = s_freq
self.header_len = int(orig['HeaderLen'])
self.n_samples = n_samples
self.statevectors = _prepare_statevectors(orig['StateVector'])
# TODO: a better way to parse header
self.gain = array([float(x) for x in orig['Parameter']['SourceChGain'].split(' ')[1:]])
return subj_id, start_time, s_freq, chan_name, n_samples, orig | Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
Notes
-----
As far as I can, BCI2000 doesn't have channel labels, so we use dummies
starting at chan001 (more consistent with Matlab 1-base indexing...) | Below is the the instruction that describes the task:
### Input:
Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
Notes
-----
As far as I can, BCI2000 doesn't have channel labels, so we use dummies
starting at chan001 (more consistent with Matlab 1-base indexing...)
### Response:
def return_hdr(self):
"""Return the header for further use.
Returns
-------
subj_id : str
subject identification code
start_time : datetime
start time of the dataset
s_freq : float
sampling frequency
chan_name : list of str
list of all the channels
n_samples : int
number of samples in the dataset
orig : dict
additional information taken directly from the header
Notes
-----
As far as I can, BCI2000 doesn't have channel labels, so we use dummies
starting at chan001 (more consistent with Matlab 1-base indexing...)
"""
orig = {}
orig = _read_header(self.filename)
nchan = int(orig['SourceCh'])
chan_name = ['ch{:03d}'.format(i + 1) for i in range(nchan)]
chan_dtype = dtype(orig['DataFormat'])
self.statevector_len = int(orig['StatevectorLen'])
s_freq = orig['Parameter']['SamplingRate']
if s_freq.endswith('Hz'):
s_freq = s_freq.replace('Hz', '')
s_freq = int(s_freq.strip())
self.s_freq = s_freq
storagetime = orig['Parameter']['StorageTime'].replace('%20', ' ')
try: # newer version
start_time = datetime.strptime(storagetime, '%a %b %d %H:%M:%S %Y')
except:
start_time = datetime.strptime(storagetime, '%Y-%m-%dT%H:%M:%S')
subj_id = orig['Parameter']['SubjectName']
self.dtype = dtype([(chan, chan_dtype) for chan in chan_name] +
[('statevector', 'S', self.statevector_len)])
# compute n_samples based on file size - header
with open(self.filename, 'rb') as f:
f.seek(0, SEEK_END)
EOData = f.tell()
n_samples = int((EOData - int(orig['HeaderLen'])) / self.dtype.itemsize)
self.s_freq = s_freq
self.header_len = int(orig['HeaderLen'])
self.n_samples = n_samples
self.statevectors = _prepare_statevectors(orig['StateVector'])
# TODO: a better way to parse header
self.gain = array([float(x) for x in orig['Parameter']['SourceChGain'].split(' ')[1:]])
return subj_id, start_time, s_freq, chan_name, n_samples, orig |
def _getParameters(self):
"""Returns the result of this decorator."""
param = self.query._getParameters()
index = self._getFilterIndex()
param.update({
'filter['+str(index)+'][columnAlias]' : self.__column,
'filter['+str(index)+'][data][type]' : self.__type,
'filter['+str(index)+'][data][value]' : self.__value,
'filter['+str(index)+'][data][comparison]' : self.__comparison})
#self.__column.getColumnAlias()
return param | Returns the result of this decorator. | Below is the the instruction that describes the task:
### Input:
Returns the result of this decorator.
### Response:
def _getParameters(self):
"""Returns the result of this decorator."""
param = self.query._getParameters()
index = self._getFilterIndex()
param.update({
'filter['+str(index)+'][columnAlias]' : self.__column,
'filter['+str(index)+'][data][type]' : self.__type,
'filter['+str(index)+'][data][value]' : self.__value,
'filter['+str(index)+'][data][comparison]' : self.__comparison})
#self.__column.getColumnAlias()
return param |
def _notify_receiver(self, receiver, params, doc):
"""Send notification to the receiver"""
verb = VMAP[doc['op']]
ns = doc['ns']
notification_id = Id(ns + 'Notification:' + str(ObjectId()) + '@' + params['authority'])
object_id = Id(ns + ':' + str(doc['o']['_id']) + '@' + params['authority'])
try:
getattr(receiver, '_'.join([verb, params['obj_name_plural']]))(notification_id, [object_id])
except AttributeError:
pass
return notification_id | Send notification to the receiver | Below is the the instruction that describes the task:
### Input:
Send notification to the receiver
### Response:
def _notify_receiver(self, receiver, params, doc):
"""Send notification to the receiver"""
verb = VMAP[doc['op']]
ns = doc['ns']
notification_id = Id(ns + 'Notification:' + str(ObjectId()) + '@' + params['authority'])
object_id = Id(ns + ':' + str(doc['o']['_id']) + '@' + params['authority'])
try:
getattr(receiver, '_'.join([verb, params['obj_name_plural']]))(notification_id, [object_id])
except AttributeError:
pass
return notification_id |
def create_ticket_from_albaran(pk, list_lines):
MODEL_SOURCE = SalesAlbaran
MODEL_FINAL = SalesTicket
url_reverse = 'CDNX_invoicing_ticketsaless_list'
# type_doc
msg_error_relation = _("Hay lineas asignadas a ticket")
msg_error_not_found = _('Sales albaran not found')
msg_error_line_not_found = _('Todas las lineas ya se han pasado a ticket')
return SalesLines.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, url_reverse,
msg_error_relation, msg_error_not_found, msg_error_line_not_found,
False)
"""
context = {}
if list_lines:
new_list_lines = SalesLines.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(
invoice__isnull=True
).values_list('pk')
if new_list_lines:
new_pk = SalesLines.objects.values_list('order__pk').filter(pk__in=new_list_lines).first()
if new_pk:
context = SalesLines.create_ticket_from_order(new_pk, new_list_lines)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
""" | context = {}
if list_lines:
new_list_lines = SalesLines.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(
invoice__isnull=True
).values_list('pk')
if new_list_lines:
new_pk = SalesLines.objects.values_list('order__pk').filter(pk__in=new_list_lines).first()
if new_pk:
context = SalesLines.create_ticket_from_order(new_pk, new_list_lines)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context | Below is the the instruction that describes the task:
### Input:
context = {}
if list_lines:
new_list_lines = SalesLines.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(
invoice__isnull=True
).values_list('pk')
if new_list_lines:
new_pk = SalesLines.objects.values_list('order__pk').filter(pk__in=new_list_lines).first()
if new_pk:
context = SalesLines.create_ticket_from_order(new_pk, new_list_lines)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
### Response:
def create_ticket_from_albaran(pk, list_lines):
MODEL_SOURCE = SalesAlbaran
MODEL_FINAL = SalesTicket
url_reverse = 'CDNX_invoicing_ticketsaless_list'
# type_doc
msg_error_relation = _("Hay lineas asignadas a ticket")
msg_error_not_found = _('Sales albaran not found')
msg_error_line_not_found = _('Todas las lineas ya se han pasado a ticket')
return SalesLines.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, url_reverse,
msg_error_relation, msg_error_not_found, msg_error_line_not_found,
False)
"""
context = {}
if list_lines:
new_list_lines = SalesLines.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(
invoice__isnull=True
).values_list('pk')
if new_list_lines:
new_pk = SalesLines.objects.values_list('order__pk').filter(pk__in=new_list_lines).first()
if new_pk:
context = SalesLines.create_ticket_from_order(new_pk, new_list_lines)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
""" |
def freeze_bn(self):
'''Freeze BatchNorm layers.'''
for layer in self.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.eval() | Freeze BatchNorm layers. | Below is the the instruction that describes the task:
### Input:
Freeze BatchNorm layers.
### Response:
def freeze_bn(self):
'''Freeze BatchNorm layers.'''
for layer in self.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.eval() |
def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats,
stat_dtype):
"""Creates a batch normalization op.
It uses the tf.nn.batch_normalization op by default and the
tf.nn.fused_batch_norm op to support fused batch normalization.
Args:
input_batch: A input Tensor of arbitrary dimension.
mean: A mean tensor, of the same dtype as `input_batch`.
variance: A variance tensor, of the same dtype as `input_batch`.
use_batch_stats: A bool value that indicates whether the operation should
use the batch statistics.
stat_dtype: TensorFlow datatype used for the moving mean and variance.
Returns:
A batch normalization operation.
The current mean tensor, of datatype `stat_dtype`.
The current variance tensor, of datatype `stat_dtype`.
"""
if self._fused:
# For the non-training case where not using batch stats,
# pass in the moving statistic variables directly.
# These will already be in the correct dtype, even for float16 input.
batch_norm_op, mean, variance = self._fused_batch_norm_op(
input_batch,
self._moving_mean, self._moving_variance, use_batch_stats)
else:
batch_norm_op = tf.nn.batch_normalization(
input_batch,
mean,
variance,
self._beta,
self._gamma,
self._eps,
name="batch_norm")
# We'll echo the supplied mean and variance so that they can also be used
# to update the moving statistics. Cast to matching type if necessary.
if input_batch.dtype.base_dtype != stat_dtype:
mean = tf.cast(mean, stat_dtype)
variance = tf.cast(variance, stat_dtype)
return batch_norm_op, mean, variance | Creates a batch normalization op.
It uses the tf.nn.batch_normalization op by default and the
tf.nn.fused_batch_norm op to support fused batch normalization.
Args:
input_batch: A input Tensor of arbitrary dimension.
mean: A mean tensor, of the same dtype as `input_batch`.
variance: A variance tensor, of the same dtype as `input_batch`.
use_batch_stats: A bool value that indicates whether the operation should
use the batch statistics.
stat_dtype: TensorFlow datatype used for the moving mean and variance.
Returns:
A batch normalization operation.
The current mean tensor, of datatype `stat_dtype`.
The current variance tensor, of datatype `stat_dtype`. | Below is the the instruction that describes the task:
### Input:
Creates a batch normalization op.
It uses the tf.nn.batch_normalization op by default and the
tf.nn.fused_batch_norm op to support fused batch normalization.
Args:
input_batch: A input Tensor of arbitrary dimension.
mean: A mean tensor, of the same dtype as `input_batch`.
variance: A variance tensor, of the same dtype as `input_batch`.
use_batch_stats: A bool value that indicates whether the operation should
use the batch statistics.
stat_dtype: TensorFlow datatype used for the moving mean and variance.
Returns:
A batch normalization operation.
The current mean tensor, of datatype `stat_dtype`.
The current variance tensor, of datatype `stat_dtype`.
### Response:
def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats,
stat_dtype):
"""Creates a batch normalization op.
It uses the tf.nn.batch_normalization op by default and the
tf.nn.fused_batch_norm op to support fused batch normalization.
Args:
input_batch: A input Tensor of arbitrary dimension.
mean: A mean tensor, of the same dtype as `input_batch`.
variance: A variance tensor, of the same dtype as `input_batch`.
use_batch_stats: A bool value that indicates whether the operation should
use the batch statistics.
stat_dtype: TensorFlow datatype used for the moving mean and variance.
Returns:
A batch normalization operation.
The current mean tensor, of datatype `stat_dtype`.
The current variance tensor, of datatype `stat_dtype`.
"""
if self._fused:
# For the non-training case where not using batch stats,
# pass in the moving statistic variables directly.
# These will already be in the correct dtype, even for float16 input.
batch_norm_op, mean, variance = self._fused_batch_norm_op(
input_batch,
self._moving_mean, self._moving_variance, use_batch_stats)
else:
batch_norm_op = tf.nn.batch_normalization(
input_batch,
mean,
variance,
self._beta,
self._gamma,
self._eps,
name="batch_norm")
# We'll echo the supplied mean and variance so that they can also be used
# to update the moving statistics. Cast to matching type if necessary.
if input_batch.dtype.base_dtype != stat_dtype:
mean = tf.cast(mean, stat_dtype)
variance = tf.cast(variance, stat_dtype)
return batch_norm_op, mean, variance |
def load_repo_addons(_globals):
'''Load all fabsetup addons which are stored under ~/.fabsetup-addon-repos
as git repositories.
Args:
_globals(dict): the globals() namespace of the fabric script.
Return: None
'''
repos_dir = os.path.expanduser('~/.fabsetup-addon-repos')
if os.path.isdir(repos_dir):
basedir, repos, _ = next(os.walk(repos_dir))
for repo_dir in [os.path.join(basedir, repo)
for repo in repos
# omit dot dirs like '.rope'
# or 'fabsetup-theno-termdown.disabled'
if '.' not in repo]:
sys.path.append(repo_dir)
package_name, username = package_username(repo_dir.split('/')[-1])
load_addon(username, package_name, _globals) | Load all fabsetup addons which are stored under ~/.fabsetup-addon-repos
as git repositories.
Args:
_globals(dict): the globals() namespace of the fabric script.
Return: None | Below is the the instruction that describes the task:
### Input:
Load all fabsetup addons which are stored under ~/.fabsetup-addon-repos
as git repositories.
Args:
_globals(dict): the globals() namespace of the fabric script.
Return: None
### Response:
def load_repo_addons(_globals):
'''Load all fabsetup addons which are stored under ~/.fabsetup-addon-repos
as git repositories.
Args:
_globals(dict): the globals() namespace of the fabric script.
Return: None
'''
repos_dir = os.path.expanduser('~/.fabsetup-addon-repos')
if os.path.isdir(repos_dir):
basedir, repos, _ = next(os.walk(repos_dir))
for repo_dir in [os.path.join(basedir, repo)
for repo in repos
# omit dot dirs like '.rope'
# or 'fabsetup-theno-termdown.disabled'
if '.' not in repo]:
sys.path.append(repo_dir)
package_name, username = package_username(repo_dir.split('/')[-1])
load_addon(username, package_name, _globals) |
def f_cash(x, counts, bkg, model):
"""
Wrapper for cash statistics, that defines the model function.
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
"""
return 2.0 * poisson_log_like(counts, bkg + x * model) | Wrapper for cash statistics, that defines the model function.
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure). | Below is the the instruction that describes the task:
### Input:
Wrapper for cash statistics, that defines the model function.
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
### Response:
def f_cash(x, counts, bkg, model):
"""
Wrapper for cash statistics, that defines the model function.
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
"""
return 2.0 * poisson_log_like(counts, bkg + x * model) |
def _from_dict(cls, _dict):
"""Initialize a TableReturn object from a json dictionary."""
args = {}
if 'document' in _dict:
args['document'] = DocInfo._from_dict(_dict.get('document'))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'tables' in _dict:
args['tables'] = [
Tables._from_dict(x) for x in (_dict.get('tables'))
]
return cls(**args) | Initialize a TableReturn object from a json dictionary. | Below is the the instruction that describes the task:
### Input:
Initialize a TableReturn object from a json dictionary.
### Response:
def _from_dict(cls, _dict):
"""Initialize a TableReturn object from a json dictionary."""
args = {}
if 'document' in _dict:
args['document'] = DocInfo._from_dict(_dict.get('document'))
if 'model_id' in _dict:
args['model_id'] = _dict.get('model_id')
if 'model_version' in _dict:
args['model_version'] = _dict.get('model_version')
if 'tables' in _dict:
args['tables'] = [
Tables._from_dict(x) for x in (_dict.get('tables'))
]
return cls(**args) |
def parse_feeds(self, message_channel=True):
"""
Iterates through each of the feed URLs, parses their items, and
sends any items to the channel that have not been previously
been parsed.
"""
if parse:
for feed_url in self.feeds:
feed = parse(feed_url)
for item in feed.entries:
if item["id"] not in self.feed_items:
self.feed_items.add(item["id"])
if message_channel:
message = self.format_item_message(feed, item)
self.message_channel(message)
return | Iterates through each of the feed URLs, parses their items, and
sends any items to the channel that have not been previously
been parsed. | Below is the the instruction that describes the task:
### Input:
Iterates through each of the feed URLs, parses their items, and
sends any items to the channel that have not been previously
been parsed.
### Response:
def parse_feeds(self, message_channel=True):
"""
Iterates through each of the feed URLs, parses their items, and
sends any items to the channel that have not been previously
been parsed.
"""
if parse:
for feed_url in self.feeds:
feed = parse(feed_url)
for item in feed.entries:
if item["id"] not in self.feed_items:
self.feed_items.add(item["id"])
if message_channel:
message = self.format_item_message(feed, item)
self.message_channel(message)
return |
def get_account_data(self, start=0, stop=None, inclusion_states=False, security_level=None):
# type: (int, Optional[int], bool, Optional[int]) -> dict
"""
More comprehensive version of :py:meth:`get_transfers` that
returns addresses and account balance in addition to bundles.
This function is useful in getting all the relevant information
of your account.
:param start:
Starting key index.
:param stop:
Stop before this index.
Note that this parameter behaves like the ``stop`` attribute
in a :py:class:`slice` object; the stop index is *not*
included in the result.
If ``None`` (default), then this method will check every
address until it finds one without any transfers.
:param inclusion_states:
Whether to also fetch the inclusion states of the transfers.
This requires an additional API call to the node, so it is
disabled by default.
:param security_level:
Number of iterations to use when generating new addresses
(see :py:meth:`get_new_addresses`).
This value must be between 1 and 3, inclusive.
If not set, defaults to
:py:attr:`AddressGenerator.DEFAULT_SECURITY_LEVEL`.
:return:
Dict with the following structure::
{
'addresses': List[Address],
List of generated addresses.
Note that this list may include unused
addresses.
'balance': int,
Total account balance. Might be 0.
'bundles': List[Bundle],
List of bundles with transactions to/from this
account.
}
"""
return extended.GetAccountDataCommand(self.adapter)(
seed=self.seed,
start=start,
stop=stop,
inclusionStates=inclusion_states,
security_level=security_level
) | More comprehensive version of :py:meth:`get_transfers` that
returns addresses and account balance in addition to bundles.
This function is useful in getting all the relevant information
of your account.
:param start:
Starting key index.
:param stop:
Stop before this index.
Note that this parameter behaves like the ``stop`` attribute
in a :py:class:`slice` object; the stop index is *not*
included in the result.
If ``None`` (default), then this method will check every
address until it finds one without any transfers.
:param inclusion_states:
Whether to also fetch the inclusion states of the transfers.
This requires an additional API call to the node, so it is
disabled by default.
:param security_level:
Number of iterations to use when generating new addresses
(see :py:meth:`get_new_addresses`).
This value must be between 1 and 3, inclusive.
If not set, defaults to
:py:attr:`AddressGenerator.DEFAULT_SECURITY_LEVEL`.
:return:
Dict with the following structure::
{
'addresses': List[Address],
List of generated addresses.
Note that this list may include unused
addresses.
'balance': int,
Total account balance. Might be 0.
'bundles': List[Bundle],
List of bundles with transactions to/from this
account.
} | Below is the the instruction that describes the task:
### Input:
More comprehensive version of :py:meth:`get_transfers` that
returns addresses and account balance in addition to bundles.
This function is useful in getting all the relevant information
of your account.
:param start:
Starting key index.
:param stop:
Stop before this index.
Note that this parameter behaves like the ``stop`` attribute
in a :py:class:`slice` object; the stop index is *not*
included in the result.
If ``None`` (default), then this method will check every
address until it finds one without any transfers.
:param inclusion_states:
Whether to also fetch the inclusion states of the transfers.
This requires an additional API call to the node, so it is
disabled by default.
:param security_level:
Number of iterations to use when generating new addresses
(see :py:meth:`get_new_addresses`).
This value must be between 1 and 3, inclusive.
If not set, defaults to
:py:attr:`AddressGenerator.DEFAULT_SECURITY_LEVEL`.
:return:
Dict with the following structure::
{
'addresses': List[Address],
List of generated addresses.
Note that this list may include unused
addresses.
'balance': int,
Total account balance. Might be 0.
'bundles': List[Bundle],
List of bundles with transactions to/from this
account.
}
### Response:
def get_account_data(self, start=0, stop=None, inclusion_states=False, security_level=None):
# type: (int, Optional[int], bool, Optional[int]) -> dict
"""
More comprehensive version of :py:meth:`get_transfers` that
returns addresses and account balance in addition to bundles.
This function is useful in getting all the relevant information
of your account.
:param start:
Starting key index.
:param stop:
Stop before this index.
Note that this parameter behaves like the ``stop`` attribute
in a :py:class:`slice` object; the stop index is *not*
included in the result.
If ``None`` (default), then this method will check every
address until it finds one without any transfers.
:param inclusion_states:
Whether to also fetch the inclusion states of the transfers.
This requires an additional API call to the node, so it is
disabled by default.
:param security_level:
Number of iterations to use when generating new addresses
(see :py:meth:`get_new_addresses`).
This value must be between 1 and 3, inclusive.
If not set, defaults to
:py:attr:`AddressGenerator.DEFAULT_SECURITY_LEVEL`.
:return:
Dict with the following structure::
{
'addresses': List[Address],
List of generated addresses.
Note that this list may include unused
addresses.
'balance': int,
Total account balance. Might be 0.
'bundles': List[Bundle],
List of bundles with transactions to/from this
account.
}
"""
return extended.GetAccountDataCommand(self.adapter)(
seed=self.seed,
start=start,
stop=stop,
inclusionStates=inclusion_states,
security_level=security_level
) |
def get(self, orig_key):
"""Get cache entry for key, or return None."""
resp = requests.Response()
key = self._clean_key(orig_key)
path = os.path.join(self.cache_dir, key)
try:
with open(path, 'rb') as f:
# read lines one at a time
while True:
line = f.readline().decode('utf8').strip('\r\n')
# set headers
if self.check_last_modified and re.search("last-modified", line, flags=re.I):
# line contains last modified header
head_resp = requests.head(orig_key)
try:
new_lm = head_resp.headers['last-modified']
old_lm = line[string.find(line, ':') + 1:].strip()
if old_lm != new_lm:
# last modified timestamps don't match, need to download again
return None
except KeyError:
# no last modified header present, so redownload
return None
header = self._header_re.match(line)
if header:
resp.headers[header.group(1)] = header.group(2)
else:
break
# everything left is the real content
resp._content = f.read()
# status & encoding will be in headers, but are faked
# need to split spaces out of status to get code (e.g. '200 OK')
resp.status_code = int(resp.headers.pop('status').split(' ')[0])
resp.encoding = resp.headers.pop('encoding')
resp.url = resp.headers.get('content-location', orig_key)
# TODO: resp.request = request
return resp
except IOError:
return None | Get cache entry for key, or return None. | Below is the the instruction that describes the task:
### Input:
Get cache entry for key, or return None.
### Response:
def get(self, orig_key):
"""Get cache entry for key, or return None."""
resp = requests.Response()
key = self._clean_key(orig_key)
path = os.path.join(self.cache_dir, key)
try:
with open(path, 'rb') as f:
# read lines one at a time
while True:
line = f.readline().decode('utf8').strip('\r\n')
# set headers
if self.check_last_modified and re.search("last-modified", line, flags=re.I):
# line contains last modified header
head_resp = requests.head(orig_key)
try:
new_lm = head_resp.headers['last-modified']
old_lm = line[string.find(line, ':') + 1:].strip()
if old_lm != new_lm:
# last modified timestamps don't match, need to download again
return None
except KeyError:
# no last modified header present, so redownload
return None
header = self._header_re.match(line)
if header:
resp.headers[header.group(1)] = header.group(2)
else:
break
# everything left is the real content
resp._content = f.read()
# status & encoding will be in headers, but are faked
# need to split spaces out of status to get code (e.g. '200 OK')
resp.status_code = int(resp.headers.pop('status').split(' ')[0])
resp.encoding = resp.headers.pop('encoding')
resp.url = resp.headers.get('content-location', orig_key)
# TODO: resp.request = request
return resp
except IOError:
return None |
def p_req_section(self, p):
'''req_section : REQUIREMENTS ASSIGN_EQUAL LCURLY string_list RCURLY SEMI
| REQUIREMENTS LCURLY string_list RCURLY SEMI
| empty'''
if len(p) == 7:
p[0] = p[4]
elif len(p) == 6:
p[0] = p[3]
self._print_verbose('requirements') | req_section : REQUIREMENTS ASSIGN_EQUAL LCURLY string_list RCURLY SEMI
| REQUIREMENTS LCURLY string_list RCURLY SEMI
| empty | Below is the the instruction that describes the task:
### Input:
req_section : REQUIREMENTS ASSIGN_EQUAL LCURLY string_list RCURLY SEMI
| REQUIREMENTS LCURLY string_list RCURLY SEMI
| empty
### Response:
def p_req_section(self, p):
'''req_section : REQUIREMENTS ASSIGN_EQUAL LCURLY string_list RCURLY SEMI
| REQUIREMENTS LCURLY string_list RCURLY SEMI
| empty'''
if len(p) == 7:
p[0] = p[4]
elif len(p) == 6:
p[0] = p[3]
self._print_verbose('requirements') |
def bot_has_any_role(*items):
"""Similar to :func:`.has_any_role` except checks if the bot itself has
any of the roles listed.
This check raises one of two special exceptions, :exc:`.BotMissingAnyRole` if the bot
is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1.0
Raise :exc:`.BotMissingAnyRole` or :exc:`.NoPrivateMessage`
instead of generic checkfailure
"""
def predicate(ctx):
ch = ctx.channel
if not isinstance(ch, discord.abc.GuildChannel):
raise NoPrivateMessage()
me = ch.guild.me
getter = functools.partial(discord.utils.get, me.roles)
if any(getter(id=item) is not None if isinstance(item, int) else getter(name=item) is not None for item in items):
return True
raise BotMissingAnyRole(items)
return check(predicate) | Similar to :func:`.has_any_role` except checks if the bot itself has
any of the roles listed.
This check raises one of two special exceptions, :exc:`.BotMissingAnyRole` if the bot
is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1.0
Raise :exc:`.BotMissingAnyRole` or :exc:`.NoPrivateMessage`
instead of generic checkfailure | Below is the instruction that describes the task:
### Input:
Similar to :func:`.has_any_role` except checks if the bot itself has
any of the roles listed.
This check raises one of two special exceptions, :exc:`.BotMissingAnyRole` if the bot
is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
Both inherit from :exc:`.CheckFailure`.
.. versionchanged:: 1.1.0
Raise :exc:`.BotMissingAnyRole` or :exc:`.NoPrivateMessage`
instead of generic checkfailure
### Response:
def bot_has_any_role(*items):
    """Similar to :func:`.has_any_role` except checks if the bot itself has
    any of the roles listed.

    This check raises one of two special exceptions, :exc:`.BotMissingAnyRole` if the bot
    is missing all roles, or :exc:`.NoPrivateMessage` if it is used in a private message.
    Both inherit from :exc:`.CheckFailure`.

    .. versionchanged:: 1.1.0

        Raise :exc:`.BotMissingAnyRole` or :exc:`.NoPrivateMessage`
        instead of generic checkfailure
    """
    def predicate(ctx):
        ch = ctx.channel
        # Role checks only make sense inside a guild channel.
        if not isinstance(ch, discord.abc.GuildChannel):
            raise NoPrivateMessage()
        me = ch.guild.me
        # Look the bot's roles up by id for int items, by name otherwise.
        getter = functools.partial(discord.utils.get, me.roles)
        if any(getter(id=item) is not None if isinstance(item, int) else getter(name=item) is not None for item in items):
            return True
        raise BotMissingAnyRole(items)
    return check(predicate)
def degrees_dir(CIJ):
'''
Node degree is the number of links connected to the node. The indegree
is the number of inward links and the outdegree is the number of
outward links.
Parameters
----------
CIJ : NxN np.ndarray
directed binary/weighted connection matrix
Returns
-------
id : Nx1 np.ndarray
node in-degree
od : Nx1 np.ndarray
node out-degree
deg : Nx1 np.ndarray
node degree (in-degree + out-degree)
Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
Weight information is discarded.
'''
CIJ = binarize(CIJ, copy=True) # ensure CIJ is binary
id = np.sum(CIJ, axis=0) # indegree = column sum of CIJ
od = np.sum(CIJ, axis=1) # outdegree = row sum of CIJ
deg = id + od # degree = indegree+outdegree
return id, od, deg | Node degree is the number of links connected to the node. The indegree
is the number of inward links and the outdegree is the number of
outward links.
Parameters
----------
CIJ : NxN np.ndarray
directed binary/weighted connection matrix
Returns
-------
id : Nx1 np.ndarray
node in-degree
od : Nx1 np.ndarray
node out-degree
deg : Nx1 np.ndarray
node degree (in-degree + out-degree)
Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
Weight information is discarded. | Below is the instruction that describes the task:
### Input:
Node degree is the number of links connected to the node. The indegree
is the number of inward links and the outdegree is the number of
outward links.
Parameters
----------
CIJ : NxN np.ndarray
directed binary/weighted connection matrix
Returns
-------
id : Nx1 np.ndarray
node in-degree
od : Nx1 np.ndarray
node out-degree
deg : Nx1 np.ndarray
node degree (in-degree + out-degree)
Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
Weight information is discarded.
### Response:
def degrees_dir(CIJ):
    '''
    Node degree is the number of links connected to a node.  For a directed
    graph the indegree counts inward links and the outdegree counts outward
    links.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        directed binary/weighted connection matrix

    Returns
    -------
    id : Nx1 np.ndarray
        node in-degree
    od : Nx1 np.ndarray
        node out-degree
    deg : Nx1 np.ndarray
        node degree (in-degree + out-degree)

    Notes
    -----
    Inputs are assumed to be on the columns of the CIJ matrix.
    Weight information is discarded.
    '''
    adjacency = binarize(CIJ, copy=True)     # drop weights, keep topology
    in_degree = np.sum(adjacency, axis=0)    # column sums -> indegree
    out_degree = np.sum(adjacency, axis=1)   # row sums -> outdegree
    return in_degree, out_degree, in_degree + out_degree
def getprefix(self, u):
"""
Get the prefix for the specified namespace (uri)
@param u: A namespace uri.
@type u: str
@return: The namspace.
@rtype: (prefix, uri).
"""
for ns in Namespace.all:
if u == ns[1]:
return ns[0]
for ns in self.prefixes:
if u == ns[1]:
return ns[0]
raise Exception('ns (%s) not mapped' % u) | Get the prefix for the specified namespace (uri)
@param u: A namespace uri.
@type u: str
@return: The namspace.
@rtype: (prefix, uri). | Below is the the instruction that describes the task:
### Input:
Get the prefix for the specified namespace (uri)
@param u: A namespace uri.
@type u: str
@return: The namspace.
@rtype: (prefix, uri).
### Response:
def getprefix(self, u):
    """
    Return the prefix mapped to the specified namespace URI.
    @param u: A namespace uri.
    @type u: str
    @return: The prefix registered for I{u}.
    @raise Exception: when I{u} is not mapped.
    """
    # Search the global registry first, then this instance's own prefix
    # mappings, preserving the original lookup order.
    for registry in (Namespace.all, self.prefixes):
        for mapping in registry:
            if mapping[1] == u:
                return mapping[0]
    raise Exception('ns (%s) not mapped' % u)
def stream(self, recurring=values.unset, trigger_by=values.unset,
usage_category=values.unset, limit=None, page_size=None):
"""
Streams TriggerInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param TriggerInstance.Recurring recurring: The frequency of recurring UsageTriggers to read
:param TriggerInstance.TriggerField trigger_by: The trigger field of the UsageTriggers to read
:param TriggerInstance.UsageCategory usage_category: The usage category of the UsageTriggers to read
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.trigger.TriggerInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
recurring=recurring,
trigger_by=trigger_by,
usage_category=usage_category,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit']) | Streams TriggerInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param TriggerInstance.Recurring recurring: The frequency of recurring UsageTriggers to read
:param TriggerInstance.TriggerField trigger_by: The trigger field of the UsageTriggers to read
:param TriggerInstance.UsageCategory usage_category: The usage category of the UsageTriggers to read
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.trigger.TriggerInstance] | Below is the the instruction that describes the task:
### Input:
Streams TriggerInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param TriggerInstance.Recurring recurring: The frequency of recurring UsageTriggers to read
:param TriggerInstance.TriggerField trigger_by: The trigger field of the UsageTriggers to read
:param TriggerInstance.UsageCategory usage_category: The usage category of the UsageTriggers to read
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.trigger.TriggerInstance]
### Response:
def stream(self, recurring=values.unset, trigger_by=values.unset,
           usage_category=values.unset, limit=None, page_size=None):
    """
    Streams TriggerInstance records from the API as a generator stream.
    Records are fetched lazily, page by page, until the limit is reached,
    which keeps memory use low.

    :param TriggerInstance.Recurring recurring: The frequency of recurring UsageTriggers to read
    :param TriggerInstance.TriggerField trigger_by: The trigger field of the UsageTriggers to read
    :param TriggerInstance.UsageCategory usage_category: The usage category of the UsageTriggers to read
    :param int limit: Upper limit for the number of records to return. stream()
                      guarantees to never return more than limit.  Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records.  If no page_size is defined
                          but a limit is defined, stream() will attempt to read the
                          limit with the most efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.api.v2010.account.usage.trigger.TriggerInstance]
    """
    read_limits = self._version.read_limits(limit, page_size)
    first_page = self.page(
        recurring=recurring,
        trigger_by=trigger_by,
        usage_category=usage_category,
        page_size=read_limits['page_size'],
    )
    return self._version.stream(first_page,
                                read_limits['limit'],
                                read_limits['page_limit'])
def example_number_for_non_geo_entity(country_calling_code):
"""Gets a valid number for the specified country calling code for a non-geographical entity.
Arguments:
country_calling_code -- The country calling code for a non-geographical entity.
Returns a valid number for the non-geographical entity. Returns None when
the metadata does not contain such information, or the country calling
code passed in does not belong to a non-geographical entity.
"""
metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None)
if metadata is not None:
# For geographical entities, fixed-line data is always present. However, for non-geographical
# entities, this is not the case, so we have to go through different types to find the
# example number. We don't check fixed-line or personal number since they aren't used by
# non-geographical entities (if this changes, a unit-test will catch this.)
for desc in (metadata.mobile, metadata.toll_free, metadata.shared_cost, metadata.voip,
metadata.voicemail, metadata.uan, metadata.premium_rate):
try:
if (desc is not None and desc.example_number is not None):
return parse(_PLUS_SIGN + unicod(country_calling_code) + desc.example_number, UNKNOWN_REGION)
except NumberParseException:
pass
return None | Gets a valid number for the specified country calling code for a non-geographical entity.
Arguments:
country_calling_code -- The country calling code for a non-geographical entity.
Returns a valid number for the non-geographical entity. Returns None when
the metadata does not contain such information, or the country calling
code passed in does not belong to a non-geographical entity. | Below is the the instruction that describes the task:
### Input:
Gets a valid number for the specified country calling code for a non-geographical entity.
Arguments:
country_calling_code -- The country calling code for a non-geographical entity.
Returns a valid number for the non-geographical entity. Returns None when
the metadata does not contain such information, or the country calling
code passed in does not belong to a non-geographical entity.
### Response:
def example_number_for_non_geo_entity(country_calling_code):
    """Gets a valid number for the specified country calling code for a non-geographical entity.

    Arguments:
    country_calling_code -- The country calling code for a non-geographical entity.

    Returns a valid number for the non-geographical entity. Returns None when
    the metadata does not contain such information, or the country calling
    code passed in does not belong to a non-geographical entity.
    """
    metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None)
    if metadata is not None:
        # For geographical entities, fixed-line data is always present. However, for non-geographical
        # entities, this is not the case, so we have to go through different types to find the
        # example number. We don't check fixed-line or personal number since they aren't used by
        # non-geographical entities (if this changes, a unit-test will catch this.)
        for desc in (metadata.mobile, metadata.toll_free, metadata.shared_cost, metadata.voip,
                     metadata.voicemail, metadata.uan, metadata.premium_rate):
            try:
                # Prefix '+<country code>' so parse() can interpret the
                # example number without needing a default region.
                if (desc is not None and desc.example_number is not None):
                    return parse(_PLUS_SIGN + unicod(country_calling_code) + desc.example_number, UNKNOWN_REGION)
            except NumberParseException:
                # Malformed example number for this type; try the next one.
                pass
    return None
def delete_role_perm(role_id, perm_id,**kwargs):
"""
Remove a permission from a role
"""
#check_perm(kwargs.get('user_id'), 'edit_perm')
_get_perm(perm_id)
_get_role(role_id)
try:
roleperm_i = db.DBSession.query(RolePerm).filter(RolePerm.role_id==role_id, RolePerm.perm_id==perm_id).one()
db.DBSession.delete(roleperm_i)
except NoResultFound:
raise ResourceNotFoundError("Role Perm does not exist")
return 'OK' | Remove a permission from a role | Below is the the instruction that describes the task:
### Input:
Remove a permission from a role
### Response:
def delete_role_perm(role_id, perm_id,**kwargs):
    """
    Remove a permission from a role.

    :param role_id: id of the role to modify.
    :param perm_id: id of the permission to detach from the role.
    :param kwargs: request context (e.g. user_id); unused here since the
        permission check below is commented out.
    :raises ResourceNotFoundError: if the role-permission link does not exist.
    :returns: the string 'OK' on success.
    """
    #check_perm(kwargs.get('user_id'), 'edit_perm')
    # Validate that the ids exist -- presumably these raise when unknown;
    # their return values are intentionally discarded. TODO confirm.
    _get_perm(perm_id)
    _get_role(role_id)
    try:
        roleperm_i = db.DBSession.query(RolePerm).filter(RolePerm.role_id==role_id, RolePerm.perm_id==perm_id).one()
        db.DBSession.delete(roleperm_i)
    except NoResultFound:
        raise ResourceNotFoundError("Role Perm does not exist")
    return 'OK'
def return_letters_from_string(text):
"""Get letters from string only."""
out = ""
for letter in text:
if letter.isalpha():
out += letter
return out | Get letters from string only. | Below is the the instruction that describes the task:
### Input:
Get letters from string only.
### Response:
def return_letters_from_string(text):
    """Get letters from string only.

    Returns the alphabetic characters of *text*, in their original order;
    digits, punctuation and whitespace are dropped.  Uses str.isalpha, so
    non-ASCII Unicode letters are kept as well.
    """
    # ''.join over a generator avoids the quadratic cost of repeated
    # string concatenation (out += letter) in a loop.
    return "".join(ch for ch in text if ch.isalpha())
def project(self, project_id):
"""Fetch project `project_id`."""
type_ = self.guid(project_id)
url = self._build_url(type_, project_id)
if type_ in Project._types:
return Project(self._json(self._get(url), 200), self.session)
raise OSFException('{} is unrecognized type {}. Clone supports projects and registrations'.format(project_id, type_)) | Fetch project `project_id`. | Below is the the instruction that describes the task:
### Input:
Fetch project `project_id`.
### Response:
def project(self, project_id):
    """Fetch project `project_id`.

    Raises OSFException when the guid does not resolve to a project or
    registration type.
    """
    kind = self.guid(project_id)
    endpoint = self._build_url(kind, project_id)
    if kind in Project._types:
        return Project(self._json(self._get(endpoint), 200), self.session)
    raise OSFException('{} is unrecognized type {}. Clone supports projects and registrations'.format(project_id, kind))
def context_chain(self) -> List['Context']:
"""Return a list of contexts starting from this one, its parent and so on."""
contexts = []
ctx = self # type: Optional[Context]
while ctx is not None:
contexts.append(ctx)
ctx = ctx.parent
return contexts | Return a list of contexts starting from this one, its parent and so on. | Below is the the instruction that describes the task:
### Input:
Return a list of contexts starting from this one, its parent and so on.
### Response:
def context_chain(self) -> List['Context']:
    """Return a list of contexts: this one first, then each ancestor in turn."""
    chain = []  # type: List[Context]
    current = self  # type: Optional[Context]
    while current is not None:
        chain.append(current)
        current = current.parent
    return chain
def append(x: T, xs: Iterable[T]) -> Iterator[T]:
""" Append a value to an iterable.
Parameters
----------
x
An element of type T.
xs
An iterable of elements of type T.
Returns
-------
Iterator
An iterator that yields elements of *xs*, then yields *x*.
Examples
--------
>>> from delphi.utils.fp import append
>>> list(append(1, [2, 3]))
[2, 3, 1]
"""
return chain(xs, [x]) | Append a value to an iterable.
Parameters
----------
x
An element of type T.
xs
An iterable of elements of type T.
Returns
-------
Iterator
An iterator that yields elements of *xs*, then yields *x*.
Examples
--------
>>> from delphi.utils.fp import append
>>> list(append(1, [2, 3]))
[2, 3, 1] | Below is the the instruction that describes the task:
### Input:
Append a value to an iterable.
Parameters
----------
x
An element of type T.
xs
An iterable of elements of type T.
Returns
-------
Iterator
An iterator that yields elements of *xs*, then yields *x*.
Examples
--------
>>> from delphi.utils.fp import append
>>> list(append(1, [2, 3]))
[2, 3, 1]
### Response:
def append(x: T, xs: Iterable[T]) -> Iterator[T]:
    """ Append a value to an iterable.

    Parameters
    ----------
    x
        An element of type T.
    xs
        An iterable of elements of type T.

    Returns
    -------
    Iterator
        An iterator that yields elements of *xs*, then yields *x*.

    Examples
    --------
    >>> list(append(1, [2, 3]))
    [2, 3, 1]
    """
    yield from xs
    yield x
def _calc_position_for_pin(self, x, y, relative_position):
"""Determine position at fraction of x, y path.
:param x,y: two equal length lists of values describing a path.
:param relative_position: value between 0 and 1
:returns: the x, y position of the fraction (relative_position)
of the path length.
"""
try:
max_idx_x = len(x) - 1
max_idx_y = len(y) - 1
except TypeError:
return x, y
else:
assert max_idx_x == max_idx_y, \
'If x and y are iterables, they must be the same length'
if relative_position == 0:
xs, ys = x[0], y[0]
elif relative_position == 1:
xs, ys = x[max_idx_x], y[max_idx_y]
else:
if self.xmode == 'log':
x = np.log10(np.array(x))
if self.ymode == 'log':
y = np.log10(np.array(y))
rel_length = [0]
rel_length.extend(self._calc_relative_path_lengths(x, y))
idx = np.interp(relative_position, rel_length,
range(len(rel_length)))
frac, idx = modf(idx)
idx = int(idx)
if self.xmode == 'log':
xs = 10 ** (x[idx] + (x[idx + 1] - x[idx]) * frac)
else:
xs = x[idx] + (x[idx + 1] - x[idx]) * frac
if self.ymode == 'log':
ys = 10 ** (y[idx] + (y[idx + 1] - y[idx]) * frac)
else:
ys = y[idx] + (y[idx + 1] - y[idx]) * frac
return xs, ys | Determine position at fraction of x, y path.
:param x,y: two equal length lists of values describing a path.
:param relative_position: value between 0 and 1
:returns: the x, y position of the fraction (relative_position)
of the path length. | Below is the instruction that describes the task:
### Input:
Determine position at fraction of x, y path.
:param x,y: two equal length lists of values describing a path.
:param relative_position: value between 0 and 1
:returns: the x, y position of the fraction (relative_position)
of the path length.
### Response:
def _calc_position_for_pin(self, x, y, relative_position):
    """Determine position at fraction of x, y path.

    :param x,y: two equal length lists of values describing a path
        (bare scalars are also accepted and returned unchanged).
    :param relative_position: value between 0 and 1
    :returns: the x, y position of the fraction (relative_position)
        of the path length.
    """
    try:
        max_idx_x = len(x) - 1
        max_idx_y = len(y) - 1
    except TypeError:
        # x and y are scalars, not sequences: nothing to interpolate.
        return x, y
    else:
        assert max_idx_x == max_idx_y, \
            'If x and y are iterables, they must be the same length'
        if relative_position == 0:
            # Endpoints short-circuit the interpolation entirely.
            xs, ys = x[0], y[0]
        elif relative_position == 1:
            xs, ys = x[max_idx_x], y[max_idx_y]
        else:
            # Work in log10 space for log axes so the fractional position
            # matches the on-screen path length.
            if self.xmode == 'log':
                x = np.log10(np.array(x))
            if self.ymode == 'log':
                y = np.log10(np.array(y))
            # Cumulative relative path length, with an explicit 0 prepended
            # for the start of the path.
            rel_length = [0]
            rel_length.extend(self._calc_relative_path_lengths(x, y))
            # Map relative_position onto a fractional segment index.
            idx = np.interp(relative_position, rel_length,
                            range(len(rel_length)))
            # Split into integer segment index and in-segment fraction.
            frac, idx = modf(idx)
            idx = int(idx)
            # Linear interpolation inside segment [idx, idx+1]; undo the
            # log10 transform for log axes.
            if self.xmode == 'log':
                xs = 10 ** (x[idx] + (x[idx + 1] - x[idx]) * frac)
            else:
                xs = x[idx] + (x[idx + 1] - x[idx]) * frac
            if self.ymode == 'log':
                ys = 10 ** (y[idx] + (y[idx + 1] - y[idx]) * frac)
            else:
                ys = y[idx] + (y[idx + 1] - y[idx]) * frac
        return xs, ys
def compress_pdf(filepath, output_path, ghostscript_binary):
"""Compress a single PDF file.
Args:
filepath (str): Path to the PDF file.
output_path (str): Output path.
ghostscript_binary (str): Name/alias of the Ghostscript binary.
Raises:
ValueError
FileNotFoundError
"""
if not filepath.endswith(PDF_EXTENSION):
raise ValueError("Filename must end with .pdf!\n%s does not." % filepath)
try:
file_size = os.stat(filepath).st_size
if file_size < FILE_SIZE_LOWER_LIMIT:
LOGGER.info(NOT_COMPRESSING.format(filepath, file_size, FILE_SIZE_LOWER_LIMIT))
process = subprocess.Popen(['cp', filepath, output_path])
else:
LOGGER.info(COMPRESSING.format(filepath))
process = subprocess.Popen(
[ghostscript_binary, "-sDEVICE=pdfwrite",
"-dCompatabilityLevel=1.4", "-dPDFSETTINGS=/ebook",
"-dNOPAUSE", "-dQUIET", "-dBATCH",
"-sOutputFile=%s" % output_path, filepath]
)
except FileNotFoundError:
msg = GS_NOT_INSTALLED.format(ghostscript_binary)
raise FileNotFoundError(msg)
process.communicate()
LOGGER.info(FILE_DONE.format(output_path)) | Compress a single PDF file.
Args:
filepath (str): Path to the PDF file.
output_path (str): Output path.
ghostscript_binary (str): Name/alias of the Ghostscript binary.
Raises:
ValueError
FileNotFoundError | Below is the the instruction that describes the task:
### Input:
Compress a single PDF file.
Args:
filepath (str): Path to the PDF file.
output_path (str): Output path.
ghostscript_binary (str): Name/alias of the Ghostscript binary.
Raises:
ValueError
FileNotFoundError
### Response:
def compress_pdf(filepath, output_path, ghostscript_binary):
    """Compress a single PDF file.

    Files smaller than FILE_SIZE_LOWER_LIMIT are copied unchanged (not worth
    recompressing); larger files are rewritten by Ghostscript with the
    /ebook preset.

    Args:
        filepath (str): Path to the PDF file.
        output_path (str): Output path.
        ghostscript_binary (str): Name/alias of the Ghostscript binary.

    Raises:
        ValueError: If *filepath* does not end with the .pdf extension.
        FileNotFoundError: If the Ghostscript binary cannot be found.
    """
    if not filepath.endswith(PDF_EXTENSION):
        raise ValueError("Filename must end with .pdf!\n%s does not." % filepath)
    try:
        file_size = os.stat(filepath).st_size
        if file_size < FILE_SIZE_LOWER_LIMIT:
            LOGGER.info(NOT_COMPRESSING.format(filepath, file_size, FILE_SIZE_LOWER_LIMIT))
            # NOTE(review): `cp` is POSIX-only; shutil.copy would be portable.
            # Left as a subprocess to keep the communicate() flow uniform.
            process = subprocess.Popen(['cp', filepath, output_path])
        else:
            LOGGER.info(COMPRESSING.format(filepath))
            # BUG FIX: the Ghostscript option is -dCompatibilityLevel; the
            # previous spelling -dCompatabilityLevel was silently ignored.
            process = subprocess.Popen(
                [ghostscript_binary, "-sDEVICE=pdfwrite",
                 "-dCompatibilityLevel=1.4", "-dPDFSETTINGS=/ebook",
                 "-dNOPAUSE", "-dQUIET", "-dBATCH",
                 "-sOutputFile=%s" % output_path, filepath]
            )
    except FileNotFoundError:
        msg = GS_NOT_INSTALLED.format(ghostscript_binary)
        raise FileNotFoundError(msg)
    # Block until the copy/compression subprocess finishes.
    process.communicate()
    LOGGER.info(FILE_DONE.format(output_path))
def get_legal_params(self, method):
'''Given a API name, list all legal parameters using boto3 service model.'''
if method not in self.client.meta.method_to_api_mapping:
# Injected methods. Ignore.
return []
api = self.client.meta.method_to_api_mapping[method]
shape = self.client.meta.service_model.operation_model(api).input_shape
if shape is None:
# No params needed for this API.
return []
return shape.members.keys() | Given a API name, list all legal parameters using boto3 service model. | Below is the the instruction that describes the task:
### Input:
Given a API name, list all legal parameters using boto3 service model.
### Response:
def get_legal_params(self, method):
    '''Given a API name, list all legal parameters using boto3 service model.'''
    mapping = self.client.meta.method_to_api_mapping
    if method not in mapping:
        # Injected methods have no API mapping; nothing to report.
        return []
    operation = self.client.meta.service_model.operation_model(mapping[method])
    shape = operation.input_shape
    # A missing input shape means the API takes no parameters at all.
    return [] if shape is None else shape.members.keys()
def get_all_items(self):
"""
Returns all items in the combobox dictionary.
"""
return [self._widget.itemText(k) for k in range(self._widget.count())] | Returns all items in the combobox dictionary. | Below is the the instruction that describes the task:
### Input:
Returns all items in the combobox dictionary.
### Response:
def get_all_items(self):
    """
    Returns all items in the combobox dictionary.
    """
    widget = self._widget
    return [widget.itemText(index) for index in range(widget.count())]
def binary_operation_comparison(self, rule, left, right, **kwargs):
"""
Callback method for rule tree traversing. Will be called at proper time
from :py:class:`pynspect.rules.ComparisonBinOpRule.traverse` method.
:param pynspect.rules.Rule rule: Reference to rule.
:param left: Left operand for operation.
:param right: right operand for operation.
:param dict kwargs: Optional callback arguments.
"""
return '<div class="pynspect-rule-operation pynspect-rule-operation-comparison"><h3 class="pynspect-rule-operation-name">{}</h3><ul class="pynspect-rule-operation-arguments"><li class="pynspect-rule-operation-argument-left">{}</li><li class="pynspect-rule-operation-argument-right">{}</li></ul></div>'.format(rule.operation, left, right) | Callback method for rule tree traversing. Will be called at proper time
from :py:class:`pynspect.rules.ComparisonBinOpRule.traverse` method.
:param pynspect.rules.Rule rule: Reference to rule.
:param left: Left operand for operation.
:param right: right operand for operation.
:param dict kwargs: Optional callback arguments. | Below is the the instruction that describes the task:
### Input:
Callback method for rule tree traversing. Will be called at proper time
from :py:class:`pynspect.rules.ComparisonBinOpRule.traverse` method.
:param pynspect.rules.Rule rule: Reference to rule.
:param left: Left operand for operation.
:param right: right operand for operation.
:param dict kwargs: Optional callback arguments.
### Response:
def binary_operation_comparison(self, rule, left, right, **kwargs):
    """
    Callback method for rule tree traversing. Will be called at proper time
    from :py:class:`pynspect.rules.ComparisonBinOpRule.traverse` method.

    :param pynspect.rules.Rule rule: Reference to rule.
    :param left: Left operand for operation.
    :param right: right operand for operation.
    :param dict kwargs: Optional callback arguments.
    """
    template = (
        '<div class="pynspect-rule-operation pynspect-rule-operation-comparison">'
        '<h3 class="pynspect-rule-operation-name">{}</h3>'
        '<ul class="pynspect-rule-operation-arguments">'
        '<li class="pynspect-rule-operation-argument-left">{}</li>'
        '<li class="pynspect-rule-operation-argument-right">{}</li>'
        '</ul></div>'
    )
    return template.format(rule.operation, left, right)
def iselect(self, tag, limit=0):
"""Iterate the specified tags."""
for el in CSSMatch(self.selectors, tag, self.namespaces, self.flags).select(limit):
yield el | Iterate the specified tags. | Below is the the instruction that describes the task:
### Input:
Iterate the specified tags.
### Response:
def iselect(self, tag, limit=0):
    """Iterate the specified tags."""
    matcher = CSSMatch(self.selectors, tag, self.namespaces, self.flags)
    yield from matcher.select(limit)
def draw_rect(setter, x, y, w, h, color=None, aa=False):
"""Draw rectangle with top-left corner at x,y, width w and height h"""
_draw_fast_hline(setter, x, y, w, color, aa)
_draw_fast_hline(setter, x, y + h - 1, w, color, aa)
_draw_fast_vline(setter, x, y, h, color, aa)
_draw_fast_vline(setter, x + w - 1, y, h, color, aa) | Draw rectangle with top-left corner at x,y, width w and height h | Below is the the instruction that describes the task:
### Input:
Draw rectangle with top-left corner at x,y, width w and height h
### Response:
def draw_rect(setter, x, y, w, h, color=None, aa=False):
    """Draw rectangle with top-left corner at x,y, width w and height h"""
    top, bottom = y, y + h - 1
    left, right = x, x + w - 1
    _draw_fast_hline(setter, left, top, w, color, aa)
    _draw_fast_hline(setter, left, bottom, w, color, aa)
    _draw_fast_vline(setter, left, top, h, color, aa)
    _draw_fast_vline(setter, right, top, h, color, aa)
def _load_scalar_fit(self, fit_key=None, h5file=None, fit_data=None):
""" Loads a single fit
"""
if (fit_key is None) ^ (h5file is None):
raise ValueError("Either specify both fit_key and h5file, or"
" neither")
if not ((fit_key is None) ^ (fit_data is None)):
raise ValueError("Specify exactly one of fit_key and fit_data.")
if fit_data is None:
fit_data = self._read_dict(h5file[fit_key])
if 'fitType' in fit_data.keys() and fit_data['fitType'] == 'GPR':
fit = _eval_pysur.evaluate_fit.getGPRFitAndErrorEvaluator(fit_data)
else:
fit = _eval_pysur.evaluate_fit.getFitEvaluator(fit_data)
return fit | Loads a single fit | Below is the the instruction that describes the task:
### Input:
Loads a single fit
### Response:
def _load_scalar_fit(self, fit_key=None, h5file=None, fit_data=None):
    """ Loads a single fit.

    The fit may come from one of two sources: a (fit_key, h5file) pair
    naming a group in an open HDF5 file, or a ready-made fit_data dict.
    Exactly one source must be supplied.

    :param fit_key: key of the fit group inside h5file.
    :param h5file: open HDF5 file object containing the fit data.
    :param fit_data: dict of fit data, as produced by self._read_dict.
    :returns: an evaluator from _eval_pysur.evaluate_fit -- a GPR
        fit-and-error evaluator when fit_data['fitType'] == 'GPR',
        a plain fit evaluator otherwise.
    :raises ValueError: if the argument combination is inconsistent.
    """
    # fit_key and h5file must be given together or not at all (XOR check).
    if (fit_key is None) ^ (h5file is None):
        raise ValueError("Either specify both fit_key and h5file, or"
                         " neither")
    # Exactly one of fit_key and fit_data must be given (negated XOR).
    if not ((fit_key is None) ^ (fit_data is None)):
        raise ValueError("Specify exactly one of fit_key and fit_data.")
    if fit_data is None:
        fit_data = self._read_dict(h5file[fit_key])
    if 'fitType' in fit_data.keys() and fit_data['fitType'] == 'GPR':
        fit = _eval_pysur.evaluate_fit.getGPRFitAndErrorEvaluator(fit_data)
    else:
        fit = _eval_pysur.evaluate_fit.getFitEvaluator(fit_data)
    return fit
def detail_view(self, request):
"""
Renders the message view to a response.
"""
context = {
'preview': self,
}
kwargs = {}
if self.form_class:
if request.GET:
form = self.form_class(data=request.GET)
else:
form = self.form_class()
context['form'] = form
if not form.is_bound or not form.is_valid():
return render(request, 'mailviews/previews/detail.html', context)
kwargs.update(form.get_message_view_kwargs())
message_view = self.get_message_view(request, **kwargs)
message = message_view.render_to_message()
raw = message.message()
headers = OrderedDict((header, maybe_decode_header(raw[header])) for header in self.headers)
context.update({
'message': message,
'subject': message.subject,
'body': message.body,
'headers': headers,
'raw': raw.as_string(),
})
alternatives = getattr(message, 'alternatives', [])
try:
html = next(alternative[0] for alternative in alternatives
if alternative[1] == 'text/html')
context.update({
'html': html,
'escaped_html': b64encode(html.encode('utf-8')),
})
except StopIteration:
pass
return render(request, self.template_name, context) | Renders the message view to a response. | Below is the the instruction that describes the task:
### Input:
Renders the message view to a response.
### Response:
def detail_view(self, request):
"""
Renders the message view to a response.
"""
context = {
'preview': self,
}
kwargs = {}
if self.form_class:
if request.GET:
form = self.form_class(data=request.GET)
else:
form = self.form_class()
context['form'] = form
if not form.is_bound or not form.is_valid():
return render(request, 'mailviews/previews/detail.html', context)
kwargs.update(form.get_message_view_kwargs())
message_view = self.get_message_view(request, **kwargs)
message = message_view.render_to_message()
raw = message.message()
headers = OrderedDict((header, maybe_decode_header(raw[header])) for header in self.headers)
context.update({
'message': message,
'subject': message.subject,
'body': message.body,
'headers': headers,
'raw': raw.as_string(),
})
alternatives = getattr(message, 'alternatives', [])
try:
html = next(alternative[0] for alternative in alternatives
if alternative[1] == 'text/html')
context.update({
'html': html,
'escaped_html': b64encode(html.encode('utf-8')),
})
except StopIteration:
pass
return render(request, self.template_name, context) |
def login(self, login, password, url=None):
"""login page
"""
auth = self._auth(login, password)
cherrypy.session['isadmin'] = auth['isadmin']
cherrypy.session['connected'] = auth['connected']
if auth['connected']:
if auth['isadmin']:
message = \
"login success for user '%(user)s' as administrator" % {
'user': login
}
else:
message = \
"login success for user '%(user)s' as normal user" % {
'user': login
}
cherrypy.log.error(
msg=message,
severity=logging.INFO
)
cherrypy.session[SESSION_KEY] = cherrypy.request.login = login
if url is None:
redirect = "/"
else:
redirect = url
raise cherrypy.HTTPRedirect(redirect)
else:
message = "login failed for user '%(user)s'" % {
'user': login
}
cherrypy.log.error(
msg=message,
severity=logging.WARNING
)
if url is None:
qs = ''
else:
qs = '?url=' + quote_plus(url)
raise cherrypy.HTTPRedirect("/signin" + qs) | login page | Below is the the instruction that describes the task:
### Input:
login page
### Response:
def login(self, login, password, url=None):
"""login page
"""
auth = self._auth(login, password)
cherrypy.session['isadmin'] = auth['isadmin']
cherrypy.session['connected'] = auth['connected']
if auth['connected']:
if auth['isadmin']:
message = \
"login success for user '%(user)s' as administrator" % {
'user': login
}
else:
message = \
"login success for user '%(user)s' as normal user" % {
'user': login
}
cherrypy.log.error(
msg=message,
severity=logging.INFO
)
cherrypy.session[SESSION_KEY] = cherrypy.request.login = login
if url is None:
redirect = "/"
else:
redirect = url
raise cherrypy.HTTPRedirect(redirect)
else:
message = "login failed for user '%(user)s'" % {
'user': login
}
cherrypy.log.error(
msg=message,
severity=logging.WARNING
)
if url is None:
qs = ''
else:
qs = '?url=' + quote_plus(url)
raise cherrypy.HTTPRedirect("/signin" + qs) |
def train_on_batch(self, *args) -> None:
"""Trains the model on a single batch.
Args:
*args: the list of network inputs.
Last element of `args` is the batch of targets,
all previous elements are training data batches
"""
*data, labels = args
self._net.train_on_batch(data, labels) | Trains the model on a single batch.
Args:
*args: the list of network inputs.
Last element of `args` is the batch of targets,
all previous elements are training data batches | Below is the the instruction that describes the task:
### Input:
Trains the model on a single batch.
Args:
*args: the list of network inputs.
Last element of `args` is the batch of targets,
all previous elements are training data batches
### Response:
def train_on_batch(self, *args) -> None:
"""Trains the model on a single batch.
Args:
*args: the list of network inputs.
Last element of `args` is the batch of targets,
all previous elements are training data batches
"""
*data, labels = args
self._net.train_on_batch(data, labels) |
def reindex(args):
"""
%prog reindex gffile pep.fasta ref.pep.fasta
Reindex the splice isoforms (mRNA) in input GFF file, preferably
generated after PASA annotation update
In the input GFF file, there can be several types of mRNA within a locus:
* CDS matches reference, UTR extended, inherits reference mRNA ID
* CDS (slightly) different from reference, inherits reference mRNA ID
* Novel isoform added by PASA, have IDs like "LOCUS.1.1", "LOCUS.1.2"
* Multiple mRNA collapsed due to shared structure, have IDs like "LOCUS.1-LOCUS.1.1"
In the case of multiple mRNA which have inherited the same reference mRNA ID,
break ties by comparing the new protein with the reference protein using
EMBOSS `needle` to decide which mRNA retains ID and which is assigned a new ID.
All mRNA identifiers should follow the AGI naming conventions.
When reindexing the isoform identifiers, order mRNA based on:
* decreasing transcript length
* decreasing support from multiple input datasets used to run pasa.consolidate()
"""
from jcvi.formats.gff import make_index
from jcvi.formats.fasta import Fasta
from jcvi.apps.emboss import needle
from jcvi.formats.base import FileShredder
from tempfile import mkstemp
p = OptionParser(reindex.__doc__)
p.add_option("--scores", type="str", \
help="read from existing EMBOSS `needle` scores file")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
gffile, pep, refpep, = args
gffdb = make_index(gffile)
reffasta = Fasta(refpep)
if not opts.scores:
fh, pairsfile = mkstemp(prefix='pairs', suffix=".txt", dir=".")
fw = must_open(pairsfile, "w")
conflict, novel = AutoVivification(), {}
for gene in gffdb.features_of_type('gene', order_by=('seqid', 'start')):
geneid = atg_name(gene.id, retval='locus')
novel[geneid] = []
updated_mrna, hybrid_mrna = [], []
for mrna in gffdb.children(gene, featuretype='mRNA', order_by=('seqid', 'start')):
if re.match(atg_name_pat, mrna.id) is not None and "_" not in mrna.id:
pf, mrnaid = parse_prefix(mrna.id)
mlen = gffdb.children_bp(mrna, child_featuretype='exon')
if "-" in mrna.id:
hybrid_mrna.append((mrna.id, mrna.start, mlen, len(pf)))
else:
updated_mrna.append((mrna.id, mrna.start, mlen, len(pf)))
for mrna in sorted(updated_mrna, key=lambda k:(k[1], -k[2], -k[3])):
pf, mrnaid = parse_prefix(mrna[0])
mstart, mlen = mrna[1], mrna[2]
iso = atg_name(mrnaid, retval='iso')
newiso = "{0}{1}".format(iso, re.sub(atg_name_pat, "", mrnaid))
if iso == newiso:
if iso not in conflict[geneid]:
conflict[geneid][iso] = []
conflict[geneid][iso].append((mrna[0], iso, newiso, \
mstart, mlen, len(pf)))
else:
novel[geneid].append((mrna[0], None, newiso, \
mstart, mlen, len(pf)))
for mrna in sorted(hybrid_mrna, key=lambda k:(k[1], -k[2], -k[3])):
pf, mrnaid = parse_prefix(mrna[0])
mstart, mlen = mrna[1], mrna[2]
_iso, _newiso = [], []
for id in sorted(mrnaid.split("-")):
a = atg_name(id, retval='iso')
b = "{0}{1}".format(a, re.sub(atg_name_pat, "", id))
_iso.append(a)
_newiso.append(b)
_novel = None
newiso = "-".join(str(x) for x in set(_newiso))
for iso, niso in zip(_iso, _newiso):
if iso == niso:
if iso not in conflict[geneid]:
conflict[geneid][iso] = \
[(mrna[0], iso, newiso, mstart, mlen, len(pf))]
_novel = None
break
_novel = True
if _novel is not None:
novel[geneid].append((mrna[0], None, newiso, \
mstart, mlen, len(pf)))
if not opts.scores:
for isoform in sorted(conflict[geneid]):
mrnaid = "{0}.{1}".format(geneid, isoform)
if mrnaid in reffasta.keys():
for mrna in conflict[geneid][isoform]:
print("\t".join(str(x) for x in (mrnaid, mrna[0])), file=fw)
scoresfile = None
if not opts.scores:
fw.close()
needle([pairsfile, refpep, pep])
FileShredder([pairsfile], verbose=False)
scoresfile = "{0}.scores".format(pairsfile.rsplit(".")[0])
else:
scoresfile = opts.scores
scores = read_scores(scoresfile, sort=True, trimsuffix=False)
primary = {}
for geneid in conflict:
primary[geneid] = []
for iso in sorted(conflict[geneid]):
conflict[geneid][iso].sort(key=lambda k:(k[3], -k[4], -k[5]))
_iso = "{0}.{1}".format(geneid, iso)
if _iso not in scores:
novel[geneid].extend(conflict[geneid][iso])
continue
top_score = scores[_iso][0][1]
result = next((i for i, v in enumerate(conflict[geneid][iso]) if v[0] == top_score), None)
if result is not None:
primary[geneid].append(conflict[geneid][iso][result])
del conflict[geneid][iso][result]
if geneid not in novel:
novel[geneid] = []
novel[geneid].extend(conflict[geneid][iso])
novel[geneid].sort(key=lambda k:(k[3], -k[4], -k[5]))
fw = must_open(opts.outfile, 'w')
for gene in gffdb.features_of_type('gene', order_by=('seqid', 'start')):
geneid = gene.id
print(gene, file=fw)
seen = []
if geneid in primary:
all_mrna = primary[geneid]
all_mrna.extend(novel[geneid])
for iso, mrna in enumerate(all_mrna):
_mrna = gffdb[mrna[0]]
_iso = mrna[1]
if mrna not in novel[geneid]:
seen.append(int(mrna[1]))
else:
mseen = 0 if len(seen) == 0 else max(seen)
_iso = (mseen + iso + 1) - len(seen)
_mrnaid = "{0}.{1}".format(geneid, _iso)
_mrna['ID'], _mrna['_old_ID'] = [_mrnaid], [_mrna.id]
print(_mrna, file=fw)
for c in gffdb.children(_mrna, order_by=('start')):
c['Parent'] = [_mrnaid]
print(c, file=fw)
else:
for feat in gffdb.children(gene, order_by=('seqid', 'start')):
print(feat, file=fw)
fw.close() | %prog reindex gffile pep.fasta ref.pep.fasta
Reindex the splice isoforms (mRNA) in input GFF file, preferably
generated after PASA annotation update
In the input GFF file, there can be several types of mRNA within a locus:
* CDS matches reference, UTR extended, inherits reference mRNA ID
* CDS (slightly) different from reference, inherits reference mRNA ID
* Novel isoform added by PASA, have IDs like "LOCUS.1.1", "LOCUS.1.2"
* Multiple mRNA collapsed due to shared structure, have IDs like "LOCUS.1-LOCUS.1.1"
In the case of multiple mRNA which have inherited the same reference mRNA ID,
break ties by comparing the new protein with the reference protein using
EMBOSS `needle` to decide which mRNA retains ID and which is assigned a new ID.
All mRNA identifiers should follow the AGI naming conventions.
When reindexing the isoform identifiers, order mRNA based on:
* decreasing transcript length
* decreasing support from multiple input datasets used to run pasa.consolidate() | Below is the the instruction that describes the task:
### Input:
%prog reindex gffile pep.fasta ref.pep.fasta
Reindex the splice isoforms (mRNA) in input GFF file, preferably
generated after PASA annotation update
In the input GFF file, there can be several types of mRNA within a locus:
* CDS matches reference, UTR extended, inherits reference mRNA ID
* CDS (slightly) different from reference, inherits reference mRNA ID
* Novel isoform added by PASA, have IDs like "LOCUS.1.1", "LOCUS.1.2"
* Multiple mRNA collapsed due to shared structure, have IDs like "LOCUS.1-LOCUS.1.1"
In the case of multiple mRNA which have inherited the same reference mRNA ID,
break ties by comparing the new protein with the reference protein using
EMBOSS `needle` to decide which mRNA retains ID and which is assigned a new ID.
All mRNA identifiers should follow the AGI naming conventions.
When reindexing the isoform identifiers, order mRNA based on:
* decreasing transcript length
* decreasing support from multiple input datasets used to run pasa.consolidate()
### Response:
def reindex(args):
"""
%prog reindex gffile pep.fasta ref.pep.fasta
Reindex the splice isoforms (mRNA) in input GFF file, preferably
generated after PASA annotation update
In the input GFF file, there can be several types of mRNA within a locus:
* CDS matches reference, UTR extended, inherits reference mRNA ID
* CDS (slightly) different from reference, inherits reference mRNA ID
* Novel isoform added by PASA, have IDs like "LOCUS.1.1", "LOCUS.1.2"
* Multiple mRNA collapsed due to shared structure, have IDs like "LOCUS.1-LOCUS.1.1"
In the case of multiple mRNA which have inherited the same reference mRNA ID,
break ties by comparing the new protein with the reference protein using
EMBOSS `needle` to decide which mRNA retains ID and which is assigned a new ID.
All mRNA identifiers should follow the AGI naming conventions.
When reindexing the isoform identifiers, order mRNA based on:
* decreasing transcript length
* decreasing support from multiple input datasets used to run pasa.consolidate()
"""
from jcvi.formats.gff import make_index
from jcvi.formats.fasta import Fasta
from jcvi.apps.emboss import needle
from jcvi.formats.base import FileShredder
from tempfile import mkstemp
p = OptionParser(reindex.__doc__)
p.add_option("--scores", type="str", \
help="read from existing EMBOSS `needle` scores file")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
gffile, pep, refpep, = args
gffdb = make_index(gffile)
reffasta = Fasta(refpep)
if not opts.scores:
fh, pairsfile = mkstemp(prefix='pairs', suffix=".txt", dir=".")
fw = must_open(pairsfile, "w")
conflict, novel = AutoVivification(), {}
for gene in gffdb.features_of_type('gene', order_by=('seqid', 'start')):
geneid = atg_name(gene.id, retval='locus')
novel[geneid] = []
updated_mrna, hybrid_mrna = [], []
for mrna in gffdb.children(gene, featuretype='mRNA', order_by=('seqid', 'start')):
if re.match(atg_name_pat, mrna.id) is not None and "_" not in mrna.id:
pf, mrnaid = parse_prefix(mrna.id)
mlen = gffdb.children_bp(mrna, child_featuretype='exon')
if "-" in mrna.id:
hybrid_mrna.append((mrna.id, mrna.start, mlen, len(pf)))
else:
updated_mrna.append((mrna.id, mrna.start, mlen, len(pf)))
for mrna in sorted(updated_mrna, key=lambda k:(k[1], -k[2], -k[3])):
pf, mrnaid = parse_prefix(mrna[0])
mstart, mlen = mrna[1], mrna[2]
iso = atg_name(mrnaid, retval='iso')
newiso = "{0}{1}".format(iso, re.sub(atg_name_pat, "", mrnaid))
if iso == newiso:
if iso not in conflict[geneid]:
conflict[geneid][iso] = []
conflict[geneid][iso].append((mrna[0], iso, newiso, \
mstart, mlen, len(pf)))
else:
novel[geneid].append((mrna[0], None, newiso, \
mstart, mlen, len(pf)))
for mrna in sorted(hybrid_mrna, key=lambda k:(k[1], -k[2], -k[3])):
pf, mrnaid = parse_prefix(mrna[0])
mstart, mlen = mrna[1], mrna[2]
_iso, _newiso = [], []
for id in sorted(mrnaid.split("-")):
a = atg_name(id, retval='iso')
b = "{0}{1}".format(a, re.sub(atg_name_pat, "", id))
_iso.append(a)
_newiso.append(b)
_novel = None
newiso = "-".join(str(x) for x in set(_newiso))
for iso, niso in zip(_iso, _newiso):
if iso == niso:
if iso not in conflict[geneid]:
conflict[geneid][iso] = \
[(mrna[0], iso, newiso, mstart, mlen, len(pf))]
_novel = None
break
_novel = True
if _novel is not None:
novel[geneid].append((mrna[0], None, newiso, \
mstart, mlen, len(pf)))
if not opts.scores:
for isoform in sorted(conflict[geneid]):
mrnaid = "{0}.{1}".format(geneid, isoform)
if mrnaid in reffasta.keys():
for mrna in conflict[geneid][isoform]:
print("\t".join(str(x) for x in (mrnaid, mrna[0])), file=fw)
scoresfile = None
if not opts.scores:
fw.close()
needle([pairsfile, refpep, pep])
FileShredder([pairsfile], verbose=False)
scoresfile = "{0}.scores".format(pairsfile.rsplit(".")[0])
else:
scoresfile = opts.scores
scores = read_scores(scoresfile, sort=True, trimsuffix=False)
primary = {}
for geneid in conflict:
primary[geneid] = []
for iso in sorted(conflict[geneid]):
conflict[geneid][iso].sort(key=lambda k:(k[3], -k[4], -k[5]))
_iso = "{0}.{1}".format(geneid, iso)
if _iso not in scores:
novel[geneid].extend(conflict[geneid][iso])
continue
top_score = scores[_iso][0][1]
result = next((i for i, v in enumerate(conflict[geneid][iso]) if v[0] == top_score), None)
if result is not None:
primary[geneid].append(conflict[geneid][iso][result])
del conflict[geneid][iso][result]
if geneid not in novel:
novel[geneid] = []
novel[geneid].extend(conflict[geneid][iso])
novel[geneid].sort(key=lambda k:(k[3], -k[4], -k[5]))
fw = must_open(opts.outfile, 'w')
for gene in gffdb.features_of_type('gene', order_by=('seqid', 'start')):
geneid = gene.id
print(gene, file=fw)
seen = []
if geneid in primary:
all_mrna = primary[geneid]
all_mrna.extend(novel[geneid])
for iso, mrna in enumerate(all_mrna):
_mrna = gffdb[mrna[0]]
_iso = mrna[1]
if mrna not in novel[geneid]:
seen.append(int(mrna[1]))
else:
mseen = 0 if len(seen) == 0 else max(seen)
_iso = (mseen + iso + 1) - len(seen)
_mrnaid = "{0}.{1}".format(geneid, _iso)
_mrna['ID'], _mrna['_old_ID'] = [_mrnaid], [_mrna.id]
print(_mrna, file=fw)
for c in gffdb.children(_mrna, order_by=('start')):
c['Parent'] = [_mrnaid]
print(c, file=fw)
else:
for feat in gffdb.children(gene, order_by=('seqid', 'start')):
print(feat, file=fw)
fw.close() |
def connect_array(self, address, connection_key, connection_type, **kwargs):
"""Connect this array with another one.
:param address: IP address or DNS name of other array.
:type address: str
:param connection_key: Connection key of other array.
:type connection_key: str
:param connection_type: Type(s) of connection desired.
:type connection_type: list
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST array/connection**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection to the other array.
:rtype: ResponseDict
.. note::
Currently, the only type of connection is "replication".
.. note::
Requires use of REST API 1.2 or later.
"""
data = {"management_address": address,
"connection_key": connection_key,
"type": connection_type}
data.update(kwargs)
return self._request("POST", "array/connection", data) | Connect this array with another one.
:param address: IP address or DNS name of other array.
:type address: str
:param connection_key: Connection key of other array.
:type connection_key: str
:param connection_type: Type(s) of connection desired.
:type connection_type: list
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST array/connection**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection to the other array.
:rtype: ResponseDict
.. note::
Currently, the only type of connection is "replication".
.. note::
Requires use of REST API 1.2 or later. | Below is the the instruction that describes the task:
### Input:
Connect this array with another one.
:param address: IP address or DNS name of other array.
:type address: str
:param connection_key: Connection key of other array.
:type connection_key: str
:param connection_type: Type(s) of connection desired.
:type connection_type: list
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST array/connection**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection to the other array.
:rtype: ResponseDict
.. note::
Currently, the only type of connection is "replication".
.. note::
Requires use of REST API 1.2 or later.
### Response:
def connect_array(self, address, connection_key, connection_type, **kwargs):
"""Connect this array with another one.
:param address: IP address or DNS name of other array.
:type address: str
:param connection_key: Connection key of other array.
:type connection_key: str
:param connection_type: Type(s) of connection desired.
:type connection_type: list
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST array/connection**
:type \*\*kwargs: optional
:returns: A dictionary describing the connection to the other array.
:rtype: ResponseDict
.. note::
Currently, the only type of connection is "replication".
.. note::
Requires use of REST API 1.2 or later.
"""
data = {"management_address": address,
"connection_key": connection_key,
"type": connection_type}
data.update(kwargs)
return self._request("POST", "array/connection", data) |
def on_message(self, _, basic_deliver, properties, body):
"""Invoked by pika when a message is delivered from RabbitMQ.
The channel is passed for your convenience. The basic_deliver object
that is passed in carries the exchange, routing key, delivery tag
and a redelivered flag for the message. The properties passed in is
an instance of BasicProperties with the message properties and the
body is the message that was sent.
We'll json-decode the message body, and if that succeeds, call the
handler that was given to us. Messages that contain invalid json
will be discarded.
:type _: pika.channel.Channel
:type basic_deliver: pika.Spec.Basic.Deliver
:type properties: pika.Spec.BasicProperties
:type body: str|unicode
"""
logger.debug('Received message # %s from %s: %s', basic_deliver.delivery_tag, properties.app_id, body)
try:
decoded = json.loads(body.decode('-utf-8'))
except ValueError:
logger.warning('Discarding message containing invalid json: %s', body)
else:
self._handler(decoded)
self.acknowledge_message(basic_deliver.delivery_tag) | Invoked by pika when a message is delivered from RabbitMQ.
The channel is passed for your convenience. The basic_deliver object
that is passed in carries the exchange, routing key, delivery tag
and a redelivered flag for the message. The properties passed in is
an instance of BasicProperties with the message properties and the
body is the message that was sent.
We'll json-decode the message body, and if that succeeds, call the
handler that was given to us. Messages that contain invalid json
will be discarded.
:type _: pika.channel.Channel
:type basic_deliver: pika.Spec.Basic.Deliver
:type properties: pika.Spec.BasicProperties
:type body: str|unicode | Below is the the instruction that describes the task:
### Input:
Invoked by pika when a message is delivered from RabbitMQ.
The channel is passed for your convenience. The basic_deliver object
that is passed in carries the exchange, routing key, delivery tag
and a redelivered flag for the message. The properties passed in is
an instance of BasicProperties with the message properties and the
body is the message that was sent.
We'll json-decode the message body, and if that succeeds, call the
handler that was given to us. Messages that contain invalid json
will be discarded.
:type _: pika.channel.Channel
:type basic_deliver: pika.Spec.Basic.Deliver
:type properties: pika.Spec.BasicProperties
:type body: str|unicode
### Response:
def on_message(self, _, basic_deliver, properties, body):
"""Invoked by pika when a message is delivered from RabbitMQ.
The channel is passed for your convenience. The basic_deliver object
that is passed in carries the exchange, routing key, delivery tag
and a redelivered flag for the message. The properties passed in is
an instance of BasicProperties with the message properties and the
body is the message that was sent.
We'll json-decode the message body, and if that succeeds, call the
handler that was given to us. Messages that contain invalid json
will be discarded.
:type _: pika.channel.Channel
:type basic_deliver: pika.Spec.Basic.Deliver
:type properties: pika.Spec.BasicProperties
:type body: str|unicode
"""
logger.debug('Received message # %s from %s: %s', basic_deliver.delivery_tag, properties.app_id, body)
try:
decoded = json.loads(body.decode('-utf-8'))
except ValueError:
logger.warning('Discarding message containing invalid json: %s', body)
else:
self._handler(decoded)
self.acknowledge_message(basic_deliver.delivery_tag) |
def status(id):
"""
View status of all jobs in a project.
The command also accepts a specific job name.
"""
if id:
try:
experiment = ExperimentClient().get(normalize_job_name(id))
except FloydException:
experiment = ExperimentClient().get(id)
print_experiments([experiment])
else:
experiments = ExperimentClient().get_all()
print_experiments(experiments) | View status of all jobs in a project.
The command also accepts a specific job name. | Below is the the instruction that describes the task:
### Input:
View status of all jobs in a project.
The command also accepts a specific job name.
### Response:
def status(id):
"""
View status of all jobs in a project.
The command also accepts a specific job name.
"""
if id:
try:
experiment = ExperimentClient().get(normalize_job_name(id))
except FloydException:
experiment = ExperimentClient().get(id)
print_experiments([experiment])
else:
experiments = ExperimentClient().get_all()
print_experiments(experiments) |
def set_phy_mode(self, mode=IxePhyMode.ignore):
""" Set phy mode to copper or fiber.
:param mode: requested PHY mode.
"""
if isinstance(mode, IxePhyMode):
if mode.value:
self.api.call_rc('port setPhyMode {} {}'.format(mode.value, self.uri))
else:
self.api.call_rc('port setPhyMode {} {}'.format(mode, self.uri)) | Set phy mode to copper or fiber.
:param mode: requested PHY mode. | Below is the the instruction that describes the task:
### Input:
Set phy mode to copper or fiber.
:param mode: requested PHY mode.
### Response:
def set_phy_mode(self, mode=IxePhyMode.ignore):
""" Set phy mode to copper or fiber.
:param mode: requested PHY mode.
"""
if isinstance(mode, IxePhyMode):
if mode.value:
self.api.call_rc('port setPhyMode {} {}'.format(mode.value, self.uri))
else:
self.api.call_rc('port setPhyMode {} {}'.format(mode, self.uri)) |
def sun_events(latitude, longitude, date, timezone=0, zenith=None):
"""Convenience function for calculating sunrise and sunset.
Civil twilight starts/ends when the Sun's centre is 6 degrees below
the horizon.
Nautical twilight starts/ends when the Sun's centre is 12 degrees
below the horizon.
Astronomical twilight starts/ends when the Sun's centre is 18 degrees below
the horizon.
Args:
latitude (float): Location's latitude
longitude (float): Location's longitude
date (datetime.date): Calculate rise or set for given date
timezone (int): Offset from UTC in minutes
zenith (str): Calculate rise/set events, or twilight times
Returns:
tuple of datetime.time: The time for the given events in the specified
timezone
"""
return (sun_rise_set(latitude, longitude, date, 'rise', timezone, zenith),
sun_rise_set(latitude, longitude, date, 'set', timezone, zenith)) | Convenience function for calculating sunrise and sunset.
Civil twilight starts/ends when the Sun's centre is 6 degrees below
the horizon.
Nautical twilight starts/ends when the Sun's centre is 12 degrees
below the horizon.
Astronomical twilight starts/ends when the Sun's centre is 18 degrees below
the horizon.
Args:
latitude (float): Location's latitude
longitude (float): Location's longitude
date (datetime.date): Calculate rise or set for given date
timezone (int): Offset from UTC in minutes
zenith (str): Calculate rise/set events, or twilight times
Returns:
tuple of datetime.time: The time for the given events in the specified
timezone | Below is the the instruction that describes the task:
### Input:
Convenience function for calculating sunrise and sunset.
Civil twilight starts/ends when the Sun's centre is 6 degrees below
the horizon.
Nautical twilight starts/ends when the Sun's centre is 12 degrees
below the horizon.
Astronomical twilight starts/ends when the Sun's centre is 18 degrees below
the horizon.
Args:
latitude (float): Location's latitude
longitude (float): Location's longitude
date (datetime.date): Calculate rise or set for given date
timezone (int): Offset from UTC in minutes
zenith (str): Calculate rise/set events, or twilight times
Returns:
tuple of datetime.time: The time for the given events in the specified
timezone
### Response:
def sun_events(latitude, longitude, date, timezone=0, zenith=None):
"""Convenience function for calculating sunrise and sunset.
Civil twilight starts/ends when the Sun's centre is 6 degrees below
the horizon.
Nautical twilight starts/ends when the Sun's centre is 12 degrees
below the horizon.
Astronomical twilight starts/ends when the Sun's centre is 18 degrees below
the horizon.
Args:
latitude (float): Location's latitude
longitude (float): Location's longitude
date (datetime.date): Calculate rise or set for given date
timezone (int): Offset from UTC in minutes
zenith (str): Calculate rise/set events, or twilight times
Returns:
tuple of datetime.time: The time for the given events in the specified
timezone
"""
return (sun_rise_set(latitude, longitude, date, 'rise', timezone, zenith),
sun_rise_set(latitude, longitude, date, 'set', timezone, zenith)) |
def backdate(res, date=None, as_datetime=False, fmt='%Y-%m-%d'):
""" get past date based on currect date """
if res is None:
return None
if date is None:
date = datetime.datetime.now()
else:
try:
date = parse_date(date)
except Exception as e:
pass
new_date = date
periods = int("".join([s for s in res if s.isdigit()]))
if periods > 0:
if "K" in res:
new_date = date - datetime.timedelta(microseconds=periods)
elif "S" in res:
new_date = date - datetime.timedelta(seconds=periods)
elif "T" in res:
new_date = date - datetime.timedelta(minutes=periods)
elif "H" in res or "V" in res:
new_date = date - datetime.timedelta(hours=periods)
elif "W" in res:
new_date = date - datetime.timedelta(weeks=periods)
else: # days
new_date = date - datetime.timedelta(days=periods)
# not a week day:
while new_date.weekday() > 4: # Mon-Fri are 0-4
new_date = backdate(res="1D", date=new_date, as_datetime=True)
if as_datetime:
return new_date
return new_date.strftime(fmt) | get past date based on currect date | Below is the the instruction that describes the task:
### Input:
get past date based on currect date
### Response:
def backdate(res, date=None, as_datetime=False, fmt='%Y-%m-%d'):
""" get past date based on currect date """
if res is None:
return None
if date is None:
date = datetime.datetime.now()
else:
try:
date = parse_date(date)
except Exception as e:
pass
new_date = date
periods = int("".join([s for s in res if s.isdigit()]))
if periods > 0:
if "K" in res:
new_date = date - datetime.timedelta(microseconds=periods)
elif "S" in res:
new_date = date - datetime.timedelta(seconds=periods)
elif "T" in res:
new_date = date - datetime.timedelta(minutes=periods)
elif "H" in res or "V" in res:
new_date = date - datetime.timedelta(hours=periods)
elif "W" in res:
new_date = date - datetime.timedelta(weeks=periods)
else: # days
new_date = date - datetime.timedelta(days=periods)
# not a week day:
while new_date.weekday() > 4: # Mon-Fri are 0-4
new_date = backdate(res="1D", date=new_date, as_datetime=True)
if as_datetime:
return new_date
return new_date.strftime(fmt) |
def from_dict(cls, label=None, label2=None, icon=None, thumbnail=None,
path=None, selected=None, info=None, properties=None,
context_menu=None, replace_context_menu=False,
is_playable=None, info_type='video', stream_info=None):
'''A ListItem constructor for setting a lot of properties not
available in the regular __init__ method. Useful to collect all
the properties in a dict and then use the **dct to call this
method.
'''
listitem = cls(label, label2, icon, thumbnail, path)
if selected is not None:
listitem.select(selected)
if info:
listitem.set_info(info_type, info)
if is_playable:
listitem.set_is_playable(True)
if properties:
# Need to support existing tuples, but prefer to have a dict for
# properties.
if hasattr(properties, 'items'):
properties = properties.items()
for key, val in properties:
listitem.set_property(key, val)
if stream_info:
for stream_type, stream_values in stream_info.items():
listitem.add_stream_info(stream_type, stream_values)
if context_menu:
listitem.add_context_menu_items(context_menu, replace_context_menu)
return listitem | A ListItem constructor for setting a lot of properties not
available in the regular __init__ method. Useful to collect all
the properties in a dict and then use the **dct to call this
method. | Below is the the instruction that describes the task:
### Input:
A ListItem constructor for setting a lot of properties not
available in the regular __init__ method. Useful to collect all
the properties in a dict and then use the **dct to call this
method.
### Response:
def from_dict(cls, label=None, label2=None, icon=None, thumbnail=None,
path=None, selected=None, info=None, properties=None,
context_menu=None, replace_context_menu=False,
is_playable=None, info_type='video', stream_info=None):
'''A ListItem constructor for setting a lot of properties not
available in the regular __init__ method. Useful to collect all
the properties in a dict and then use the **dct to call this
method.
'''
listitem = cls(label, label2, icon, thumbnail, path)
if selected is not None:
listitem.select(selected)
if info:
listitem.set_info(info_type, info)
if is_playable:
listitem.set_is_playable(True)
if properties:
# Need to support existing tuples, but prefer to have a dict for
# properties.
if hasattr(properties, 'items'):
properties = properties.items()
for key, val in properties:
listitem.set_property(key, val)
if stream_info:
for stream_type, stream_values in stream_info.items():
listitem.add_stream_info(stream_type, stream_values)
if context_menu:
listitem.add_context_menu_items(context_menu, replace_context_menu)
return listitem |
def node_status_changed_handler(**kwargs):
""" send notification when the status of a node changes according to users's settings """
obj = kwargs['instance']
obj.old_status = kwargs['old_status'].name
obj.new_status = kwargs['new_status'].name
queryset = exclude_owner_of_node(obj)
create_notifications.delay(**{
"users": queryset,
"notification_model": Notification,
"notification_type": "node_status_changed",
"related_object": obj
})
# if node has owner send a different notification to him
if obj.user is not None:
create_notifications.delay(**{
"users": [obj.user],
"notification_model": Notification,
"notification_type": "node_own_status_changed",
"related_object": obj
}) | send notification when the status of a node changes according to users's settings | Below is the the instruction that describes the task:
### Input:
send notification when the status of a node changes according to users's settings
### Response:
def node_status_changed_handler(**kwargs):
""" send notification when the status of a node changes according to users's settings """
obj = kwargs['instance']
obj.old_status = kwargs['old_status'].name
obj.new_status = kwargs['new_status'].name
queryset = exclude_owner_of_node(obj)
create_notifications.delay(**{
"users": queryset,
"notification_model": Notification,
"notification_type": "node_status_changed",
"related_object": obj
})
# if node has owner send a different notification to him
if obj.user is not None:
create_notifications.delay(**{
"users": [obj.user],
"notification_model": Notification,
"notification_type": "node_own_status_changed",
"related_object": obj
}) |
def extract_code_from_function(function):
"""Return code handled by function."""
if not function.__name__.startswith('fix_'):
return None
code = re.sub('^fix_', '', function.__name__)
if not code:
return None
try:
int(code[1:])
except ValueError:
return None
return code | Return code handled by function. | Below is the the instruction that describes the task:
### Input:
Return code handled by function.
### Response:
def extract_code_from_function(function):
"""Return code handled by function."""
if not function.__name__.startswith('fix_'):
return None
code = re.sub('^fix_', '', function.__name__)
if not code:
return None
try:
int(code[1:])
except ValueError:
return None
return code |
def genlet(generator_function=None, prime=True):
"""
Decorator to convert a generator function to a :py:class:`~chainlink.ChainLink`
:param generator_function: the generator function to convert
:type generator_function: generator
:param prime: advance the generator to the next/first yield
:type prime: bool
When used as a decorator, this function can also be called with and without keywords.
.. code:: python
@genlet
def pingpong():
"Chainlet that passes on its value"
last = yield
while True:
last = yield last
@genlet(prime=True)
def produce():
"Chainlet that produces a value"
while True:
yield time.time()
@genlet(True)
def read(iterable):
"Chainlet that reads from an iterable"
for item in iterable:
yield item
"""
if generator_function is None:
return GeneratorLink.wraplet(prime=prime)
elif not callable(generator_function):
return GeneratorLink.wraplet(prime=generator_function)
return GeneratorLink.wraplet(prime=prime)(generator_function) | Decorator to convert a generator function to a :py:class:`~chainlink.ChainLink`
:param generator_function: the generator function to convert
:type generator_function: generator
:param prime: advance the generator to the next/first yield
:type prime: bool
When used as a decorator, this function can also be called with and without keywords.
.. code:: python
@genlet
def pingpong():
"Chainlet that passes on its value"
last = yield
while True:
last = yield last
@genlet(prime=True)
def produce():
"Chainlet that produces a value"
while True:
yield time.time()
@genlet(True)
def read(iterable):
"Chainlet that reads from an iterable"
for item in iterable:
yield item | Below is the the instruction that describes the task:
### Input:
Decorator to convert a generator function to a :py:class:`~chainlink.ChainLink`
:param generator_function: the generator function to convert
:type generator_function: generator
:param prime: advance the generator to the next/first yield
:type prime: bool
When used as a decorator, this function can also be called with and without keywords.
.. code:: python
@genlet
def pingpong():
"Chainlet that passes on its value"
last = yield
while True:
last = yield last
@genlet(prime=True)
def produce():
"Chainlet that produces a value"
while True:
yield time.time()
@genlet(True)
def read(iterable):
"Chainlet that reads from an iterable"
for item in iterable:
yield item
### Response:
def genlet(generator_function=None, prime=True):
"""
Decorator to convert a generator function to a :py:class:`~chainlink.ChainLink`
:param generator_function: the generator function to convert
:type generator_function: generator
:param prime: advance the generator to the next/first yield
:type prime: bool
When used as a decorator, this function can also be called with and without keywords.
.. code:: python
@genlet
def pingpong():
"Chainlet that passes on its value"
last = yield
while True:
last = yield last
@genlet(prime=True)
def produce():
"Chainlet that produces a value"
while True:
yield time.time()
@genlet(True)
def read(iterable):
"Chainlet that reads from an iterable"
for item in iterable:
yield item
"""
if generator_function is None:
return GeneratorLink.wraplet(prime=prime)
elif not callable(generator_function):
return GeneratorLink.wraplet(prime=generator_function)
return GeneratorLink.wraplet(prime=prime)(generator_function) |
def parse_bucket_info(domain):
"""Parse a domain name to gather the bucket name and region for an S3 bucket. Returns a tuple
(bucket_name, bucket_region) if a valid domain name, else `None`
>>> parse_bucket_info('www.riotgames.com.br.s3-website-us-west-2.amazonaws.com')
('www.riotgames.com.br', 'us-west-2')
Args:
domain (`str`): Domain name to parse
Returns:
:obj:`list` of `str`: `str`,`None`
"""
match = RGX_BUCKET.match(domain)
if match:
data = match.groupdict()
return data['bucket'], data['region'] or 'us-east-1' | Parse a domain name to gather the bucket name and region for an S3 bucket. Returns a tuple
(bucket_name, bucket_region) if a valid domain name, else `None`
>>> parse_bucket_info('www.riotgames.com.br.s3-website-us-west-2.amazonaws.com')
('www.riotgames.com.br', 'us-west-2')
Args:
domain (`str`): Domain name to parse
Returns:
:obj:`list` of `str`: `str`,`None` | Below is the the instruction that describes the task:
### Input:
Parse a domain name to gather the bucket name and region for an S3 bucket. Returns a tuple
(bucket_name, bucket_region) if a valid domain name, else `None`
>>> parse_bucket_info('www.riotgames.com.br.s3-website-us-west-2.amazonaws.com')
('www.riotgames.com.br', 'us-west-2')
Args:
domain (`str`): Domain name to parse
Returns:
:obj:`list` of `str`: `str`,`None`
### Response:
def parse_bucket_info(domain):
"""Parse a domain name to gather the bucket name and region for an S3 bucket. Returns a tuple
(bucket_name, bucket_region) if a valid domain name, else `None`
>>> parse_bucket_info('www.riotgames.com.br.s3-website-us-west-2.amazonaws.com')
('www.riotgames.com.br', 'us-west-2')
Args:
domain (`str`): Domain name to parse
Returns:
:obj:`list` of `str`: `str`,`None`
"""
match = RGX_BUCKET.match(domain)
if match:
data = match.groupdict()
return data['bucket'], data['region'] or 'us-east-1' |
def primary_mrna(entrystream, parenttype='gene'):
"""
Select a single mRNA as a representative for each protein-coding gene.
The primary mRNA is the one with the longest translation product. In cases
where multiple isoforms have the same translated length, the feature ID is
used for sorting.
This function **does not** return only mRNA features, it returns all GFF3
entry types (pragmas, features, sequences, etc). The function **does**
modify the gene features that pass through to ensure that they have at most
a single mRNA feature.
>>> reader = tag.GFF3Reader(tag.pkgdata('pdom-withseq.gff3'))
>>> filter = tag.transcript.primary_mrna(reader)
>>> for gene in tag.select.features(filter, type='gene'):
... assert gene.num_children == 1
"""
for entry in entrystream:
if not isinstance(entry, tag.Feature):
yield entry
continue
for parent in tag.select.features(entry, parenttype, traverse=True):
mrnas = [f for f in parent.children if f.type == 'mRNA']
if len(mrnas) == 0:
continue
_emplace_pmrna(mrnas, parent)
yield entry | Select a single mRNA as a representative for each protein-coding gene.
The primary mRNA is the one with the longest translation product. In cases
where multiple isoforms have the same translated length, the feature ID is
used for sorting.
This function **does not** return only mRNA features, it returns all GFF3
entry types (pragmas, features, sequences, etc). The function **does**
modify the gene features that pass through to ensure that they have at most
a single mRNA feature.
>>> reader = tag.GFF3Reader(tag.pkgdata('pdom-withseq.gff3'))
>>> filter = tag.transcript.primary_mrna(reader)
>>> for gene in tag.select.features(filter, type='gene'):
... assert gene.num_children == 1 | Below is the the instruction that describes the task:
### Input:
Select a single mRNA as a representative for each protein-coding gene.
The primary mRNA is the one with the longest translation product. In cases
where multiple isoforms have the same translated length, the feature ID is
used for sorting.
This function **does not** return only mRNA features, it returns all GFF3
entry types (pragmas, features, sequences, etc). The function **does**
modify the gene features that pass through to ensure that they have at most
a single mRNA feature.
>>> reader = tag.GFF3Reader(tag.pkgdata('pdom-withseq.gff3'))
>>> filter = tag.transcript.primary_mrna(reader)
>>> for gene in tag.select.features(filter, type='gene'):
... assert gene.num_children == 1
### Response:
def primary_mrna(entrystream, parenttype='gene'):
"""
Select a single mRNA as a representative for each protein-coding gene.
The primary mRNA is the one with the longest translation product. In cases
where multiple isoforms have the same translated length, the feature ID is
used for sorting.
This function **does not** return only mRNA features, it returns all GFF3
entry types (pragmas, features, sequences, etc). The function **does**
modify the gene features that pass through to ensure that they have at most
a single mRNA feature.
>>> reader = tag.GFF3Reader(tag.pkgdata('pdom-withseq.gff3'))
>>> filter = tag.transcript.primary_mrna(reader)
>>> for gene in tag.select.features(filter, type='gene'):
... assert gene.num_children == 1
"""
for entry in entrystream:
if not isinstance(entry, tag.Feature):
yield entry
continue
for parent in tag.select.features(entry, parenttype, traverse=True):
mrnas = [f for f in parent.children if f.type == 'mRNA']
if len(mrnas) == 0:
continue
_emplace_pmrna(mrnas, parent)
yield entry |
def get(*dataset, **kwargs):
'''
Displays properties for the given datasets.
dataset : string
name of snapshot(s), filesystem(s), or volume(s)
properties : string
comma-separated list of properties to list, defaults to all
recursive : boolean
recursively list children
depth : int
recursively list children to depth
fields : string
comma-separated list of fields to include, the name and property field will always be added
type : string
comma-separated list of types to display, where type is one of
filesystem, snapshot, volume, bookmark, or all.
source : string
comma-separated list of sources to display. Must be one of the following:
local, default, inherited, temporary, and none. The default value is all sources.
parsable : boolean
display numbers in parsable (exact) values (default = True)
.. versionadded:: 2018.3.0
.. note::
If no datasets are specified, then the command displays properties
for all datasets on the system.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.get
salt '*' zfs.get myzpool/mydataset [recursive=True|False]
salt '*' zfs.get myzpool/mydataset properties="sharenfs,mountpoint" [recursive=True|False]
salt '*' zfs.get myzpool/mydataset myzpool/myotherdataset properties=available fields=value depth=1
'''
## Configure command
# NOTE: initialize the defaults
flags = ['-H']
opts = {}
# NOTE: set extra config from kwargs
if kwargs.get('depth', False):
opts['-d'] = kwargs.get('depth')
elif kwargs.get('recursive', False):
flags.append('-r')
fields = kwargs.get('fields', 'value,source').split(',')
if 'name' in fields: # ensure name is first
fields.remove('name')
if 'property' in fields: # ensure property is second
fields.remove('property')
fields.insert(0, 'name')
fields.insert(1, 'property')
opts['-o'] = ",".join(fields)
if kwargs.get('type', False):
opts['-t'] = kwargs.get('type')
if kwargs.get('source', False):
opts['-s'] = kwargs.get('source')
# NOTE: set property_name
property_name = kwargs.get('properties', 'all')
## Get properties
res = __salt__['cmd.run_all'](
__utils__['zfs.zfs_command'](
command='get',
flags=flags,
opts=opts,
property_name=property_name,
target=list(dataset),
),
python_shell=False,
)
ret = __utils__['zfs.parse_command_result'](res)
if res['retcode'] == 0:
for ds in res['stdout'].splitlines():
ds_data = OrderedDict(list(zip(
fields,
ds.split("\t")
)))
if 'value' in ds_data:
if kwargs.get('parsable', True):
ds_data['value'] = __utils__['zfs.from_auto'](
ds_data['property'],
ds_data['value'],
)
else:
ds_data['value'] = __utils__['zfs.to_auto'](
ds_data['property'],
ds_data['value'],
convert_to_human=True,
)
if ds_data['name'] not in ret:
ret[ds_data['name']] = OrderedDict()
ret[ds_data['name']][ds_data['property']] = ds_data
del ds_data['name']
del ds_data['property']
return ret | Displays properties for the given datasets.
dataset : string
name of snapshot(s), filesystem(s), or volume(s)
properties : string
comma-separated list of properties to list, defaults to all
recursive : boolean
recursively list children
depth : int
recursively list children to depth
fields : string
comma-separated list of fields to include, the name and property field will always be added
type : string
comma-separated list of types to display, where type is one of
filesystem, snapshot, volume, bookmark, or all.
source : string
comma-separated list of sources to display. Must be one of the following:
local, default, inherited, temporary, and none. The default value is all sources.
parsable : boolean
display numbers in parsable (exact) values (default = True)
.. versionadded:: 2018.3.0
.. note::
If no datasets are specified, then the command displays properties
for all datasets on the system.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.get
salt '*' zfs.get myzpool/mydataset [recursive=True|False]
salt '*' zfs.get myzpool/mydataset properties="sharenfs,mountpoint" [recursive=True|False]
salt '*' zfs.get myzpool/mydataset myzpool/myotherdataset properties=available fields=value depth=1 | Below is the the instruction that describes the task:
### Input:
Displays properties for the given datasets.
dataset : string
name of snapshot(s), filesystem(s), or volume(s)
properties : string
comma-separated list of properties to list, defaults to all
recursive : boolean
recursively list children
depth : int
recursively list children to depth
fields : string
comma-separated list of fields to include, the name and property field will always be added
type : string
comma-separated list of types to display, where type is one of
filesystem, snapshot, volume, bookmark, or all.
source : string
comma-separated list of sources to display. Must be one of the following:
local, default, inherited, temporary, and none. The default value is all sources.
parsable : boolean
display numbers in parsable (exact) values (default = True)
.. versionadded:: 2018.3.0
.. note::
If no datasets are specified, then the command displays properties
for all datasets on the system.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.get
salt '*' zfs.get myzpool/mydataset [recursive=True|False]
salt '*' zfs.get myzpool/mydataset properties="sharenfs,mountpoint" [recursive=True|False]
salt '*' zfs.get myzpool/mydataset myzpool/myotherdataset properties=available fields=value depth=1
### Response:
def get(*dataset, **kwargs):
'''
Displays properties for the given datasets.
dataset : string
name of snapshot(s), filesystem(s), or volume(s)
properties : string
comma-separated list of properties to list, defaults to all
recursive : boolean
recursively list children
depth : int
recursively list children to depth
fields : string
comma-separated list of fields to include, the name and property field will always be added
type : string
comma-separated list of types to display, where type is one of
filesystem, snapshot, volume, bookmark, or all.
source : string
comma-separated list of sources to display. Must be one of the following:
local, default, inherited, temporary, and none. The default value is all sources.
parsable : boolean
display numbers in parsable (exact) values (default = True)
.. versionadded:: 2018.3.0
.. note::
If no datasets are specified, then the command displays properties
for all datasets on the system.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.get
salt '*' zfs.get myzpool/mydataset [recursive=True|False]
salt '*' zfs.get myzpool/mydataset properties="sharenfs,mountpoint" [recursive=True|False]
salt '*' zfs.get myzpool/mydataset myzpool/myotherdataset properties=available fields=value depth=1
'''
## Configure command
# NOTE: initialize the defaults
flags = ['-H']
opts = {}
# NOTE: set extra config from kwargs
if kwargs.get('depth', False):
opts['-d'] = kwargs.get('depth')
elif kwargs.get('recursive', False):
flags.append('-r')
fields = kwargs.get('fields', 'value,source').split(',')
if 'name' in fields: # ensure name is first
fields.remove('name')
if 'property' in fields: # ensure property is second
fields.remove('property')
fields.insert(0, 'name')
fields.insert(1, 'property')
opts['-o'] = ",".join(fields)
if kwargs.get('type', False):
opts['-t'] = kwargs.get('type')
if kwargs.get('source', False):
opts['-s'] = kwargs.get('source')
# NOTE: set property_name
property_name = kwargs.get('properties', 'all')
## Get properties
res = __salt__['cmd.run_all'](
__utils__['zfs.zfs_command'](
command='get',
flags=flags,
opts=opts,
property_name=property_name,
target=list(dataset),
),
python_shell=False,
)
ret = __utils__['zfs.parse_command_result'](res)
if res['retcode'] == 0:
for ds in res['stdout'].splitlines():
ds_data = OrderedDict(list(zip(
fields,
ds.split("\t")
)))
if 'value' in ds_data:
if kwargs.get('parsable', True):
ds_data['value'] = __utils__['zfs.from_auto'](
ds_data['property'],
ds_data['value'],
)
else:
ds_data['value'] = __utils__['zfs.to_auto'](
ds_data['property'],
ds_data['value'],
convert_to_human=True,
)
if ds_data['name'] not in ret:
ret[ds_data['name']] = OrderedDict()
ret[ds_data['name']][ds_data['property']] = ds_data
del ds_data['name']
del ds_data['property']
return ret |
def _input_templates(self):
"""Read the path template file.
"""
foo = self._config.read([self._pathfile])
if len(foo) == 1:
for k, v in self._config.items('paths'):
self.templates[k] = v
else:
raise ValueError("Could not read {0}!".format(self._pathfile))
return | Read the path template file. | Below is the the instruction that describes the task:
### Input:
Read the path template file.
### Response:
def _input_templates(self):
"""Read the path template file.
"""
foo = self._config.read([self._pathfile])
if len(foo) == 1:
for k, v in self._config.items('paths'):
self.templates[k] = v
else:
raise ValueError("Could not read {0}!".format(self._pathfile))
return |
def add_data(self, data, table, delimiter='|', bands='', clean_up=True, rename_columns={}, column_fill={}, verbose=False):
"""
Adds data to the specified database table. Column names must match table fields to insert,
however order and completeness don't matter.
Parameters
----------
data: str, array-like, astropy.table.Table
The path to an ascii file, array-like object, or table. The first row or element must
be the list of column names
table: str
The name of the table into which the data should be inserted
delimiter: str
The string to use as the delimiter when parsing the ascii file
bands: sequence
Sequence of band to look for in the data header when digesting columns of
multiple photometric measurements (e.g. ['MKO_J','MKO_H','MKO_K']) into individual
rows of data for database insertion
clean_up: bool
Run self.clean_up()
rename_columns: dict
A dictionary of the {input_col_name:desired_col_name} for table columns,
e.g. {'e_Jmag':'J_unc', 'RAJ2000':'ra'}
column_fill: dict
A dictionary of the column name and value to fill, e.g. {'instrument_id':2, 'band':'2MASS.J'}
verbose: bool
Print diagnostic messages
"""
# Store raw entry
entry, del_records = data, []
# Digest the ascii file into table
if isinstance(data, str) and os.path.isfile(data):
data = ii.read(data, delimiter=delimiter)
# Or read the sequence of data elements into a table
elif isinstance(data, (list, tuple, np.ndarray)):
data = ii.read(['|'.join(map(str, row)) for row in data], data_start=1, delimiter='|')
# Or convert pandas dataframe to astropy table
elif isinstance(data, pd.core.frame.DataFrame):
data = at.Table.from_pandas(data)
# Or if it's already an astropy table
elif isinstance(data, at.Table):
pass
else:
data = None
if data:
# Rename columns
if isinstance(rename_columns,str):
rename_columns = astrocat.default_rename_columns(rename_columns)
for input_name,new_name in rename_columns.items():
data.rename_column(input_name,new_name)
# Add column fills
if isinstance(column_fill,str):
column_fill = astrocat.default_column_fill(column_fill)
for colname,fill_value in column_fill.items():
data[colname] = [fill_value]*len(data)
# Get list of all columns and make an empty table for new records
metadata = self.query("PRAGMA table_info({})".format(table), fmt='table')
columns, types, required = [np.array(metadata[n]) for n in ['name', 'type', 'notnull']]
new_records = at.Table(names=columns, dtype=[type_dict[t] for t in types])
# Fix column dtypes and blanks
for col in data.colnames:
# Convert data dtypes to those of the existing table
try:
temp = data[col].astype(new_records[col].dtype)
data.replace_column(col, temp)
except KeyError:
continue
# If a row contains photometry for multiple bands, use the *multiband argument and execute this
if bands and table.lower() == 'photometry':
# Pull out columns that are band names
for b in list(set(bands) & set(data.colnames)):
try:
# Get the repeated data plus the band data and rename the columns
band = data[list(set(columns) & set(data.colnames)) + [b, b + '_unc']]
for suf in ['', '_unc']:
band.rename_column(b+suf, 'magnitude'+suf)
temp = band['magnitude'+suf].astype(new_records['magnitude'+suf].dtype)
band.replace_column('magnitude'+suf, temp)
band.add_column(at.Column([b] * len(band), name='band', dtype='O'))
# Add the band data to the list of new_records
new_records = at.vstack([new_records, band])
except IOError:
pass
else:
# Inject data into full database table format
new_records = at.vstack([new_records, data])[new_records.colnames]
# Reject rows that fail column requirements, e.g. NOT NULL fields like 'source_id'
for r in columns[np.where(np.logical_and(required, columns != 'id'))]:
# Null values...
new_records = new_records[np.where(new_records[r])]
# Masked values...
try:
new_records = new_records[~new_records[r].mask]
except:
pass
# NaN values...
if new_records.dtype[r] in (int, float):
new_records = new_records[~np.isnan(new_records[r])]
# For spectra, try to populate the table by reading the FITS header
if table.lower() == 'spectra':
for n, new_rec in enumerate(new_records):
# Convert relative path to absolute path
relpath = new_rec['spectrum']
if relpath.startswith('$'):
abspath = os.popen('echo {}'.format(relpath.split('/')[0])).read()[:-1]
if abspath:
new_rec['spectrum'] = relpath.replace(relpath.split('/')[0], abspath)
# Test if the file exists and try to pull metadata from the FITS header
if os.path.isfile(new_rec['spectrum']):
new_records[n]['spectrum'] = relpath
new_records[n] = _autofill_spec_record(new_rec)
else:
print('Error adding the spectrum at {}'.format(new_rec['spectrum']))
del_records.append(n)
# Remove bad records from the table
new_records.remove_rows(del_records)
# For images, try to populate the table by reading the FITS header
if table.lower() == 'images':
for n, new_rec in enumerate(new_records):
# Convert relative path to absolute path
relpath = new_rec['image']
if relpath.startswith('$'):
abspath = os.popen('echo {}'.format(relpath.split('/')[0])).read()[:-1]
if abspath:
new_rec['image'] = relpath.replace(relpath.split('/')[0], abspath)
# Test if the file exists and try to pull metadata from the FITS header
if os.path.isfile(new_rec['image']):
new_records[n]['image'] = relpath
new_records[n] = _autofill_spec_record(new_rec)
else:
print('Error adding the image at {}'.format(new_rec['image']))
del_records.append(n)
# Remove bad records from the table
new_records.remove_rows(del_records)
# Get some new row ids for the good records
rowids = self._lowest_rowids(table, len(new_records))
# Add the new records
keepers, rejects = [], []
for N, new_rec in enumerate(new_records):
new_rec = list(new_rec)
new_rec[0] = rowids[N]
for n, col in enumerate(new_rec):
if type(col) == np.int64 and sys.version_info[0] >= 3:
# Fix for Py3 and sqlite3 issue with numpy types
new_rec[n] = col.item()
if type(col) == np.ma.core.MaskedConstant:
new_rec[n] = None
try:
self.modify("INSERT INTO {} VALUES({})".format(table, ','.join('?'*len(columns))), new_rec, verbose=verbose)
keepers.append(N)
except IOError:
rejects.append(N)
new_records[N]['id'] = rowids[N]
# Make tables of keepers and rejects
rejected = new_records[rejects]
new_records = new_records[keepers]
# Print a table of the new records or bad news
if new_records:
print("\033[1;32m{} new records added to the {} table.\033[1;m".format(len(new_records), table.upper()))
new_records.pprint()
if rejected:
print("\033[1;31m{} records rejected from the {} table.\033[1;m".format(len(rejected), table.upper()))
rejected.pprint()
# Run table clean up
if clean_up:
self.clean_up(table, verbose)
else:
print('Please check your input: {}'.format(entry)) | Adds data to the specified database table. Column names must match table fields to insert,
however order and completeness don't matter.
Parameters
----------
data: str, array-like, astropy.table.Table
The path to an ascii file, array-like object, or table. The first row or element must
be the list of column names
table: str
The name of the table into which the data should be inserted
delimiter: str
The string to use as the delimiter when parsing the ascii file
bands: sequence
Sequence of band to look for in the data header when digesting columns of
multiple photometric measurements (e.g. ['MKO_J','MKO_H','MKO_K']) into individual
rows of data for database insertion
clean_up: bool
Run self.clean_up()
rename_columns: dict
A dictionary of the {input_col_name:desired_col_name} for table columns,
e.g. {'e_Jmag':'J_unc', 'RAJ2000':'ra'}
column_fill: dict
A dictionary of the column name and value to fill, e.g. {'instrument_id':2, 'band':'2MASS.J'}
verbose: bool
Print diagnostic messages | Below is the the instruction that describes the task:
### Input:
Adds data to the specified database table. Column names must match table fields to insert,
however order and completeness don't matter.
Parameters
----------
data: str, array-like, astropy.table.Table
The path to an ascii file, array-like object, or table. The first row or element must
be the list of column names
table: str
The name of the table into which the data should be inserted
delimiter: str
The string to use as the delimiter when parsing the ascii file
bands: sequence
Sequence of band to look for in the data header when digesting columns of
multiple photometric measurements (e.g. ['MKO_J','MKO_H','MKO_K']) into individual
rows of data for database insertion
clean_up: bool
Run self.clean_up()
rename_columns: dict
A dictionary of the {input_col_name:desired_col_name} for table columns,
e.g. {'e_Jmag':'J_unc', 'RAJ2000':'ra'}
column_fill: dict
A dictionary of the column name and value to fill, e.g. {'instrument_id':2, 'band':'2MASS.J'}
verbose: bool
Print diagnostic messages
### Response:
def add_data(self, data, table, delimiter='|', bands='', clean_up=True, rename_columns={}, column_fill={}, verbose=False):
"""
Adds data to the specified database table. Column names must match table fields to insert,
however order and completeness don't matter.
Parameters
----------
data: str, array-like, astropy.table.Table
The path to an ascii file, array-like object, or table. The first row or element must
be the list of column names
table: str
The name of the table into which the data should be inserted
delimiter: str
The string to use as the delimiter when parsing the ascii file
bands: sequence
Sequence of band to look for in the data header when digesting columns of
multiple photometric measurements (e.g. ['MKO_J','MKO_H','MKO_K']) into individual
rows of data for database insertion
clean_up: bool
Run self.clean_up()
rename_columns: dict
A dictionary of the {input_col_name:desired_col_name} for table columns,
e.g. {'e_Jmag':'J_unc', 'RAJ2000':'ra'}
column_fill: dict
A dictionary of the column name and value to fill, e.g. {'instrument_id':2, 'band':'2MASS.J'}
verbose: bool
Print diagnostic messages
"""
# Store raw entry
entry, del_records = data, []
# Digest the ascii file into table
if isinstance(data, str) and os.path.isfile(data):
data = ii.read(data, delimiter=delimiter)
# Or read the sequence of data elements into a table
elif isinstance(data, (list, tuple, np.ndarray)):
data = ii.read(['|'.join(map(str, row)) for row in data], data_start=1, delimiter='|')
# Or convert pandas dataframe to astropy table
elif isinstance(data, pd.core.frame.DataFrame):
data = at.Table.from_pandas(data)
# Or if it's already an astropy table
elif isinstance(data, at.Table):
pass
else:
data = None
if data:
# Rename columns
if isinstance(rename_columns,str):
rename_columns = astrocat.default_rename_columns(rename_columns)
for input_name,new_name in rename_columns.items():
data.rename_column(input_name,new_name)
# Add column fills
if isinstance(column_fill,str):
column_fill = astrocat.default_column_fill(column_fill)
for colname,fill_value in column_fill.items():
data[colname] = [fill_value]*len(data)
# Get list of all columns and make an empty table for new records
metadata = self.query("PRAGMA table_info({})".format(table), fmt='table')
columns, types, required = [np.array(metadata[n]) for n in ['name', 'type', 'notnull']]
new_records = at.Table(names=columns, dtype=[type_dict[t] for t in types])
# Fix column dtypes and blanks
for col in data.colnames:
# Convert data dtypes to those of the existing table
try:
temp = data[col].astype(new_records[col].dtype)
data.replace_column(col, temp)
except KeyError:
continue
# If a row contains photometry for multiple bands, use the *multiband argument and execute this
if bands and table.lower() == 'photometry':
# Pull out columns that are band names
for b in list(set(bands) & set(data.colnames)):
try:
# Get the repeated data plus the band data and rename the columns
band = data[list(set(columns) & set(data.colnames)) + [b, b + '_unc']]
for suf in ['', '_unc']:
band.rename_column(b+suf, 'magnitude'+suf)
temp = band['magnitude'+suf].astype(new_records['magnitude'+suf].dtype)
band.replace_column('magnitude'+suf, temp)
band.add_column(at.Column([b] * len(band), name='band', dtype='O'))
# Add the band data to the list of new_records
new_records = at.vstack([new_records, band])
except IOError:
pass
else:
# Inject data into full database table format
new_records = at.vstack([new_records, data])[new_records.colnames]
# Reject rows that fail column requirements, e.g. NOT NULL fields like 'source_id'
for r in columns[np.where(np.logical_and(required, columns != 'id'))]:
# Null values...
new_records = new_records[np.where(new_records[r])]
# Masked values...
try:
new_records = new_records[~new_records[r].mask]
except:
pass
# NaN values...
if new_records.dtype[r] in (int, float):
new_records = new_records[~np.isnan(new_records[r])]
# For spectra, try to populate the table by reading the FITS header
if table.lower() == 'spectra':
for n, new_rec in enumerate(new_records):
# Convert relative path to absolute path
relpath = new_rec['spectrum']
if relpath.startswith('$'):
abspath = os.popen('echo {}'.format(relpath.split('/')[0])).read()[:-1]
if abspath:
new_rec['spectrum'] = relpath.replace(relpath.split('/')[0], abspath)
# Test if the file exists and try to pull metadata from the FITS header
if os.path.isfile(new_rec['spectrum']):
new_records[n]['spectrum'] = relpath
new_records[n] = _autofill_spec_record(new_rec)
else:
print('Error adding the spectrum at {}'.format(new_rec['spectrum']))
del_records.append(n)
# Remove bad records from the table
new_records.remove_rows(del_records)
# For images, try to populate the table by reading the FITS header
if table.lower() == 'images':
for n, new_rec in enumerate(new_records):
# Convert relative path to absolute path
relpath = new_rec['image']
if relpath.startswith('$'):
abspath = os.popen('echo {}'.format(relpath.split('/')[0])).read()[:-1]
if abspath:
new_rec['image'] = relpath.replace(relpath.split('/')[0], abspath)
# Test if the file exists and try to pull metadata from the FITS header
if os.path.isfile(new_rec['image']):
new_records[n]['image'] = relpath
new_records[n] = _autofill_spec_record(new_rec)
else:
print('Error adding the image at {}'.format(new_rec['image']))
del_records.append(n)
# Remove bad records from the table
new_records.remove_rows(del_records)
# Get some new row ids for the good records
rowids = self._lowest_rowids(table, len(new_records))
# Add the new records
keepers, rejects = [], []
for N, new_rec in enumerate(new_records):
new_rec = list(new_rec)
new_rec[0] = rowids[N]
for n, col in enumerate(new_rec):
if type(col) == np.int64 and sys.version_info[0] >= 3:
# Fix for Py3 and sqlite3 issue with numpy types
new_rec[n] = col.item()
if type(col) == np.ma.core.MaskedConstant:
new_rec[n] = None
try:
self.modify("INSERT INTO {} VALUES({})".format(table, ','.join('?'*len(columns))), new_rec, verbose=verbose)
keepers.append(N)
except IOError:
rejects.append(N)
new_records[N]['id'] = rowids[N]
# Make tables of keepers and rejects
rejected = new_records[rejects]
new_records = new_records[keepers]
# Print a table of the new records or bad news
if new_records:
print("\033[1;32m{} new records added to the {} table.\033[1;m".format(len(new_records), table.upper()))
new_records.pprint()
if rejected:
print("\033[1;31m{} records rejected from the {} table.\033[1;m".format(len(rejected), table.upper()))
rejected.pprint()
# Run table clean up
if clean_up:
self.clean_up(table, verbose)
else:
print('Please check your input: {}'.format(entry)) |
def cli(ctx):
"""
Run setup after a fresh Vagrant installation.
"""
log = logging.getLogger('ipsv.setup')
assert isinstance(ctx, Context)
lock_path = os.path.join(ctx.config.get('Paths', 'Data'), 'setup.lck')
if os.path.exists(lock_path):
raise Exception('Setup is locked, please remove the setup lock file to continue')
# Create our package directories
p = Echo('Creating IPS Vagrant system directories...')
dirs = ['/etc/ipsv', ctx.config.get('Paths', 'Data'), ctx.config.get('Paths', 'Log'),
ctx.config.get('Paths', 'NginxSitesAvailable'), ctx.config.get('Paths', 'NginxSitesEnabled'),
ctx.config.get('Paths', 'NginxSSL')]
for d in dirs:
if not os.path.exists(d):
os.makedirs(d, 0o755)
p.done()
p = Echo('Copying IPS Vagrant configuration files...')
with open('/etc/ipsv/ipsv.conf', 'w+') as f:
ctx.config.write(f)
p.done()
# Set up alembic
alembic_cfg = Config(os.path.join(ctx.basedir, 'alembic.ini'))
alembic_cfg.set_main_option("script_location", os.path.join(ctx.basedir, 'migrations'))
alembic_cfg.set_main_option("sqlalchemy.url", "sqlite:////{path}"
.format(path=os.path.join(ctx.config.get('Paths', 'Data'), 'sites.db')))
command.current(alembic_cfg)
command.downgrade(alembic_cfg, 'base')
command.upgrade(alembic_cfg, 'head')
# Update the system
p = Echo('Updating package cache...')
cache = apt.Cache()
cache.update()
cache.open(None)
p.done()
p = Echo('Upgrading system packages...')
cache.upgrade()
cache.commit()
p.done()
# Install our required packages
requirements = ['nginx', 'php5-fpm', 'php5-curl', 'php5-gd', 'php5-imagick', 'php5-json', 'php5-mysql',
'php5-readline', 'php5-apcu', 'php5-xdebug']
for requirement in requirements:
# Make sure the package is available
p = Echo('Marking package {pkg} for installation'.format(pkg=requirement))
if requirement not in cache:
log.warn('Required package {pkg} not available'.format(pkg=requirement))
p.done(p.FAIL)
continue
# Mark the package for installation
cache[requirement].mark_install()
p.done()
log.info('Committing package cache')
p = Echo('Downloading and installing packages...')
cache.commit()
p.done()
# Disable the default server block
p = Echo('Configuring Nginx...')
default_available = os.path.join(ctx.config.get('Paths', 'NginxSitesAvailable'), 'default')
default_enabled = os.path.join(ctx.config.get('Paths', 'NginxSitesEnabled'), 'default')
if os.path.isfile(default_available):
os.remove(default_available)
if os.path.islink(default_enabled):
os.unlink(default_enabled)
p.done()
# Restart Nginx
FNULL = open(os.devnull, 'w')
p = Echo('Restarting Nginx...')
subprocess.check_call(['service', 'nginx', 'restart'], stdout=FNULL, stderr=subprocess.STDOUT)
p.done()
# php.ini configuration
p = Echo('Configuring php...')
with open('/etc/php5/fpm/php.ini', 'a') as f:
f.write('\n[XDebug]')
f.write('\nxdebug.cli_color=1')
temp_fh, temp_path = mkstemp()
with open(temp_path, 'w') as nf:
with open('/etc/php5/fpm/php.ini') as of:
# Configuration options we are replacing
upload_max_filesize = re.compile( '^upload_max_filesize\s+=\s+(\d+[a-zA-Z])\s*$' )
post_max_size = re.compile( '^post_max_size\s+=\s+(\d+[a-zA-Z])\s*$' )
for line in of:
match = upload_max_filesize.match( line ) if upload_max_filesize is not True else False
if match:
nf.write( 'upload_max_filesize = 1000M\n' )
upload_max_filesize = True
continue
match = post_max_size.match( line ) if post_max_size is not True else False
if match:
nf.write( 'post_max_size = 1000M\n' )
post_max_size = True
continue
nf.write(line)
os.close(temp_fh)
os.remove('/etc/php5/fpm/php.ini')
shutil.move(temp_path, '/etc/php5/fpm/php.ini')
os.chmod('/etc/php5/fpm/php.ini', 0o644)
p.done()
# php5-fpm configuration
p = Echo('Configuring php5-fpm...')
if os.path.isfile('/etc/php5/fpm/pool.d/www.conf'):
os.remove('/etc/php5/fpm/pool.d/www.conf')
fpm_config = FpmPoolConfig().template
with open('/etc/php5/fpm/pool.d/ips.conf', 'w') as f:
f.write(fpm_config)
p.done()
# Restart php5-fpm
p = Echo('Restarting php5-fpm...')
subprocess.check_call(['service', 'php5-fpm', 'restart'], stdout=FNULL, stderr=subprocess.STDOUT)
p.done()
# Copy the man pages and rebuild the manual database
p = Echo('Writing manual pages...')
man_path = os.path.join(ctx.basedir, 'man', 'ipsv.1')
sys_man_path = '/usr/local/share/man/man1'
if not os.path.exists(sys_man_path):
os.makedirs(sys_man_path)
shutil.copyfile(man_path, os.path.join(sys_man_path, 'ipsv.1'))
subprocess.check_call(['mandb'], stdout=FNULL, stderr=subprocess.STDOUT)
# Enable the welcome message
log.debug('Writing welcome message')
wm_header = '## DO NOT REMOVE :: AUTOMATICALLY GENERATED BY IPSV ##'
wm_remove = False
# Remove old profile data
for line in fileinput.input('/etc/profile', inplace=True):
# Header / footer match?
if line == wm_header:
# Footer match (Stop removing)
if wm_remove:
wm_remove = False
continue
# Header match (Start removing)
wm_remove = True
continue
# Removing lines?
if wm_remove:
continue
# Print line and continue as normal
sys.stdout.write(line)
# Write new profile data
with open('/etc/profile', 'a') as f:
f.write("\n" + wm_header + "\n")
fl_lock_path = os.path.join(ctx.config.get('Paths', 'Data'), 'first_login.lck')
f.write('if [ ! -f "{lp}" ]; then'.format(lp=fl_lock_path) + "\n")
f.write(' less "{wp}"'.format(wp=os.path.join(ctx.basedir, 'WELCOME.rst')) + "\n")
f.write(' sudo touch "{lp}"'.format(lp=fl_lock_path) + "\n")
f.write('fi' + "\n")
f.write(wm_header + "\n")
p.done()
log.debug('Writing setup lock file')
with open(os.path.join(ctx.config.get('Paths', 'Data'), 'setup.lck'), 'w') as f:
f.write('1') | Run setup after a fresh Vagrant installation. | Below is the the instruction that describes the task:
### Input:
Run setup after a fresh Vagrant installation.
### Response:
def cli(ctx):
"""
Run setup after a fresh Vagrant installation.
"""
log = logging.getLogger('ipsv.setup')
assert isinstance(ctx, Context)
lock_path = os.path.join(ctx.config.get('Paths', 'Data'), 'setup.lck')
if os.path.exists(lock_path):
raise Exception('Setup is locked, please remove the setup lock file to continue')
# Create our package directories
p = Echo('Creating IPS Vagrant system directories...')
dirs = ['/etc/ipsv', ctx.config.get('Paths', 'Data'), ctx.config.get('Paths', 'Log'),
ctx.config.get('Paths', 'NginxSitesAvailable'), ctx.config.get('Paths', 'NginxSitesEnabled'),
ctx.config.get('Paths', 'NginxSSL')]
for d in dirs:
if not os.path.exists(d):
os.makedirs(d, 0o755)
p.done()
p = Echo('Copying IPS Vagrant configuration files...')
with open('/etc/ipsv/ipsv.conf', 'w+') as f:
ctx.config.write(f)
p.done()
# Set up alembic
alembic_cfg = Config(os.path.join(ctx.basedir, 'alembic.ini'))
alembic_cfg.set_main_option("script_location", os.path.join(ctx.basedir, 'migrations'))
alembic_cfg.set_main_option("sqlalchemy.url", "sqlite:////{path}"
.format(path=os.path.join(ctx.config.get('Paths', 'Data'), 'sites.db')))
command.current(alembic_cfg)
command.downgrade(alembic_cfg, 'base')
command.upgrade(alembic_cfg, 'head')
# Update the system
p = Echo('Updating package cache...')
cache = apt.Cache()
cache.update()
cache.open(None)
p.done()
p = Echo('Upgrading system packages...')
cache.upgrade()
cache.commit()
p.done()
# Install our required packages
requirements = ['nginx', 'php5-fpm', 'php5-curl', 'php5-gd', 'php5-imagick', 'php5-json', 'php5-mysql',
'php5-readline', 'php5-apcu', 'php5-xdebug']
for requirement in requirements:
# Make sure the package is available
p = Echo('Marking package {pkg} for installation'.format(pkg=requirement))
if requirement not in cache:
log.warn('Required package {pkg} not available'.format(pkg=requirement))
p.done(p.FAIL)
continue
# Mark the package for installation
cache[requirement].mark_install()
p.done()
log.info('Committing package cache')
p = Echo('Downloading and installing packages...')
cache.commit()
p.done()
# Disable the default server block
p = Echo('Configuring Nginx...')
default_available = os.path.join(ctx.config.get('Paths', 'NginxSitesAvailable'), 'default')
default_enabled = os.path.join(ctx.config.get('Paths', 'NginxSitesEnabled'), 'default')
if os.path.isfile(default_available):
os.remove(default_available)
if os.path.islink(default_enabled):
os.unlink(default_enabled)
p.done()
# Restart Nginx
FNULL = open(os.devnull, 'w')
p = Echo('Restarting Nginx...')
subprocess.check_call(['service', 'nginx', 'restart'], stdout=FNULL, stderr=subprocess.STDOUT)
p.done()
# php.ini configuration
p = Echo('Configuring php...')
with open('/etc/php5/fpm/php.ini', 'a') as f:
f.write('\n[XDebug]')
f.write('\nxdebug.cli_color=1')
temp_fh, temp_path = mkstemp()
with open(temp_path, 'w') as nf:
with open('/etc/php5/fpm/php.ini') as of:
# Configuration options we are replacing
upload_max_filesize = re.compile( '^upload_max_filesize\s+=\s+(\d+[a-zA-Z])\s*$' )
post_max_size = re.compile( '^post_max_size\s+=\s+(\d+[a-zA-Z])\s*$' )
for line in of:
match = upload_max_filesize.match( line ) if upload_max_filesize is not True else False
if match:
nf.write( 'upload_max_filesize = 1000M\n' )
upload_max_filesize = True
continue
match = post_max_size.match( line ) if post_max_size is not True else False
if match:
nf.write( 'post_max_size = 1000M\n' )
post_max_size = True
continue
nf.write(line)
os.close(temp_fh)
os.remove('/etc/php5/fpm/php.ini')
shutil.move(temp_path, '/etc/php5/fpm/php.ini')
os.chmod('/etc/php5/fpm/php.ini', 0o644)
p.done()
# php5-fpm configuration
p = Echo('Configuring php5-fpm...')
if os.path.isfile('/etc/php5/fpm/pool.d/www.conf'):
os.remove('/etc/php5/fpm/pool.d/www.conf')
fpm_config = FpmPoolConfig().template
with open('/etc/php5/fpm/pool.d/ips.conf', 'w') as f:
f.write(fpm_config)
p.done()
# Restart php5-fpm
p = Echo('Restarting php5-fpm...')
subprocess.check_call(['service', 'php5-fpm', 'restart'], stdout=FNULL, stderr=subprocess.STDOUT)
p.done()
# Copy the man pages and rebuild the manual database
p = Echo('Writing manual pages...')
man_path = os.path.join(ctx.basedir, 'man', 'ipsv.1')
sys_man_path = '/usr/local/share/man/man1'
if not os.path.exists(sys_man_path):
os.makedirs(sys_man_path)
shutil.copyfile(man_path, os.path.join(sys_man_path, 'ipsv.1'))
subprocess.check_call(['mandb'], stdout=FNULL, stderr=subprocess.STDOUT)
# Enable the welcome message
log.debug('Writing welcome message')
wm_header = '## DO NOT REMOVE :: AUTOMATICALLY GENERATED BY IPSV ##'
wm_remove = False
# Remove old profile data
for line in fileinput.input('/etc/profile', inplace=True):
# Header / footer match?
if line == wm_header:
# Footer match (Stop removing)
if wm_remove:
wm_remove = False
continue
# Header match (Start removing)
wm_remove = True
continue
# Removing lines?
if wm_remove:
continue
# Print line and continue as normal
sys.stdout.write(line)
# Write new profile data
with open('/etc/profile', 'a') as f:
f.write("\n" + wm_header + "\n")
fl_lock_path = os.path.join(ctx.config.get('Paths', 'Data'), 'first_login.lck')
f.write('if [ ! -f "{lp}" ]; then'.format(lp=fl_lock_path) + "\n")
f.write(' less "{wp}"'.format(wp=os.path.join(ctx.basedir, 'WELCOME.rst')) + "\n")
f.write(' sudo touch "{lp}"'.format(lp=fl_lock_path) + "\n")
f.write('fi' + "\n")
f.write(wm_header + "\n")
p.done()
log.debug('Writing setup lock file')
with open(os.path.join(ctx.config.get('Paths', 'Data'), 'setup.lck'), 'w') as f:
f.write('1') |
def find_steam_location():
"""
Finds the location of the current Steam installation on Windows machines.
Returns None for any non-Windows machines, or for Windows machines where
Steam is not installed.
"""
if registry is None:
return None
key = registry.CreateKey(registry.HKEY_CURRENT_USER,"Software\Valve\Steam")
return registry.QueryValueEx(key,"SteamPath")[0] | Finds the location of the current Steam installation on Windows machines.
Returns None for any non-Windows machines, or for Windows machines where
Steam is not installed. | Below is the the instruction that describes the task:
### Input:
Finds the location of the current Steam installation on Windows machines.
Returns None for any non-Windows machines, or for Windows machines where
Steam is not installed.
### Response:
def find_steam_location():
"""
Finds the location of the current Steam installation on Windows machines.
Returns None for any non-Windows machines, or for Windows machines where
Steam is not installed.
"""
if registry is None:
return None
key = registry.CreateKey(registry.HKEY_CURRENT_USER,"Software\Valve\Steam")
return registry.QueryValueEx(key,"SteamPath")[0] |
def get_float_time():
'''returns time as double precision floats - Time64 in pytables - mapping to and from python datetime's
'''
t1 = time.time()
t2 = datetime.datetime.fromtimestamp(t1)
return time.mktime(t2.timetuple()) + 1e-6 * t2.microsecond | returns time as double precision floats - Time64 in pytables - mapping to and from python datetime's | Below is the the instruction that describes the task:
### Input:
returns time as double precision floats - Time64 in pytables - mapping to and from python datetime's
### Response:
def get_float_time():
'''returns time as double precision floats - Time64 in pytables - mapping to and from python datetime's
'''
t1 = time.time()
t2 = datetime.datetime.fromtimestamp(t1)
return time.mktime(t2.timetuple()) + 1e-6 * t2.microsecond |
def rerouteTraveltime(self, vehID, currentTravelTimes=True):
"""rerouteTraveltime(string, bool) -> None Reroutes a vehicle. If
currentTravelTimes is True (default) then the current traveltime of the
edges is loaded and used for rerouting. If currentTravelTimes is False
custom travel times are used. The various functions and options for
customizing travel times are described at http://sumo.dlr.de/wiki/Simulation/Routing
When rerouteTraveltime has been called once with option
currentTravelTimes=True, all edge weights are set to the current travel
times at the time of that call (even for subsequent simulation steps).
"""
if currentTravelTimes:
time = self._connection.simulation.getCurrentTime()
if time != self.LAST_TRAVEL_TIME_UPDATE:
self.LAST_TRAVEL_TIME_UPDATE = time
for edge in self._connection.edge.getIDList():
self._connection.edge.adaptTraveltime(
edge, self._connection.edge.getTraveltime(edge))
self._connection._beginMessage(
tc.CMD_SET_VEHICLE_VARIABLE, tc.CMD_REROUTE_TRAVELTIME, vehID, 1 + 4)
self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 0)
self._connection._sendExact() | rerouteTraveltime(string, bool) -> None Reroutes a vehicle. If
currentTravelTimes is True (default) then the current traveltime of the
edges is loaded and used for rerouting. If currentTravelTimes is False
custom travel times are used. The various functions and options for
customizing travel times are described at http://sumo.dlr.de/wiki/Simulation/Routing
When rerouteTraveltime has been called once with option
currentTravelTimes=True, all edge weights are set to the current travel
times at the time of that call (even for subsequent simulation steps). | Below is the the instruction that describes the task:
### Input:
rerouteTraveltime(string, bool) -> None Reroutes a vehicle. If
currentTravelTimes is True (default) then the current traveltime of the
edges is loaded and used for rerouting. If currentTravelTimes is False
custom travel times are used. The various functions and options for
customizing travel times are described at http://sumo.dlr.de/wiki/Simulation/Routing
When rerouteTraveltime has been called once with option
currentTravelTimes=True, all edge weights are set to the current travel
times at the time of that call (even for subsequent simulation steps).
### Response:
def rerouteTraveltime(self, vehID, currentTravelTimes=True):
"""rerouteTraveltime(string, bool) -> None Reroutes a vehicle. If
currentTravelTimes is True (default) then the current traveltime of the
edges is loaded and used for rerouting. If currentTravelTimes is False
custom travel times are used. The various functions and options for
customizing travel times are described at http://sumo.dlr.de/wiki/Simulation/Routing
When rerouteTraveltime has been called once with option
currentTravelTimes=True, all edge weights are set to the current travel
times at the time of that call (even for subsequent simulation steps).
"""
if currentTravelTimes:
time = self._connection.simulation.getCurrentTime()
if time != self.LAST_TRAVEL_TIME_UPDATE:
self.LAST_TRAVEL_TIME_UPDATE = time
for edge in self._connection.edge.getIDList():
self._connection.edge.adaptTraveltime(
edge, self._connection.edge.getTraveltime(edge))
self._connection._beginMessage(
tc.CMD_SET_VEHICLE_VARIABLE, tc.CMD_REROUTE_TRAVELTIME, vehID, 1 + 4)
self._connection._string += struct.pack("!Bi", tc.TYPE_COMPOUND, 0)
self._connection._sendExact() |
def example_path(cls, project, dataset, annotated_dataset, example):
"""Return a fully-qualified example string."""
return google.api_core.path_template.expand(
"projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}/examples/{example}",
project=project,
dataset=dataset,
annotated_dataset=annotated_dataset,
example=example,
) | Return a fully-qualified example string. | Below is the the instruction that describes the task:
### Input:
Return a fully-qualified example string.
### Response:
def example_path(cls, project, dataset, annotated_dataset, example):
"""Return a fully-qualified example string."""
return google.api_core.path_template.expand(
"projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}/examples/{example}",
project=project,
dataset=dataset,
annotated_dataset=annotated_dataset,
example=example,
) |
def printRemoteDatawraps(location = conf.pyGeno_REMOTE_LOCATION) :
"""
print all available datawraps from a remote location the location must have a datawraps.json in the following format::
{
"Ordered": {
"Reference genomes": {
"Human" : ["GRCh37.75", "GRCh38.78"],
"Mouse" : ["GRCm38.78"],
},
"SNPs":{
}
},
"Flat":{
"Reference genomes": {
"GRCh37.75": "Human.GRCh37.75.tar.gz",
"GRCh38.78": "Human.GRCh37.75.tar.gz",
"GRCm38.78": "Mouse.GRCm38.78.tar.gz"
},
"SNPs":{
}
}
}
"""
l = listRemoteDatawraps(location)
printf("Available datawraps for bootstraping\n")
print json.dumps(l["Ordered"], sort_keys=True, indent=4, separators=(',', ': ')) | print all available datawraps from a remote location the location must have a datawraps.json in the following format::
{
"Ordered": {
"Reference genomes": {
"Human" : ["GRCh37.75", "GRCh38.78"],
"Mouse" : ["GRCm38.78"],
},
"SNPs":{
}
},
"Flat":{
"Reference genomes": {
"GRCh37.75": "Human.GRCh37.75.tar.gz",
"GRCh38.78": "Human.GRCh37.75.tar.gz",
"GRCm38.78": "Mouse.GRCm38.78.tar.gz"
},
"SNPs":{
}
}
} | Below is the the instruction that describes the task:
### Input:
print all available datawraps from a remote location the location must have a datawraps.json in the following format::
{
"Ordered": {
"Reference genomes": {
"Human" : ["GRCh37.75", "GRCh38.78"],
"Mouse" : ["GRCm38.78"],
},
"SNPs":{
}
},
"Flat":{
"Reference genomes": {
"GRCh37.75": "Human.GRCh37.75.tar.gz",
"GRCh38.78": "Human.GRCh37.75.tar.gz",
"GRCm38.78": "Mouse.GRCm38.78.tar.gz"
},
"SNPs":{
}
}
}
### Response:
def printRemoteDatawraps(location = conf.pyGeno_REMOTE_LOCATION) :
"""
print all available datawraps from a remote location the location must have a datawraps.json in the following format::
{
"Ordered": {
"Reference genomes": {
"Human" : ["GRCh37.75", "GRCh38.78"],
"Mouse" : ["GRCm38.78"],
},
"SNPs":{
}
},
"Flat":{
"Reference genomes": {
"GRCh37.75": "Human.GRCh37.75.tar.gz",
"GRCh38.78": "Human.GRCh37.75.tar.gz",
"GRCm38.78": "Mouse.GRCm38.78.tar.gz"
},
"SNPs":{
}
}
}
"""
l = listRemoteDatawraps(location)
printf("Available datawraps for bootstraping\n")
print json.dumps(l["Ordered"], sort_keys=True, indent=4, separators=(',', ': ')) |
def update_scale(self, overflow):
"""dynamically update loss scale"""
iter_since_rescale = self._num_steps - self._last_rescale_iter
if overflow:
self._last_overflow_iter = self._num_steps
self._overflows_since_rescale += 1
percentage = self._overflows_since_rescale / float(iter_since_rescale)
# we tolerate a certrain amount of NaNs before actually scaling it down
if percentage >= self.tolerance:
self.loss_scale /= self.scale_factor
self._last_rescale_iter = self._num_steps
self._overflows_since_rescale = 0
logging.info('DynamicLossScaler: overflow detected. set loss_scale = %s',
self.loss_scale)
elif (self._num_steps - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._num_steps
self._num_steps += 1 | dynamically update loss scale | Below is the the instruction that describes the task:
### Input:
dynamically update loss scale
### Response:
def update_scale(self, overflow):
"""dynamically update loss scale"""
iter_since_rescale = self._num_steps - self._last_rescale_iter
if overflow:
self._last_overflow_iter = self._num_steps
self._overflows_since_rescale += 1
percentage = self._overflows_since_rescale / float(iter_since_rescale)
# we tolerate a certrain amount of NaNs before actually scaling it down
if percentage >= self.tolerance:
self.loss_scale /= self.scale_factor
self._last_rescale_iter = self._num_steps
self._overflows_since_rescale = 0
logging.info('DynamicLossScaler: overflow detected. set loss_scale = %s',
self.loss_scale)
elif (self._num_steps - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._num_steps
self._num_steps += 1 |
def deviance(self, endog, mu, freq_weights=1, scale=1., axis=None):
r'''
Deviance function for either Bernoulli or Binomial data.
Parameters
----------
endog : array-like
Endogenous response variable (already transformed to a probability
if appropriate).
mu : array
Fitted mean response variable
freq_weights : array-like
1d array of frequency weights. The default is 1.
scale : float, optional
An optional scale argument. The default is 1.
Returns
--------
deviance : float
The deviance function as defined below
'''
if np.shape(self.n) == () and self.n == 1:
one = np.equal(endog, 1)
return -2 * np.sum((one * np.log(mu + 1e-200) + (1-one) *
np.log(1 - mu + 1e-200)) * freq_weights, axis=axis)
else:
return 2 * np.sum(self.n * freq_weights *
(endog * np.log(endog/mu + 1e-200) +
(1 - endog) * np.log((1 - endog) /
(1 - mu) + 1e-200)), axis=axis) | r'''
Deviance function for either Bernoulli or Binomial data.
Parameters
----------
endog : array-like
Endogenous response variable (already transformed to a probability
if appropriate).
mu : array
Fitted mean response variable
freq_weights : array-like
1d array of frequency weights. The default is 1.
scale : float, optional
An optional scale argument. The default is 1.
Returns
--------
deviance : float
The deviance function as defined below | Below is the the instruction that describes the task:
### Input:
r'''
Deviance function for either Bernoulli or Binomial data.
Parameters
----------
endog : array-like
Endogenous response variable (already transformed to a probability
if appropriate).
mu : array
Fitted mean response variable
freq_weights : array-like
1d array of frequency weights. The default is 1.
scale : float, optional
An optional scale argument. The default is 1.
Returns
--------
deviance : float
The deviance function as defined below
### Response:
def deviance(self, endog, mu, freq_weights=1, scale=1., axis=None):
r'''
Deviance function for either Bernoulli or Binomial data.
Parameters
----------
endog : array-like
Endogenous response variable (already transformed to a probability
if appropriate).
mu : array
Fitted mean response variable
freq_weights : array-like
1d array of frequency weights. The default is 1.
scale : float, optional
An optional scale argument. The default is 1.
Returns
--------
deviance : float
The deviance function as defined below
'''
if np.shape(self.n) == () and self.n == 1:
one = np.equal(endog, 1)
return -2 * np.sum((one * np.log(mu + 1e-200) + (1-one) *
np.log(1 - mu + 1e-200)) * freq_weights, axis=axis)
else:
return 2 * np.sum(self.n * freq_weights *
(endog * np.log(endog/mu + 1e-200) +
(1 - endog) * np.log((1 - endog) /
(1 - mu) + 1e-200)), axis=axis) |
def liquid_jet_pump_ancillary(rhop, rhos, Kp, Ks, d_nozzle=None, d_mixing=None,
Qp=None, Qs=None, P1=None, P2=None):
r'''Calculates the remaining variable in a liquid jet pump when solving for
one if the inlet variables only and the rest of them are known. The
equation comes from conservation of energy and momentum in the mixing
chamber.
The variable to be solved for must be one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`.
.. math::
P_1 - P_2 = \frac{1}{2}\rho_pV_n^2(1+K_p)
- \frac{1}{2}\rho_s V_3^2(1+K_s)
Rearrange to express V3 in terms of Vn, and using the density ratio `C`,
the expression becomes:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p V_n^2\left[(1+K_p) - C(1+K_s)
\left(\frac{MR}{1-R}\right)^2\right]
Using the primary nozzle area and flow rate:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p \left(\frac{Q_p}{A_n}\right)^2
\left[(1+K_p) - C(1+K_s) \left(\frac{MR}{1-R}\right)^2\right]
For `P`, `P2`, `Qs`, and `Qp`, the equation can be rearranged explicitly
for them. For `d_mixing` and `d_nozzle`, a bounded solver is used searching
between 1E-9 m and 20 times the other diameter which was specified.
Parameters
----------
rhop : float
The density of the primary (motive) fluid, [kg/m^3]
rhos : float
The density of the secondary fluid (drawn from the vacuum chamber),
[kg/m^3]
Kp : float
The primary nozzle loss coefficient, [-]
Ks : float
The secondary inlet loss coefficient, [-]
d_nozzle : float, optional
The inside diameter of the primary fluid's nozle, [m]
d_mixing : float, optional
The diameter of the mixing chamber, [m]
Qp : float, optional
The volumetric flow rate of the primary fluid, [m^3/s]
Qs : float, optional
The volumetric flow rate of the secondary fluid, [m^3/s]
P1 : float, optional
The pressure of the primary fluid entering its nozzle, [Pa]
P2 : float, optional
The pressure of the secondary fluid at the entry of the ejector, [Pa]
Returns
-------
solution : float
The parameter not specified (one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`), (units of `m`, `m`, `m^3/s`, `m^3/s`,
`Pa`, or `Pa` respectively)
Notes
-----
The following SymPy code was used to obtain the analytical formulas (
they are not shown here due to their length):
>>> from sympy import *
>>> A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp = symbols('A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp')
>>> R = A_nozzle/A_mixing
>>> M = Qs/Qp
>>> C = rhos/rhop
>>> rhs = rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 )
>>> new = Eq(P1 - P2, rhs)
>>> #solve(new, Qp)
>>> #solve(new, Qs)
>>> #solve(new, P1)
>>> #solve(new, P2)
Examples
--------
Calculating primary fluid nozzle inlet pressure P1:
>>> liquid_jet_pump_ancillary(rhop=998., rhos=1098., Ks=0.11, Kp=.04,
... P2=133600, Qp=0.01, Qs=0.01, d_mixing=0.045, d_nozzle=0.02238)
426434.60314398084
References
----------
.. [1] Ejectors and Jet Pumps. Design and Performance for Incompressible
Liquid Flow. 85032. ESDU International PLC, 1985.
'''
unknowns = sum(i is None for i in (d_nozzle, d_mixing, Qs, Qp, P1, P2))
if unknowns > 1:
raise Exception('Too many unknowns')
elif unknowns < 1:
raise Exception('Overspecified')
C = rhos/rhop
if Qp is not None and Qs is not None:
M = Qs/Qp
if d_nozzle is not None:
A_nozzle = pi/4*d_nozzle*d_nozzle
if d_mixing is not None:
A_mixing = pi/4*d_mixing*d_mixing
R = A_nozzle/A_mixing
if P1 is None:
return rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) + P2
elif P2 is None:
return -rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) + P1
elif Qs is None:
try:
return ((-2*A_nozzle**2*P1 + 2*A_nozzle**2*P2 + Kp*Qp**2*rhop + Qp**2*rhop)/(C*rhop*(Ks + 1)))**0.5*(A_mixing - A_nozzle)/A_nozzle
except ValueError:
return -1j
elif Qp is None:
return A_nozzle*((2*A_mixing**2*P1 - 2*A_mixing**2*P2 - 4*A_mixing*A_nozzle*P1 + 4*A_mixing*A_nozzle*P2 + 2*A_nozzle**2*P1 - 2*A_nozzle**2*P2 + C*Ks*Qs**2*rhop + C*Qs**2*rhop)/(rhop*(Kp + 1)))**0.5/(A_mixing - A_nozzle)
elif d_nozzle is None:
def err(d_nozzle):
return P1 - liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Kp=Kp, Ks=Ks, d_nozzle=d_nozzle, d_mixing=d_mixing, Qp=Qp, Qs=Qs,
P1=None, P2=P2)
return brenth(err, 1E-9, d_mixing*20)
elif d_mixing is None:
def err(d_mixing):
return P1 - liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Kp=Kp, Ks=Ks, d_nozzle=d_nozzle, d_mixing=d_mixing, Qp=Qp, Qs=Qs,
P1=None, P2=P2)
try:
return brenth(err, 1E-9, d_nozzle*20)
except:
return newton(err, d_nozzle*2) | r'''Calculates the remaining variable in a liquid jet pump when solving for
one if the inlet variables only and the rest of them are known. The
equation comes from conservation of energy and momentum in the mixing
chamber.
The variable to be solved for must be one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`.
.. math::
P_1 - P_2 = \frac{1}{2}\rho_pV_n^2(1+K_p)
- \frac{1}{2}\rho_s V_3^2(1+K_s)
Rearrange to express V3 in terms of Vn, and using the density ratio `C`,
the expression becomes:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p V_n^2\left[(1+K_p) - C(1+K_s)
\left(\frac{MR}{1-R}\right)^2\right]
Using the primary nozzle area and flow rate:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p \left(\frac{Q_p}{A_n}\right)^2
\left[(1+K_p) - C(1+K_s) \left(\frac{MR}{1-R}\right)^2\right]
For `P`, `P2`, `Qs`, and `Qp`, the equation can be rearranged explicitly
for them. For `d_mixing` and `d_nozzle`, a bounded solver is used searching
between 1E-9 m and 20 times the other diameter which was specified.
Parameters
----------
rhop : float
The density of the primary (motive) fluid, [kg/m^3]
rhos : float
The density of the secondary fluid (drawn from the vacuum chamber),
[kg/m^3]
Kp : float
The primary nozzle loss coefficient, [-]
Ks : float
The secondary inlet loss coefficient, [-]
d_nozzle : float, optional
The inside diameter of the primary fluid's nozle, [m]
d_mixing : float, optional
The diameter of the mixing chamber, [m]
Qp : float, optional
The volumetric flow rate of the primary fluid, [m^3/s]
Qs : float, optional
The volumetric flow rate of the secondary fluid, [m^3/s]
P1 : float, optional
The pressure of the primary fluid entering its nozzle, [Pa]
P2 : float, optional
The pressure of the secondary fluid at the entry of the ejector, [Pa]
Returns
-------
solution : float
The parameter not specified (one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`), (units of `m`, `m`, `m^3/s`, `m^3/s`,
`Pa`, or `Pa` respectively)
Notes
-----
The following SymPy code was used to obtain the analytical formulas (
they are not shown here due to their length):
>>> from sympy import *
>>> A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp = symbols('A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp')
>>> R = A_nozzle/A_mixing
>>> M = Qs/Qp
>>> C = rhos/rhop
>>> rhs = rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 )
>>> new = Eq(P1 - P2, rhs)
>>> #solve(new, Qp)
>>> #solve(new, Qs)
>>> #solve(new, P1)
>>> #solve(new, P2)
Examples
--------
Calculating primary fluid nozzle inlet pressure P1:
>>> liquid_jet_pump_ancillary(rhop=998., rhos=1098., Ks=0.11, Kp=.04,
... P2=133600, Qp=0.01, Qs=0.01, d_mixing=0.045, d_nozzle=0.02238)
426434.60314398084
References
----------
.. [1] Ejectors and Jet Pumps. Design and Performance for Incompressible
Liquid Flow. 85032. ESDU International PLC, 1985. | Below is the the instruction that describes the task:
### Input:
r'''Calculates the remaining variable in a liquid jet pump when solving for
one if the inlet variables only and the rest of them are known. The
equation comes from conservation of energy and momentum in the mixing
chamber.
The variable to be solved for must be one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`.
.. math::
P_1 - P_2 = \frac{1}{2}\rho_pV_n^2(1+K_p)
- \frac{1}{2}\rho_s V_3^2(1+K_s)
Rearrange to express V3 in terms of Vn, and using the density ratio `C`,
the expression becomes:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p V_n^2\left[(1+K_p) - C(1+K_s)
\left(\frac{MR}{1-R}\right)^2\right]
Using the primary nozzle area and flow rate:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p \left(\frac{Q_p}{A_n}\right)^2
\left[(1+K_p) - C(1+K_s) \left(\frac{MR}{1-R}\right)^2\right]
For `P`, `P2`, `Qs`, and `Qp`, the equation can be rearranged explicitly
for them. For `d_mixing` and `d_nozzle`, a bounded solver is used searching
between 1E-9 m and 20 times the other diameter which was specified.
Parameters
----------
rhop : float
The density of the primary (motive) fluid, [kg/m^3]
rhos : float
The density of the secondary fluid (drawn from the vacuum chamber),
[kg/m^3]
Kp : float
The primary nozzle loss coefficient, [-]
Ks : float
The secondary inlet loss coefficient, [-]
d_nozzle : float, optional
The inside diameter of the primary fluid's nozle, [m]
d_mixing : float, optional
The diameter of the mixing chamber, [m]
Qp : float, optional
The volumetric flow rate of the primary fluid, [m^3/s]
Qs : float, optional
The volumetric flow rate of the secondary fluid, [m^3/s]
P1 : float, optional
The pressure of the primary fluid entering its nozzle, [Pa]
P2 : float, optional
The pressure of the secondary fluid at the entry of the ejector, [Pa]
Returns
-------
solution : float
The parameter not specified (one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`), (units of `m`, `m`, `m^3/s`, `m^3/s`,
`Pa`, or `Pa` respectively)
Notes
-----
The following SymPy code was used to obtain the analytical formulas (
they are not shown here due to their length):
>>> from sympy import *
>>> A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp = symbols('A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp')
>>> R = A_nozzle/A_mixing
>>> M = Qs/Qp
>>> C = rhos/rhop
>>> rhs = rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 )
>>> new = Eq(P1 - P2, rhs)
>>> #solve(new, Qp)
>>> #solve(new, Qs)
>>> #solve(new, P1)
>>> #solve(new, P2)
Examples
--------
Calculating primary fluid nozzle inlet pressure P1:
>>> liquid_jet_pump_ancillary(rhop=998., rhos=1098., Ks=0.11, Kp=.04,
... P2=133600, Qp=0.01, Qs=0.01, d_mixing=0.045, d_nozzle=0.02238)
426434.60314398084
References
----------
.. [1] Ejectors and Jet Pumps. Design and Performance for Incompressible
Liquid Flow. 85032. ESDU International PLC, 1985.
### Response:
def liquid_jet_pump_ancillary(rhop, rhos, Kp, Ks, d_nozzle=None, d_mixing=None,
Qp=None, Qs=None, P1=None, P2=None):
r'''Calculates the remaining variable in a liquid jet pump when solving for
one if the inlet variables only and the rest of them are known. The
equation comes from conservation of energy and momentum in the mixing
chamber.
The variable to be solved for must be one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`.
.. math::
P_1 - P_2 = \frac{1}{2}\rho_pV_n^2(1+K_p)
- \frac{1}{2}\rho_s V_3^2(1+K_s)
Rearrange to express V3 in terms of Vn, and using the density ratio `C`,
the expression becomes:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p V_n^2\left[(1+K_p) - C(1+K_s)
\left(\frac{MR}{1-R}\right)^2\right]
Using the primary nozzle area and flow rate:
.. math::
P_1 - P_2 = \frac{1}{2}\rho_p \left(\frac{Q_p}{A_n}\right)^2
\left[(1+K_p) - C(1+K_s) \left(\frac{MR}{1-R}\right)^2\right]
For `P`, `P2`, `Qs`, and `Qp`, the equation can be rearranged explicitly
for them. For `d_mixing` and `d_nozzle`, a bounded solver is used searching
between 1E-9 m and 20 times the other diameter which was specified.
Parameters
----------
rhop : float
The density of the primary (motive) fluid, [kg/m^3]
rhos : float
The density of the secondary fluid (drawn from the vacuum chamber),
[kg/m^3]
Kp : float
The primary nozzle loss coefficient, [-]
Ks : float
The secondary inlet loss coefficient, [-]
d_nozzle : float, optional
The inside diameter of the primary fluid's nozle, [m]
d_mixing : float, optional
The diameter of the mixing chamber, [m]
Qp : float, optional
The volumetric flow rate of the primary fluid, [m^3/s]
Qs : float, optional
The volumetric flow rate of the secondary fluid, [m^3/s]
P1 : float, optional
The pressure of the primary fluid entering its nozzle, [Pa]
P2 : float, optional
The pressure of the secondary fluid at the entry of the ejector, [Pa]
Returns
-------
solution : float
The parameter not specified (one of `d_nozzle`, `d_mixing`,
`Qp`, `Qs`, `P1`, or `P2`), (units of `m`, `m`, `m^3/s`, `m^3/s`,
`Pa`, or `Pa` respectively)
Notes
-----
The following SymPy code was used to obtain the analytical formulas (
they are not shown here due to their length):
>>> from sympy import *
>>> A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp = symbols('A_nozzle, A_mixing, Qs, Qp, P1, P2, rhos, rhop, Ks, Kp')
>>> R = A_nozzle/A_mixing
>>> M = Qs/Qp
>>> C = rhos/rhop
>>> rhs = rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 )
>>> new = Eq(P1 - P2, rhs)
>>> #solve(new, Qp)
>>> #solve(new, Qs)
>>> #solve(new, P1)
>>> #solve(new, P2)
Examples
--------
Calculating primary fluid nozzle inlet pressure P1:
>>> liquid_jet_pump_ancillary(rhop=998., rhos=1098., Ks=0.11, Kp=.04,
... P2=133600, Qp=0.01, Qs=0.01, d_mixing=0.045, d_nozzle=0.02238)
426434.60314398084
References
----------
.. [1] Ejectors and Jet Pumps. Design and Performance for Incompressible
Liquid Flow. 85032. ESDU International PLC, 1985.
'''
unknowns = sum(i is None for i in (d_nozzle, d_mixing, Qs, Qp, P1, P2))
if unknowns > 1:
raise Exception('Too many unknowns')
elif unknowns < 1:
raise Exception('Overspecified')
C = rhos/rhop
if Qp is not None and Qs is not None:
M = Qs/Qp
if d_nozzle is not None:
A_nozzle = pi/4*d_nozzle*d_nozzle
if d_mixing is not None:
A_mixing = pi/4*d_mixing*d_mixing
R = A_nozzle/A_mixing
if P1 is None:
return rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) + P2
elif P2 is None:
return -rhop/2*(Qp/A_nozzle)**2*((1+Kp) - C*(1 + Ks)*((M*R)/(1-R))**2 ) + P1
elif Qs is None:
try:
return ((-2*A_nozzle**2*P1 + 2*A_nozzle**2*P2 + Kp*Qp**2*rhop + Qp**2*rhop)/(C*rhop*(Ks + 1)))**0.5*(A_mixing - A_nozzle)/A_nozzle
except ValueError:
return -1j
elif Qp is None:
return A_nozzle*((2*A_mixing**2*P1 - 2*A_mixing**2*P2 - 4*A_mixing*A_nozzle*P1 + 4*A_mixing*A_nozzle*P2 + 2*A_nozzle**2*P1 - 2*A_nozzle**2*P2 + C*Ks*Qs**2*rhop + C*Qs**2*rhop)/(rhop*(Kp + 1)))**0.5/(A_mixing - A_nozzle)
elif d_nozzle is None:
def err(d_nozzle):
return P1 - liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Kp=Kp, Ks=Ks, d_nozzle=d_nozzle, d_mixing=d_mixing, Qp=Qp, Qs=Qs,
P1=None, P2=P2)
return brenth(err, 1E-9, d_mixing*20)
elif d_mixing is None:
def err(d_mixing):
return P1 - liquid_jet_pump_ancillary(rhop=rhop, rhos=rhos, Kp=Kp, Ks=Ks, d_nozzle=d_nozzle, d_mixing=d_mixing, Qp=Qp, Qs=Qs,
P1=None, P2=P2)
try:
return brenth(err, 1E-9, d_nozzle*20)
except:
return newton(err, d_nozzle*2) |
def from_json(cls, obj):
"""Construct a MarathonConstraint from a parsed response.
:param dict attributes: object attributes from parsed response
:rtype: :class:`MarathonConstraint`
"""
if len(obj) == 2:
(field, operator) = obj
return cls(field, operator)
if len(obj) > 2:
(field, operator, value) = obj
return cls(field, operator, value) | Construct a MarathonConstraint from a parsed response.
:param dict attributes: object attributes from parsed response
:rtype: :class:`MarathonConstraint` | Below is the the instruction that describes the task:
### Input:
Construct a MarathonConstraint from a parsed response.
:param dict attributes: object attributes from parsed response
:rtype: :class:`MarathonConstraint`
### Response:
def from_json(cls, obj):
"""Construct a MarathonConstraint from a parsed response.
:param dict attributes: object attributes from parsed response
:rtype: :class:`MarathonConstraint`
"""
if len(obj) == 2:
(field, operator) = obj
return cls(field, operator)
if len(obj) > 2:
(field, operator, value) = obj
return cls(field, operator, value) |
def scatter_table(self, x, y, c, s, mark='*'):
"""Add a data series to the plot.
:param x: array containing x-values.
:param y: array containing y-values.
:param c: array containing values for the color of the mark.
:param s: array containing values for the size of the mark.
:param mark: the symbol used to mark the data point. May be None,
or any plot mark accepted by TikZ (e.g. ``*, x, +, o, square,
triangle``).
The dimensions of x, y, c and s should be equal. The c values will
be mapped to a colormap.
"""
# clear the background of the marks
# self._clear_plot_mark_background(x, y, mark, markstyle)
# draw the plot series over the background
options = self._parse_plot_options(mark)
s = [sqrt(si) for si in s]
plot_series = self._create_plot_tables_object(x, y, c, s, options)
self.plot_table_list.append(plot_series) | Add a data series to the plot.
:param x: array containing x-values.
:param y: array containing y-values.
:param c: array containing values for the color of the mark.
:param s: array containing values for the size of the mark.
:param mark: the symbol used to mark the data point. May be None,
or any plot mark accepted by TikZ (e.g. ``*, x, +, o, square,
triangle``).
The dimensions of x, y, c and s should be equal. The c values will
be mapped to a colormap. | Below is the the instruction that describes the task:
### Input:
Add a data series to the plot.
:param x: array containing x-values.
:param y: array containing y-values.
:param c: array containing values for the color of the mark.
:param s: array containing values for the size of the mark.
:param mark: the symbol used to mark the data point. May be None,
or any plot mark accepted by TikZ (e.g. ``*, x, +, o, square,
triangle``).
The dimensions of x, y, c and s should be equal. The c values will
be mapped to a colormap.
### Response:
def scatter_table(self, x, y, c, s, mark='*'):
"""Add a data series to the plot.
:param x: array containing x-values.
:param y: array containing y-values.
:param c: array containing values for the color of the mark.
:param s: array containing values for the size of the mark.
:param mark: the symbol used to mark the data point. May be None,
or any plot mark accepted by TikZ (e.g. ``*, x, +, o, square,
triangle``).
The dimensions of x, y, c and s should be equal. The c values will
be mapped to a colormap.
"""
# clear the background of the marks
# self._clear_plot_mark_background(x, y, mark, markstyle)
# draw the plot series over the background
options = self._parse_plot_options(mark)
s = [sqrt(si) for si in s]
plot_series = self._create_plot_tables_object(x, y, c, s, options)
self.plot_table_list.append(plot_series) |
def _import_module(module_path, classnames):
""" Tries to import the given Python module path. """
try:
imported_module = __import__(module_path, fromlist=classnames)
return imported_module
except ImportError:
# In case of an ImportError, the module being loaded generally does not exist. But an
# ImportError can occur if the module being loaded exists and another import located inside
# it failed.
#
# In order to provide a meaningfull traceback, the execution information can be inspected in
# order to determine which case to consider. If the execution information provides more than
# a certain amount of frames, this means that an ImportError occured while loading the
# initial Python module.
__, __, exc_traceback = sys.exc_info()
frames = traceback.extract_tb(exc_traceback)
if len(frames) > 1:
raise | Tries to import the given Python module path. | Below is the the instruction that describes the task:
### Input:
Tries to import the given Python module path.
### Response:
def _import_module(module_path, classnames):
""" Tries to import the given Python module path. """
try:
imported_module = __import__(module_path, fromlist=classnames)
return imported_module
except ImportError:
# In case of an ImportError, the module being loaded generally does not exist. But an
# ImportError can occur if the module being loaded exists and another import located inside
# it failed.
#
# In order to provide a meaningfull traceback, the execution information can be inspected in
# order to determine which case to consider. If the execution information provides more than
# a certain amount of frames, this means that an ImportError occured while loading the
# initial Python module.
__, __, exc_traceback = sys.exc_info()
frames = traceback.extract_tb(exc_traceback)
if len(frames) > 1:
raise |
def sendDAT(self):
"""This method sends the next DAT packet based on the data in the
context. It returns a boolean indicating whether the transfer is
finished."""
finished = False
blocknumber = self.context.next_block
# Test hook
if DELAY_BLOCK and DELAY_BLOCK == blocknumber:
import time
log.debug("Deliberately delaying 10 seconds...")
time.sleep(10)
dat = None
blksize = self.context.getBlocksize()
buffer = self.context.fileobj.read(blksize)
log.debug("Read %d bytes into buffer", len(buffer))
if len(buffer) < blksize:
log.info("Reached EOF on file %s"
% self.context.file_to_transfer)
finished = True
dat = TftpPacketDAT()
dat.data = buffer
dat.blocknumber = blocknumber
self.context.metrics.bytes += len(dat.data)
log.debug("Sending DAT packet %d", dat.blocknumber)
self.context.sock.sendto(dat.encode().buffer,
(self.context.host, self.context.tidport))
if self.context.packethook:
self.context.packethook(dat)
self.context.last_pkt = dat
return finished | This method sends the next DAT packet based on the data in the
context. It returns a boolean indicating whether the transfer is
finished. | Below is the the instruction that describes the task:
### Input:
This method sends the next DAT packet based on the data in the
context. It returns a boolean indicating whether the transfer is
finished.
### Response:
def sendDAT(self):
"""This method sends the next DAT packet based on the data in the
context. It returns a boolean indicating whether the transfer is
finished."""
finished = False
blocknumber = self.context.next_block
# Test hook
if DELAY_BLOCK and DELAY_BLOCK == blocknumber:
import time
log.debug("Deliberately delaying 10 seconds...")
time.sleep(10)
dat = None
blksize = self.context.getBlocksize()
buffer = self.context.fileobj.read(blksize)
log.debug("Read %d bytes into buffer", len(buffer))
if len(buffer) < blksize:
log.info("Reached EOF on file %s"
% self.context.file_to_transfer)
finished = True
dat = TftpPacketDAT()
dat.data = buffer
dat.blocknumber = blocknumber
self.context.metrics.bytes += len(dat.data)
log.debug("Sending DAT packet %d", dat.blocknumber)
self.context.sock.sendto(dat.encode().buffer,
(self.context.host, self.context.tidport))
if self.context.packethook:
self.context.packethook(dat)
self.context.last_pkt = dat
return finished |
def add_bpmn_files(self, filenames):
"""
Add all filenames in the given list to the parser's set.
"""
for filename in filenames:
f = open(filename, 'r')
try:
self.add_bpmn_xml(ET.parse(f), filename=filename)
finally:
f.close() | Add all filenames in the given list to the parser's set. | Below is the the instruction that describes the task:
### Input:
Add all filenames in the given list to the parser's set.
### Response:
def add_bpmn_files(self, filenames):
"""
Add all filenames in the given list to the parser's set.
"""
for filename in filenames:
f = open(filename, 'r')
try:
self.add_bpmn_xml(ET.parse(f), filename=filename)
finally:
f.close() |
def get_system_uptime_output_cmd_error(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_system_uptime = ET.Element("get_system_uptime")
config = get_system_uptime
output = ET.SubElement(get_system_uptime, "output")
cmd_error = ET.SubElement(output, "cmd-error")
cmd_error.text = kwargs.pop('cmd_error')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_system_uptime_output_cmd_error(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_system_uptime = ET.Element("get_system_uptime")
config = get_system_uptime
output = ET.SubElement(get_system_uptime, "output")
cmd_error = ET.SubElement(output, "cmd-error")
cmd_error.text = kwargs.pop('cmd_error')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def validate(cls, mapper_spec):
"""Validates mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec to validate.
Raises:
BadWriterParamsError: when Output writer class mismatch.
"""
if mapper_spec.output_writer_class() != cls:
raise errors.BadWriterParamsError("Output writer class mismatch")
params = output_writers._get_params(mapper_spec)
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in params:
raise errors.BadWriterParamsError(
"%s is required for the _HashingGCSOutputWriter" %
cls.BUCKET_NAME_PARAM) | Validates mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec to validate.
Raises:
BadWriterParamsError: when Output writer class mismatch. | Below is the the instruction that describes the task:
### Input:
Validates mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec to validate.
Raises:
BadWriterParamsError: when Output writer class mismatch.
### Response:
def validate(cls, mapper_spec):
"""Validates mapper specification.
Args:
mapper_spec: an instance of model.MapperSpec to validate.
Raises:
BadWriterParamsError: when Output writer class mismatch.
"""
if mapper_spec.output_writer_class() != cls:
raise errors.BadWriterParamsError("Output writer class mismatch")
params = output_writers._get_params(mapper_spec)
# Bucket Name is required
if cls.BUCKET_NAME_PARAM not in params:
raise errors.BadWriterParamsError(
"%s is required for the _HashingGCSOutputWriter" %
cls.BUCKET_NAME_PARAM) |
def _relay_message(self, message):
"""
Relay messages from the forum on the server to the client represented
by this actor.
"""
info("relaying message: {message}")
if not message.was_sent_by(self._id_factory):
self.pipe.send(message)
self.pipe.deliver() | Relay messages from the forum on the server to the client represented
by this actor. | Below is the the instruction that describes the task:
### Input:
Relay messages from the forum on the server to the client represented
by this actor.
### Response:
def _relay_message(self, message):
"""
Relay messages from the forum on the server to the client represented
by this actor.
"""
info("relaying message: {message}")
if not message.was_sent_by(self._id_factory):
self.pipe.send(message)
self.pipe.deliver() |
def get_ZPE(viblist):
"""Returns the zero point energy from a list of frequencies.
Parameters
----------
viblist : List of numbers or string of list of numbers.
Returns
-------
ZPE : Zero point energy in eV.
"""
if type(viblist) is str:
l = ast.literal_eval(viblist)
else:
l = viblist
l = [float(w) for w in l]
ZPE = 0.5*sum(l)*cm2ev
return(ZPE) | Returns the zero point energy from a list of frequencies.
Parameters
----------
viblist : List of numbers or string of list of numbers.
Returns
-------
ZPE : Zero point energy in eV. | Below is the the instruction that describes the task:
### Input:
Returns the zero point energy from a list of frequencies.
Parameters
----------
viblist : List of numbers or string of list of numbers.
Returns
-------
ZPE : Zero point energy in eV.
### Response:
def get_ZPE(viblist):
"""Returns the zero point energy from a list of frequencies.
Parameters
----------
viblist : List of numbers or string of list of numbers.
Returns
-------
ZPE : Zero point energy in eV.
"""
if type(viblist) is str:
l = ast.literal_eval(viblist)
else:
l = viblist
l = [float(w) for w in l]
ZPE = 0.5*sum(l)*cm2ev
return(ZPE) |
def get_task(self, name):
""" Returns a TaskConfig """
config = getattr(self, "tasks__{}".format(name))
if not config:
raise TaskNotFoundError("Task not found: {}".format(name))
return TaskConfig(config) | Returns a TaskConfig | Below is the the instruction that describes the task:
### Input:
Returns a TaskConfig
### Response:
def get_task(self, name):
""" Returns a TaskConfig """
config = getattr(self, "tasks__{}".format(name))
if not config:
raise TaskNotFoundError("Task not found: {}".format(name))
return TaskConfig(config) |
def rotate(self):
"""Rotate the files to disk.
This is done by calling `store.close()` on each store, bumping the
batchno and reopening the stores using their factories.
"""
self._logger.info('Rotating data files. New batch number will be: %s',
self.batchno + 1)
self.estore.close()
self.estore = None
self.batchno += 1
self.estore = self._open_event_store() | Rotate the files to disk.
This is done by calling `store.close()` on each store, bumping the
batchno and reopening the stores using their factories. | Below is the the instruction that describes the task:
### Input:
Rotate the files to disk.
This is done by calling `store.close()` on each store, bumping the
batchno and reopening the stores using their factories.
### Response:
def rotate(self):
"""Rotate the files to disk.
This is done by calling `store.close()` on each store, bumping the
batchno and reopening the stores using their factories.
"""
self._logger.info('Rotating data files. New batch number will be: %s',
self.batchno + 1)
self.estore.close()
self.estore = None
self.batchno += 1
self.estore = self._open_event_store() |
def get_postcodedata(self, postcode, nr, addition="", **params):
"""get_postcodedata - fetch information for 'postcode'.
Parameters
----------
postcode : string
The full (dutch) postcode
nr : int
The housenumber
addition : string (optional)
the extension to a housenumber
params : dict (optional)
a list of parameters to send with the request.
returns :
a response dictionary
"""
endpoint = 'rest/addresses/%s/%s' % (postcode, nr)
if addition:
endpoint += '/' + addition
retValue = self._API__request(endpoint, params=params)
# then it should match the houseNumberAdditions
if addition and addition.upper() not in \
[a.upper() for a in retValue['houseNumberAdditions']]:
raise PostcodeError(
"ERRHouseNumberAdditionInvalid",
{"exceptionId": "ERRHouseNumberAdditionInvalid",
"exception": "Invalid housenumber addition: '%s'" %
retValue['houseNumberAddition'],
"validHouseNumberAdditions":
retValue['houseNumberAdditions']})
return retValue | get_postcodedata - fetch information for 'postcode'.
Parameters
----------
postcode : string
The full (dutch) postcode
nr : int
The housenumber
addition : string (optional)
the extension to a housenumber
params : dict (optional)
a list of parameters to send with the request.
returns :
a response dictionary | Below is the the instruction that describes the task:
### Input:
get_postcodedata - fetch information for 'postcode'.
Parameters
----------
postcode : string
The full (dutch) postcode
nr : int
The housenumber
addition : string (optional)
the extension to a housenumber
params : dict (optional)
a list of parameters to send with the request.
returns :
a response dictionary
### Response:
def get_postcodedata(self, postcode, nr, addition="", **params):
"""get_postcodedata - fetch information for 'postcode'.
Parameters
----------
postcode : string
The full (dutch) postcode
nr : int
The housenumber
addition : string (optional)
the extension to a housenumber
params : dict (optional)
a list of parameters to send with the request.
returns :
a response dictionary
"""
endpoint = 'rest/addresses/%s/%s' % (postcode, nr)
if addition:
endpoint += '/' + addition
retValue = self._API__request(endpoint, params=params)
# then it should match the houseNumberAdditions
if addition and addition.upper() not in \
[a.upper() for a in retValue['houseNumberAdditions']]:
raise PostcodeError(
"ERRHouseNumberAdditionInvalid",
{"exceptionId": "ERRHouseNumberAdditionInvalid",
"exception": "Invalid housenumber addition: '%s'" %
retValue['houseNumberAddition'],
"validHouseNumberAdditions":
retValue['houseNumberAdditions']})
return retValue |
def _parse_requirements(self, input):
""" Parse a list of requirements specifications.
Lines that look like "foobar==1.0" are parsed; all other lines are
silently ignored.
Returns a tuple of tuples, where each inner tuple is:
(package, version) """
results = []
for line in input:
(package, version) = self._parse_line(line)
if package:
results.append((package, version))
return tuple(results) | Parse a list of requirements specifications.
Lines that look like "foobar==1.0" are parsed; all other lines are
silently ignored.
Returns a tuple of tuples, where each inner tuple is:
(package, version) | Below is the the instruction that describes the task:
### Input:
Parse a list of requirements specifications.
Lines that look like "foobar==1.0" are parsed; all other lines are
silently ignored.
Returns a tuple of tuples, where each inner tuple is:
(package, version)
### Response:
def _parse_requirements(self, input):
""" Parse a list of requirements specifications.
Lines that look like "foobar==1.0" are parsed; all other lines are
silently ignored.
Returns a tuple of tuples, where each inner tuple is:
(package, version) """
results = []
for line in input:
(package, version) = self._parse_line(line)
if package:
results.append((package, version))
return tuple(results) |
def keep_only_current_window(self):
"""
Close all other windows, except the current one.
"""
self.tab_pages = [TabPage(self.active_tab.active_window)]
self.active_tab_index = 0 | Close all other windows, except the current one. | Below is the instruction that describes the task:
### Input:
Close all other windows, except the current one.
### Response:
def keep_only_current_window(self):
"""
Close all other windows, except the current one.
"""
self.tab_pages = [TabPage(self.active_tab.active_window)]
self.active_tab_index = 0 |
def aes_b64_encrypt(value, secret, block_size=AES.block_size):
""" AES encrypt @value with @secret using the |CFB| mode of AES
with a cryptographically secure initialization vector.
-> (#str) AES encrypted @value
..
from vital.security import aes_encrypt, aes_decrypt
aes_encrypt("Hello, world",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw='
aes_decrypt(
"zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'Hello, world'
..
"""
# iv = randstr(block_size * 2, rng=random)
iv = randstr(block_size * 2)
cipher = AES.new(secret[:32], AES.MODE_CFB, iv[:block_size].encode())
return iv + b64encode(cipher.encrypt(
uniorbytes(value, bytes))).decode('utf-8') | AES encrypt @value with @secret using the |CFB| mode of AES
with a cryptographically secure initialization vector.
-> (#str) AES encrypted @value
..
from vital.security import aes_encrypt, aes_decrypt
aes_encrypt("Hello, world",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw='
aes_decrypt(
"zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'Hello, world'
.. | Below is the instruction that describes the task:
### Input:
AES encrypt @value with @secret using the |CFB| mode of AES
with a cryptographically secure initialization vector.
-> (#str) AES encrypted @value
..
from vital.security import aes_encrypt, aes_decrypt
aes_encrypt("Hello, world",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw='
aes_decrypt(
"zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'Hello, world'
..
### Response:
def aes_b64_encrypt(value, secret, block_size=AES.block_size):
""" AES encrypt @value with @secret using the |CFB| mode of AES
with a cryptographically secure initialization vector.
-> (#str) AES encrypted @value
..
from vital.security import aes_encrypt, aes_decrypt
aes_encrypt("Hello, world",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw='
aes_decrypt(
"zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'Hello, world'
..
"""
# iv = randstr(block_size * 2, rng=random)
iv = randstr(block_size * 2)
cipher = AES.new(secret[:32], AES.MODE_CFB, iv[:block_size].encode())
return iv + b64encode(cipher.encrypt(
uniorbytes(value, bytes))).decode('utf-8') |
def check_web_config(config_fname):
'''
Try to load the Django settings.
If this does not work, than settings file does not exist.
Returns:
Loaded configuration, or None.
'''
print("Looking for config file at {0} ...".format(config_fname))
config = RawConfigParser()
try:
config.readfp(open(config_fname))
return config
except IOError:
print("ERROR: Seems like the config file does not exist. Please call 'opensubmit-web configcreate' first, or specify a location with the '-c' option.")
return None | Try to load the Django settings.
If this does not work, than settings file does not exist.
Returns:
Loaded configuration, or None. | Below is the instruction that describes the task:
### Input:
Try to load the Django settings.
If this does not work, than settings file does not exist.
Returns:
Loaded configuration, or None.
### Response:
def check_web_config(config_fname):
'''
Try to load the Django settings.
If this does not work, than settings file does not exist.
Returns:
Loaded configuration, or None.
'''
print("Looking for config file at {0} ...".format(config_fname))
config = RawConfigParser()
try:
config.readfp(open(config_fname))
return config
except IOError:
print("ERROR: Seems like the config file does not exist. Please call 'opensubmit-web configcreate' first, or specify a location with the '-c' option.")
return None |
def disable_beacons(self):
'''
Enable beacons
'''
self.opts['beacons']['enabled'] = False
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacons_disabled_complete')
return True | Enable beacons | Below is the instruction that describes the task:
### Input:
Enable beacons
### Response:
def disable_beacons(self):
'''
Enable beacons
'''
self.opts['beacons']['enabled'] = False
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacons_disabled_complete')
return True |
def _diff_internal(d1,d2):
'''
d1 = {'a':'x','b':'y','c':'z'}
d2 = {'a':'x','b':'u','d':'v'}
_diff_internal(d1,d2)
_diff_internald2,d1)
'''
same =[]
kdiff =[]
vdiff = []
for key in d1:
value = d1[key]
if(key in d2):
if(value == d2[key]):
same.append(key)
else:
vdiff.append(key)
else:
kdiff.append(key)
return({'same':same,'kdiff':kdiff,'vdiff':vdiff}) | d1 = {'a':'x','b':'y','c':'z'}
d2 = {'a':'x','b':'u','d':'v'}
_diff_internal(d1,d2)
_diff_internald2,d1) | Below is the instruction that describes the task:
### Input:
d1 = {'a':'x','b':'y','c':'z'}
d2 = {'a':'x','b':'u','d':'v'}
_diff_internal(d1,d2)
_diff_internald2,d1)
### Response:
def _diff_internal(d1,d2):
'''
d1 = {'a':'x','b':'y','c':'z'}
d2 = {'a':'x','b':'u','d':'v'}
_diff_internal(d1,d2)
_diff_internald2,d1)
'''
same =[]
kdiff =[]
vdiff = []
for key in d1:
value = d1[key]
if(key in d2):
if(value == d2[key]):
same.append(key)
else:
vdiff.append(key)
else:
kdiff.append(key)
return({'same':same,'kdiff':kdiff,'vdiff':vdiff}) |
def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Exponential distribution
scale : float
scale parameter for the Exponential distribution
shape : float
tail thickness parameter for the Exponential distribution
skewness : float
skewness parameter for the Exponential distribution
Returns
----------
- Negative loglikelihood of the Exponential family
"""
return -np.sum(ss.expon.logpdf(x=y, scale=1/mean)) | Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Exponential distribution
scale : float
scale parameter for the Exponential distribution
shape : float
tail thickness parameter for the Exponential distribution
skewness : float
skewness parameter for the Exponential distribution
Returns
----------
- Negative loglikelihood of the Exponential family | Below is the instruction that describes the task:
### Input:
Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Exponential distribution
scale : float
scale parameter for the Exponential distribution
shape : float
tail thickness parameter for the Exponential distribution
skewness : float
skewness parameter for the Exponential distribution
Returns
----------
- Negative loglikelihood of the Exponential family
### Response:
def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Exponential distribution
scale : float
scale parameter for the Exponential distribution
shape : float
tail thickness parameter for the Exponential distribution
skewness : float
skewness parameter for the Exponential distribution
Returns
----------
- Negative loglikelihood of the Exponential family
"""
return -np.sum(ss.expon.logpdf(x=y, scale=1/mean)) |
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error, "Could not determine delimiter"
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect | Returns a dialect (or None) corresponding to the sample | Below is the instruction that describes the task:
### Input:
Returns a dialect (or None) corresponding to the sample
### Response:
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error, "Could not determine delimiter"
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect |
def author_names(self):
"""
Returns a dictionary like this:
{
"urn:cts:greekLit:tlg0012$$n1" : "Homer"
, "urn:cts:greekLit:tlg0012$$n2" : "Omero"
, ...
}
"""
return {"%s$$n%i" % (author.get_urn(), i): name[1]
for author in self.get_authors()
for i, name in enumerate(author.get_names())
if author.get_urn() is not None} | Returns a dictionary like this:
{
"urn:cts:greekLit:tlg0012$$n1" : "Homer"
, "urn:cts:greekLit:tlg0012$$n2" : "Omero"
, ...
} | Below is the instruction that describes the task:
### Input:
Returns a dictionary like this:
{
"urn:cts:greekLit:tlg0012$$n1" : "Homer"
, "urn:cts:greekLit:tlg0012$$n2" : "Omero"
, ...
}
### Response:
def author_names(self):
"""
Returns a dictionary like this:
{
"urn:cts:greekLit:tlg0012$$n1" : "Homer"
, "urn:cts:greekLit:tlg0012$$n2" : "Omero"
, ...
}
"""
return {"%s$$n%i" % (author.get_urn(), i): name[1]
for author in self.get_authors()
for i, name in enumerate(author.get_names())
if author.get_urn() is not None} |
def upload(target):
# type: (str) -> None
""" Upload the release to a pypi server.
TODO: Make sure the git directory is clean before allowing a release.
Args:
target (str):
pypi target as defined in ~/.pypirc
"""
log.info("Uploading to pypi server <33>{}".format(target))
with conf.within_proj_dir():
shell.run('python setup.py sdist register -r "{}"'.format(target))
shell.run('python setup.py sdist upload -r "{}"'.format(target)) | Upload the release to a pypi server.
TODO: Make sure the git directory is clean before allowing a release.
Args:
target (str):
pypi target as defined in ~/.pypirc | Below is the instruction that describes the task:
### Input:
Upload the release to a pypi server.
TODO: Make sure the git directory is clean before allowing a release.
Args:
target (str):
pypi target as defined in ~/.pypirc
### Response:
def upload(target):
# type: (str) -> None
""" Upload the release to a pypi server.
TODO: Make sure the git directory is clean before allowing a release.
Args:
target (str):
pypi target as defined in ~/.pypirc
"""
log.info("Uploading to pypi server <33>{}".format(target))
with conf.within_proj_dir():
shell.run('python setup.py sdist register -r "{}"'.format(target))
shell.run('python setup.py sdist upload -r "{}"'.format(target)) |
def parse(response):
"""Parse a postdata-style response format from the API into usable data"""
"""Split a a=1b=2c=3 string into a dictionary of pairs"""
tokens = {r[0]: r[1] for r in [r.split('=') for r in response.split("&")]}
# The odd dummy parameter is of no use to us
if 'dummy' in tokens:
del tokens['dummy']
"""
If we have key names that end in digits, these indicate the result set contains multiple sets
For example, planet0=Hoth&x=1&y=-10&planet1=Naboo&x=9&y=13 is actually data for two planets
Elements that end in digits (like tag0, tag1 for planets) are formatted like (tag0_1, tag1_1), so we rstrip
underscores afterwards.
"""
if re.match('\D\d+$', tokens.keys()[0]):
# Produce a list of dictionaries
set_tokens = []
for key, value in tokens:
key = re.match('^(.+\D)(\d+)$', key)
# If the key isn't in the format (i.e. a failsafe), skip it
if key is not None:
if key.group(1) not in set_tokens:
set_tokens[key.group(1)] = {}
set_tokens[key.group(1)][key.group(0).rstrip('_')] = value
tokens = set_tokens
return tokens | Parse a postdata-style response format from the API into usable data | Below is the instruction that describes the task:
### Input:
Parse a postdata-style response format from the API into usable data
### Response:
def parse(response):
"""Parse a postdata-style response format from the API into usable data"""
"""Split a a=1b=2c=3 string into a dictionary of pairs"""
tokens = {r[0]: r[1] for r in [r.split('=') for r in response.split("&")]}
# The odd dummy parameter is of no use to us
if 'dummy' in tokens:
del tokens['dummy']
"""
If we have key names that end in digits, these indicate the result set contains multiple sets
For example, planet0=Hoth&x=1&y=-10&planet1=Naboo&x=9&y=13 is actually data for two planets
Elements that end in digits (like tag0, tag1 for planets) are formatted like (tag0_1, tag1_1), so we rstrip
underscores afterwards.
"""
if re.match('\D\d+$', tokens.keys()[0]):
# Produce a list of dictionaries
set_tokens = []
for key, value in tokens:
key = re.match('^(.+\D)(\d+)$', key)
# If the key isn't in the format (i.e. a failsafe), skip it
if key is not None:
if key.group(1) not in set_tokens:
set_tokens[key.group(1)] = {}
set_tokens[key.group(1)][key.group(0).rstrip('_')] = value
tokens = set_tokens
return tokens |
def LAST(COND, N1, N2):
"""表达持续性
从前N1日到前N2日一直满足COND条件
Arguments:
COND {[type]} -- [description]
N1 {[type]} -- [description]
N2 {[type]} -- [description]
"""
N2 = 1 if N2 == 0 else N2
assert N2 > 0
assert N1 > N2
return COND.iloc[-N1:-N2].all() | 表达持续性
从前N1日到前N2日一直满足COND条件
Arguments:
COND {[type]} -- [description]
N1 {[type]} -- [description]
N2 {[type]} -- [description] | Below is the instruction that describes the task:
### Input:
表达持续性
从前N1日到前N2日一直满足COND条件
Arguments:
COND {[type]} -- [description]
N1 {[type]} -- [description]
N2 {[type]} -- [description]
### Response:
def LAST(COND, N1, N2):
"""表达持续性
从前N1日到前N2日一直满足COND条件
Arguments:
COND {[type]} -- [description]
N1 {[type]} -- [description]
N2 {[type]} -- [description]
"""
N2 = 1 if N2 == 0 else N2
assert N2 > 0
assert N1 > N2
return COND.iloc[-N1:-N2].all() |
def as_freq(data_series, freq, atomic_freq="1 Min", series_type="cumulative"):
"""Resample data to a different frequency.
This method can be used to upsample or downsample meter data. The
assumption it makes to do so is that meter data is constant and averaged
over the given periods. For instance, to convert billing-period data to
daily data, this method first upsamples to the atomic frequency
(1 minute freqency, by default), "spreading" usage evenly across all
minutes in each period. Then it downsamples to hourly frequency and
returns that result. With instantaneous series, the data is copied to all
contiguous time intervals and the mean over `freq` is returned.
**Caveats**:
- This method gives a fair amount of flexibility in
resampling as long as you are OK with the assumption that usage is
constant over the period (this assumption is generally broken in
observed data at large enough frequencies, so this caveat should not be
taken lightly).
Parameters
----------
data_series : :any:`pandas.Series`
Data to resample. Should have a :any:`pandas.DatetimeIndex`.
freq : :any:`str`
The frequency to resample to. This should be given in a form recognized
by the :any:`pandas.Series.resample` method.
atomic_freq : :any:`str`, optional
The "atomic" frequency of the intermediate data form. This can be
adjusted to a higher atomic frequency to increase speed or memory
performance.
series_type : :any:`str`, {'cumulative', ‘instantaneous’},
default 'cumulative'
Type of data sampling. 'cumulative' data can be spread over smaller
time intervals and is aggregated using addition (e.g. meter data).
'instantaneous' data is copied (not spread) over smaller time intervals
and is aggregated by averaging (e.g. weather data).
Returns
-------
resampled_data : :any:`pandas.Series`
Data resampled to the given frequency.
"""
# TODO(philngo): make sure this complies with CalTRACK 2.2.2.1
if not isinstance(data_series, pd.Series):
raise ValueError(
"expected series, got object with class {}".format(data_series.__class__)
)
if data_series.empty:
return data_series
series = remove_duplicates(data_series)
target_freq = pd.Timedelta(atomic_freq)
timedeltas = (series.index[1:] - series.index[:-1]).append(
pd.TimedeltaIndex([pd.NaT])
)
if series_type == "cumulative":
spread_factor = target_freq.total_seconds() / timedeltas.total_seconds()
series_spread = series * spread_factor
atomic_series = series_spread.asfreq(atomic_freq, method="ffill")
resampled = atomic_series.resample(freq).sum()
resampled_with_nans = atomic_series.resample(freq).mean()
resampled = resampled[resampled_with_nans.notnull()].reindex(resampled.index)
elif series_type == "instantaneous":
atomic_series = series.asfreq(atomic_freq, method="ffill")
resampled = atomic_series.resample(freq).mean()
if resampled.index[-1] < series.index[-1]:
# this adds a null at the end using the target frequency
last_index = pd.date_range(resampled.index[-1], freq=freq, periods=2)[1:]
resampled = (
pd.concat([resampled, pd.Series(np.nan, index=last_index)])
.resample(freq)
.mean()
)
return resampled | Resample data to a different frequency.
This method can be used to upsample or downsample meter data. The
assumption it makes to do so is that meter data is constant and averaged
over the given periods. For instance, to convert billing-period data to
daily data, this method first upsamples to the atomic frequency
(1 minute freqency, by default), "spreading" usage evenly across all
minutes in each period. Then it downsamples to hourly frequency and
returns that result. With instantaneous series, the data is copied to all
contiguous time intervals and the mean over `freq` is returned.
**Caveats**:
- This method gives a fair amount of flexibility in
resampling as long as you are OK with the assumption that usage is
constant over the period (this assumption is generally broken in
observed data at large enough frequencies, so this caveat should not be
taken lightly).
Parameters
----------
data_series : :any:`pandas.Series`
Data to resample. Should have a :any:`pandas.DatetimeIndex`.
freq : :any:`str`
The frequency to resample to. This should be given in a form recognized
by the :any:`pandas.Series.resample` method.
atomic_freq : :any:`str`, optional
The "atomic" frequency of the intermediate data form. This can be
adjusted to a higher atomic frequency to increase speed or memory
performance.
series_type : :any:`str`, {'cumulative', ‘instantaneous’},
default 'cumulative'
Type of data sampling. 'cumulative' data can be spread over smaller
time intervals and is aggregated using addition (e.g. meter data).
'instantaneous' data is copied (not spread) over smaller time intervals
and is aggregated by averaging (e.g. weather data).
Returns
-------
resampled_data : :any:`pandas.Series`
Data resampled to the given frequency. | Below is the instruction that describes the task:
### Input:
Resample data to a different frequency.
This method can be used to upsample or downsample meter data. The
assumption it makes to do so is that meter data is constant and averaged
over the given periods. For instance, to convert billing-period data to
daily data, this method first upsamples to the atomic frequency
(1 minute freqency, by default), "spreading" usage evenly across all
minutes in each period. Then it downsamples to hourly frequency and
returns that result. With instantaneous series, the data is copied to all
contiguous time intervals and the mean over `freq` is returned.
**Caveats**:
- This method gives a fair amount of flexibility in
resampling as long as you are OK with the assumption that usage is
constant over the period (this assumption is generally broken in
observed data at large enough frequencies, so this caveat should not be
taken lightly).
Parameters
----------
data_series : :any:`pandas.Series`
Data to resample. Should have a :any:`pandas.DatetimeIndex`.
freq : :any:`str`
The frequency to resample to. This should be given in a form recognized
by the :any:`pandas.Series.resample` method.
atomic_freq : :any:`str`, optional
The "atomic" frequency of the intermediate data form. This can be
adjusted to a higher atomic frequency to increase speed or memory
performance.
series_type : :any:`str`, {'cumulative', ‘instantaneous’},
default 'cumulative'
Type of data sampling. 'cumulative' data can be spread over smaller
time intervals and is aggregated using addition (e.g. meter data).
'instantaneous' data is copied (not spread) over smaller time intervals
and is aggregated by averaging (e.g. weather data).
Returns
-------
resampled_data : :any:`pandas.Series`
Data resampled to the given frequency.
### Response:
def as_freq(data_series, freq, atomic_freq="1 Min", series_type="cumulative"):
"""Resample data to a different frequency.
This method can be used to upsample or downsample meter data. The
assumption it makes to do so is that meter data is constant and averaged
over the given periods. For instance, to convert billing-period data to
daily data, this method first upsamples to the atomic frequency
(1 minute freqency, by default), "spreading" usage evenly across all
minutes in each period. Then it downsamples to hourly frequency and
returns that result. With instantaneous series, the data is copied to all
contiguous time intervals and the mean over `freq` is returned.
**Caveats**:
- This method gives a fair amount of flexibility in
resampling as long as you are OK with the assumption that usage is
constant over the period (this assumption is generally broken in
observed data at large enough frequencies, so this caveat should not be
taken lightly).
Parameters
----------
data_series : :any:`pandas.Series`
Data to resample. Should have a :any:`pandas.DatetimeIndex`.
freq : :any:`str`
The frequency to resample to. This should be given in a form recognized
by the :any:`pandas.Series.resample` method.
atomic_freq : :any:`str`, optional
The "atomic" frequency of the intermediate data form. This can be
adjusted to a higher atomic frequency to increase speed or memory
performance.
series_type : :any:`str`, {'cumulative', ‘instantaneous’},
default 'cumulative'
Type of data sampling. 'cumulative' data can be spread over smaller
time intervals and is aggregated using addition (e.g. meter data).
'instantaneous' data is copied (not spread) over smaller time intervals
and is aggregated by averaging (e.g. weather data).
Returns
-------
resampled_data : :any:`pandas.Series`
Data resampled to the given frequency.
"""
# TODO(philngo): make sure this complies with CalTRACK 2.2.2.1
if not isinstance(data_series, pd.Series):
raise ValueError(
"expected series, got object with class {}".format(data_series.__class__)
)
if data_series.empty:
return data_series
series = remove_duplicates(data_series)
target_freq = pd.Timedelta(atomic_freq)
timedeltas = (series.index[1:] - series.index[:-1]).append(
pd.TimedeltaIndex([pd.NaT])
)
if series_type == "cumulative":
spread_factor = target_freq.total_seconds() / timedeltas.total_seconds()
series_spread = series * spread_factor
atomic_series = series_spread.asfreq(atomic_freq, method="ffill")
resampled = atomic_series.resample(freq).sum()
resampled_with_nans = atomic_series.resample(freq).mean()
resampled = resampled[resampled_with_nans.notnull()].reindex(resampled.index)
elif series_type == "instantaneous":
atomic_series = series.asfreq(atomic_freq, method="ffill")
resampled = atomic_series.resample(freq).mean()
if resampled.index[-1] < series.index[-1]:
# this adds a null at the end using the target frequency
last_index = pd.date_range(resampled.index[-1], freq=freq, periods=2)[1:]
resampled = (
pd.concat([resampled, pd.Series(np.nan, index=last_index)])
.resample(freq)
.mean()
)
return resampled |
def BackAssign(cls,
other_entity_klass,
this_entity_backpopulate_field,
other_entity_backpopulate_field,
is_many_to_one=False):
"""
Assign defined one side mapping relationship to other side.
For example, each employee belongs to one department, then one department
includes many employees. If you defined each employee's department,
this method will assign employees to ``Department.employees`` field.
This is an one to many (department to employee) example.
Another example would be, each employee has multiple tags. If you defined
tags for each employee, this method will assign employees to
``Tag.employees`` field. This is and many to many (employee to tag) example.
Support:
- many to many mapping
- one to many mapping
:param other_entity_klass: a :class:`Constant` class.
:param this_entity_backpopulate_field: str
:param other_entity_backpopulate_field: str
:param is_many_to_one: bool
:return:
"""
data = dict()
for _, other_klass in other_entity_klass.Subclasses():
other_field_value = getattr(
other_klass, this_entity_backpopulate_field)
if isinstance(other_field_value, (tuple, list)):
for self_klass in other_field_value:
self_key = self_klass.__name__
try:
data[self_key].append(other_klass)
except KeyError:
data[self_key] = [other_klass, ]
else:
if other_field_value is not None:
self_klass = other_field_value
self_key = self_klass.__name__
try:
data[self_key].append(other_klass)
except KeyError:
data[self_key] = [other_klass, ]
if is_many_to_one:
new_data = dict()
for key, value in data.items():
try:
new_data[key] = value[0]
except: # pragma: no cover
pass
data = new_data
for self_key, other_klass_list in data.items():
setattr(getattr(cls, self_key),
other_entity_backpopulate_field, other_klass_list) | Assign defined one side mapping relationship to other side.
For example, each employee belongs to one department, then one department
includes many employees. If you defined each employee's department,
this method will assign employees to ``Department.employees`` field.
This is an one to many (department to employee) example.
Another example would be, each employee has multiple tags. If you defined
tags for each employee, this method will assign employees to
``Tag.employees`` field. This is and many to many (employee to tag) example.
Support:
- many to many mapping
- one to many mapping
:param other_entity_klass: a :class:`Constant` class.
:param this_entity_backpopulate_field: str
:param other_entity_backpopulate_field: str
:param is_many_to_one: bool
:return: | Below is the instruction that describes the task:
### Input:
Assign defined one side mapping relationship to other side.
For example, each employee belongs to one department, then one department
includes many employees. If you defined each employee's department,
this method will assign employees to ``Department.employees`` field.
This is an one to many (department to employee) example.
Another example would be, each employee has multiple tags. If you defined
tags for each employee, this method will assign employees to
``Tag.employees`` field. This is and many to many (employee to tag) example.
Support:
- many to many mapping
- one to many mapping
:param other_entity_klass: a :class:`Constant` class.
:param this_entity_backpopulate_field: str
:param other_entity_backpopulate_field: str
:param is_many_to_one: bool
:return:
### Response:
def BackAssign(cls,
other_entity_klass,
this_entity_backpopulate_field,
other_entity_backpopulate_field,
is_many_to_one=False):
"""
Assign defined one side mapping relationship to other side.
For example, each employee belongs to one department, then one department
includes many employees. If you defined each employee's department,
this method will assign employees to ``Department.employees`` field.
This is an one to many (department to employee) example.
Another example would be, each employee has multiple tags. If you defined
tags for each employee, this method will assign employees to
``Tag.employees`` field. This is and many to many (employee to tag) example.
Support:
- many to many mapping
- one to many mapping
:param other_entity_klass: a :class:`Constant` class.
:param this_entity_backpopulate_field: str
:param other_entity_backpopulate_field: str
:param is_many_to_one: bool
:return:
"""
data = dict()
for _, other_klass in other_entity_klass.Subclasses():
other_field_value = getattr(
other_klass, this_entity_backpopulate_field)
if isinstance(other_field_value, (tuple, list)):
for self_klass in other_field_value:
self_key = self_klass.__name__
try:
data[self_key].append(other_klass)
except KeyError:
data[self_key] = [other_klass, ]
else:
if other_field_value is not None:
self_klass = other_field_value
self_key = self_klass.__name__
try:
data[self_key].append(other_klass)
except KeyError:
data[self_key] = [other_klass, ]
if is_many_to_one:
new_data = dict()
for key, value in data.items():
try:
new_data[key] = value[0]
except: # pragma: no cover
pass
data = new_data
for self_key, other_klass_list in data.items():
setattr(getattr(cls, self_key),
other_entity_backpopulate_field, other_klass_list) |
def get_operations(self,
indices: Sequence[LogicalIndex],
qubits: Sequence[ops.Qid]
) -> ops.OP_TREE:
"""Gets the logical operations to apply to qubits.""" | Gets the logical operations to apply to qubits. | Below is the instruction that describes the task:
### Input:
Gets the logical operations to apply to qubits.
### Response:
def get_operations(self,
indices: Sequence[LogicalIndex],
qubits: Sequence[ops.Qid]
) -> ops.OP_TREE:
"""Gets the logical operations to apply to qubits.""" |
def execstr_dict(dict_, local_name=None, exclude_list=None, explicit=False):
"""
returns execable python code that declares variables using keys and values
execstr_dict
Args:
dict_ (dict):
local_name (str): optional: local name of dictionary. Specifying this
is much safer
exclude_list (list):
Returns:
str: execstr --- the executable string that will put keys from dict
into local vars
CommandLine:
python -m utool.util_dbg --test-execstr_dict
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary)
>>> exec(execstr)
>>> assert 'a' in vars() and 'b' in vars(), 'execstr failed'
>>> assert b is False and a is True, 'execstr failed'
>>> result = execstr
>>> print(result)
a = my_dictionary['a']
b = my_dictionary['b']
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> import utool as ut
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary)
>>> locals_ = locals()
>>> exec(execstr, locals_)
>>> a, b = ut.dict_take(locals_, ['a', 'b'])
>>> assert 'a' in locals_ and 'b' in locals_, 'execstr failed'
>>> assert b is False and a is True, 'execstr failed'
>>> result = execstr
>>> print(result)
a = my_dictionary['a']
b = my_dictionary['b']
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> import utool as ut
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary, explicit=True)
>>> result = execstr
>>> print(result)
a = True
b = False
"""
import utool as ut
if explicit:
expr_list = []
for (key, val) in sorted(dict_.items()):
assert isinstance(key, six.string_types), 'keys must be strings'
expr_list.append('%s = %s' % (key, ut.repr2(val),))
execstr = '\n'.join(expr_list)
return execstr
else:
if local_name is None:
# Magic way of getting the local name of dict_
local_name = get_varname_from_locals(dict_, get_parent_frame().f_locals)
try:
if exclude_list is None:
exclude_list = []
assert isinstance(exclude_list, list)
exclude_list.append(local_name)
expr_list = []
assert isinstance(dict_, dict), 'incorrect type type(dict_)=%r, dict_=%r' % (type(dict), dict_)
for (key, val) in sorted(dict_.items()):
assert isinstance(key, six.string_types), 'keys must be strings'
if not is_valid_varname(key):
continue
if not any((fnmatch.fnmatch(key, pat) for pat in exclude_list)):
expr = '%s = %s[%s]' % (key, local_name, ut.repr2(key))
expr_list.append(expr)
execstr = '\n'.join(expr_list)
return execstr
except Exception as ex:
locals_ = locals()
ut.printex(ex, key_list=['locals_'])
raise | returns execable python code that declares variables using keys and values
execstr_dict
Args:
dict_ (dict):
local_name (str): optional: local name of dictionary. Specifying this
is much safer
exclude_list (list):
Returns:
str: execstr --- the executable string that will put keys from dict
into local vars
CommandLine:
python -m utool.util_dbg --test-execstr_dict
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary)
>>> exec(execstr)
>>> assert 'a' in vars() and 'b' in vars(), 'execstr failed'
>>> assert b is False and a is True, 'execstr failed'
>>> result = execstr
>>> print(result)
a = my_dictionary['a']
b = my_dictionary['b']
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> import utool as ut
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary)
>>> locals_ = locals()
>>> exec(execstr, locals_)
>>> a, b = ut.dict_take(locals_, ['a', 'b'])
>>> assert 'a' in locals_ and 'b' in locals_, 'execstr failed'
>>> assert b is False and a is True, 'execstr failed'
>>> result = execstr
>>> print(result)
a = my_dictionary['a']
b = my_dictionary['b']
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> import utool as ut
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary, explicit=True)
>>> result = execstr
>>> print(result)
a = True
b = False | Below is the the instruction that describes the task:
### Input:
returns execable python code that declares variables using keys and values
execstr_dict
Args:
dict_ (dict):
local_name (str): optional: local name of dictionary. Specifying this
is much safer
exclude_list (list):
Returns:
str: execstr --- the executable string that will put keys from dict
into local vars
CommandLine:
python -m utool.util_dbg --test-execstr_dict
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary)
>>> exec(execstr)
>>> assert 'a' in vars() and 'b' in vars(), 'execstr failed'
>>> assert b is False and a is True, 'execstr failed'
>>> result = execstr
>>> print(result)
a = my_dictionary['a']
b = my_dictionary['b']
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> import utool as ut
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary)
>>> locals_ = locals()
>>> exec(execstr, locals_)
>>> a, b = ut.dict_take(locals_, ['a', 'b'])
>>> assert 'a' in locals_ and 'b' in locals_, 'execstr failed'
>>> assert b is False and a is True, 'execstr failed'
>>> result = execstr
>>> print(result)
a = my_dictionary['a']
b = my_dictionary['b']
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> import utool as ut
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary, explicit=True)
>>> result = execstr
>>> print(result)
a = True
b = False
### Response:
def execstr_dict(dict_, local_name=None, exclude_list=None, explicit=False):
"""
returns execable python code that declares variables using keys and values
execstr_dict
Args:
dict_ (dict):
local_name (str): optional: local name of dictionary. Specifying this
is much safer
exclude_list (list):
Returns:
str: execstr --- the executable string that will put keys from dict
into local vars
CommandLine:
python -m utool.util_dbg --test-execstr_dict
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary)
>>> exec(execstr)
>>> assert 'a' in vars() and 'b' in vars(), 'execstr failed'
>>> assert b is False and a is True, 'execstr failed'
>>> result = execstr
>>> print(result)
a = my_dictionary['a']
b = my_dictionary['b']
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> import utool as ut
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary)
>>> locals_ = locals()
>>> exec(execstr, locals_)
>>> a, b = ut.dict_take(locals_, ['a', 'b'])
>>> assert 'a' in locals_ and 'b' in locals_, 'execstr failed'
>>> assert b is False and a is True, 'execstr failed'
>>> result = execstr
>>> print(result)
a = my_dictionary['a']
b = my_dictionary['b']
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dbg import * # NOQA
>>> import utool as ut
>>> my_dictionary = {'a': True, 'b': False}
>>> execstr = execstr_dict(my_dictionary, explicit=True)
>>> result = execstr
>>> print(result)
a = True
b = False
"""
import utool as ut
if explicit:
expr_list = []
for (key, val) in sorted(dict_.items()):
assert isinstance(key, six.string_types), 'keys must be strings'
expr_list.append('%s = %s' % (key, ut.repr2(val),))
execstr = '\n'.join(expr_list)
return execstr
else:
if local_name is None:
# Magic way of getting the local name of dict_
local_name = get_varname_from_locals(dict_, get_parent_frame().f_locals)
try:
if exclude_list is None:
exclude_list = []
assert isinstance(exclude_list, list)
exclude_list.append(local_name)
expr_list = []
assert isinstance(dict_, dict), 'incorrect type type(dict_)=%r, dict_=%r' % (type(dict), dict_)
for (key, val) in sorted(dict_.items()):
assert isinstance(key, six.string_types), 'keys must be strings'
if not is_valid_varname(key):
continue
if not any((fnmatch.fnmatch(key, pat) for pat in exclude_list)):
expr = '%s = %s[%s]' % (key, local_name, ut.repr2(key))
expr_list.append(expr)
execstr = '\n'.join(expr_list)
return execstr
except Exception as ex:
locals_ = locals()
ut.printex(ex, key_list=['locals_'])
raise |
def _ratelimited_get(self, *args, **kwargs):
"""Perform get request, handling rate limiting."""
with self._ratelimiter:
resp = self.session.get(*args, **kwargs)
# It's possible that Space-Track will return HTTP status 500 with a
# query rate limit violation. This can happen if a script is cancelled
# before it has finished sleeping to satisfy the rate limit and it is
# started again.
#
# Let's catch this specific instance and retry once if it happens.
if resp.status_code == 500:
# Let's only retry if the error page tells us it's a rate limit
# violation.
if 'violated your query rate limit' in resp.text:
# Mimic the RateLimiter callback behaviour.
until = time.time() + self._ratelimiter.period
t = threading.Thread(target=self._ratelimit_callback, args=(until,))
t.daemon = True
t.start()
time.sleep(self._ratelimiter.period)
# Now retry
with self._ratelimiter:
resp = self.session.get(*args, **kwargs)
return resp | Perform get request, handling rate limiting. | Below is the the instruction that describes the task:
### Input:
Perform get request, handling rate limiting.
### Response:
def _ratelimited_get(self, *args, **kwargs):
"""Perform get request, handling rate limiting."""
with self._ratelimiter:
resp = self.session.get(*args, **kwargs)
# It's possible that Space-Track will return HTTP status 500 with a
# query rate limit violation. This can happen if a script is cancelled
# before it has finished sleeping to satisfy the rate limit and it is
# started again.
#
# Let's catch this specific instance and retry once if it happens.
if resp.status_code == 500:
# Let's only retry if the error page tells us it's a rate limit
# violation.
if 'violated your query rate limit' in resp.text:
# Mimic the RateLimiter callback behaviour.
until = time.time() + self._ratelimiter.period
t = threading.Thread(target=self._ratelimit_callback, args=(until,))
t.daemon = True
t.start()
time.sleep(self._ratelimiter.period)
# Now retry
with self._ratelimiter:
resp = self.session.get(*args, **kwargs)
return resp |
def _AbandonQueuedTasks(self):
"""Marks queued tasks abandoned.
This method does not lock the manager and should be called by a method
holding the manager lock.
"""
# Abandon all tasks after they're identified so as not to modify the
# dict while iterating over it.
tasks_to_abandon = []
for task_identifier, task in iter(self._tasks_queued.items()):
logger.debug('Abandoned queued task: {0:s}.'.format(task_identifier))
tasks_to_abandon.append((task_identifier, task))
for task_identifier, task in tasks_to_abandon:
self._tasks_abandoned[task_identifier] = task
del self._tasks_queued[task_identifier] | Marks queued tasks abandoned.
This method does not lock the manager and should be called by a method
holding the manager lock. | Below is the the instruction that describes the task:
### Input:
Marks queued tasks abandoned.
This method does not lock the manager and should be called by a method
holding the manager lock.
### Response:
def _AbandonQueuedTasks(self):
"""Marks queued tasks abandoned.
This method does not lock the manager and should be called by a method
holding the manager lock.
"""
# Abandon all tasks after they're identified so as not to modify the
# dict while iterating over it.
tasks_to_abandon = []
for task_identifier, task in iter(self._tasks_queued.items()):
logger.debug('Abandoned queued task: {0:s}.'.format(task_identifier))
tasks_to_abandon.append((task_identifier, task))
for task_identifier, task in tasks_to_abandon:
self._tasks_abandoned[task_identifier] = task
del self._tasks_queued[task_identifier] |
def stop(label, at=None, remove_from_labels=False, stop_once=True):
"""Stops the countdown"""
t = at if at is not None else time.time()
if label not in labels:
return None
timer = Marker().loads(labels[label])
if timer.is_running() or (timer.is_stopped() and not stop_once):
timer.stop(t)
if remove_from_labels:
del labels[label]
else:
labels[label] = timer.dumps()
return timer.duration() | Stops the countdown | Below is the the instruction that describes the task:
### Input:
Stops the countdown
### Response:
def stop(label, at=None, remove_from_labels=False, stop_once=True):
"""Stops the countdown"""
t = at if at is not None else time.time()
if label not in labels:
return None
timer = Marker().loads(labels[label])
if timer.is_running() or (timer.is_stopped() and not stop_once):
timer.stop(t)
if remove_from_labels:
del labels[label]
else:
labels[label] = timer.dumps()
return timer.duration() |
def download_collection_configs(self, collection, fs_path):
'''
Downloads ZK Directory to the FileSystem.
:param collection str: Name of the collection (zk config name)
:param fs_path str: Destination filesystem path.
'''
if not self.kz.exists('/configs/{}'.format(collection)):
raise ZookeeperError("Collection doesn't exist in Zookeeper. Current Collections are: {} ".format(self.kz.get_children('/configs')))
self._download_dir('/configs/{}'.format(collection), fs_path + os.sep + collection) | Downloads ZK Directory to the FileSystem.
:param collection str: Name of the collection (zk config name)
:param fs_path str: Destination filesystem path. | Below is the the instruction that describes the task:
### Input:
Downloads ZK Directory to the FileSystem.
:param collection str: Name of the collection (zk config name)
:param fs_path str: Destination filesystem path.
### Response:
def download_collection_configs(self, collection, fs_path):
'''
Downloads ZK Directory to the FileSystem.
:param collection str: Name of the collection (zk config name)
:param fs_path str: Destination filesystem path.
'''
if not self.kz.exists('/configs/{}'.format(collection)):
raise ZookeeperError("Collection doesn't exist in Zookeeper. Current Collections are: {} ".format(self.kz.get_children('/configs')))
self._download_dir('/configs/{}'.format(collection), fs_path + os.sep + collection) |
def lex(self, text, start=0):
"""Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).
"""
max = len(text)
eaten = start
s = self.state
r = self.regexes
toks = self.toks
while eaten < max:
for match in r[s].finditer(text, eaten):
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
eaten += len(toktext)
yield (tok.name, toktext)
if tok.next:
s = tok.next
break
self.state = s | Lexically analyze `text`.
Yields pairs (`name`, `tokentext`). | Below is the the instruction that describes the task:
### Input:
Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).
### Response:
def lex(self, text, start=0):
"""Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).
"""
max = len(text)
eaten = start
s = self.state
r = self.regexes
toks = self.toks
while eaten < max:
for match in r[s].finditer(text, eaten):
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
eaten += len(toktext)
yield (tok.name, toktext)
if tok.next:
s = tok.next
break
self.state = s |
def tangent_bbox_intersection(first, second, intersections):
r"""Check if two curves with tangent bounding boxes intersect.
.. note::
This is a helper for :func:`intersect_one_round`. These
functions are used (directly or indirectly) by
:func:`_all_intersections` exclusively, and that function has a
Fortran equivalent.
If the bounding boxes are tangent, intersection can
only occur along that tangency.
If the curve is **not** a line, the **only** way the curve can touch
the bounding box is at the endpoints. To see this, consider the
component
.. math::
x(s) = \sum_j W_j x_j.
Since :math:`W_j > 0` for :math:`s \in \left(0, 1\right)`, if there
is some :math:`k` with :math:`x_k < M = \max x_j`, then for any
interior :math:`s`
.. math::
x(s) < \sum_j W_j M = M.
If all :math:`x_j = M`, then :math:`B(s)` falls on the line
:math:`x = M`. (A similar argument holds for the other three
component-extrema types.)
.. note::
This function assumes callers will not pass curves that can be
linearized / are linear. In :func:`_all_intersections`, curves
are pre-processed to do any linearization before the
subdivision / intersection process begins.
Args:
first (SubdividedCurve): First curve being intersected (assumed in
:math:\mathbf{R}^2`).
second (SubdividedCurve): Second curve being intersected (assumed in
:math:\mathbf{R}^2`).
intersections (list): A list of already encountered
intersections. If these curves intersect at their tangency,
then those intersections will be added to this list.
"""
node_first1 = first.nodes[:, 0]
node_first2 = first.nodes[:, -1]
node_second1 = second.nodes[:, 0]
node_second2 = second.nodes[:, -1]
endpoint_check(
first, node_first1, 0.0, second, node_second1, 0.0, intersections
)
endpoint_check(
first, node_first1, 0.0, second, node_second2, 1.0, intersections
)
endpoint_check(
first, node_first2, 1.0, second, node_second1, 0.0, intersections
)
endpoint_check(
first, node_first2, 1.0, second, node_second2, 1.0, intersections
) | r"""Check if two curves with tangent bounding boxes intersect.
.. note::
This is a helper for :func:`intersect_one_round`. These
functions are used (directly or indirectly) by
:func:`_all_intersections` exclusively, and that function has a
Fortran equivalent.
If the bounding boxes are tangent, intersection can
only occur along that tangency.
If the curve is **not** a line, the **only** way the curve can touch
the bounding box is at the endpoints. To see this, consider the
component
.. math::
x(s) = \sum_j W_j x_j.
Since :math:`W_j > 0` for :math:`s \in \left(0, 1\right)`, if there
is some :math:`k` with :math:`x_k < M = \max x_j`, then for any
interior :math:`s`
.. math::
x(s) < \sum_j W_j M = M.
If all :math:`x_j = M`, then :math:`B(s)` falls on the line
:math:`x = M`. (A similar argument holds for the other three
component-extrema types.)
.. note::
This function assumes callers will not pass curves that can be
linearized / are linear. In :func:`_all_intersections`, curves
are pre-processed to do any linearization before the
subdivision / intersection process begins.
Args:
first (SubdividedCurve): First curve being intersected (assumed in
:math:\mathbf{R}^2`).
second (SubdividedCurve): Second curve being intersected (assumed in
:math:\mathbf{R}^2`).
intersections (list): A list of already encountered
intersections. If these curves intersect at their tangency,
then those intersections will be added to this list. | Below is the the instruction that describes the task:
### Input:
r"""Check if two curves with tangent bounding boxes intersect.
.. note::
This is a helper for :func:`intersect_one_round`. These
functions are used (directly or indirectly) by
:func:`_all_intersections` exclusively, and that function has a
Fortran equivalent.
If the bounding boxes are tangent, intersection can
only occur along that tangency.
If the curve is **not** a line, the **only** way the curve can touch
the bounding box is at the endpoints. To see this, consider the
component
.. math::
x(s) = \sum_j W_j x_j.
Since :math:`W_j > 0` for :math:`s \in \left(0, 1\right)`, if there
is some :math:`k` with :math:`x_k < M = \max x_j`, then for any
interior :math:`s`
.. math::
x(s) < \sum_j W_j M = M.
If all :math:`x_j = M`, then :math:`B(s)` falls on the line
:math:`x = M`. (A similar argument holds for the other three
component-extrema types.)
.. note::
This function assumes callers will not pass curves that can be
linearized / are linear. In :func:`_all_intersections`, curves
are pre-processed to do any linearization before the
subdivision / intersection process begins.
Args:
first (SubdividedCurve): First curve being intersected (assumed in
:math:\mathbf{R}^2`).
second (SubdividedCurve): Second curve being intersected (assumed in
:math:\mathbf{R}^2`).
intersections (list): A list of already encountered
intersections. If these curves intersect at their tangency,
then those intersections will be added to this list.
### Response:
def tangent_bbox_intersection(first, second, intersections):
r"""Check if two curves with tangent bounding boxes intersect.
.. note::
This is a helper for :func:`intersect_one_round`. These
functions are used (directly or indirectly) by
:func:`_all_intersections` exclusively, and that function has a
Fortran equivalent.
If the bounding boxes are tangent, intersection can
only occur along that tangency.
If the curve is **not** a line, the **only** way the curve can touch
the bounding box is at the endpoints. To see this, consider the
component
.. math::
x(s) = \sum_j W_j x_j.
Since :math:`W_j > 0` for :math:`s \in \left(0, 1\right)`, if there
is some :math:`k` with :math:`x_k < M = \max x_j`, then for any
interior :math:`s`
.. math::
x(s) < \sum_j W_j M = M.
If all :math:`x_j = M`, then :math:`B(s)` falls on the line
:math:`x = M`. (A similar argument holds for the other three
component-extrema types.)
.. note::
This function assumes callers will not pass curves that can be
linearized / are linear. In :func:`_all_intersections`, curves
are pre-processed to do any linearization before the
subdivision / intersection process begins.
Args:
first (SubdividedCurve): First curve being intersected (assumed in
:math:\mathbf{R}^2`).
second (SubdividedCurve): Second curve being intersected (assumed in
:math:\mathbf{R}^2`).
intersections (list): A list of already encountered
intersections. If these curves intersect at their tangency,
then those intersections will be added to this list.
"""
node_first1 = first.nodes[:, 0]
node_first2 = first.nodes[:, -1]
node_second1 = second.nodes[:, 0]
node_second2 = second.nodes[:, -1]
endpoint_check(
first, node_first1, 0.0, second, node_second1, 0.0, intersections
)
endpoint_check(
first, node_first1, 0.0, second, node_second2, 1.0, intersections
)
endpoint_check(
first, node_first2, 1.0, second, node_second1, 0.0, intersections
)
endpoint_check(
first, node_first2, 1.0, second, node_second2, 1.0, intersections
) |
def date(self, *args, **kwargs):
"""Compare attributes of pairs with date algorithm.
Shortcut of :class:`recordlinkage.compare.Date`::
from recordlinkage.compare import Date
indexer = recordlinkage.Compare()
indexer.add(Date())
"""
compare = Date(*args, **kwargs)
self.add(compare)
return self | Compare attributes of pairs with date algorithm.
Shortcut of :class:`recordlinkage.compare.Date`::
from recordlinkage.compare import Date
indexer = recordlinkage.Compare()
indexer.add(Date()) | Below is the the instruction that describes the task:
### Input:
Compare attributes of pairs with date algorithm.
Shortcut of :class:`recordlinkage.compare.Date`::
from recordlinkage.compare import Date
indexer = recordlinkage.Compare()
indexer.add(Date())
### Response:
def date(self, *args, **kwargs):
"""Compare attributes of pairs with date algorithm.
Shortcut of :class:`recordlinkage.compare.Date`::
from recordlinkage.compare import Date
indexer = recordlinkage.Compare()
indexer.add(Date())
"""
compare = Date(*args, **kwargs)
self.add(compare)
return self |
def attr(**context):
"""
Decorator that add attributes into func.
Added attributes can be access outside via function's `func_dict` property.
"""
#TODO(Jim Zhan) FIXME
def decorator(func):
def wrapped_func(*args, **kwargs):
for key, value in context.items():
print key, value
return func(*args, **kwargs)
return wraps(func)(decorator)
return decorator | Decorator that add attributes into func.
Added attributes can be access outside via function's `func_dict` property. | Below is the the instruction that describes the task:
### Input:
Decorator that add attributes into func.
Added attributes can be access outside via function's `func_dict` property.
### Response:
def attr(**context):
"""
Decorator that add attributes into func.
Added attributes can be access outside via function's `func_dict` property.
"""
#TODO(Jim Zhan) FIXME
def decorator(func):
def wrapped_func(*args, **kwargs):
for key, value in context.items():
print key, value
return func(*args, **kwargs)
return wraps(func)(decorator)
return decorator |
def Wow64EnableWow64FsRedirection(Wow64FsEnableRedirection):
"""
This function may not work reliably when there are nested calls. Therefore,
this function has been replaced by the L{Wow64DisableWow64FsRedirection}
and L{Wow64RevertWow64FsRedirection} functions.
@see: U{http://msdn.microsoft.com/en-us/library/windows/desktop/aa365744(v=vs.85).aspx}
"""
_Wow64EnableWow64FsRedirection = windll.kernel32.Wow64EnableWow64FsRedirection
_Wow64EnableWow64FsRedirection.argtypes = [BOOLEAN]
_Wow64EnableWow64FsRedirection.restype = BOOLEAN
_Wow64EnableWow64FsRedirection.errcheck = RaiseIfZero | This function may not work reliably when there are nested calls. Therefore,
this function has been replaced by the L{Wow64DisableWow64FsRedirection}
and L{Wow64RevertWow64FsRedirection} functions.
@see: U{http://msdn.microsoft.com/en-us/library/windows/desktop/aa365744(v=vs.85).aspx} | Below is the the instruction that describes the task:
### Input:
This function may not work reliably when there are nested calls. Therefore,
this function has been replaced by the L{Wow64DisableWow64FsRedirection}
and L{Wow64RevertWow64FsRedirection} functions.
@see: U{http://msdn.microsoft.com/en-us/library/windows/desktop/aa365744(v=vs.85).aspx}
### Response:
def Wow64EnableWow64FsRedirection(Wow64FsEnableRedirection):
"""
This function may not work reliably when there are nested calls. Therefore,
this function has been replaced by the L{Wow64DisableWow64FsRedirection}
and L{Wow64RevertWow64FsRedirection} functions.
@see: U{http://msdn.microsoft.com/en-us/library/windows/desktop/aa365744(v=vs.85).aspx}
"""
_Wow64EnableWow64FsRedirection = windll.kernel32.Wow64EnableWow64FsRedirection
_Wow64EnableWow64FsRedirection.argtypes = [BOOLEAN]
_Wow64EnableWow64FsRedirection.restype = BOOLEAN
_Wow64EnableWow64FsRedirection.errcheck = RaiseIfZero |
def task_transaction(channel):
"""Ensures a task is fetched and acknowledged atomically."""
with channel.lock:
if channel.poll(0):
task = channel.recv()
channel.send(Acknowledgement(os.getpid(), task.id))
else:
raise RuntimeError("Race condition between workers")
return task | Ensures a task is fetched and acknowledged atomically. | Below is the the instruction that describes the task:
### Input:
Ensures a task is fetched and acknowledged atomically.
### Response:
def task_transaction(channel):
"""Ensures a task is fetched and acknowledged atomically."""
with channel.lock:
if channel.poll(0):
task = channel.recv()
channel.send(Acknowledgement(os.getpid(), task.id))
else:
raise RuntimeError("Race condition between workers")
return task |
def remove_environment(self, name=None, path=None, **kwargs):
"""
Remove an environment entirely.
See ``remove``.
"""
return self.remove(name=name, path=path, all=True, **kwargs) | Remove an environment entirely.
See ``remove``. | Below is the the instruction that describes the task:
### Input:
Remove an environment entirely.
See ``remove``.
### Response:
def remove_environment(self, name=None, path=None, **kwargs):
"""
Remove an environment entirely.
See ``remove``.
"""
return self.remove(name=name, path=path, all=True, **kwargs) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.