| code | docstring | text |
|---|---|---|
def check( state_engine, token_op, block_id, checked_ops ):
"""
Verify that a token transfer operation is permitted.
* the token feature must exist
* the sender must be unlocked---i.e. able to send at this point
* the sender must have enough balance of the given token to send the amount requested
* the token value must be positive
* the consensus hash must be valid
Return True if accepted
Return False if not
"""
epoch_features = get_epoch_features(block_id)
if EPOCH_FEATURE_TOKEN_TRANSFER not in epoch_features:
log.warning("Token transfers are not enabled in this epoch")
return False
consensus_hash = token_op['consensus_hash']
address = token_op['address']
recipient_address = token_op['recipient_address']
token_type = token_op['token_units']
token_value = token_op['token_fee']
# token value must be positive
if token_value <= 0:
log.warning("Zero-value token transfer from {}".format(address))
return False
# can't send to ourselves
if address == recipient_address:
log.warning('Cannot transfer token from the account to itself ({})'.format(address))
return False
# consensus hash must be valid
if not state_engine.is_consensus_hash_valid(block_id, consensus_hash):
log.warning('Invalid consensus hash {}'.format(consensus_hash))
return False
# sender account must exist
account_info = state_engine.get_account(address, token_type)
if account_info is None:
log.warning("No account for {} ({})".format(address, token_type))
return False
# sender must not be transfer-locked
if block_id < account_info['lock_transfer_block_id']:
log.warning('Account {} is blocked from transferring tokens until block height {}'.format(address, account_info['lock_transfer_block_id']))
return False
# sender must have enough balance of the token
account_balance = state_engine.get_account_balance(account_info)
if account_balance < token_value:
log.warning('Account {} has {} {}; tried to send {}'.format(address, account_balance, token_type, token_value))
return False
receiver_account = state_engine.get_account(recipient_address, token_type)
if receiver_account is not None:
if not receiver_account['receive_whitelisted']:
log.warning('Receiver account {} is not whitelisted'.format(recipient_address))
return False
log.debug("Account {} will pay {} {} to {}".format(address, token_value, token_type, recipient_address))
# will execute a debit against the sender address
token_operation_put_account_payment_info(token_op, address, token_type, token_value)
# will execute a credit against the receiver address
token_operation_put_account_credit_info(token_op, recipient_address, token_type, token_value)
# preserve token_fee as a string to prevent overflow
token_op['token_fee'] = '{}'.format(token_op['token_fee'])
return True | Verify that a token transfer operation is permitted.
* the token feature must exist
* the sender must be unlocked---i.e. able to send at this point
* the sender must have enough balance of the given token to send the amount requested
* the token value must be positive
* the consensus hash must be valid
Return True if accepted
Return False if not | Below is the instruction that describes the task:
### Input:
Verify that a token transfer operation is permitted.
* the token feature must exist
* the sender must be unlocked---i.e. able to send at this point
* the sender must have enough balance of the given token to send the amount requested
* the token value must be positive
* the consensus hash must be valid
Return True if accepted
Return False if not
### Response:
def check( state_engine, token_op, block_id, checked_ops ):
"""
Verify that a token transfer operation is permitted.
* the token feature must exist
* the sender must be unlocked---i.e. able to send at this point
* the sender must have enough balance of the given token to send the amount requested
* the token value must be positive
* the consensus hash must be valid
Return True if accepted
Return False if not
"""
epoch_features = get_epoch_features(block_id)
if EPOCH_FEATURE_TOKEN_TRANSFER not in epoch_features:
log.warning("Token transfers are not enabled in this epoch")
return False
consensus_hash = token_op['consensus_hash']
address = token_op['address']
recipient_address = token_op['recipient_address']
token_type = token_op['token_units']
token_value = token_op['token_fee']
# token value must be positive
if token_value <= 0:
log.warning("Zero-value token transfer from {}".format(address))
return False
# can't send to ourselves
if address == recipient_address:
log.warning('Cannot transfer token from the account to itself ({})'.format(address))
return False
# consensus hash must be valid
if not state_engine.is_consensus_hash_valid(block_id, consensus_hash):
log.warning('Invalid consensus hash {}'.format(consensus_hash))
return False
# sender account must exist
account_info = state_engine.get_account(address, token_type)
if account_info is None:
log.warning("No account for {} ({})".format(address, token_type))
return False
# sender must not be transfer-locked
if block_id < account_info['lock_transfer_block_id']:
log.warning('Account {} is blocked from transferring tokens until block height {}'.format(address, account_info['lock_transfer_block_id']))
return False
# sender must have enough balance of the token
account_balance = state_engine.get_account_balance(account_info)
if account_balance < token_value:
log.warning('Account {} has {} {}; tried to send {}'.format(address, account_balance, token_type, token_value))
return False
receiver_account = state_engine.get_account(recipient_address, token_type)
if receiver_account is not None:
if not receiver_account['receive_whitelisted']:
log.warning('Receiver account {} is not whitelisted'.format(recipient_address))
return False
log.debug("Account {} will pay {} {} to {}".format(address, token_value, token_type, recipient_address))
# will execute a debit against the sender address
token_operation_put_account_payment_info(token_op, address, token_type, token_value)
# will execute a credit against the receiver address
token_operation_put_account_credit_info(token_op, recipient_address, token_type, token_value)
# preserve token_fee as a string to prevent overflow
token_op['token_fee'] = '{}'.format(token_op['token_fee'])
return True |
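For orientation, here is a hypothetical token_op payload sketch covering only the fields the checks above read; the keys come from the function body, but every value is invented for illustration:
token_op = {
    'consensus_hash': 'e7b0c2...',             # hypothetical; must be valid at block_id
    'address': 'SENDER_ADDRESS',               # hypothetical; account to debit
    'recipient_address': 'RECIPIENT_ADDRESS',  # hypothetical; account to credit
    'token_units': 'TOKEN_TYPE',               # hypothetical; which token balance is checked
    'token_fee': 100,                          # positive amount; stored back as a string on success
}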
def returnFalseNegatives(kw, noisePct, n, theta):
"""
Generate a weight vector W, with kw non-zero components. Generate 1000
noisy versions of W and return the match statistics. Noisy version of W is
generated by randomly setting noisePct of the non-zero components to zero.
:param kw: k for the weight vectors
:param noisePct: percent noise, from 0 to 1
:param n: dimensionality of input vector
:param theta: threshold for matching after dot product
:return: percent that matched, number that matched, total match comparisons
"""
W = getSparseTensor(kw, n, 1, fixedRange=1.0 / kw)
# Get permuted versions of W and see how many match
m2 = 10
inputVectors = getPermutedTensors(W, kw, n, m2, noisePct)
dot = inputVectors.matmul(W.t())
numMatches = ((dot >= theta).sum()).item()
pctMatches = numMatches / float(m2)
return pctMatches, numMatches, m2 | Generate a weight vector W, with kw non-zero components. Generate 1000
noisy versions of W and return the match statistics. Noisy version of W is
generated by randomly setting noisePct of the non-zero components to zero.
:param kw: k for the weight vectors
:param noisePct: percent noise, from 0 to 1
:param n: dimensionality of input vector
:param theta: threshold for matching after dot product
:return: percent that matched, number that matched, total match comparisons | Below is the instruction that describes the task:
### Input:
Generate a weight vector W, with kw non-zero components. Generate 1000
noisy versions of W and return the match statistics. Noisy version of W is
generated by randomly setting noisePct of the non-zero components to zero.
:param kw: k for the weight vectors
:param noisePct: percent noise, from 0 to 1
:param n: dimensionality of input vector
:param theta: threshold for matching after dot product
:return: percent that matched, number that matched, total match comparisons
### Response:
def returnFalseNegatives(kw, noisePct, n, theta):
"""
Generate a weight vector W, with kw non-zero components. Generate 1000
noisy versions of W and return the match statistics. Noisy version of W is
generated by randomly setting noisePct of the non-zero components to zero.
:param kw: k for the weight vectors
:param noisePct: percent noise, from 0 to 1
:param n: dimensionality of input vector
:param theta: threshold for matching after dot product
:return: percent that matched, number that matched, total match comparisons
"""
W = getSparseTensor(kw, n, 1, fixedRange=1.0 / kw)
# Get permuted versions of W and see how many match
m2 = 10
inputVectors = getPermutedTensors(W, kw, n, m2, noisePct)
dot = inputVectors.matmul(W.t())
numMatches = ((dot >= theta).sum()).item()
pctMatches = numMatches / float(m2)
return pctMatches, numMatches, m2 |
def tags_uuids(self, uuids, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Retrieves tags for all streams with the provided UUIDs
Arguments:
[uuids]: list of UUIDs
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{}) | Retrieves tags for all streams with the provided UUIDs
Arguments:
[uuids]: list of UUIDs
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | Below is the instruction that describes the task:
### Input:
Retrieves tags for all streams with the provided UUIDs
Arguments:
[uuids]: list of UUIDs
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
### Response:
def tags_uuids(self, uuids, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Retrieves tags for all streams with the provided UUIDs
Arguments:
[uuids]: list of UUIDs
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{}) |
def float_field_data(field, **kwargs):
"""
Return random value for FloatField
>>> result = any_form_field(forms.FloatField(max_value=200, min_value=100))
>>> type(result)
<type 'str'>
>>> float(result) >=100, float(result) <=200
(True, True)
"""
min_value = 0
max_value = 100
from django.core.validators import MinValueValidator, MaxValueValidator
for elem in field.validators:
if isinstance(elem, MinValueValidator):
min_value = elem.limit_value
if isinstance(elem, MaxValueValidator):
max_value = elem.limit_value
min_value = kwargs.get('min_value', min_value)
max_value = kwargs.get('max_value', max_value)
precision = kwargs.get('precision', 3)
return str(xunit.any_float(min_value=min_value, max_value=max_value, precision=precision)) | Return random value for FloatField
>>> result = any_form_field(forms.FloatField(max_value=200, min_value=100))
>>> type(result)
<type 'str'>
>>> float(result) >=100, float(result) <=200
(True, True) | Below is the instruction that describes the task:
### Input:
Return random value for FloatField
>>> result = any_form_field(forms.FloatField(max_value=200, min_value=100))
>>> type(result)
<type 'str'>
>>> float(result) >=100, float(result) <=200
(True, True)
### Response:
def float_field_data(field, **kwargs):
"""
Return random value for FloatField
>>> result = any_form_field(forms.FloatField(max_value=200, min_value=100))
>>> type(result)
<type 'str'>
>>> float(result) >=100, float(result) <=200
(True, True)
"""
min_value = 0
max_value = 100
from django.core.validators import MinValueValidator, MaxValueValidator
for elem in field.validators:
if isinstance(elem, MinValueValidator):
min_value = elem.limit_value
if isinstance(elem, MaxValueValidator):
max_value = elem.limit_value
min_value = kwargs.get('min_value', min_value)
max_value = kwargs.get('max_value', max_value)
precision = kwargs.get('precision', 3)
return str(xunit.any_float(min_value=min_value, max_value=max_value, precision=precision)) |
def find_sample_min_std(self, Intensities):
'''
find the best interpretation with the minimum standard deviation (in units of percent % !)
'''
Best_array = []
best_array_std_perc = inf
Best_array_tmp = []
Best_interpretations = {}
Best_interpretations_tmp = {}
for this_specimen in list(Intensities.keys()):
for value in Intensities[this_specimen]:
Best_interpretations_tmp[this_specimen] = value
Best_array_tmp = [value]
all_other_specimens = list(Intensities.keys())
all_other_specimens.remove(this_specimen)
for other_specimen in all_other_specimens:
closest_value = self.find_close_value(
Intensities[other_specimen], value)
Best_array_tmp.append(closest_value)
Best_interpretations_tmp[other_specimen] = closest_value
if std(Best_array_tmp, ddof=1) / mean(Best_array_tmp) < best_array_std_perc:
Best_array = Best_array_tmp
best_array_std_perc = std(
Best_array, ddof=1) / mean(Best_array_tmp)
Best_interpretations = copy.deepcopy(
Best_interpretations_tmp)
Best_interpretations_tmp = {}
return Best_interpretations, mean(Best_array), std(Best_array, ddof=1) | find the best interpretation with the minimum standard deviation (in units of percent % !) | Below is the instruction that describes the task:
### Input:
find the best interpretation with the minimum standard deviation (in units of percent % !)
### Response:
def find_sample_min_std(self, Intensities):
'''
find the best interpretation with the minimum standard deviation (in units of percent % !)
'''
Best_array = []
best_array_std_perc = inf
Best_array_tmp = []
Best_interpretations = {}
Best_interpretations_tmp = {}
for this_specimen in list(Intensities.keys()):
for value in Intensities[this_specimen]:
Best_interpretations_tmp[this_specimen] = value
Best_array_tmp = [value]
all_other_specimens = list(Intensities.keys())
all_other_specimens.remove(this_specimen)
for other_specimen in all_other_specimens:
closest_value = self.find_close_value(
Intensities[other_specimen], value)
Best_array_tmp.append(closest_value)
Best_interpretations_tmp[other_specimen] = closest_value
if std(Best_array_tmp, ddof=1) / mean(Best_array_tmp) < best_array_std_perc:
Best_array = Best_array_tmp
best_array_std_perc = std(
Best_array, ddof=1) / mean(Best_array_tmp)
Best_interpretations = copy.deepcopy(
Best_interpretations_tmp)
Best_interpretations_tmp = {}
return Best_interpretations, mean(Best_array), std(Best_array, ddof=1) |
def set_sync_info(self, filename, mtime, size):
"""Store mtime/size when local and remote file was last synchronized.
This is stored in the local file's folder as meta data.
The information is used to detect conflicts, i.e. if both source and
remote had been modified by other means since last synchronization.
"""
assert self.target.is_local()
remote_target = self.target.peer
ps = self.dir["peer_sync"].setdefault(remote_target.get_id(), {})
ut = time.time() # UTC time stamp
ps[":last_sync"] = ut # this is an invalid file name to avoid conflicts
pse = ps[filename] = {"m": mtime, "s": size, "u": ut}
if self.PRETTY:
ps[":last_sync_str"] = pretty_stamp(
ut
) # use an invalid file name to avoid conflicts
pse["mtime_str"] = pretty_stamp(mtime) if mtime else "(directory)"
pse["uploaded_str"] = pretty_stamp(ut)
self.modified_sync = True | Store mtime/size when local and remote file was last synchronized.
This is stored in the local file's folder as meta data.
The information is used to detect conflicts, i.e. if both source and
remote had been modified by other means since last synchronization. | Below is the instruction that describes the task:
### Input:
Store mtime/size when local and remote file was last synchronized.
This is stored in the local file's folder as meta data.
The information is used to detect conflicts, i.e. if both source and
remote had been modified by other means since last synchronization.
### Response:
def set_sync_info(self, filename, mtime, size):
"""Store mtime/size when local and remote file was last synchronized.
This is stored in the local file's folder as meta data.
The information is used to detect conflicts, i.e. if both source and
remote had been modified by other means since last synchronization.
"""
assert self.target.is_local()
remote_target = self.target.peer
ps = self.dir["peer_sync"].setdefault(remote_target.get_id(), {})
ut = time.time() # UTC time stamp
ps[":last_sync"] = ut # this is an invalid file name to avoid conflicts
pse = ps[filename] = {"m": mtime, "s": size, "u": ut}
if self.PRETTY:
ps[":last_sync_str"] = pretty_stamp(
ut
) # use an invalid file name to avoid conflicts
pse["mtime_str"] = pretty_stamp(mtime) if mtime else "(directory)"
pse["uploaded_str"] = pretty_stamp(ut)
self.modified_sync = True |
def _prepare_find(cls, *args, **kw):
"""Execute a find and return the resulting queryset using combined plain and parametric query generation.
Additionally, performs argument case normalization, refer to the `_prepare_query` method's docstring.
"""
cls, collection, query, options = cls._prepare_query(
cls.FIND_MAPPING,
cls.FIND_OPTIONS,
*args,
**kw
)
if 'await' in options:
raise TypeError("Await is hard-deprecated as reserved keyword in Python 3.7, use wait instead.")
if 'cursor_type' in options and {'tail', 'wait'} & set(options):
raise TypeError("Can not combine cursor_type and tail/wait arguments.")
elif options.pop('tail', False):
options['cursor_type'] = CursorType.TAILABLE_AWAIT if options.pop('wait', True) else CursorType.TAILABLE
elif 'wait' in options:
raise TypeError("Wait option only applies to tailing cursors.")
modifiers = options.get('modifiers', dict())
if 'max_time_ms' in options:
modifiers['$maxTimeMS'] = options.pop('max_time_ms')
if modifiers:
options['modifiers'] = modifiers
return cls, collection, query, options | Execute a find and return the resulting queryset using combined plain and parametric query generation.
Additionally, performs argument case normalization, refer to the `_prepare_query` method's docstring. | Below is the instruction that describes the task:
### Input:
Execute a find and return the resulting queryset using combined plain and parametric query generation.
Additionally, performs argument case normalization, refer to the `_prepare_query` method's docstring.
### Response:
def _prepare_find(cls, *args, **kw):
"""Execute a find and return the resulting queryset using combined plain and parametric query generation.
Additionally, performs argument case normalization, refer to the `_prepare_query` method's docstring.
"""
cls, collection, query, options = cls._prepare_query(
cls.FIND_MAPPING,
cls.FIND_OPTIONS,
*args,
**kw
)
if 'await' in options:
raise TypeError("Await is hard-deprecated as reserved keyword in Python 3.7, use wait instead.")
if 'cursor_type' in options and {'tail', 'wait'} & set(options):
raise TypeError("Can not combine cursor_type and tail/wait arguments.")
elif options.pop('tail', False):
options['cursor_type'] = CursorType.TAILABLE_AWAIT if options.pop('wait', True) else CursorType.TAILABLE
elif 'wait' in options:
raise TypeError("Wait option only applies to tailing cursors.")
modifiers = options.get('modifiers', dict())
if 'max_time_ms' in options:
modifiers['$maxTimeMS'] = options.pop('max_time_ms')
if modifiers:
options['modifiers'] = modifiers
return cls, collection, query, options |
def get_jobs(self, recursive=True):
"""Return a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link`
"""
if recursive:
ret_dict = self.jobs.copy()
ret_dict.update(self._scatter_link.get_jobs(recursive))
return ret_dict
return self.jobs | Return a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link` | Below is the instruction that describes the task:
### Input:
Return a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link`
### Response:
def get_jobs(self, recursive=True):
"""Return a dictionary with all the jobs
If recursive is True this will include jobs from all internal `Link`
"""
if recursive:
ret_dict = self.jobs.copy()
ret_dict.update(self._scatter_link.get_jobs(recursive))
return ret_dict
return self.jobs |
def list_resources(self, lang):
"""Return a sequence of resources for a given lang.
Each Resource is a dict containing the slug, name, i18n_type,
source_language_code and the category.
"""
return registry.registry.http_handler.get(
'/api/2/project/%s/resources/' % (
self.get_project_slug(lang),)
) | Return a sequence of resources for a given lang.
Each Resource is a dict containing the slug, name, i18n_type,
source_language_code and the category. | Below is the instruction that describes the task:
### Input:
Return a sequence of resources for a given lang.
Each Resource is a dict containing the slug, name, i18n_type,
source_language_code and the category.
### Response:
def list_resources(self, lang):
"""Return a sequence of resources for a given lang.
Each Resource is a dict containing the slug, name, i18n_type,
source_language_code and the category.
"""
return registry.registry.http_handler.get(
'/api/2/project/%s/resources/' % (
self.get_project_slug(lang),)
) |
def map_field(field, func, dict_sequence):
"""
Apply given function to value of given key in every dictionary in sequence and
set the result as new value for that key.
"""
for item in dict_sequence:
try:
item[field] = func(item.get(field, None))
yield item
except ValueError:
pass | Apply given function to value of given key in every dictionary in sequence and
set the result as new value for that key. | Below is the instruction that describes the task:
### Input:
Apply given function to value of given key in every dictionary in sequence and
set the result as new value for that key.
### Response:
def map_field(field, func, dict_sequence):
"""
Apply given function to value of given key in every dictionary in sequence and
set the result as new value for that key.
"""
for item in dict_sequence:
try:
item[field] = func(item.get(field, None))
yield item
except ValueError:
pass |
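A minimal usage sketch with invented data; items whose value cannot be converted are silently dropped because the ValueError is swallowed:
>>> rows = [{'price': '1.5'}, {'price': 'n/a'}, {'price': '2'}]
>>> list(map_field('price', float, rows))
[{'price': 1.5}, {'price': 2.0}]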
def streamify(self, state, frame):
"""Prepare frame for output as a byte-stuffed stream."""
# Split the frame apart for stuffing...
pieces = frame.split(self.prefix)
return '%s%s%s%s%s' % (self.prefix, self.begin,
(self.prefix + self.nop).join(pieces),
self.prefix, self.end) | Prepare frame for output as a byte-stuffed stream. | Below is the instruction that describes the task:
### Input:
Prepare frame for output as a byte-stuffed stream.
### Response:
def streamify(self, state, frame):
"""Prepare frame for output as a byte-stuffed stream."""
# Split the frame apart for stuffing...
pieces = frame.split(self.prefix)
return '%s%s%s%s%s' % (self.prefix, self.begin,
(self.prefix + self.nop).join(pieces),
self.prefix, self.end) |
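The prefix, begin, end and nop attributes are defined elsewhere in the class, so the following standalone sketch uses made-up control characters purely to illustrate the stuffing pattern: prefix bytes inside the frame are escaped as prefix+nop, and the result is wrapped in prefix+begin / prefix+end markers:
>>> prefix, begin, end, nop = '~', '{', '}', '.'
>>> frame = 'ab~cd'
>>> '%s%s%s%s%s' % (prefix, begin, (prefix + nop).join(frame.split(prefix)), prefix, end)
'~{ab~.cd~}'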
def parse_line(self, statement, element, mode):
"""As part of real-time update, parses the statement and adjusts the attributes
of the specified CustomType instance to reflect the changes.
:arg statement: the lines of code that was added/removed/changed on the
element after it had already been parsed. The lines together form a single
continuous code statement.
:arg element: the CustomType instance to update.
:arg mode: 'insert', or 'delete'.
"""
if element.incomplete:
#We need to check for the end_token so we can close up the incomplete
#status for the instance.
if element.end_token in statement:
element.incomplete = False
return
#This method deals with updating the *body* of the type declaration. The only
#possible entries in the body are member variable declarations and type
#executable definitions.
self._process_execs_contents(statement, element.module.name, element, mode)
self._rt_parse_members(statement, element, mode) | As part of real-time update, parses the statement and adjusts the attributes
of the specified CustomType instance to reflect the changes.
:arg statement: the lines of code that was added/removed/changed on the
element after it had already been parsed. The lines together form a single
continuous code statement.
:arg element: the CustomType instance to update.
:arg mode: 'insert', or 'delete'. | Below is the instruction that describes the task:
### Input:
As part of real-time update, parses the statement and adjusts the attributes
of the specified CustomType instance to reflect the changes.
:arg statement: the lines of code that was added/removed/changed on the
element after it had already been parsed. The lines together form a single
continuous code statement.
:arg element: the CustomType instance to update.
:arg mode: 'insert', or 'delete'.
### Response:
def parse_line(self, statement, element, mode):
"""As part of real-time update, parses the statement and adjusts the attributes
of the specified CustomType instance to reflect the changes.
:arg statement: the lines of code that was added/removed/changed on the
element after it had already been parsed. The lines together form a single
continuous code statement.
:arg element: the CustomType instance to update.
:arg mode: 'insert', or 'delete'.
"""
if element.incomplete:
#We need to check for the end_token so we can close up the incomplete
#status for the instance.
if element.end_token in statement:
element.incomplete = False
return
#This method deals with updating the *body* of the type declaration. The only
#possible entries in the body are member variable declarations and type
#executable definitions.
self._process_execs_contents(statement, element.module.name, element, mode)
self._rt_parse_members(statement, element, mode) |
def _pylab_changed(self, name, old, new):
"""Replace --pylab='inline' with --pylab='auto'"""
if new == 'inline':
warn.warn("'inline' not available as pylab backend, "
"using 'auto' instead.\n")
self.pylab = 'auto' | Replace --pylab='inline' with --pylab='auto' | Below is the instruction that describes the task:
### Input:
Replace --pylab='inline' with --pylab='auto'
### Response:
def _pylab_changed(self, name, old, new):
"""Replace --pylab='inline' with --pylab='auto'"""
if new == 'inline':
warn.warn("'inline' not available as pylab backend, "
"using 'auto' instead.\n")
self.pylab = 'auto' |
def backend_to_retrieve(self, namespace, stream):
"""
Return backend enabled for reading for `stream`.
"""
if namespace not in self.namespaces:
raise NamespaceMissing('`{}` namespace is not configured'
.format(namespace))
stream_prefix = self.get_matching_prefix(namespace, stream)
read_backend = self.prefix_read_backends[namespace][stream_prefix]
return (read_backend,
self.prefix_confs[namespace][stream_prefix][read_backend]) | Return backend enabled for reading for `stream`. | Below is the instruction that describes the task:
### Input:
Return backend enabled for reading for `stream`.
### Response:
def backend_to_retrieve(self, namespace, stream):
"""
Return backend enabled for reading for `stream`.
"""
if namespace not in self.namespaces:
raise NamespaceMissing('`{}` namespace is not configured'
.format(namespace))
stream_prefix = self.get_matching_prefix(namespace, stream)
read_backend = self.prefix_read_backends[namespace][stream_prefix]
return (read_backend,
self.prefix_confs[namespace][stream_prefix][read_backend]) |
def get_class(schema_name):
"""
Retrieve the message class associated with the schema name.
If no match is found, the default schema is returned and a warning is logged.
Args:
schema_name (six.text_type): The name of the :class:`Message` sub-class;
this is typically the Python path.
Returns:
Message: A sub-class of :class:`Message` to create the message from.
"""
global _registry_loaded
if not _registry_loaded:
load_message_classes()
try:
return _schema_name_to_class[schema_name]
except KeyError:
_log.warning(
'The schema "%s" is not in the schema registry! Either install '
"the package with its schema definition or define a schema. "
"Falling back to the default schema...",
schema_name,
)
return Message | Retrieve the message class associated with the schema name.
If no match is found, the default schema is returned and a warning is logged.
Args:
schema_name (six.text_type): The name of the :class:`Message` sub-class;
this is typically the Python path.
Returns:
Message: A sub-class of :class:`Message` to create the message from. | Below is the instruction that describes the task:
### Input:
Retrieve the message class associated with the schema name.
If no match is found, the default schema is returned and a warning is logged.
Args:
schema_name (six.text_type): The name of the :class:`Message` sub-class;
this is typically the Python path.
Returns:
Message: A sub-class of :class:`Message` to create the message from.
### Response:
def get_class(schema_name):
"""
Retrieve the message class associated with the schema name.
If no match is found, the default schema is returned and a warning is logged.
Args:
schema_name (six.text_type): The name of the :class:`Message` sub-class;
this is typically the Python path.
Returns:
Message: A sub-class of :class:`Message` to create the message from.
"""
global _registry_loaded
if not _registry_loaded:
load_message_classes()
try:
return _schema_name_to_class[schema_name]
except KeyError:
_log.warning(
'The schema "%s" is not in the schema registry! Either install '
"the package with its schema definition or define a schema. "
"Falling back to the default schema...",
schema_name,
)
return Message |
def on_menu_exit(self, event):
"""
Exit the GUI
"""
# also delete appropriate copy file
try:
self.help_window.Destroy()
except:
pass
if '-i' in sys.argv:
self.Destroy()
try:
sys.exit() # can raise TypeError if wx inspector was used
except Exception as ex:
if isinstance(ex, TypeError):
pass
else:
raise ex | Exit the GUI | Below is the instruction that describes the task:
### Input:
Exit the GUI
### Response:
def on_menu_exit(self, event):
"""
Exit the GUI
"""
# also delete appropriate copy file
try:
self.help_window.Destroy()
except:
pass
if '-i' in sys.argv:
self.Destroy()
try:
sys.exit() # can raise TypeError if wx inspector was used
except Exception as ex:
if isinstance(ex, TypeError):
pass
else:
raise ex |
def extendleft(self, other):
"""
Extend the left side of the collection by appending values from
the iterable *other*. Note that the appends will reverse the order
of the given values.
"""
def extendleft_trans(pipe):
values = list(other.__iter__(pipe)) if use_redis else other
for v in values:
self._appendleft_helper(v, pipe)
if self._same_redis(other, RedisCollection):
use_redis = True
self._transaction(extendleft_trans, other.key)
else:
use_redis = False
self._transaction(extendleft_trans) | Extend the left side of the collection by appending values from
the iterable *other*. Note that the appends will reverse the order
of the given values. | Below is the instruction that describes the task:
### Input:
Extend the left side of the collection by appending values from
the iterable *other*. Note that the appends will reverse the order
of the given values.
### Response:
def extendleft(self, other):
"""
Extend the left side of the collection by appending values from
the iterable *other*. Note that the appends will reverse the order
of the given values.
"""
def extendleft_trans(pipe):
values = list(other.__iter__(pipe)) if use_redis else other
for v in values:
self._appendleft_helper(v, pipe)
if self._same_redis(other, RedisCollection):
use_redis = True
self._transaction(extendleft_trans, other.key)
else:
use_redis = False
self._transaction(extendleft_trans) |
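The order-reversal behaviour matches collections.deque.extendleft; a plain-Python illustration that does not touch the Redis-backed collection:
>>> from collections import deque
>>> d = deque([3, 4])
>>> d.extendleft([2, 1, 0])
>>> d
deque([0, 1, 2, 3, 4])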
def mod_aggregate(low, chunks, running):
'''
The mod_aggregate function which looks up all packages in the available
low chunks and merges them into a single pkgs ref in the present low data
'''
pkgs = []
pkg_type = None
agg_enabled = [
'installed',
'removed',
]
if low.get('fun') not in agg_enabled:
return low
for chunk in chunks:
tag = __utils__['state.gen_tag'](chunk)
if tag in running:
# Already ran the pkg state, skip aggregation
continue
if chunk.get('state') == 'pip':
if '__agg__' in chunk:
continue
# Check for the same function
if chunk.get('fun') != low.get('fun'):
continue
# Check first if 'sources' was passed so we don't aggregate pkgs
# and sources together.
if pkg_type is None:
pkg_type = 'pkgs'
if pkg_type == 'pkgs':
# Pull out the pkg names!
if 'pkgs' in chunk:
pkgs.extend(chunk['pkgs'])
chunk['__agg__'] = True
elif 'name' in chunk:
version = chunk.pop('version', None)
if version is not None:
pkgs.append({chunk['name']: version})
else:
pkgs.append(chunk['name'])
chunk['__agg__'] = True
if pkg_type is not None and pkgs:
if pkg_type in low:
low[pkg_type].extend(pkgs)
else:
low[pkg_type] = pkgs
return low | The mod_aggregate function which looks up all packages in the available
low chunks and merges them into a single pkgs ref in the present low data | Below is the instruction that describes the task:
### Input:
The mod_aggregate function which looks up all packages in the available
low chunks and merges them into a single pkgs ref in the present low data
### Response:
def mod_aggregate(low, chunks, running):
'''
The mod_aggregate function which looks up all packages in the available
low chunks and merges them into a single pkgs ref in the present low data
'''
pkgs = []
pkg_type = None
agg_enabled = [
'installed',
'removed',
]
if low.get('fun') not in agg_enabled:
return low
for chunk in chunks:
tag = __utils__['state.gen_tag'](chunk)
if tag in running:
# Already ran the pkg state, skip aggregation
continue
if chunk.get('state') == 'pip':
if '__agg__' in chunk:
continue
# Check for the same function
if chunk.get('fun') != low.get('fun'):
continue
# Check first if 'sources' was passed so we don't aggregate pkgs
# and sources together.
if pkg_type is None:
pkg_type = 'pkgs'
if pkg_type == 'pkgs':
# Pull out the pkg names!
if 'pkgs' in chunk:
pkgs.extend(chunk['pkgs'])
chunk['__agg__'] = True
elif 'name' in chunk:
version = chunk.pop('version', None)
if version is not None:
pkgs.append({chunk['name']: version})
else:
pkgs.append(chunk['name'])
chunk['__agg__'] = True
if pkg_type is not None and pkgs:
if pkg_type in low:
low[pkg_type].extend(pkgs)
else:
low[pkg_type] = pkgs
return low |
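A rough illustration of the effect (hypothetical chunks; real low-data dicts carry more keys): two matching pip chunks are folded into one combined pkgs reference on the low data.
chunks = [
    {'state': 'pip', 'fun': 'installed', 'name': 'requests'},
    {'state': 'pip', 'fun': 'installed', 'name': 'flask', 'version': '1.0'},
]
# after aggregation, roughly: low['pkgs'] == ['requests', {'flask': '1.0'}]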
def share(self, institute, case, collaborator_id, user, link):
"""Share a case with a new institute.
Arguments:
institute (dict): A Institute object
case (dict): Case object
collaborator_id (str): An institute id
user (dict): A User object
link (str): The url to be used in the event
Return:
updated_case
"""
if collaborator_id in case.get('collaborators', []):
raise ValueError('new customer is already a collaborator')
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='share',
subject=collaborator_id
)
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{
'$push': {'collaborators': collaborator_id}
},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.debug("Case updated")
return updated_case | Share a case with a new institute.
Arguments:
institute (dict): A Institute object
case (dict): Case object
collaborator_id (str): An institute id
user (dict): A User object
link (str): The url to be used in the event
Return:
updated_case | Below is the instruction that describes the task:
### Input:
Share a case with a new institute.
Arguments:
institute (dict): A Institute object
case (dict): Case object
collaborator_id (str): An institute id
user (dict): A User object
link (str): The url to be used in the event
Return:
updated_case
### Response:
def share(self, institute, case, collaborator_id, user, link):
"""Share a case with a new institute.
Arguments:
institute (dict): A Institute object
case (dict): Case object
collaborator_id (str): An institute id
user (dict): A User object
link (str): The url to be used in the event
Return:
updated_case
"""
if collaborator_id in case.get('collaborators', []):
raise ValueError('new customer is already a collaborator')
self.create_event(
institute=institute,
case=case,
user=user,
link=link,
category='case',
verb='share',
subject=collaborator_id
)
updated_case = self.case_collection.find_one_and_update(
{'_id': case['_id']},
{
'$push': {'collaborators': collaborator_id}
},
return_document=pymongo.ReturnDocument.AFTER
)
LOG.debug("Case updated")
return updated_case |
def get_mysql_password(self, username=None, password=None):
"""Retrieve, generate or store a mysql password for the provided
username using peer relation cluster."""
excludes = []
# First check peer relation.
try:
for key in self.passwd_keys(username):
_password = leader_get(key)
if _password:
break
# If root password available don't update peer relation from local
if _password and not username:
excludes.append(self.root_passwd_file_template)
except ValueError:
# cluster relation is not yet started; use on-disk
_password = None
# If none available, generate new one
if not _password:
_password = self.get_mysql_password_on_disk(username, password)
# Put on wire if required
if self.migrate_passwd_to_leader_storage:
self.migrate_passwords_to_leader_storage(excludes=excludes)
return _password | Retrieve, generate or store a mysql password for the provided
username using peer relation cluster. | Below is the instruction that describes the task:
### Input:
Retrieve, generate or store a mysql password for the provided
username using peer relation cluster.
### Response:
def get_mysql_password(self, username=None, password=None):
"""Retrieve, generate or store a mysql password for the provided
username using peer relation cluster."""
excludes = []
# First check peer relation.
try:
for key in self.passwd_keys(username):
_password = leader_get(key)
if _password:
break
# If root password available don't update peer relation from local
if _password and not username:
excludes.append(self.root_passwd_file_template)
except ValueError:
# cluster relation is not yet started; use on-disk
_password = None
# If none available, generate new one
if not _password:
_password = self.get_mysql_password_on_disk(username, password)
# Put on wire if required
if self.migrate_passwd_to_leader_storage:
self.migrate_passwords_to_leader_storage(excludes=excludes)
return _password |
def check_output(self, **kwargs):
"""Runs this command returning its captured stdout.
:param kwargs: Any extra keyword arguments to pass along to `subprocess.Popen`.
:returns: The captured standard output stream of the command.
:rtype: string
:raises: :class:`subprocess.CalledProcessError` if the command fails.
"""
env, kwargs = self._prepare_env(kwargs)
return subprocess.check_output(self.cmd, env=env, **kwargs).decode('utf-8') | Runs this command returning its captured stdout.
:param kwargs: Any extra keyword arguments to pass along to `subprocess.Popen`.
:returns: The captured standard output stream of the command.
:rtype: string
:raises: :class:`subprocess.CalledProcessError` if the command fails. | Below is the instruction that describes the task:
### Input:
Runs this command returning its captured stdout.
:param kwargs: Any extra keyword arguments to pass along to `subprocess.Popen`.
:returns: The captured standard output stream of the command.
:rtype: string
:raises: :class:`subprocess.CalledProcessError` if the command fails.
### Response:
def check_output(self, **kwargs):
"""Runs this command returning its captured stdout.
:param kwargs: Any extra keyword arguments to pass along to `subprocess.Popen`.
:returns: The captured standard output stream of the command.
:rtype: string
:raises: :class:`subprocess.CalledProcessError` if the command fails.
"""
env, kwargs = self._prepare_env(kwargs)
return subprocess.check_output(self.cmd, env=env, **kwargs).decode('utf-8') |
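The wrapper only merges an environment via _prepare_env and decodes stdout; the underlying standard-library call behaves like this (POSIX example using echo):
>>> import subprocess
>>> subprocess.check_output(['echo', 'hi']).decode('utf-8')
'hi\n'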
def _parse_list(self, text, i):
"""Parse a list from source text starting at i."""
res = []
end_match = self.end_list_re.match(text, i)
old_current_type = self.current_type
while not end_match:
list_item, i = self._parse(text, i)
res.append(list_item)
end_match = self.end_list_re.match(text, i)
if not end_match:
m = self.list_delim_re.match(text, i)
if not m:
self._fail("Missing delimiter in list before content", text, i)
parsed = m.group(0)
i += len(parsed)
self.current_type = old_current_type
parsed = end_match.group(0)
i += len(parsed)
return res, i | Parse a list from source text starting at i. | Below is the instruction that describes the task:
### Input:
Parse a list from source text starting at i.
### Response:
def _parse_list(self, text, i):
"""Parse a list from source text starting at i."""
res = []
end_match = self.end_list_re.match(text, i)
old_current_type = self.current_type
while not end_match:
list_item, i = self._parse(text, i)
res.append(list_item)
end_match = self.end_list_re.match(text, i)
if not end_match:
m = self.list_delim_re.match(text, i)
if not m:
self._fail("Missing delimiter in list before content", text, i)
parsed = m.group(0)
i += len(parsed)
self.current_type = old_current_type
parsed = end_match.group(0)
i += len(parsed)
return res, i |
def is_dict_like(obj):
"""
Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
>>> is_dict_like(dict)
False
>>> is_dict_like(dict())
True
"""
dict_like_attrs = ("__getitem__", "keys", "__contains__")
return (all(hasattr(obj, attr) for attr in dict_like_attrs)
# [GH 25196] exclude classes
and not isinstance(obj, type)) | Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
>>> is_dict_like(dict)
False
>>> is_dict_like(dict())
True | Below is the instruction that describes the task:
### Input:
Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
>>> is_dict_like(dict)
False
>>> is_dict_like(dict())
True
### Response:
def is_dict_like(obj):
"""
Check if the object is dict-like.
Parameters
----------
obj : The object to check
Returns
-------
is_dict_like : bool
Whether `obj` has dict-like properties.
Examples
--------
>>> is_dict_like({1: 2})
True
>>> is_dict_like([1, 2, 3])
False
>>> is_dict_like(dict)
False
>>> is_dict_like(dict())
True
"""
dict_like_attrs = ("__getitem__", "keys", "__contains__")
return (all(hasattr(obj, attr) for attr in dict_like_attrs)
# [GH 25196] exclude classes
and not isinstance(obj, type)) |
def get_hgvs_language() -> ParserElement:
"""Build a HGVS :class:`pyparsing.ParseElement`."""
hgvs = (variant_characters | quote)(IDENTIFIER)
language = variant_tags + nest(hgvs)
return language | Build a HGVS :class:`pyparsing.ParseElement`. | Below is the instruction that describes the task:
### Input:
Build a HGVS :class:`pyparsing.ParseElement`.
### Response:
def get_hgvs_language() -> ParserElement:
"""Build a HGVS :class:`pyparsing.ParseElement`."""
hgvs = (variant_characters | quote)(IDENTIFIER)
language = variant_tags + nest(hgvs)
return language |
def get_saml_provider(name, region=None, key=None, keyid=None, profile=None):
'''
Get SAML provider document.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_saml_provider arn
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
provider = conn.get_saml_provider(name)
return provider['get_saml_provider_response']['get_saml_provider_result']['saml_metadata_document']
except boto.exception.BotoServerError as e:
log.debug(__utils__['boto.get_error'](e))
log.error('Failed to get SAML provider document %s.', name)
return False | Get SAML provider document.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_saml_provider arn | Below is the instruction that describes the task:
### Input:
Get SAML provider document.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_saml_provider arn
### Response:
def get_saml_provider(name, region=None, key=None, keyid=None, profile=None):
'''
Get SAML provider document.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_saml_provider arn
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
provider = conn.get_saml_provider(name)
return provider['get_saml_provider_response']['get_saml_provider_result']['saml_metadata_document']
except boto.exception.BotoServerError as e:
log.debug(__utils__['boto.get_error'](e))
log.error('Failed to get SAML provider document %s.', name)
return False |
def refresh_save_all_action(self):
"""Enable 'Save All' if there are files to be saved"""
editorstack = self.get_current_editorstack()
if editorstack:
state = any(finfo.editor.document().isModified() or finfo.newly_created
for finfo in editorstack.data)
self.save_all_action.setEnabled(state) | Enable 'Save All' if there are files to be saved | Below is the instruction that describes the task:
### Input:
Enable 'Save All' if there are files to be saved
### Response:
def refresh_save_all_action(self):
"""Enable 'Save All' if there are files to be saved"""
editorstack = self.get_current_editorstack()
if editorstack:
state = any(finfo.editor.document().isModified() or finfo.newly_created
for finfo in editorstack.data)
self.save_all_action.setEnabled(state) |
def query_cache_by_object(self, zenpy_object):
""" Convenience method for testing. Given an object, return the cached version """
object_type = get_object_type(zenpy_object)
cache_key = self._cache_key_attribute(object_type)
return self.get(object_type, getattr(zenpy_object, cache_key)) | Convenience method for testing. Given an object, return the cached version | Below is the instruction that describes the task:
### Input:
Convenience method for testing. Given an object, return the cached version
### Response:
def query_cache_by_object(self, zenpy_object):
""" Convenience method for testing. Given an object, return the cached version """
object_type = get_object_type(zenpy_object)
cache_key = self._cache_key_attribute(object_type)
return self.get(object_type, getattr(zenpy_object, cache_key)) |
def fit_general(xy, uv):
""" Performs a simple fit for the shift only between
matched lists of positions 'xy' and 'uv'.
Output: (same as for fit_arrays)
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
"""
# Set up products used for computing the fit
gxy = uv.astype(ndfloat128)
guv = xy.astype(ndfloat128)
Sx = gxy[:,0].sum()
Sy = gxy[:,1].sum()
Su = guv[:,0].sum()
Sv = guv[:,1].sum()
Sux = np.dot(guv[:,0], gxy[:,0])
Svx = np.dot(guv[:,1], gxy[:,0])
Suy = np.dot(guv[:,0], gxy[:,1])
Svy = np.dot(guv[:,1], gxy[:,1])
Sxx = np.dot(gxy[:,0], gxy[:,0])
Syy = np.dot(gxy[:,1], gxy[:,1])
Sxy = np.dot(gxy[:,0], gxy[:,1])
n = len(xy[:,0])
M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])
U = np.array([Su, Sux, Suy])
V = np.array([Sv, Svx, Svy])
# The fit solution...
# where
# u = P0 + P1*x + P2*y
# v = Q0 + Q1*x + Q2*y
#
try:
invM = np.linalg.inv(M.astype(np.float64))
except np.linalg.LinAlgError:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
P = np.dot(invM, U).astype(np.float64)
Q = np.dot(invM, V).astype(np.float64)
if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):
raise ArithmeticError('Singular matrix.')
# Return the shift, rotation, and scale changes
result = build_fit(P, Q, 'general')
resids = xy - np.dot(uv, result['fit_matrix']) - result['offset']
result['rms'] = resids.std(axis=0)
result['resids'] = resids
result['rmse'] = float(np.sqrt(np.mean(2 * resids**2)))
result['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))
return result | Performs a simple fit for the shift only between
matched lists of positions 'xy' and 'uv'.
Output: (same as for fit_arrays)
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
================================= | Below is the instruction that describes the task:
### Input:
Performs a simple fit for the shift only between
matched lists of positions 'xy' and 'uv'.
Output: (same as for fit_arrays)
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
### Response:
def fit_general(xy, uv):
""" Performs a simple fit for the shift only between
matched lists of positions 'xy' and 'uv'.
Output: (same as for fit_arrays)
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
=================================
"""
# Set up products used for computing the fit
gxy = uv.astype(ndfloat128)
guv = xy.astype(ndfloat128)
Sx = gxy[:,0].sum()
Sy = gxy[:,1].sum()
Su = guv[:,0].sum()
Sv = guv[:,1].sum()
Sux = np.dot(guv[:,0], gxy[:,0])
Svx = np.dot(guv[:,1], gxy[:,0])
Suy = np.dot(guv[:,0], gxy[:,1])
Svy = np.dot(guv[:,1], gxy[:,1])
Sxx = np.dot(gxy[:,0], gxy[:,0])
Syy = np.dot(gxy[:,1], gxy[:,1])
Sxy = np.dot(gxy[:,0], gxy[:,1])
n = len(xy[:,0])
M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])
U = np.array([Su, Sux, Suy])
V = np.array([Sv, Svx, Svy])
# The fit solution...
# where
# u = P0 + P1*x + P2*y
# v = Q0 + Q1*x + Q2*y
#
try:
invM = np.linalg.inv(M.astype(np.float64))
except np.linalg.LinAlgError:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
P = np.dot(invM, U).astype(np.float64)
Q = np.dot(invM, V).astype(np.float64)
if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):
raise ArithmeticError('Singular matrix.')
# Return the shift, rotation, and scale changes
result = build_fit(P, Q, 'general')
resids = xy - np.dot(uv, result['fit_matrix']) - result['offset']
result['rms'] = resids.std(axis=0)
result['resids'] = resids
result['rmse'] = float(np.sqrt(np.mean(2 * resids**2)))
result['mae'] = float(np.mean(np.linalg.norm(resids, axis=1)))
return result |
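The normal-equations solve above is an ordinary least-squares affine fit. As an independent sanity check with synthetic points (numpy only, not tied to this module's xy/uv conventions), the same kind of fit can be recovered with numpy.linalg.lstsq:
>>> import numpy as np
>>> src = np.array([[0., 0.], [1., 0.], [0., 1.], [2., 3.]])
>>> M = np.array([[1.0, 0.1], [-0.1, 1.0]]); off = np.array([5.0, -3.0])
>>> dst = src.dot(M) + off
>>> A = np.hstack([src, np.ones((len(src), 1))])
>>> sol = np.linalg.lstsq(A, dst, rcond=None)[0]  # rows 0-1: matrix, row 2: offset
>>> bool(np.allclose(sol[:2], M)) and bool(np.allclose(sol[2], off))
True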
def info(self, user):
""" https://api.slack.com/methods/users.info
"""
self.params.update({
'user': user,
})
return FromUrl('https://slack.com/api/users.info', self._requests)(data=self.params).get() | https://api.slack.com/methods/users.info | Below is the instruction that describes the task:
### Input:
https://api.slack.com/methods/users.info
### Response:
def info(self, user):
""" https://api.slack.com/methods/users.info
"""
self.params.update({
'user': user,
})
return FromUrl('https://slack.com/api/users.info', self._requests)(data=self.params).get() |
def attach_photo(self, photo: typing.Union[InputMediaPhoto, base.InputFile],
caption: base.String = None):
"""
Attach photo
:param photo:
:param caption:
"""
if not isinstance(photo, InputMedia):
photo = InputMediaPhoto(media=photo, caption=caption)
self.attach(photo) | Attach photo
:param photo:
:param caption: | Below is the instruction that describes the task:
### Input:
Attach photo
:param photo:
:param caption:
### Response:
def attach_photo(self, photo: typing.Union[InputMediaPhoto, base.InputFile],
caption: base.String = None):
"""
Attach photo
:param photo:
:param caption:
"""
if not isinstance(photo, InputMedia):
photo = InputMediaPhoto(media=photo, caption=caption)
self.attach(photo) |
def qtable(columns, data, **meta):
'''Creates a QTable out of given column names and data, and initialises the
meta data.
:class:`.QTable` is represented internally by `numpy.core.records.recarray`.
Data for each column is converted to :class:`.QList` via :func:`.qlist`
function. If qtype indicator is defined for a column, this information
is used for explicit array conversion.
Table examples:
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect'])),
... qlist(numpy.array([98, 42, 126], dtype=numpy.int64))])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(['Dent', 'Beeblebrox', 'Prefect'], qtype = QSYMBOL_LIST),
... qlist([98, 42, 126], qtype = QLONG_LIST)])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(['name', 'iq'],
... [['Dent', 'Beeblebrox', 'Prefect'],
... [98, 42, 126]],
... name = QSYMBOL, iq = QLONG)
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq`fullname!(`Dent`Beeblebrox`Prefect;98 42 126;("Arthur Dent"; "Zaphod Beeblebrox"; "Ford Prefect"))
>>> t = qtable(('name', 'iq', 'fullname'),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
... qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
... qlist(numpy.array(["Arthur Dent", "Zaphod Beeblebrox", "Ford Prefect"]), qtype = QSTRING_LIST)])
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8'), ('fullname', 'O')] meta: metadata(iq=-7, fullname=0, qtype=98, name=-11): [('Dent', 98L, 'Arthur Dent') ('Beeblebrox', 42L, 'Zaphod Beeblebrox') ('Prefect', 126L, 'Ford Prefect')]
:Parameters:
- `columns` (list of `strings`) - table column names
- `data` (list of lists) - list of columns containing table data
:Kwargs:
- `meta` (`integer`) - qtype for particular column
:returns: `QTable` - representation of q table
:raises: `ValueError`
'''
if len(columns) != len(data):
raise ValueError('Number of columns doesn`t match the data layout. %s vs %s' % (len(columns), len(data)))
meta = {} if not meta else meta
if not 'qtype' in meta:
meta['qtype'] = QTABLE
dtypes = []
for i in range(len(columns)):
column_name = columns[i] if isinstance(columns[i], str) else columns[i].decode("utf-8")
if isinstance(data[i], str):
# convert character list (represented as string) to numpy representation
data[i] = numpy.array(list(data[i]), dtype = numpy.string_)
if isinstance(data[i], bytes):
data[i] = numpy.array(list(data[i].decode()), dtype = numpy.string_)
if column_name in meta:
data[i] = qlist(data[i], qtype = meta[column_name])
elif not isinstance(data[i], QList):
if type(data[i]) in (list, tuple):
data[i] = qlist(data[i], qtype = QGENERAL_LIST)
else:
data[i] = qlist(data[i])
meta[column_name] = data[i].meta.qtype
dtypes.append((column_name, data[i].dtype))
table = numpy.core.records.fromarrays(data, dtype = dtypes)
table = table.view(QTable)
table._meta_init(**meta)
return table | Creates a QTable out of given column names and data, and initialises the
meta data.
:class:`.QTable` is represented internally by `numpy.core.records.recarray`.
Data for each column is converted to :class:`.QList` via :func:`.qlist`
function. If qtype indicator is defined for a column, this information
is used for explicit array conversion.
Table examples:
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect'])),
... qlist(numpy.array([98, 42, 126], dtype=numpy.int64))])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(['Dent', 'Beeblebrox', 'Prefect'], qtype = QSYMBOL_LIST),
... qlist([98, 42, 126], qtype = QLONG_LIST)])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(['name', 'iq'],
... [['Dent', 'Beeblebrox', 'Prefect'],
... [98, 42, 126]],
... name = QSYMBOL, iq = QLONG)
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq`fullname!(`Dent`Beeblebrox`Prefect;98 42 126;("Arthur Dent"; "Zaphod Beeblebrox"; "Ford Prefect"))
>>> t = qtable(('name', 'iq', 'fullname'),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
... qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
... qlist(numpy.array(["Arthur Dent", "Zaphod Beeblebrox", "Ford Prefect"]), qtype = QSTRING_LIST)])
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8'), ('fullname', 'O')] meta: metadata(iq=-7, fullname=0, qtype=98, name=-11): [('Dent', 98L, 'Arthur Dent') ('Beeblebrox', 42L, 'Zaphod Beeblebrox') ('Prefect', 126L, 'Ford Prefect')]
:Parameters:
- `columns` (list of `strings`) - table column names
- `data` (list of lists) - list of columns containing table data
:Kwargs:
- `meta` (`integer`) - qtype for particular column
:returns: `QTable` - representation of q table
:raises: `ValueError` | Below is the instruction that describes the task:
### Input:
Creates a QTable out of given column names and data, and initialises the
meta data.
:class:`.QTable` is represented internally by `numpy.core.records.recarray`.
Data for each column is converted to :class:`.QList` via :func:`.qlist`
function. If qtype indicator is defined for a column, this information
is used for explicit array conversion.
Table examples:
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect'])),
... qlist(numpy.array([98, 42, 126], dtype=numpy.int64))])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(['Dent', 'Beeblebrox', 'Prefect'], qtype = QSYMBOL_LIST),
... qlist([98, 42, 126], qtype = QLONG_LIST)])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(['name', 'iq'],
... [['Dent', 'Beeblebrox', 'Prefect'],
... [98, 42, 126]],
... name = QSYMBOL, iq = QLONG)
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq`fullname!(`Dent`Beeblebrox`Prefect;98 42 126;("Arthur Dent"; "Zaphod Beeblebrox"; "Ford Prefect"))
>>> t = qtable(('name', 'iq', 'fullname'),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
... qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
... qlist(numpy.array(["Arthur Dent", "Zaphod Beeblebrox", "Ford Prefect"]), qtype = QSTRING_LIST)])
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8'), ('fullname', 'O')] meta: metadata(iq=-7, fullname=0, qtype=98, name=-11): [('Dent', 98L, 'Arthur Dent') ('Beeblebrox', 42L, 'Zaphod Beeblebrox') ('Prefect', 126L, 'Ford Prefect')]
:Parameters:
- `columns` (list of `strings`) - table column names
- `data` (list of lists) - list of columns containing table data
:Kwargs:
- `meta` (`integer`) - qtype for particular column
:returns: `QTable` - representation of q table
:raises: `ValueError`
### Response:
def qtable(columns, data, **meta):
'''Creates a QTable out of given column names and data, and initialises the
meta data.
:class:`.QTable` is represented internally by `numpy.core.records.recarray`.
Data for each column is converted to :class:`.QList` via :func:`.qlist`
function. If qtype indicator is defined for a column, this information
is used for explicit array conversion.
Table examples:
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect'])),
... qlist(numpy.array([98, 42, 126], dtype=numpy.int64))])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(['Dent', 'Beeblebrox', 'Prefect'], qtype = QSYMBOL_LIST),
... qlist([98, 42, 126], qtype = QLONG_LIST)])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(['name', 'iq'],
... [['Dent', 'Beeblebrox', 'Prefect'],
... [98, 42, 126]],
... name = QSYMBOL, iq = QLONG)
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq`fullname!(`Dent`Beeblebrox`Prefect;98 42 126;("Arthur Dent"; "Zaphod Beeblebrox"; "Ford Prefect"))
>>> t = qtable(('name', 'iq', 'fullname'),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
... qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
... qlist(numpy.array(["Arthur Dent", "Zaphod Beeblebrox", "Ford Prefect"]), qtype = QSTRING_LIST)])
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8'), ('fullname', 'O')] meta: metadata(iq=-7, fullname=0, qtype=98, name=-11): [('Dent', 98L, 'Arthur Dent') ('Beeblebrox', 42L, 'Zaphod Beeblebrox') ('Prefect', 126L, 'Ford Prefect')]
:Parameters:
- `columns` (list of `strings`) - table column names
- `data` (list of lists) - list of columns containing table data
:Kwargs:
- `meta` (`integer`) - qtype for particular column
:returns: `QTable` - representation of q table
:raises: `ValueError`
'''
if len(columns) != len(data):
raise ValueError('Number of columns doesn`t match the data layout. %s vs %s' % (len(columns), len(data)))
meta = {} if not meta else meta
if not 'qtype' in meta:
meta['qtype'] = QTABLE
dtypes = []
for i in range(len(columns)):
column_name = columns[i] if isinstance(columns[i], str) else columns[i].decode("utf-8")
if isinstance(data[i], str):
# convert character list (represented as string) to numpy representation
data[i] = numpy.array(list(data[i]), dtype = numpy.string_)
if isinstance(data[i], bytes):
data[i] = numpy.array(list(data[i].decode()), dtype = numpy.string_)
if column_name in meta:
data[i] = qlist(data[i], qtype = meta[column_name])
elif not isinstance(data[i], QList):
if type(data[i]) in (list, tuple):
data[i] = qlist(data[i], qtype = QGENERAL_LIST)
else:
data[i] = qlist(data[i])
meta[column_name] = data[i].meta.qtype
dtypes.append((column_name, data[i].dtype))
table = numpy.core.records.fromarrays(data, dtype = dtypes)
table = table.view(QTable)
table._meta_init(**meta)
return table |
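For orientation, a minimal usage sketch of the qtable helper above, assuming the qpython package layout shown in its own examples (qpython.qcollection and qpython.qtype). The returned QTable is a recarray view, so columns read back as numpy arrays, and the per-column qtypes collected during conversion are exposed through the meta attribute.

from qpython.qcollection import qtable
from qpython.qtype import QSYMBOL, QLONG

# build a table from plain Python lists, forcing a qtype per column
t = qtable(['name', 'iq'],
           [['Dent', 'Beeblebrox', 'Prefect'], [98, 42, 126]],
           name=QSYMBOL, iq=QLONG)

print(t.meta)          # metadata(...) holding the qtype recorded for each column
print(t['iq'].mean())  # columns behave like numpy arrays on the recarray view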
def prettymetrics(self) -> str:
"""
Pretty printing for metrics
"""
rendered = ["{}: {}".format(*m) for m in self.metrics()]
return "\n ".join(rendered) | Pretty printing for metrics | Below is the the instruction that describes the task:
### Input:
Pretty printing for metrics
### Response:
def prettymetrics(self) -> str:
"""
Pretty printing for metrics
"""
rendered = ["{}: {}".format(*m) for m in self.metrics()]
return "\n ".join(rendered) |
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params) | Wrapper around `json.dumps` that uses a special JSON encoder. | Below is the instruction that describes the task:
### Input:
Wrapper around `json.dumps` that uses a special JSON encoder.
### Response:
def json_dumps(self, data, **options):
"""
Wrapper around `json.dumps` that uses a special JSON encoder.
"""
params = {'sort_keys': True, 'indent': 2}
params.update(options)
# This code is based off django's built in JSON serializer
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
params.update({'use_decimal': False})
return json.dumps(data, cls=DjangoJSONEncoder, **params) |
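A brief illustration of why the custom encoder matters, assuming Django is installed: DjangoJSONEncoder knows how to serialize Decimal, datetime and UUID values that the default encoder rejects. The use_decimal branch in the method above only takes effect when the json module in scope is actually simplejson, which is what exposes that keyword.

import json
from decimal import Decimal
from datetime import datetime
from django.core.serializers.json import DjangoJSONEncoder

data = {"price": Decimal("9.99"), "created": datetime(2015, 1, 1)}
# Decimal and datetime serialize cleanly once the Django encoder is supplied
print(json.dumps(data, cls=DjangoJSONEncoder, sort_keys=True, indent=2))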
def convert_field(self, name, field):
"""
Convert a single field from a Peewee model field to a validator field.
:param name: Name of the field as defined on this validator.
:param name: Peewee field instance.
:return: Validator field.
"""
if PEEWEE3:
field_type = field.field_type.lower()
else:
field_type = field.db_field
pwv_field = ModelValidator.FIELD_MAP.get(field_type, StringField)
print('pwv_field', field_type, pwv_field)
validators = []
required = not bool(getattr(field, 'null', True))
choices = getattr(field, 'choices', ())
default = getattr(field, 'default', None)
max_length = getattr(field, 'max_length', None)
unique = getattr(field, 'unique', False)
if required:
validators.append(validate_required())
if choices:
print('CHOICES', choices)
validators.append(validate_one_of([c[0] for c in choices]))
if max_length:
validators.append(validate_length(high=max_length))
if unique:
validators.append(validate_model_unique(field, self.instance.select(), self.pk_field, self.pk_value))
if isinstance(field, peewee.ForeignKeyField):
if PEEWEE3:
rel_field = field.rel_field
else:
rel_field = field.to_field
return ModelChoiceField(field.rel_model, rel_field, default=default, validators=validators)
if isinstance(field, ManyToManyField):
return ManyModelChoiceField(
field.rel_model, field.rel_model._meta.primary_key,
default=default, validators=validators)
return pwv_field(default=default, validators=validators) | Convert a single field from a Peewee model field to a validator field.
:param name: Name of the field as defined on this validator.
:param name: Peewee field instance.
:return: Validator field. | Below is the instruction that describes the task:
### Input:
Convert a single field from a Peewee model field to a validator field.
:param name: Name of the field as defined on this validator.
:param name: Peewee field instance.
:return: Validator field.
### Response:
def convert_field(self, name, field):
"""
Convert a single field from a Peewee model field to a validator field.
:param name: Name of the field as defined on this validator.
:param name: Peewee field instance.
:return: Validator field.
"""
if PEEWEE3:
field_type = field.field_type.lower()
else:
field_type = field.db_field
pwv_field = ModelValidator.FIELD_MAP.get(field_type, StringField)
print('pwv_field', field_type, pwv_field)
validators = []
required = not bool(getattr(field, 'null', True))
choices = getattr(field, 'choices', ())
default = getattr(field, 'default', None)
max_length = getattr(field, 'max_length', None)
unique = getattr(field, 'unique', False)
if required:
validators.append(validate_required())
if choices:
print('CHOICES', choices)
validators.append(validate_one_of([c[0] for c in choices]))
if max_length:
validators.append(validate_length(high=max_length))
if unique:
validators.append(validate_model_unique(field, self.instance.select(), self.pk_field, self.pk_value))
if isinstance(field, peewee.ForeignKeyField):
if PEEWEE3:
rel_field = field.rel_field
else:
rel_field = field.to_field
return ModelChoiceField(field.rel_model, rel_field, default=default, validators=validators)
if isinstance(field, ManyToManyField):
return ManyModelChoiceField(
field.rel_model, field.rel_model._meta.primary_key,
default=default, validators=validators)
return pwv_field(default=default, validators=validators) |
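The attributes read above (null, choices, max_length, unique) come straight off the Peewee field definition. A small introspection sketch, assuming peewee is installed and using a hypothetical model:

import peewee

class Person(peewee.Model):  # hypothetical model for illustration
    name = peewee.CharField(max_length=50, null=False, unique=True)

field = Person.name                         # the Peewee field instance
print(getattr(field, 'null', True))         # False -> validate_required() is added
print(getattr(field, 'max_length', None))   # 50    -> validate_length(high=50)
print(getattr(field, 'unique', False))      # True  -> validate_model_unique(...)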
def _fetch_page_async(self, page_size, **q_options):
"""Internal version of fetch_page_async()."""
q_options.setdefault('batch_size', page_size)
q_options.setdefault('produce_cursors', True)
it = self.iter(limit=page_size + 1, **q_options)
results = []
while (yield it.has_next_async()):
results.append(it.next())
if len(results) >= page_size:
break
try:
cursor = it.cursor_after()
except datastore_errors.BadArgumentError:
cursor = None
raise tasklets.Return(results, cursor, it.probably_has_next()) | Internal version of fetch_page_async(). | Below is the instruction that describes the task:
### Input:
Internal version of fetch_page_async().
### Response:
def _fetch_page_async(self, page_size, **q_options):
"""Internal version of fetch_page_async()."""
q_options.setdefault('batch_size', page_size)
q_options.setdefault('produce_cursors', True)
it = self.iter(limit=page_size + 1, **q_options)
results = []
while (yield it.has_next_async()):
results.append(it.next())
if len(results) >= page_size:
break
try:
cursor = it.cursor_after()
except datastore_errors.BadArgumentError:
cursor = None
raise tasklets.Return(results, cursor, it.probably_has_next()) |
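For context, the public call pattern this tasklet ultimately serves in App Engine's NDB (google.appengine.ext.ndb assumed available, model below is hypothetical): fetch_page() returns the page, a cursor for the next page, and a probably-has-more flag.

from google.appengine.ext import ndb

class Article(ndb.Model):  # hypothetical model for illustration
    title = ndb.StringProperty()

results, cursor, more = Article.query().fetch_page(20)
while more and cursor:
    # resume from the cursor returned by the previous page
    results, cursor, more = Article.query().fetch_page(20, start_cursor=cursor)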
def handle_lock(handle):
"""
Decorate the handle method with a file lock to ensure there is only ever
one process running at any one time.
"""
def wrapper(self, *args, **options):
def on_interrupt(signum, frame):
# It's necessary to release lockfile
sys.exit()
signal.signal(signal.SIGTERM, on_interrupt)
start_time = time.time()
try:
verbosity = int(options.get('verbosity', 0))
except ValueError:
verbosity = 0
logger = logging.getLogger(self.__module__)
if verbosity == 0:
logger.level = logging.WARNING
elif verbosity == 1:
logger.level = logging.INFO
else:
logger.level = logging.DEBUG
logger.debug("-" * 72)
lock_name = self.__module__.split('.').pop()
lock = FileLock(os.path.join(LOCK_ROOT, lock_name))
logger.debug("%s - acquiring lock..." % lock_name)
try:
lock.acquire(LOCK_WAIT_TIMEOUT)
except AlreadyLocked:
logger.debug("lock already in place. quitting.")
return
except LockTimeout:
logger.debug("waiting for the lock timed out. quitting.")
return
logger.debug("acquired.")
try:
handle(self, logger, *args, **options)
except (KeyboardInterrupt, SystemExit):
pass
except:
import traceback
logging.warn("Command Failed")
logging.warn('=' * 72)
logging.warn(traceback.format_exc())
logging.warn('=' * 72)
logger.debug("releasing lock...")
lock.release()
logger.debug("released.")
logger.info("done in %.2f seconds" % (time.time() - start_time))
return
return wrapper | Decorate the handle method with a file lock to ensure there is only ever
one process running at any one time. | Below is the instruction that describes the task:
### Input:
Decorate the handle method with a file lock to ensure there is only ever
one process running at any one time.
### Response:
def handle_lock(handle):
"""
Decorate the handle method with a file lock to ensure there is only ever
one process running at any one time.
"""
def wrapper(self, *args, **options):
def on_interrupt(signum, frame):
# It's necessary to release lockfile
sys.exit()
signal.signal(signal.SIGTERM, on_interrupt)
start_time = time.time()
try:
verbosity = int(options.get('verbosity', 0))
except ValueError:
verbosity = 0
logger = logging.getLogger(self.__module__)
if verbosity == 0:
logger.level = logging.WARNING
elif verbosity == 1:
logger.level = logging.INFO
else:
logger.level = logging.DEBUG
logger.debug("-" * 72)
lock_name = self.__module__.split('.').pop()
lock = FileLock(os.path.join(LOCK_ROOT, lock_name))
logger.debug("%s - acquiring lock..." % lock_name)
try:
lock.acquire(LOCK_WAIT_TIMEOUT)
except AlreadyLocked:
logger.debug("lock already in place. quitting.")
return
except LockTimeout:
logger.debug("waiting for the lock timed out. quitting.")
return
logger.debug("acquired.")
try:
handle(self, logger, *args, **options)
except (KeyboardInterrupt, SystemExit):
pass
except:
import traceback
logging.warn("Command Failed")
logging.warn('=' * 72)
logging.warn(traceback.format_exc())
logging.warn('=' * 72)
logger.debug("releasing lock...")
lock.release()
logger.debug("released.")
logger.info("done in %.2f seconds" % (time.time() - start_time))
return
return wrapper |
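A sketch of how the decorator above would be applied inside a Django management command (hypothetical command; the module defining handle_lock is assumed importable). Note that the wrapped handle receives the configured logger as its first argument after self.

from django.core.management.base import BaseCommand
# from myapp.management.locking import handle_lock  # wherever the decorator lives (assumed)

class Command(BaseCommand):
    help = "Periodic job guarded against concurrent runs by a file lock"

    @handle_lock
    def handle(self, logger, *args, **options):
        logger.info("running the locked job")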
def add_children_to_node(self, node):
"""
Add children to etree.Element `node`.
"""
if self.has_children:
for child_id in self.children:
child = self.runtime.get_block(child_id)
self.runtime.add_block_as_child_node(child, node) | Add children to etree.Element `node`. | Below is the instruction that describes the task:
### Input:
Add children to etree.Element `node`.
### Response:
def add_children_to_node(self, node):
"""
Add children to etree.Element `node`.
"""
if self.has_children:
for child_id in self.children:
child = self.runtime.get_block(child_id)
self.runtime.add_block_as_child_node(child, node) |
def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name) | Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`. | Below is the instruction that describes the task:
### Input:
Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`.
### Response:
def request_type(self):
"""Retrieve the type of the request, by fetching it from
`xenon.proto.xenon_pb2`."""
if self.static and not self.uses_request:
return getattr(xenon_pb2, 'Empty')
if not self.uses_request:
return None
return getattr(xenon_pb2, self.request_name) |
async def build_get_revoc_reg_delta_request(submitter_did: Optional[str],
revoc_reg_def_id: str,
from_: Optional[int],
to: int) -> str:
"""
Builds a GET_REVOC_REG_DELTA request. Request to get the delta of the accumulated state of the Revocation Registry.
The Delta is defined by from and to timestamp fields.
If from is not specified, then the whole state till to will be returned.
:param submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
:param revoc_reg_def_id: ID of the corresponding Revocation Registry Definition in ledger.
:param from_: Requested time represented as a total number of seconds from Unix Epoch
:param to: Requested time represented as a total number of seconds from Unix Epoch
:return: Request result as json.
"""
logger = logging.getLogger(__name__)
logger.debug("build_get_revoc_reg_delta_request: >>> submitter_did: %r, revoc_reg_def_id: %r, from: %r, to: %r",
submitter_did, revoc_reg_def_id, from_, to)
if not hasattr(build_get_revoc_reg_delta_request, "cb"):
logger.debug("build_get_revoc_reg_delta_request: Creating callback")
build_get_revoc_reg_delta_request.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_submitter_did = c_char_p(submitter_did.encode('utf-8')) if submitter_did is not None else None
c_revoc_reg_def_id = c_char_p(revoc_reg_def_id.encode('utf-8'))
c_from = c_int64(from_) if from_ else -1
c_to = c_int64(to)
request_json = await do_call('indy_build_get_revoc_reg_delta_request',
c_submitter_did,
c_revoc_reg_def_id,
c_from,
c_to,
build_get_revoc_reg_delta_request.cb)
res = request_json.decode()
logger.debug("build_get_revoc_reg_delta_request: <<< res: %r", res)
return res | Builds a GET_REVOC_REG_DELTA request. Request to get the delta of the accumulated state of the Revocation Registry.
The Delta is defined by from and to timestamp fields.
If from is not specified, then the whole state till to will be returned.
:param submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
:param revoc_reg_def_id: ID of the corresponding Revocation Registry Definition in ledger.
:param from_: Requested time represented as a total number of seconds from Unix Epoch
:param to: Requested time represented as a total number of seconds from Unix Epoch
:return: Request result as json. | Below is the instruction that describes the task:
### Input:
Builds a GET_REVOC_REG_DELTA request. Request to get the delta of the accumulated state of the Revocation Registry.
The Delta is defined by from and to timestamp fields.
If from is not specified, then the whole state till to will be returned.
:param submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
:param revoc_reg_def_id: ID of the corresponding Revocation Registry Definition in ledger.
:param from_: Requested time represented as a total number of seconds from Unix Epoch
:param to: Requested time represented as a total number of seconds from Unix Epoch
:return: Request result as json.
### Response:
async def build_get_revoc_reg_delta_request(submitter_did: Optional[str],
revoc_reg_def_id: str,
from_: Optional[int],
to: int) -> str:
"""
Builds a GET_REVOC_REG_DELTA request. Request to get the delta of the accumulated state of the Revocation Registry.
The Delta is defined by from and to timestamp fields.
If from is not specified, then the whole state till to will be returned.
:param submitter_did: (Optional) DID of the read request sender (if not provided then default Libindy DID will be used).
:param revoc_reg_def_id: ID of the corresponding Revocation Registry Definition in ledger.
:param from_: Requested time represented as a total number of seconds from Unix Epoch
:param to: Requested time represented as a total number of seconds from Unix Epoch
:return: Request result as json.
"""
logger = logging.getLogger(__name__)
logger.debug("build_get_revoc_reg_delta_request: >>> submitter_did: %r, revoc_reg_def_id: %r, from: %r, to: %r",
submitter_did, revoc_reg_def_id, from_, to)
if not hasattr(build_get_revoc_reg_delta_request, "cb"):
logger.debug("build_get_revoc_reg_delta_request: Creating callback")
build_get_revoc_reg_delta_request.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_submitter_did = c_char_p(submitter_did.encode('utf-8')) if submitter_did is not None else None
c_revoc_reg_def_id = c_char_p(revoc_reg_def_id.encode('utf-8'))
c_from = c_int64(from_) if from_ else -1
c_to = c_int64(to)
request_json = await do_call('indy_build_get_revoc_reg_delta_request',
c_submitter_did,
c_revoc_reg_def_id,
c_from,
c_to,
build_get_revoc_reg_delta_request.cb)
res = request_json.decode()
logger.debug("build_get_revoc_reg_delta_request: <<< res: %r", res)
return res |
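A usage sketch, assuming the python3-indy bindings are installed: passing None for both the submitter DID and from_ asks for the whole accumulated state up to the given timestamp, and the returned JSON is then submitted with ledger.submit_request against an open pool.

import asyncio
import time
from indy import ledger  # python3-indy, assumed installed

async def fetch_delta(rev_reg_def_id):
    to = int(time.time())
    # None submitter DID and None from_ -> full state up to `to`
    return await ledger.build_get_revoc_reg_delta_request(None, rev_reg_def_id, None, to)

request_json = asyncio.get_event_loop().run_until_complete(
    fetch_delta('example:rev:reg:def:id'))  # placeholder id for illustration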
def get_by_ip_hostname(self, ip_hostname):
"""
Retrieve a storage system by its IP.
Works only with API version <= 300.
Args:
ip_hostname: Storage system IP or hostname.
Returns:
dict
"""
resources = self._client.get_all()
resources_filtered = [x for x in resources if x['credentials']['ip_hostname'] == ip_hostname]
if resources_filtered:
return resources_filtered[0]
else:
return None | Retrieve a storage system by its IP.
Works only with API version <= 300.
Args:
ip_hostname: Storage system IP or hostname.
Returns:
dict | Below is the instruction that describes the task:
### Input:
Retrieve a storage system by its IP.
Works only with API version <= 300.
Args:
ip_hostname: Storage system IP or hostname.
Returns:
dict
### Response:
def get_by_ip_hostname(self, ip_hostname):
"""
Retrieve a storage system by its IP.
Works only with API version <= 300.
Args:
ip_hostname: Storage system IP or hostname.
Returns:
dict
"""
resources = self._client.get_all()
resources_filtered = [x for x in resources if x['credentials']['ip_hostname'] == ip_hostname]
if resources_filtered:
return resources_filtered[0]
else:
return None |
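A usage sketch against the HPE OneView Python SDK (hpOneView assumed installed; the hostname, credentials and IP below are placeholders, and the call is only meaningful on API version <= 300, as noted in the docstring).

from hpOneView.oneview_client import OneViewClient

config = {
    "ip": "oneview.example.com",                                      # placeholder appliance host
    "credentials": {"userName": "administrator", "password": "secret"},
}
oneview_client = OneViewClient(config)

storage_system = oneview_client.storage_systems.get_by_ip_hostname('172.18.11.12')
print(storage_system['name'] if storage_system else 'no storage system registered at that IP')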
def add_group_COM(self, group_definitions, ref_geom=None, image_molecules=False, mass_weighted=True,):
r"""
Adds the centers of mass (COM) in cartesian coordinates of a group or groups of atoms.
If these group definitions coincide directly with residues, use :obj:`add_residue_COM` instead. No periodic
boundaries are taken into account.
Parameters
----------
group_definitions : iterable of integers
List of the groups of atom indices for which the COM will be computed. The atoms are zero-indexed.
ref_geom : :obj:`mdtraj.Trajectory`, default is None
The coordinates can be centered to a reference geometry before computing the COM.
image_molecules : boolean, default is False
The method traj.image_molecules will be called before computing averages. The method tries to correct
for molecules broken across periodic boundary conditions, but can be time consuming. See
http://mdtraj.org/latest/api/generated/mdtraj.Trajectory.html#mdtraj.Trajectory.image_molecules
for more details
mass_weighted : boolean, default is True
Set to False if you want the geometric center and not the COM
.. note::
Centering (with :obj:`ref_geom`) and imaging (:obj:`image_molecules=True`) the trajectories can sometimes be time consuming. Consider doing that to your trajectory-files prior to the featurization.
"""
from .misc import GroupCOMFeature
f = GroupCOMFeature(self.topology, group_definitions , ref_geom=ref_geom, image_molecules=image_molecules, mass_weighted=mass_weighted)
self.__add_feature(f) | r"""
Adds the centers of mass (COM) in cartesian coordinates of a group or groups of atoms.
If these group definitions coincide directly with residues, use :obj:`add_residue_COM` instead. No periodic
boundaries are taken into account.
Parameters
----------
group_definitions : iterable of integers
List of the groups of atom indices for which the COM will be computed. The atoms are zero-indexed.
ref_geom : :obj:`mdtraj.Trajectory`, default is None
The coordinates can be centered to a reference geometry before computing the COM.
image_molecules : boolean, default is False
The method traj.image_molecules will be called before computing averages. The method tries to correct
for molecules broken across periodic boundary conditions, but can be time consuming. See
http://mdtraj.org/latest/api/generated/mdtraj.Trajectory.html#mdtraj.Trajectory.image_molecules
for more details
mass_weighted : boolean, default is True
Set to False if you want the geometric center and not the COM
.. note::
Centering (with :obj:`ref_geom`) and imaging (:obj:`image_molecules=True`) the trajectories can sometimes be time consuming. Consider doing that to your trajectory-files prior to the featurization. | Below is the instruction that describes the task:
### Input:
r"""
Adds the centers of mass (COM) in cartesian coordinates of a group or groups of atoms.
If these group definitions coincide directly with residues, use :obj:`add_residue_COM` instead. No periodic
boundaries are taken into account.
Parameters
----------
group_definitions : iterable of integers
List of the groups of atom indices for which the COM will be computed. The atoms are zero-indexed.
ref_geom : :obj:`mdtraj.Trajectory`, default is None
The coordinates can be centered to a reference geometry before computing the COM.
image_molecules : boolean, default is False
The method traj.image_molecules will be called before computing averages. The method tries to correct
for molecules broken across periodic boundary conditions, but can be time consuming. See
http://mdtraj.org/latest/api/generated/mdtraj.Trajectory.html#mdtraj.Trajectory.image_molecules
for more details
mass_weighted : boolean, default is True
Set to False if you want the geometric center and not the COM
.. note::
Centering (with :obj:`ref_geom`) and imaging (:obj:`image_molecules=True`) the trajectories can sometimes be time consuming. Consider doing that to your trajectory-files prior to the featurization.
### Response:
def add_group_COM(self, group_definitions, ref_geom=None, image_molecules=False, mass_weighted=True,):
r"""
Adds the centers of mass (COM) in cartesian coordinates of a group or groups of atoms.
If these group definitions coincide directly with residues, use :obj:`add_residue_COM` instead. No periodic
boundaries are taken into account.
Parameters
----------
group_definitions : iterable of integers
List of the groups of atom indices for which the COM will be computed. The atoms are zero-indexed.
ref_geom : :obj:`mdtraj.Trajectory`, default is None
The coordinates can be centered to a reference geometry before computing the COM.
image_molecules : boolean, default is False
The method traj.image_molecules will be called before computing averages. The method tries to correct
for molecules broken across periodic boundary conditions, but can be time consuming. See
http://mdtraj.org/latest/api/generated/mdtraj.Trajectory.html#mdtraj.Trajectory.image_molecules
for more details
mass_weighted : boolean, default is True
Set to False if you want the geometric center and not the COM
.. note::
Centering (with :obj:`ref_geom`) and imaging (:obj:`image_molecules=True`) the trajectories can sometimes be time consuming. Consider doing that to your trajectory-files prior to the featurization.
"""
from .misc import GroupCOMFeature
f = GroupCOMFeature(self.topology, group_definitions , ref_geom=ref_geom, image_molecules=image_molecules, mass_weighted=mass_weighted)
self.__add_feature(f) |
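A usage sketch with PyEMMA (assumed installed; the topology file name is a placeholder): groups are plain lists of zero-indexed atom indices, and each group contributes three features, the x, y and z of its centre of mass.

import pyemma.coordinates as coor

feat = coor.featurizer('structure.pdb')        # placeholder topology file
feat.add_group_COM([[0, 1, 2], [10, 11, 12]])  # two groups -> two COMs
print(feat.dimension())                        # 6 features: x, y, z per group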
def create_cache_cluster(CacheClusterId=None, ReplicationGroupId=None, AZMode=None, PreferredAvailabilityZone=None, PreferredAvailabilityZones=None, NumCacheNodes=None, CacheNodeType=None, Engine=None, EngineVersion=None, CacheParameterGroupName=None, CacheSubnetGroupName=None, CacheSecurityGroupNames=None, SecurityGroupIds=None, Tags=None, SnapshotArns=None, SnapshotName=None, PreferredMaintenanceWindow=None, Port=None, NotificationTopicArn=None, AutoMinorVersionUpgrade=None, SnapshotRetentionLimit=None, SnapshotWindow=None, AuthToken=None):
"""
Creates a cache cluster. All nodes in the cache cluster run the same protocol-compliant cache engine software, either Memcached or Redis.
See also: AWS API Documentation
:example: response = client.create_cache_cluster(
CacheClusterId='string',
ReplicationGroupId='string',
AZMode='single-az'|'cross-az',
PreferredAvailabilityZone='string',
PreferredAvailabilityZones=[
'string',
],
NumCacheNodes=123,
CacheNodeType='string',
Engine='string',
EngineVersion='string',
CacheParameterGroupName='string',
CacheSubnetGroupName='string',
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
SnapshotArns=[
'string',
],
SnapshotName='string',
PreferredMaintenanceWindow='string',
Port=123,
NotificationTopicArn='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
AuthToken='string'
)
:type CacheClusterId: string
:param CacheClusterId: [REQUIRED]
The node group (shard) identifier. This parameter is stored as a lowercase string.
Constraints:
A name must contain from 1 to 20 alphanumeric characters or hyphens.
The first character must be a letter.
A name cannot end with a hyphen or contain two consecutive hyphens.
:type ReplicationGroupId: string
:param ReplicationGroupId:
Warning
Due to current limitations on Redis (cluster mode disabled), this operation or parameter is not supported on Redis (cluster mode enabled) replication groups.
The ID of the replication group to which this cache cluster should belong. If this parameter is specified, the cache cluster is added to the specified replication group as a read replica; otherwise, the cache cluster is a standalone primary that is not part of any replication group.
If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cache cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones.
Note
This parameter is only valid if the Engine parameter is redis .
:type AZMode: string
:param AZMode: Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.
This parameter is only supported for Memcached cache clusters.
If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode.
:type PreferredAvailabilityZone: string
:param PreferredAvailabilityZone: The EC2 Availability Zone in which the cache cluster is created.
All nodes belonging to this Memcached cache cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones .
Default: System chosen Availability Zone.
:type PreferredAvailabilityZones: list
:param PreferredAvailabilityZones: A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important.
This option is only supported on Memcached.
Note
If you are creating your cache cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.
The number of Availability Zones listed must equal the value of NumCacheNodes .
If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone instead, or repeat the Availability Zone multiple times in the list.
Default: System chosen Availability Zones.
(string) --
:type NumCacheNodes: integer
:param NumCacheNodes: The initial number of cache nodes that the cache cluster has.
For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.
If you need more than 20 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ .
:type CacheNodeType: string
:param CacheNodeType: The compute and memory capacity of the nodes in the node group (shard).
Valid node types are as follows:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
Notes:
All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).
Redis backup/restore is not supported for Redis (cluster mode disabled) T1 and T2 instances. Backup/restore is supported on Redis (cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.
For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis .
:type Engine: string
:param Engine: The name of the cache engine to be used for this cache cluster.
Valid values for this parameter are: memcached | redis
:type EngineVersion: string
:param EngineVersion: The version number of the cache engine to be used for this cache cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster or replication group and create it anew with the earlier engine version.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the parameter group to associate with this cache cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a cluster.
:type CacheSubnetGroupName: string
:param CacheSubnetGroupName: The name of the subnet group to be used for the cache cluster.
Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (Amazon VPC).
Warning
If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups .
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of security group names to associate with this cache cluster.
Use this parameter only when you are creating a cache cluster outside of an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: One or more VPC security groups associated with the cache cluster.
Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type Tags: list
:param Tags: A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.
(dict) --A cost allocation Tag that can be added to an ElastiCache cluster or replication group. Tags are composed of a Key/Value pair. A tag with a null Value is permitted.
Key (string) --The key for the tag. May not be null.
Value (string) --The tag's value. May be null.
:type SnapshotArns: list
:param SnapshotArns: A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.
Note
This parameter is only valid if the Engine parameter is redis .
Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
(string) --
:type SnapshotName: string
:param SnapshotName: The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created.
Note
This parameter is only valid if the Engine parameter is redis .
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:
Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type Port: integer
:param Port: The port number on which each of the cache nodes accepts connections.
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.
Note
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted.
Note
This parameter is only valid if the Engine parameter is redis .
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
Note: This parameter is only valid if the Engine parameter is redis .
:type AuthToken: string
:param AuthToken:
Reserved parameter. The password used to access a password protected server.
Password constraints:
Must be only printable ASCII characters.
Must be at least 16 characters and no more than 128 characters in length.
Cannot contain any of the following characters: '/', ''', or '@'.
For more information, see AUTH password at Redis.
:rtype: dict
:return: {
'CacheCluster': {
'CacheClusterId': 'string',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'ClientDownloadLandingPage': 'string',
'CacheNodeType': 'string',
'Engine': 'string',
'EngineVersion': 'string',
'CacheClusterStatus': 'string',
'NumCacheNodes': 123,
'PreferredAvailabilityZone': 'string',
'CacheClusterCreateTime': datetime(2015, 1, 1),
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'NumCacheNodes': 123,
'CacheNodeIdsToRemove': [
'string',
],
'EngineVersion': 'string',
'CacheNodeType': 'string'
},
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'CacheSecurityGroups': [
{
'CacheSecurityGroupName': 'string',
'Status': 'string'
},
],
'CacheParameterGroup': {
'CacheParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'CacheNodeIdsToReboot': [
'string',
]
},
'CacheSubnetGroupName': 'string',
'CacheNodes': [
{
'CacheNodeId': 'string',
'CacheNodeStatus': 'string',
'CacheNodeCreateTime': datetime(2015, 1, 1),
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ParameterGroupStatus': 'string',
'SourceCacheNodeId': 'string',
'CustomerAvailabilityZone': 'string'
},
],
'AutoMinorVersionUpgrade': True|False,
'SecurityGroups': [
{
'SecurityGroupId': 'string',
'Status': 'string'
},
],
'ReplicationGroupId': 'string',
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string'
}
}
:returns:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
"""
pass | Creates a cache cluster. All nodes in the cache cluster run the same protocol-compliant cache engine software, either Memcached or Redis.
See also: AWS API Documentation
:example: response = client.create_cache_cluster(
CacheClusterId='string',
ReplicationGroupId='string',
AZMode='single-az'|'cross-az',
PreferredAvailabilityZone='string',
PreferredAvailabilityZones=[
'string',
],
NumCacheNodes=123,
CacheNodeType='string',
Engine='string',
EngineVersion='string',
CacheParameterGroupName='string',
CacheSubnetGroupName='string',
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
SnapshotArns=[
'string',
],
SnapshotName='string',
PreferredMaintenanceWindow='string',
Port=123,
NotificationTopicArn='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
AuthToken='string'
)
:type CacheClusterId: string
:param CacheClusterId: [REQUIRED]
The node group (shard) identifier. This parameter is stored as a lowercase string.
Constraints:
A name must contain from 1 to 20 alphanumeric characters or hyphens.
The first character must be a letter.
A name cannot end with a hyphen or contain two consecutive hyphens.
:type ReplicationGroupId: string
:param ReplicationGroupId:
Warning
Due to current limitations on Redis (cluster mode disabled), this operation or parameter is not supported on Redis (cluster mode enabled) replication groups.
The ID of the replication group to which this cache cluster should belong. If this parameter is specified, the cache cluster is added to the specified replication group as a read replica; otherwise, the cache cluster is a standalone primary that is not part of any replication group.
If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cache cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones.
Note
This parameter is only valid if the Engine parameter is redis .
:type AZMode: string
:param AZMode: Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.
This parameter is only supported for Memcached cache clusters.
If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode.
:type PreferredAvailabilityZone: string
:param PreferredAvailabilityZone: The EC2 Availability Zone in which the cache cluster is created.
All nodes belonging to this Memcached cache cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones .
Default: System chosen Availability Zone.
:type PreferredAvailabilityZones: list
:param PreferredAvailabilityZones: A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important.
This option is only supported on Memcached.
Note
If you are creating your cache cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.
The number of Availability Zones listed must equal the value of NumCacheNodes .
If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone instead, or repeat the Availability Zone multiple times in the list.
Default: System chosen Availability Zones.
(string) --
:type NumCacheNodes: integer
:param NumCacheNodes: The initial number of cache nodes that the cache cluster has.
For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.
If you need more than 20 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ .
:type CacheNodeType: string
:param CacheNodeType: The compute and memory capacity of the nodes in the node group (shard).
Valid node types are as follows:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
Notes:
All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).
Redis backup/restore is not supported for Redis (cluster mode disabled) T1 and T2 instances. Backup/restore is supported on Redis (cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.
For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis .
:type Engine: string
:param Engine: The name of the cache engine to be used for this cache cluster.
Valid values for this parameter are: memcached | redis
:type EngineVersion: string
:param EngineVersion: The version number of the cache engine to be used for this cache cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster or replication group and create it anew with the earlier engine version.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the parameter group to associate with this cache cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a cluster.
:type CacheSubnetGroupName: string
:param CacheSubnetGroupName: The name of the subnet group to be used for the cache cluster.
Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (Amazon VPC).
Warning
If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups .
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of security group names to associate with this cache cluster.
Use this parameter only when you are creating a cache cluster outside of an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: One or more VPC security groups associated with the cache cluster.
Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type Tags: list
:param Tags: A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.
(dict) --A cost allocation Tag that can be added to an ElastiCache cluster or replication group. Tags are composed of a Key/Value pair. A tag with a null Value is permitted.
Key (string) --The key for the tag. May not be null.
Value (string) --The tag's value. May be null.
:type SnapshotArns: list
:param SnapshotArns: A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.
Note
This parameter is only valid if the Engine parameter is redis .
Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
(string) --
:type SnapshotName: string
:param SnapshotName: The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created.
Note
This parameter is only valid if the Engine parameter is redis .
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:
Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type Port: integer
:param Port: The port number on which each of the cache nodes accepts connections.
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.
Note
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted.
Note
This parameter is only valid if the Engine parameter is redis .
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
Note: This parameter is only valid if the Engine parameter is redis .
:type AuthToken: string
:param AuthToken:
Reserved parameter. The password used to access a password protected server.
Password constraints:
Must be only printable ASCII characters.
Must be at least 16 characters and no more than 128 characters in length.
Cannot contain any of the following characters: '/', ''', or '@'.
For more information, see AUTH password at Redis.
:rtype: dict
:return: {
'CacheCluster': {
'CacheClusterId': 'string',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'ClientDownloadLandingPage': 'string',
'CacheNodeType': 'string',
'Engine': 'string',
'EngineVersion': 'string',
'CacheClusterStatus': 'string',
'NumCacheNodes': 123,
'PreferredAvailabilityZone': 'string',
'CacheClusterCreateTime': datetime(2015, 1, 1),
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'NumCacheNodes': 123,
'CacheNodeIdsToRemove': [
'string',
],
'EngineVersion': 'string',
'CacheNodeType': 'string'
},
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'CacheSecurityGroups': [
{
'CacheSecurityGroupName': 'string',
'Status': 'string'
},
],
'CacheParameterGroup': {
'CacheParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'CacheNodeIdsToReboot': [
'string',
]
},
'CacheSubnetGroupName': 'string',
'CacheNodes': [
{
'CacheNodeId': 'string',
'CacheNodeStatus': 'string',
'CacheNodeCreateTime': datetime(2015, 1, 1),
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ParameterGroupStatus': 'string',
'SourceCacheNodeId': 'string',
'CustomerAvailabilityZone': 'string'
},
],
'AutoMinorVersionUpgrade': True|False,
'SecurityGroups': [
{
'SecurityGroupId': 'string',
'Status': 'string'
},
],
'ReplicationGroupId': 'string',
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string'
}
}
:returns:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge | Below is the instruction that describes the task:
### Input:
Creates a cache cluster. All nodes in the cache cluster run the same protocol-compliant cache engine software, either Memcached or Redis.
See also: AWS API Documentation
:example: response = client.create_cache_cluster(
CacheClusterId='string',
ReplicationGroupId='string',
AZMode='single-az'|'cross-az',
PreferredAvailabilityZone='string',
PreferredAvailabilityZones=[
'string',
],
NumCacheNodes=123,
CacheNodeType='string',
Engine='string',
EngineVersion='string',
CacheParameterGroupName='string',
CacheSubnetGroupName='string',
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
SnapshotArns=[
'string',
],
SnapshotName='string',
PreferredMaintenanceWindow='string',
Port=123,
NotificationTopicArn='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
AuthToken='string'
)
:type CacheClusterId: string
:param CacheClusterId: [REQUIRED]
The node group (shard) identifier. This parameter is stored as a lowercase string.
Constraints:
A name must contain from 1 to 20 alphanumeric characters or hyphens.
The first character must be a letter.
A name cannot end with a hyphen or contain two consecutive hyphens.
:type ReplicationGroupId: string
:param ReplicationGroupId:
Warning
Due to current limitations on Redis (cluster mode disabled), this operation or parameter is not supported on Redis (cluster mode enabled) replication groups.
The ID of the replication group to which this cache cluster should belong. If this parameter is specified, the cache cluster is added to the specified replication group as a read replica; otherwise, the cache cluster is a standalone primary that is not part of any replication group.
If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cache cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones.
Note
This parameter is only valid if the Engine parameter is redis .
:type AZMode: string
:param AZMode: Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.
This parameter is only supported for Memcached cache clusters.
If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode.
:type PreferredAvailabilityZone: string
:param PreferredAvailabilityZone: The EC2 Availability Zone in which the cache cluster is created.
All nodes belonging to this Memcached cache cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones .
Default: System chosen Availability Zone.
:type PreferredAvailabilityZones: list
:param PreferredAvailabilityZones: A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important.
This option is only supported on Memcached.
Note
If you are creating your cache cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.
The number of Availability Zones listed must equal the value of NumCacheNodes .
If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone instead, or repeat the Availability Zone multiple times in the list.
Default: System chosen Availability Zones.
(string) --
:type NumCacheNodes: integer
:param NumCacheNodes: The initial number of cache nodes that the cache cluster has.
For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.
If you need more than 20 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ .
:type CacheNodeType: string
:param CacheNodeType: The compute and memory capacity of the nodes in the node group (shard).
Valid node types are as follows:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
Notes:
All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).
Redis backup/restore is not supported for Redis (cluster mode disabled) T1 and T2 instances. Backup/restore is supported on Redis (cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.
For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis .
:type Engine: string
:param Engine: The name of the cache engine to be used for this cache cluster.
Valid values for this parameter are: memcached | redis
:type EngineVersion: string
:param EngineVersion: The version number of the cache engine to be used for this cache cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster or replication group and create it anew with the earlier engine version.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the parameter group to associate with this cache cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a cluster.
:type CacheSubnetGroupName: string
:param CacheSubnetGroupName: The name of the subnet group to be used for the cache cluster.
Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (Amazon VPC).
Warning
If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups .
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of security group names to associate with this cache cluster.
Use this parameter only when you are creating a cache cluster outside of an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: One or more VPC security groups associated with the cache cluster.
Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type Tags: list
:param Tags: A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.
(dict) --A cost allocation Tag that can be added to an ElastiCache cluster or replication group. Tags are composed of a Key/Value pair. A tag with a null Value is permitted.
Key (string) --The key for the tag. May not be null.
Value (string) --The tag's value. May be null.
:type SnapshotArns: list
:param SnapshotArns: A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.
Note
This parameter is only valid if the Engine parameter is redis .
Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
(string) --
:type SnapshotName: string
:param SnapshotName: The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created.
Note
This parameter is only valid if the Engine parameter is redis .
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:
Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type Port: integer
:param Port: The port number on which each of the cache nodes accepts connections.
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.
Note
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted.
Note
This parameter is only valid if the Engine parameter is redis .
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
Note: This parameter is only valid if the Engine parameter is redis .
:type AuthToken: string
:param AuthToken:
Reserved parameter. The password used to access a password protected server.
Password constraints:
Must be only printable ASCII characters.
Must be at least 16 characters and no more than 128 characters in length.
Cannot contain any of the following characters: '/', '"', or '@'.
For more information, see AUTH password at Redis.
:rtype: dict
:return: {
'CacheCluster': {
'CacheClusterId': 'string',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'ClientDownloadLandingPage': 'string',
'CacheNodeType': 'string',
'Engine': 'string',
'EngineVersion': 'string',
'CacheClusterStatus': 'string',
'NumCacheNodes': 123,
'PreferredAvailabilityZone': 'string',
'CacheClusterCreateTime': datetime(2015, 1, 1),
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'NumCacheNodes': 123,
'CacheNodeIdsToRemove': [
'string',
],
'EngineVersion': 'string',
'CacheNodeType': 'string'
},
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'CacheSecurityGroups': [
{
'CacheSecurityGroupName': 'string',
'Status': 'string'
},
],
'CacheParameterGroup': {
'CacheParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'CacheNodeIdsToReboot': [
'string',
]
},
'CacheSubnetGroupName': 'string',
'CacheNodes': [
{
'CacheNodeId': 'string',
'CacheNodeStatus': 'string',
'CacheNodeCreateTime': datetime(2015, 1, 1),
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ParameterGroupStatus': 'string',
'SourceCacheNodeId': 'string',
'CustomerAvailabilityZone': 'string'
},
],
'AutoMinorVersionUpgrade': True|False,
'SecurityGroups': [
{
'SecurityGroupId': 'string',
'Status': 'string'
},
],
'ReplicationGroupId': 'string',
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string'
}
}
:returns:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
### Response:
def create_cache_cluster(CacheClusterId=None, ReplicationGroupId=None, AZMode=None, PreferredAvailabilityZone=None, PreferredAvailabilityZones=None, NumCacheNodes=None, CacheNodeType=None, Engine=None, EngineVersion=None, CacheParameterGroupName=None, CacheSubnetGroupName=None, CacheSecurityGroupNames=None, SecurityGroupIds=None, Tags=None, SnapshotArns=None, SnapshotName=None, PreferredMaintenanceWindow=None, Port=None, NotificationTopicArn=None, AutoMinorVersionUpgrade=None, SnapshotRetentionLimit=None, SnapshotWindow=None, AuthToken=None):
"""
Creates a cache cluster. All nodes in the cache cluster run the same protocol-compliant cache engine software, either Memcached or Redis.
See also: AWS API Documentation
:example: response = client.create_cache_cluster(
CacheClusterId='string',
ReplicationGroupId='string',
AZMode='single-az'|'cross-az',
PreferredAvailabilityZone='string',
PreferredAvailabilityZones=[
'string',
],
NumCacheNodes=123,
CacheNodeType='string',
Engine='string',
EngineVersion='string',
CacheParameterGroupName='string',
CacheSubnetGroupName='string',
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
SnapshotArns=[
'string',
],
SnapshotName='string',
PreferredMaintenanceWindow='string',
Port=123,
NotificationTopicArn='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
AuthToken='string'
)
:type CacheClusterId: string
:param CacheClusterId: [REQUIRED]
The node group (shard) identifier. This parameter is stored as a lowercase string.
Constraints:
A name must contain from 1 to 20 alphanumeric characters or hyphens.
The first character must be a letter.
A name cannot end with a hyphen or contain two consecutive hyphens.
:type ReplicationGroupId: string
:param ReplicationGroupId:
Warning
Due to current limitations on Redis (cluster mode disabled), this operation or parameter is not supported on Redis (cluster mode enabled) replication groups.
The ID of the replication group to which this cache cluster should belong. If this parameter is specified, the cache cluster is added to the specified replication group as a read replica; otherwise, the cache cluster is a standalone primary that is not part of any replication group.
If the specified replication group is Multi-AZ enabled and the Availability Zone is not specified, the cache cluster is created in Availability Zones that provide the best spread of read replicas across Availability Zones.
Note
This parameter is only valid if the Engine parameter is redis .
:type AZMode: string
:param AZMode: Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.
This parameter is only supported for Memcached cache clusters.
If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache assumes single-az mode.
:type PreferredAvailabilityZone: string
:param PreferredAvailabilityZone: The EC2 Availability Zone in which the cache cluster is created.
All nodes belonging to this Memcached cache cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use PreferredAvailabilityZones .
Default: System chosen Availability Zone.
:type PreferredAvailabilityZones: list
:param PreferredAvailabilityZones: A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important.
This option is only supported on Memcached.
Note
If you are creating your cache cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.
The number of Availability Zones listed must equal the value of NumCacheNodes .
If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone instead, or repeat the Availability Zone multiple times in the list.
Default: System chosen Availability Zones.
(string) --
:type NumCacheNodes: integer
:param NumCacheNodes: The initial number of cache nodes that the cache cluster has.
For clusters running Redis, this value must be 1. For clusters running Memcached, this value must be between 1 and 20.
If you need more than 20 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ .
:type CacheNodeType: string
:param CacheNodeType: The compute and memory capacity of the nodes in the node group (shard).
Valid node types are as follows:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
Notes:
All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).
Redis backup/restore is not supported for Redis (cluster mode disabled) T1 and T2 instances. Backup/restore is supported on Redis (cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.
For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis .
:type Engine: string
:param Engine: The name of the cache engine to be used for this cache cluster.
Valid values for this parameter are: memcached | redis
:type EngineVersion: string
:param EngineVersion: The version number of the cache engine to be used for this cache cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster or replication group and create it anew with the earlier engine version.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the parameter group to associate with this cache cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a cluster.
:type CacheSubnetGroupName: string
:param CacheSubnetGroupName: The name of the subnet group to be used for the cache cluster.
Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (Amazon VPC).
Warning
If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups .
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of security group names to associate with this cache cluster.
Use this parameter only when you are creating a cache cluster outside of an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: One or more VPC security groups associated with the cache cluster.
Use this parameter only when you are creating a cache cluster in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type Tags: list
:param Tags: A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.
(dict) --A cost allocation Tag that can be added to an ElastiCache cluster or replication group. Tags are composed of a Key/Value pair. A tag with a null Value is permitted.
Key (string) --The key for the tag. May not be null.
Value (string) --The tag's value. May be null.
:type SnapshotArns: list
:param SnapshotArns: A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.
Note
This parameter is only valid if the Engine parameter is redis .
Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
(string) --
:type SnapshotName: string
:param SnapshotName: The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created.
Note
This parameter is only valid if the Engine parameter is redis .
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:
Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type Port: integer
:param Port: The port number on which each of the cache nodes accepts connections.
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.
Note
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot taken today is retained for 5 days before being deleted.
Note
This parameter is only valid if the Engine parameter is redis .
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
Note: This parameter is only valid if the Engine parameter is redis .
:type AuthToken: string
:param AuthToken:
Reserved parameter. The password used to access a password protected server.
Password constraints:
Must be only printable ASCII characters.
Must be at least 16 characters and no more than 128 characters in length.
Cannot contain any of the following characters: '/', '"', or '@'.
For more information, see AUTH password at Redis.
:rtype: dict
:return: {
'CacheCluster': {
'CacheClusterId': 'string',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'ClientDownloadLandingPage': 'string',
'CacheNodeType': 'string',
'Engine': 'string',
'EngineVersion': 'string',
'CacheClusterStatus': 'string',
'NumCacheNodes': 123,
'PreferredAvailabilityZone': 'string',
'CacheClusterCreateTime': datetime(2015, 1, 1),
'PreferredMaintenanceWindow': 'string',
'PendingModifiedValues': {
'NumCacheNodes': 123,
'CacheNodeIdsToRemove': [
'string',
],
'EngineVersion': 'string',
'CacheNodeType': 'string'
},
'NotificationConfiguration': {
'TopicArn': 'string',
'TopicStatus': 'string'
},
'CacheSecurityGroups': [
{
'CacheSecurityGroupName': 'string',
'Status': 'string'
},
],
'CacheParameterGroup': {
'CacheParameterGroupName': 'string',
'ParameterApplyStatus': 'string',
'CacheNodeIdsToReboot': [
'string',
]
},
'CacheSubnetGroupName': 'string',
'CacheNodes': [
{
'CacheNodeId': 'string',
'CacheNodeStatus': 'string',
'CacheNodeCreateTime': datetime(2015, 1, 1),
'Endpoint': {
'Address': 'string',
'Port': 123
},
'ParameterGroupStatus': 'string',
'SourceCacheNodeId': 'string',
'CustomerAvailabilityZone': 'string'
},
],
'AutoMinorVersionUpgrade': True|False,
'SecurityGroups': [
{
'SecurityGroupId': 'string',
'Status': 'string'
},
],
'ReplicationGroupId': 'string',
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string'
}
}
:returns:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
"""
pass |
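As a quick illustration of the call documented above, a minimal usage sketch follows. It assumes boto3 is installed and AWS credentials are configured; the region, cluster id, and node type are invented placeholders rather than values taken from this row.

import boto3

# Hypothetical example: create a small Memcached cluster with the parameters described above.
client = boto3.client('elasticache', region_name='us-east-1')
response = client.create_cache_cluster(
    CacheClusterId='demo-memcached',   # 1-20 alphanumeric chars or hyphens, starting with a letter
    Engine='memcached',
    CacheNodeType='cache.t2.micro',
    NumCacheNodes=2,                   # Memcached clusters may have 1-20 nodes
    AZMode='single-az',
)
print(response['CacheCluster']['CacheClusterStatus'])

For Redis, NumCacheNodes would have to be 1 and parameters such as SnapshotRetentionLimit and SnapshotWindow become relevant, per the parameter notes above.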
def project_layout(proposal, user=None, repo=None, log=None):
"""
generate the project template
proposal is the name of the project,
user is an object containing some information about the user.
- full name,
- github username
- email
"""
proposal = proposal.lower()
#context_file = os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/cookiecutter.json')
#context = generate_context(context_file)
# os.chdir('..')
# context['cookiecutter']['full_name'] = user.name
# context['cookiecutter']['email'] = user.email
# context['cookiecutter']['github_username'] = user.login
# context['cookiecutter']['project_name'] = proposal
# context['cookiecutter']['repo_name'] = proposal.lower()
try:
os.mkdir(proposal)
except FileExistsError:
log.info('Skip directory structure, as project seems to already exist')
with open('.gitignore', 'w') as f:
f.write('''
*.pyc
__pycache__
/build/
/dist/
''')
with open( '/'.join([proposal, '__init__.py']), 'w') as f:
f.write('''
"""
a simple package
"""
__version__ = '0.0.1'
''')
travis_yml()
#generate_files(
# repo_dir=os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/'),
# context=context
# )
log.info('Working in %s', os.getcwd())
os.listdir('.')
subprocess.call(['git','add','.'], )
subprocess.call(['git','commit',"-am'initial commit of %s'" % proposal])
subprocess.call(['git', "push", "origin", "master:master"]) | generate the project template
proposal is the name of the project,
user is an object containing some information about the user.
- full name,
- github username
- email | Below is the instruction that describes the task:
### Input:
generate the project template
proposal is the name of the project,
user is an object containing some information about the user.
- full name,
- github username
- email
### Response:
def project_layout(proposal, user=None, repo=None, log=None):
"""
generate the project template
proposal is the name of the project,
user is an object containing some information about the user.
- full name,
- github username
- email
"""
proposal = proposal.lower()
#context_file = os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/cookiecutter.json')
#context = generate_context(context_file)
# os.chdir('..')
# context['cookiecutter']['full_name'] = user.name
# context['cookiecutter']['email'] = user.email
# context['cookiecutter']['github_username'] = user.login
# context['cookiecutter']['project_name'] = proposal
# context['cookiecutter']['repo_name'] = proposal.lower()
try:
os.mkdir(proposal)
except FileExistsError:
log.info('Skip directory structure, as project seems to already exist')
with open('.gitignore', 'w') as f:
f.write('''
*.pyc
__pycache__
/build/
/dist/
''')
with open( '/'.join([proposal, '__init__.py']), 'w') as f:
f.write('''
"""
a simple package
"""
__version__ = '0.0.1'
''')
travis_yml()
#generate_files(
# repo_dir=os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/'),
# context=context
# )
log.info('Working in %s', os.getcwd())
os.listdir('.')
subprocess.call(['git','add','.'], )
subprocess.call(['git','commit',"-am'initial commit of %s'" % proposal])
subprocess.call(['git', "push", "origin", "master:master"]) |
def serial_ppmap(func, fixed_arg, var_arg_iter):
"""A serial implementation of the "partially-pickling map" function returned
by the :meth:`ParallelHelper.get_ppmap` interface. Its arguments are:
*func*
A callable taking three arguments and returning a Pickle-able value.
*fixed_arg*
Any value, even one that is not pickle-able.
*var_arg_iter*
An iterable that generates Pickle-able values.
The functionality is::
def serial_ppmap(func, fixed_arg, var_arg_iter):
return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)]
Therefore the arguments to your ``func`` function, which actually does the
interesting computations, are:
*index*
The 0-based index number of the item being processed; often this can
be ignored.
*fixed_arg*
The same *fixed_arg* that was passed to ``ppmap``.
*var_arg*
The *index*'th item in the *var_arg_iter* iterable passed to
``ppmap``.
"""
return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)] | A serial implementation of the "partially-pickling map" function returned
by the :meth:`ParallelHelper.get_ppmap` interface. Its arguments are:
*func*
A callable taking three arguments and returning a Pickle-able value.
*fixed_arg*
Any value, even one that is not pickle-able.
*var_arg_iter*
An iterable that generates Pickle-able values.
The functionality is::
def serial_ppmap(func, fixed_arg, var_arg_iter):
return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)]
Therefore the arguments to your ``func`` function, which actually does the
interesting computations, are:
*index*
The 0-based index number of the item being processed; often this can
be ignored.
*fixed_arg*
The same *fixed_arg* that was passed to ``ppmap``.
*var_arg*
The *index*'th item in the *var_arg_iter* iterable passed to
``ppmap``. | Below is the instruction that describes the task:
### Input:
A serial implementation of the "partially-pickling map" function returned
by the :meth:`ParallelHelper.get_ppmap` interface. Its arguments are:
*func*
A callable taking three arguments and returning a Pickle-able value.
*fixed_arg*
Any value, even one that is not pickle-able.
*var_arg_iter*
An iterable that generates Pickle-able values.
The functionality is::
def serial_ppmap(func, fixed_arg, var_arg_iter):
return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)]
Therefore the arguments to your ``func`` function, which actually does the
interesting computations, are:
*index*
The 0-based index number of the item being processed; often this can
be ignored.
*fixed_arg*
The same *fixed_arg* that was passed to ``ppmap``.
*var_arg*
The *index*'th item in the *var_arg_iter* iterable passed to
``ppmap``.
### Response:
def serial_ppmap(func, fixed_arg, var_arg_iter):
"""A serial implementation of the "partially-pickling map" function returned
by the :meth:`ParallelHelper.get_ppmap` interface. Its arguments are:
*func*
A callable taking three arguments and returning a Pickle-able value.
*fixed_arg*
Any value, even one that is not pickle-able.
*var_arg_iter*
An iterable that generates Pickle-able values.
The functionality is::
def serial_ppmap(func, fixed_arg, var_arg_iter):
return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)]
Therefore the arguments to your ``func`` function, which actually does the
interesting computations, are:
*index*
The 0-based index number of the item being processed; often this can
be ignored.
*fixed_arg*
The same *fixed_arg* that was passed to ``ppmap``.
*var_arg*
The *index*'th item in the *var_arg_iter* iterable passed to
``ppmap``.
"""
return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)] |
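To make the (index, fixed_arg, var_arg) calling convention concrete, here is a small self-contained sketch; the worker function and inputs are invented for illustration.

def serial_ppmap(func, fixed_arg, var_arg_iter):
    return [func(i, fixed_arg, x) for i, x in enumerate(var_arg_iter)]

def scale(index, factor, value):
    # index is supplied by ppmap but not needed by this toy worker
    return factor * value

print(serial_ppmap(scale, 10, [1, 2, 3]))  # -> [10, 20, 30]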
def group_plugins_into_categories(plugins):
"""
Return all plugins, grouped by category.
The structure is a {"Categorynane": [list of plugin classes]}
"""
if not plugins:
return {}
plugins = sorted(plugins, key=lambda p: p.verbose_name)
categories = {}
for plugin in plugins:
title = str(plugin.category or u"") # enforce resolving ugettext_lazy proxies.
if title not in categories:
categories[title] = []
categories[title].append(plugin)
return categories | Return all plugins, grouped by category.
The structure is a {"Categorynane": [list of plugin classes]} | Below is the the instruction that describes the task:
### Input:
Return all plugins, grouped by category.
The structure is a {"Categorynane": [list of plugin classes]}
### Response:
def group_plugins_into_categories(plugins):
"""
Return all plugins, grouped by category.
The structure is a {"Categorynane": [list of plugin classes]}
"""
if not plugins:
return {}
plugins = sorted(plugins, key=lambda p: p.verbose_name)
categories = {}
for plugin in plugins:
title = str(plugin.category or u"") # enforce resolving ugettext_lazy proxies.
if title not in categories:
categories[title] = []
categories[title].append(plugin)
return categories |
def _do_request(self, method, *args, **kwargs):
"""
Modularized because API was broken.
Need to be able to inject Mocked response objects here.
"""
log('Doing HTTP [{3}] request: {0} - headers: {1} - payload: {2}'.format(
args[0], kwargs.get('headers'), kwargs.get('json'), method,),
level=logging.DEBUG,)
requests_method = getattr(requests, method)
return self._handle_response(requests_method(*args, **kwargs)) | Modularized because API was broken.
Need to be able to inject Mocked response objects here. | Below is the instruction that describes the task:
### Input:
Modularized because API was broken.
Need to be able to inject Mocked response objects here.
### Response:
def _do_request(self, method, *args, **kwargs):
"""
Modularized because API was broken.
Need to be able to inject Mocked response objects here.
"""
log('Doing HTTP [{3}] request: {0} - headers: {1} - payload: {2}'.format(
args[0], kwargs.get('headers'), kwargs.get('json'), method,),
level=logging.DEBUG,)
requests_method = getattr(requests, method)
return self._handle_response(requests_method(*args, **kwargs)) |
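The core trick in _do_request is resolving the HTTP verb by name with getattr(requests, method). A stripped-down standalone sketch of that pattern follows; the URL is an arbitrary public test endpoint, not something referenced by this code.

import requests

def do_request(method, url, **kwargs):
    # 'get', 'post', 'put', ... resolve to requests.get, requests.post, ...
    requests_method = getattr(requests, method)
    return requests_method(url, **kwargs)

resp = do_request('get', 'https://httpbin.org/get', params={'q': 'demo'})
print(resp.status_code)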
def calc_size(rect, orientation):
"""Calculate a size
Parameters
----------
rect : rectangle
The rectangle.
orientation : str
Either "bottom" or "top".
"""
(total_halfx, total_halfy) = rect.center
if orientation in ["bottom", "top"]:
(total_major_axis, total_minor_axis) = (total_halfx, total_halfy)
else:
(total_major_axis, total_minor_axis) = (total_halfy, total_halfx)
major_axis = total_major_axis * (1.0 -
ColorBarWidget.major_axis_padding)
minor_axis = major_axis * ColorBarWidget.minor_axis_ratio
# if the minor axis is "leaking" from the padding, then clamp
minor_axis = np.minimum(minor_axis,
total_minor_axis *
(1.0 - ColorBarWidget.minor_axis_padding))
return (major_axis, minor_axis) | Calculate a size
Parameters
----------
rect : rectangle
The rectangle.
orientation : str
Either "bottom" or "top". | Below is the the instruction that describes the task:
### Input:
Calculate a size
Parameters
----------
rect : rectangle
The rectangle.
orientation : str
Either "bottom" or "top".
### Response:
def calc_size(rect, orientation):
"""Calculate a size
Parameters
----------
rect : rectangle
The rectangle.
orientation : str
Either "bottom" or "top".
"""
(total_halfx, total_halfy) = rect.center
if orientation in ["bottom", "top"]:
(total_major_axis, total_minor_axis) = (total_halfx, total_halfy)
else:
(total_major_axis, total_minor_axis) = (total_halfy, total_halfx)
major_axis = total_major_axis * (1.0 -
ColorBarWidget.major_axis_padding)
minor_axis = major_axis * ColorBarWidget.minor_axis_ratio
# if the minor axis is "leaking" from the padding, then clamp
minor_axis = np.minimum(minor_axis,
total_minor_axis *
(1.0 - ColorBarWidget.minor_axis_padding))
return (major_axis, minor_axis) |
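A worked example of the sizing arithmetic may help; the padding constants below are invented stand-ins for the ColorBarWidget class attributes referenced above, chosen only to make the numbers easy to follow.

import numpy as np

major_axis_padding = 0.2   # assumed value, stands in for ColorBarWidget.major_axis_padding
minor_axis_ratio = 0.05    # assumed value, stands in for ColorBarWidget.minor_axis_ratio
minor_axis_padding = 0.3   # assumed value, stands in for ColorBarWidget.minor_axis_padding

total_halfx, total_halfy = 200.0, 50.0   # rect.center of a 400x100 rectangle
# "bottom"/"top" orientation: the major axis runs horizontally
total_major, total_minor = total_halfx, total_halfy

major_axis = total_major * (1.0 - major_axis_padding)               # 200 * 0.8 = 160.0
minor_axis = np.minimum(major_axis * minor_axis_ratio,              # 160 * 0.05 = 8.0
                        total_minor * (1.0 - minor_axis_padding))   # capped at 50 * 0.7 = 35.0
print(major_axis, minor_axis)   # 160.0 8.0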
def to_gbq(self, destination_table, project_id=None, chunksize=None,
reauth=False, if_exists='fail', auth_local_webserver=False,
table_schema=None, location=None, progress_bar=True,
credentials=None, verbose=None, private_key=None):
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which according DataFrame
columns conform to, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
verbose : bool, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
return gbq.to_gbq(
self, destination_table, project_id=project_id,
chunksize=chunksize, reauth=reauth, if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema, location=location,
progress_bar=progress_bar, credentials=credentials,
verbose=verbose, private_key=private_key) | Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which according DataFrame
columns conform to, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
verbose : bool, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery. | Below is the instruction that describes the task:
### Input:
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which according DataFrame
columns conform to, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
verbose : bool, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
### Response:
def to_gbq(self, destination_table, project_id=None, chunksize=None,
reauth=False, if_exists='fail', auth_local_webserver=False,
table_schema=None, location=None, progress_bar=True,
credentials=None, verbose=None, private_key=None):
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which according DataFrame
columns conform to, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
verbose : bool, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
return gbq.to_gbq(
self, destination_table, project_id=project_id,
chunksize=chunksize, reauth=reauth, if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema, location=location,
progress_bar=progress_bar, credentials=credentials,
verbose=verbose, private_key=private_key) |
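A minimal usage sketch of the method documented above; it assumes the pandas-gbq package is installed and Google credentials are available, and the project and table ids are placeholders.

import pandas as pd

df = pd.DataFrame({'name': ['a', 'b'], 'score': [1.0, 2.5]})
df.to_gbq(
    destination_table='my_dataset.scores',   # dataset.tablename
    project_id='my-gcp-project',             # placeholder project id
    if_exists='append',                      # 'fail' | 'replace' | 'append'
    progress_bar=False,
)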
def open_upload_stream_with_id(
self, file_id, filename, chunk_size_bytes=None, metadata=None):
"""Opens a Stream that the application can write the contents of the
file to.
The user must specify the file id and filename, and can choose to add
any additional information in the metadata field of the file document
or modify the chunk size.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
grid_in, file_id = fs.open_upload_stream(
ObjectId(),
"test_file",
chunk_size_bytes=4,
metadata={"contentType": "text/plain"})
grid_in.write("data I want to store!")
grid_in.close() # uploaded on close
Returns an instance of :class:`~gridfs.grid_file.GridIn`.
Raises :exc:`~gridfs.errors.NoFile` if no such version of
that file exists.
Raises :exc:`~ValueError` if `filename` is not a string.
:Parameters:
- `file_id`: The id to use for this file. The id must not have
already been used for another file.
- `filename`: The name of the file to upload.
- `chunk_size_bytes` (options): The number of bytes per chunk of this
file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`.
- `metadata` (optional): User data for the 'metadata' field of the
files collection document. If not provided the metadata field will
be omitted from the files collection document.
"""
validate_string("filename", filename)
opts = {"_id": file_id,
"filename": filename,
"chunk_size": (chunk_size_bytes if chunk_size_bytes
is not None else self._chunk_size_bytes)}
if metadata is not None:
opts["metadata"] = metadata
return GridIn(self._collection, **opts) | Opens a Stream that the application can write the contents of the
file to.
The user must specify the file id and filename, and can choose to add
any additional information in the metadata field of the file document
or modify the chunk size.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
grid_in, file_id = fs.open_upload_stream(
ObjectId(),
"test_file",
chunk_size_bytes=4,
metadata={"contentType": "text/plain"})
grid_in.write("data I want to store!")
grid_in.close() # uploaded on close
Returns an instance of :class:`~gridfs.grid_file.GridIn`.
Raises :exc:`~gridfs.errors.NoFile` if no such version of
that file exists.
Raises :exc:`~ValueError` if `filename` is not a string.
:Parameters:
- `file_id`: The id to use for this file. The id must not have
already been used for another file.
- `filename`: The name of the file to upload.
- `chunk_size_bytes` (options): The number of bytes per chunk of this
file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`.
- `metadata` (optional): User data for the 'metadata' field of the
files collection document. If not provided the metadata field will
be omitted from the files collection document. | Below is the instruction that describes the task:
### Input:
Opens a Stream that the application can write the contents of the
file to.
The user must specify the file id and filename, and can choose to add
any additional information in the metadata field of the file document
or modify the chunk size.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
grid_in, file_id = fs.open_upload_stream(
ObjectId(),
"test_file",
chunk_size_bytes=4,
metadata={"contentType": "text/plain"})
grid_in.write("data I want to store!")
grid_in.close() # uploaded on close
Returns an instance of :class:`~gridfs.grid_file.GridIn`.
Raises :exc:`~gridfs.errors.NoFile` if no such version of
that file exists.
Raises :exc:`~ValueError` if `filename` is not a string.
:Parameters:
- `file_id`: The id to use for this file. The id must not have
already been used for another file.
- `filename`: The name of the file to upload.
- `chunk_size_bytes` (options): The number of bytes per chunk of this
file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`.
- `metadata` (optional): User data for the 'metadata' field of the
files collection document. If not provided the metadata field will
be omitted from the files collection document.
### Response:
def open_upload_stream_with_id(
self, file_id, filename, chunk_size_bytes=None, metadata=None):
"""Opens a Stream that the application can write the contents of the
file to.
The user must specify the file id and filename, and can choose to add
any additional information in the metadata field of the file document
or modify the chunk size.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
grid_in, file_id = fs.open_upload_stream(
ObjectId(),
"test_file",
chunk_size_bytes=4,
metadata={"contentType": "text/plain"})
grid_in.write("data I want to store!")
grid_in.close() # uploaded on close
Returns an instance of :class:`~gridfs.grid_file.GridIn`.
Raises :exc:`~gridfs.errors.NoFile` if no such version of
that file exists.
Raises :exc:`~ValueError` if `filename` is not a string.
:Parameters:
- `file_id`: The id to use for this file. The id must not have
already been used for another file.
- `filename`: The name of the file to upload.
- `chunk_size_bytes` (options): The number of bytes per chunk of this
file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`.
- `metadata` (optional): User data for the 'metadata' field of the
files collection document. If not provided the metadata field will
be omitted from the files collection document.
"""
validate_string("filename", filename)
opts = {"_id": file_id,
"filename": filename,
"chunk_size": (chunk_size_bytes if chunk_size_bytes
is not None else self._chunk_size_bytes)}
if metadata is not None:
opts["metadata"] = metadata
return GridIn(self._collection, **opts) |
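The example embedded in the docstring calls open_upload_stream; a sketch of the _with_id variant itself is shown below. It assumes a MongoDB server on localhost and that pymongo is installed; the database and file names are placeholders.

from bson import ObjectId
from pymongo import MongoClient
from gridfs import GridFSBucket

fs = GridFSBucket(MongoClient().test)
file_id = ObjectId()                      # caller-chosen id, must not already be in use
grid_in = fs.open_upload_stream_with_id(
    file_id,
    'test_file',
    chunk_size_bytes=4,
    metadata={'contentType': 'text/plain'},
)
grid_in.write(b'data I want to store!')
grid_in.close()                           # the file document is written on close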
def guess_package_path(searchfrom):
"""
package path. return None if failed to guess
"""
from snipy.io import fileutil
current = searchfrom + '/'
init_found = False
pack_found = False
while not init_found and current != '/':
current = os.path.dirname(current)
initfile = os.path.join(current, '__init__.py')
init_found = os.path.exists(initfile)
if not init_found:
# search for breadth
searchfrom = dirname(searchfrom)
for folder in fileutil.listfolder(searchfrom):
current = os.path.join(searchfrom, folder)
initfile = os.path.join(current, '__init__.py')
init_found = os.path.exists(initfile)
if init_found:
break
while init_found:
current = os.path.dirname(current)
initfile = os.path.join(current, '__init__.py')
init_found = os.path.exists(initfile)
pack_found = not init_found
return current if pack_found else None | package path. return None if failed to guess | Below is the the instruction that describes the task:
### Input:
package path. return None if failed to guess
### Response:
def guess_package_path(searchfrom):
"""
package path. return None if failed to guess
"""
from snipy.io import fileutil
current = searchfrom + '/'
init_found = False
pack_found = False
while not init_found and current != '/':
current = os.path.dirname(current)
initfile = os.path.join(current, '__init__.py')
init_found = os.path.exists(initfile)
if not init_found:
# search for breadth
searchfrom = dirname(searchfrom)
for folder in fileutil.listfolder(searchfrom):
current = os.path.join(searchfrom, folder)
initfile = os.path.join(current, '__init__.py')
init_found = os.path.exists(initfile)
if init_found:
break
while init_found:
current = os.path.dirname(current)
initfile = os.path.join(current, '__init__.py')
init_found = os.path.exists(initfile)
pack_found = not init_found
return current if pack_found else None |
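The upward half of the search (climb while the directory still ships an `__init__.py`, then return the first ancestor that does not) can be sketched without the snipy helpers. This is a simplified illustration, not the original function, and the example paths are hypothetical.

```python
import os

def package_root(search_from):
    """Return the directory that should sit on sys.path for the package
    containing search_from, or None if search_from is not inside a package.
    Simplified sketch: only the upward walk, no breadth-first fallback."""
    current = os.path.abspath(search_from)
    if not os.path.exists(os.path.join(current, '__init__.py')):
        return None
    # Keep climbing while the directory still contains an __init__.py;
    # the first ancestor without one is the import root.
    while os.path.exists(os.path.join(current, '__init__.py')):
        current = os.path.dirname(current)
    return current

# e.g. package_root('/tmp/proj/mypkg/sub') -> '/tmp/proj' for a layout where
# mypkg/ and mypkg/sub/ both contain __init__.py (hypothetical paths).
```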
def computeNodeLinks(cls, element, graph):
"""
Populate the sourceLinks and targetLinks for each node.
Also, if the source and target are not objects, assume they are indices.
"""
index = element.nodes.kdims[-1]
node_map = {}
if element.nodes.vdims:
values = zip(*(element.nodes.dimension_values(d)
for d in element.nodes.vdims))
else:
values = cycle([tuple()])
for index, vals in zip(element.nodes.dimension_values(index), values):
node = {'index': index, 'sourceLinks': [], 'targetLinks': [], 'values': vals}
graph['nodes'].append(node)
node_map[index] = node
links = [element.dimension_values(d) for d in element.dimensions()[:3]]
for i, (src, tgt, value) in enumerate(zip(*links)):
source, target = node_map[src], node_map[tgt]
link = dict(index=i, source=source, target=target, value=value)
graph['links'].append(link)
source['sourceLinks'].append(link)
target['targetLinks'].append(link) | Populate the sourceLinks and targetLinks for each node.
Also, if the source and target are not objects, assume they are indices. | Below is the the instruction that describes the task:
### Input:
Populate the sourceLinks and targetLinks for each node.
Also, if the source and target are not objects, assume they are indices.
### Response:
def computeNodeLinks(cls, element, graph):
"""
Populate the sourceLinks and targetLinks for each node.
Also, if the source and target are not objects, assume they are indices.
"""
index = element.nodes.kdims[-1]
node_map = {}
if element.nodes.vdims:
values = zip(*(element.nodes.dimension_values(d)
for d in element.nodes.vdims))
else:
values = cycle([tuple()])
for index, vals in zip(element.nodes.dimension_values(index), values):
node = {'index': index, 'sourceLinks': [], 'targetLinks': [], 'values': vals}
graph['nodes'].append(node)
node_map[index] = node
links = [element.dimension_values(d) for d in element.dimensions()[:3]]
for i, (src, tgt, value) in enumerate(zip(*links)):
source, target = node_map[src], node_map[tgt]
link = dict(index=i, source=source, target=target, value=value)
graph['links'].append(link)
source['sourceLinks'].append(link)
target['targetLinks'].append(link) |
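The same bookkeeping can be shown on plain Python data, which may help when reading the HoloViews-specific version above; the dict-of-lists graph layout mirrors the method, while the element/dimension machinery is left out.

```python
def compute_node_links(node_indices, edges):
    """Build the {'nodes': [...], 'links': [...]} structure a Sankey-style
    layout expects from node indices and (source, target, value) triples."""
    graph = {'nodes': [], 'links': []}
    node_map = {}
    for index in node_indices:
        node = {'index': index, 'sourceLinks': [], 'targetLinks': []}
        graph['nodes'].append(node)
        node_map[index] = node
    for i, (src, tgt, value) in enumerate(edges):
        # Links reference the node dicts directly, so each node ends up with
        # the links it feeds (sourceLinks) and the links that feed it (targetLinks).
        link = {'index': i, 'source': node_map[src], 'target': node_map[tgt], 'value': value}
        graph['links'].append(link)
        node_map[src]['sourceLinks'].append(link)
        node_map[tgt]['targetLinks'].append(link)
    return graph

# compute_node_links([0, 1, 2], [(0, 1, 5), (1, 2, 3)]) leaves node 1 with one
# incoming and one outgoing link.
```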
def add_interaction(self, u, v, t=None, e=None):
"""Add an interaction between u and v at time t vanishing (optional) at time e.
The nodes u and v will be automatically added if they are
not already in the graph.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional (default=None)
See Also
--------
add_edges_from : add a collection of interaction at time t
Notes
-----
Adding an interaction that already exists but with different snapshot id updates the interaction data.
Examples
--------
The following all add the interaction e=(1,2, 0) to graph G:
>>> G = dn.DynGraph()
>>> G.add_interaction(1, 2, 0) # explicit two-node form
>>> G.add_interaction( [(1,2)], t=0 ) # add interaction from iterable container
Specify the vanishing of the interaction
>>> G.add_interaction(1, 3, t=1, e=10)
will produce an interaction present in snapshots [0, 9]
"""
if t is None:
raise nx.NetworkXError(
"The t argument must be specified.")
if u not in self._node:
self._adj[u] = self.adjlist_inner_dict_factory()
self._node[u] = {}
if v not in self._node:
self._adj[v] = self.adjlist_inner_dict_factory()
self._node[v] = {}
if type(t) != list:
t = [t, t]
for idt in [t[0]]:
if self.has_edge(u, v) and not self.edge_removal:
continue
else:
if idt not in self.time_to_edge:
self.time_to_edge[idt] = {(u, v, "+"): None}
else:
if (u, v, "+") not in self.time_to_edge[idt]:
self.time_to_edge[idt][(u, v, "+")] = None
if e is not None and self.edge_removal:
t[1] = e - 1
if e not in self.time_to_edge:
self.time_to_edge[e] = {(u, v, "-"): None}
else:
self.time_to_edge[e][(u, v, "-")] = None
# add the interaction
datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
if 't' in datadict:
app = datadict['t']
max_end = app[-1][1]
if max_end == app[-1][0] and t[0] == app[-1][0] + 1:
app[-1] = [app[-1][0], t[1]]
if app[-1][0] + 1 in self.time_to_edge and (u, v, "+") in self.time_to_edge[app[-1][0] + 1]:
del self.time_to_edge[app[-1][0] + 1][(u, v, "+")]
else:
if t[0] < app[-1][0]:
raise ValueError("The specified interaction extension is broader than "
"the ones already present for the given nodes.")
if t[0] <= max_end < t[1]:
app[-1][1] = t[1]
if max_end + 1 in self.time_to_edge:
if self.edge_removal:
del self.time_to_edge[max_end + 1][(u, v, "-")]
del self.time_to_edge[t[0]][(u, v, "+")]
elif max_end == t[0] - 1:
if max_end + 1 in self.time_to_edge and (u, v, "+") in self.time_to_edge[max_end + 1]:
del self.time_to_edge[max_end + 1][(u, v, "+")]
if self.edge_removal:
if max_end + 1 in self.time_to_edge and (u, v, '-') in self.time_to_edge[max_end + 1]:
del self.time_to_edge[max_end + 1][(u, v, '-')]
if t[1] + 1 in self.time_to_edge:
self.time_to_edge[t[1] + 1][(u, v, "-")] = None
else:
self.time_to_edge[t[1] + 1] = {(u, v, "-"): None}
app[-1][1] = t[1]
else:
app.append(t)
else:
datadict['t'] = [t]
if e is not None:
span = range(t[0], t[1] + 1)
for idt in span:
if idt not in self.snapshots:
self.snapshots[idt] = 1
else:
self.snapshots[idt] += 1
else:
for idt in t:
if idt is not None:
if idt not in self.snapshots:
self.snapshots[idt] = 1
else:
self.snapshots[idt] += 1
self._adj[u][v] = datadict
self._adj[v][u] = datadict | Add an interaction between u and v at time t vanishing (optional) at time e.
The nodes u and v will be automatically added if they are
not already in the graph.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional (default=None)
See Also
--------
add_edges_from : add a collection of interaction at time t
Notes
-----
Adding an interaction that already exists but with different snapshot id updates the interaction data.
Examples
--------
The following all add the interaction e=(1,2, 0) to graph G:
>>> G = dn.DynGraph()
>>> G.add_interaction(1, 2, 0) # explicit two-node form
>>> G.add_interaction( [(1,2)], t=0 ) # add interaction from iterable container
Specify the vanishing of the interaction
>>> G.add_interaction(1, 3, t=1, e=10)
will produce an interaction present in snapshots [0, 9] | Below is the the instruction that describes the task:
### Input:
Add an interaction between u and v at time t vanishing (optional) at time e.
The nodes u and v will be automatically added if they are
not already in the graph.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional (default=None)
See Also
--------
add_edges_from : add a collection of interaction at time t
Notes
-----
Adding an interaction that already exists but with different snapshot id updates the interaction data.
Examples
--------
The following all add the interaction e=(1,2, 0) to graph G:
>>> G = dn.DynGraph()
>>> G.add_interaction(1, 2, 0) # explicit two-node form
>>> G.add_interaction( [(1,2)], t=0 ) # add interaction from iterable container
Specify the vanishing of the interaction
>>> G.add_interaction(1, 3, t=1, e=10)
will produce an interaction present in snapshots [0, 9]
### Response:
def add_interaction(self, u, v, t=None, e=None):
"""Add an interaction between u and v at time t vanishing (optional) at time e.
The nodes u and v will be automatically added if they are
not already in the graph.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
t : appearance snapshot id, mandatory
e : vanishing snapshot id, optional (default=None)
See Also
--------
add_edges_from : add a collection of interaction at time t
Notes
-----
Adding an interaction that already exists but with different snapshot id updates the interaction data.
Examples
--------
The following all add the interaction e=(1,2, 0) to graph G:
>>> G = dn.DynGraph()
>>> G.add_interaction(1, 2, 0) # explicit two-node form
>>> G.add_interaction( [(1,2)], t=0 ) # add interaction from iterable container
Specify the vanishing of the interaction
>>> G.add_interaction(1, 3, t=1, e=10)
will produce an interaction present in snapshots [0, 9]
"""
if t is None:
raise nx.NetworkXError(
"The t argument must be specified.")
if u not in self._node:
self._adj[u] = self.adjlist_inner_dict_factory()
self._node[u] = {}
if v not in self._node:
self._adj[v] = self.adjlist_inner_dict_factory()
self._node[v] = {}
if type(t) != list:
t = [t, t]
for idt in [t[0]]:
if self.has_edge(u, v) and not self.edge_removal:
continue
else:
if idt not in self.time_to_edge:
self.time_to_edge[idt] = {(u, v, "+"): None}
else:
if (u, v, "+") not in self.time_to_edge[idt]:
self.time_to_edge[idt][(u, v, "+")] = None
if e is not None and self.edge_removal:
t[1] = e - 1
if e not in self.time_to_edge:
self.time_to_edge[e] = {(u, v, "-"): None}
else:
self.time_to_edge[e][(u, v, "-")] = None
# add the interaction
datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
if 't' in datadict:
app = datadict['t']
max_end = app[-1][1]
if max_end == app[-1][0] and t[0] == app[-1][0] + 1:
app[-1] = [app[-1][0], t[1]]
if app[-1][0] + 1 in self.time_to_edge and (u, v, "+") in self.time_to_edge[app[-1][0] + 1]:
del self.time_to_edge[app[-1][0] + 1][(u, v, "+")]
else:
if t[0] < app[-1][0]:
raise ValueError("The specified interaction extension is broader than "
"the ones already present for the given nodes.")
if t[0] <= max_end < t[1]:
app[-1][1] = t[1]
if max_end + 1 in self.time_to_edge:
if self.edge_removal:
del self.time_to_edge[max_end + 1][(u, v, "-")]
del self.time_to_edge[t[0]][(u, v, "+")]
elif max_end == t[0] - 1:
if max_end + 1 in self.time_to_edge and (u, v, "+") in self.time_to_edge[max_end + 1]:
del self.time_to_edge[max_end + 1][(u, v, "+")]
if self.edge_removal:
if max_end + 1 in self.time_to_edge and (u, v, '-') in self.time_to_edge[max_end + 1]:
del self.time_to_edge[max_end + 1][(u, v, '-')]
if t[1] + 1 in self.time_to_edge:
self.time_to_edge[t[1] + 1][(u, v, "-")] = None
else:
self.time_to_edge[t[1] + 1] = {(u, v, "-"): None}
app[-1][1] = t[1]
else:
app.append(t)
else:
datadict['t'] = [t]
if e is not None:
span = range(t[0], t[1] + 1)
for idt in span:
if idt not in self.snapshots:
self.snapshots[idt] = 1
else:
self.snapshots[idt] += 1
else:
for idt in t:
if idt is not None:
if idt not in self.snapshots:
self.snapshots[idt] = 1
else:
self.snapshots[idt] += 1
self._adj[u][v] = datadict
self._adj[v][u] = datadict |
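A short usage sketch, assuming only the dynetx import convention and the calls shown in the docstring; the node labels and snapshot ids are made up.

```python
import dynetx as dn

G = dn.DynGraph()
G.add_interaction(1, 2, t=0)          # present only in snapshot 0
G.add_interaction(1, 3, t=1, e=10)    # appears at 1, vanishes at 10
G.add_interaction(2, 3, t=[4, 7])     # a [start, end] pair is accepted as well
```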
def _helper_for_model(self, model_type):
"""
Get the helper for a given type of Docker model. For use by resource
definitions.
"""
if model_type is models.containers.Container:
return self.containers
if model_type is models.images.Image:
return self.images
if model_type is models.networks.Network:
return self.networks
if model_type is models.volumes.Volume:
return self.volumes
raise ValueError('Unknown model type {}'.format(model_type)) | Get the helper for a given type of Docker model. For use by resource
definitions. | Below is the the instruction that describes the task:
### Input:
Get the helper for a given type of Docker model. For use by resource
definitions.
### Response:
def _helper_for_model(self, model_type):
"""
Get the helper for a given type of Docker model. For use by resource
definitions.
"""
if model_type is models.containers.Container:
return self.containers
if model_type is models.images.Image:
return self.images
if model_type is models.networks.Network:
return self.networks
if model_type is models.volumes.Volume:
return self.volumes
raise ValueError('Unknown model type {}'.format(model_type)) |
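The if-chain can also be written as a dictionary dispatch; this sketch assumes the same `models` namespace and helper attributes as the method above and is shown only as a design alternative, not the library's implementation.

```python
def _helper_for_model(self, model_type):
    # Map each Docker model class to the helper that manages it.
    helpers = {
        models.containers.Container: self.containers,
        models.images.Image: self.images,
        models.networks.Network: self.networks,
        models.volumes.Volume: self.volumes,
    }
    try:
        return helpers[model_type]
    except KeyError:
        raise ValueError('Unknown model type {}'.format(model_type))
```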
def config(key):
"""
Decorator to map this class directly to a configuration node. It uses `<parentbase>.key` for configuration
base and configuration mapping.
"""
def decorator(cls):
parent = cls.getConfigurableParent()
if parent is None:
parentbase = None
else:
parentbase = getattr(parent, 'configbase', None)
if parentbase is None:
cls.configkey = key
else:
cls.configkey = parentbase + '.' + key
return cls
return decorator | Decorator to map this class directly to a configuration node. It uses `<parentbase>.key` for configuration
base and configuration mapping. | Below is the the instruction that describes the task:
### Input:
Decorator to map this class directly to a configuration node. It uses `<parentbase>.key` for configuration
base and configuration mapping.
### Response:
def config(key):
"""
Decorator to map this class directly to a configuration node. It uses `<parentbase>.key` for configuration
base and configuration mapping.
"""
def decorator(cls):
parent = cls.getConfigurableParent()
if parent is None:
parentbase = None
else:
parentbase = getattr(parent, 'configbase', None)
if parentbase is None:
cls.configkey = key
else:
cls.configkey = parentbase + '.' + key
return cls
return decorator |
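A hypothetical illustration of how the decorator composes keys; the classes, the hand-set configbase, and the getConfigurableParent overrides below are assumptions used only to exercise the code path, not part of the original framework.

```python
@config('server')
class Server(object):
    configbase = 'app.server'              # pretend the framework resolved this

    @classmethod
    def getConfigurableParent(cls):
        return None                        # top of the chain in this sketch

@config('port')
class Port(Server):
    @classmethod
    def getConfigurableParent(cls):
        return Server

# Server.configkey == 'server'            (no parent base to prepend)
# Port.configkey   == 'app.server.port'   (parent's configbase + '.' + key)
```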
def add_radio_actions(self, entries, value=None, on_change=None, user_data=None):
"""
The add_radio_actions() method is a convenience method that creates a
number of gtk.RadioAction objects based on the information in the list
of action entry tuples contained in entries and adds them to the action
group. The entry tuples can vary in size from one to six items with the
following information:
* The name of the action. Must be specified.
* The stock id for the action. Optional with a default value of None
if a label is specified.
* The label for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None if a stock id is specified.
* The accelerator for the action, in the format understood by the
gtk.accelerator_parse() function. Optional with a default value of
None.
* The tooltip for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None.
* The value to set on the radio action. Optional with a default
value of 0. Should be specified in applications.
The value parameter specifies the radio action that should be set
active. The "changed" signal of the first radio action is connected to
the on_change callback (if specified and not None) and the accel paths
of the actions are set to <Actions>/group-name/action-name.
"""
try:
iter(entries)
except (TypeError):
raise TypeError('entries must be iterable')
first_action = None
def _process_action(group_source, name, stock_id=None, label=None, accelerator=None, tooltip=None, entry_value=0):
action = RadioAction(name=name, label=label, tooltip=tooltip, stock_id=stock_id, value=entry_value)
# FIXME: join_group is a patch to Gtk+ 3.0
# otherwise we can't effectively add radio actions to a
# group. Should we depend on 3.0 and error out here
# or should we offer the functionality via a compat
# C module?
if hasattr(action, 'join_group'):
action.join_group(group_source)
if value == entry_value:
action.set_active(True)
self.add_action_with_accel(action, accelerator)
return action
for e in entries:
# using inner function above since entries can leave out optional arguments
action = _process_action(first_action, *e)
if first_action is None:
first_action = action
if first_action is not None and on_change is not None:
if user_data is None:
first_action.connect('changed', on_change)
else:
first_action.connect('changed', on_change, user_data) | The add_radio_actions() method is a convenience method that creates a
number of gtk.RadioAction objects based on the information in the list
of action entry tuples contained in entries and adds them to the action
group. The entry tuples can vary in size from one to six items with the
following information:
* The name of the action. Must be specified.
* The stock id for the action. Optional with a default value of None
if a label is specified.
* The label for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None if a stock id is specified.
* The accelerator for the action, in the format understood by the
gtk.accelerator_parse() function. Optional with a default value of
None.
* The tooltip for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None.
* The value to set on the radio action. Optional with a default
value of 0. Should be specified in applications.
The value parameter specifies the radio action that should be set
active. The "changed" signal of the first radio action is connected to
the on_change callback (if specified and not None) and the accel paths
of the actions are set to <Actions>/group-name/action-name. | Below is the the instruction that describes the task:
### Input:
The add_radio_actions() method is a convenience method that creates a
number of gtk.RadioAction objects based on the information in the list
of action entry tuples contained in entries and adds them to the action
group. The entry tuples can vary in size from one to six items with the
following information:
* The name of the action. Must be specified.
* The stock id for the action. Optional with a default value of None
if a label is specified.
* The label for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None if a stock id is specified.
* The accelerator for the action, in the format understood by the
gtk.accelerator_parse() function. Optional with a default value of
None.
* The tooltip for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None.
* The value to set on the radio action. Optional with a default
value of 0. Should be specified in applications.
The value parameter specifies the radio action that should be set
active. The "changed" signal of the first radio action is connected to
the on_change callback (if specified and not None) and the accel paths
of the actions are set to <Actions>/group-name/action-name.
### Response:
def add_radio_actions(self, entries, value=None, on_change=None, user_data=None):
"""
The add_radio_actions() method is a convenience method that creates a
number of gtk.RadioAction objects based on the information in the list
of action entry tuples contained in entries and adds them to the action
group. The entry tuples can vary in size from one to six items with the
following information:
* The name of the action. Must be specified.
* The stock id for the action. Optional with a default value of None
if a label is specified.
* The label for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None if a stock id is specified.
* The accelerator for the action, in the format understood by the
gtk.accelerator_parse() function. Optional with a default value of
None.
* The tooltip for the action. This field should typically be marked
for translation, see the set_translation_domain() method. Optional
with a default value of None.
* The value to set on the radio action. Optional with a default
value of 0. Should be specified in applications.
The value parameter specifies the radio action that should be set
active. The "changed" signal of the first radio action is connected to
the on_change callback (if specified and not None) and the accel paths
of the actions are set to <Actions>/group-name/action-name.
"""
try:
iter(entries)
except (TypeError):
raise TypeError('entries must be iterable')
first_action = None
def _process_action(group_source, name, stock_id=None, label=None, accelerator=None, tooltip=None, entry_value=0):
action = RadioAction(name=name, label=label, tooltip=tooltip, stock_id=stock_id, value=entry_value)
# FIXME: join_group is a patch to Gtk+ 3.0
# otherwise we can't effectively add radio actions to a
# group. Should we depend on 3.0 and error out here
# or should we offer the functionality via a compat
# C module?
if hasattr(action, 'join_group'):
action.join_group(group_source)
if value == entry_value:
action.set_active(True)
self.add_action_with_accel(action, accelerator)
return action
for e in entries:
# using inner function above since entries can leave out optional arguments
action = _process_action(first_action, *e)
if first_action is None:
first_action = action
if first_action is not None and on_change is not None:
if user_data is None:
first_action.connect('changed', on_change)
else:
first_action.connect('changed', on_change, user_data) |
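A hedged usage sketch in the PyGTK style the docstring describes; the action names, accelerators, and callback are placeholders, and the surrounding ActionGroup class is assumed to be in scope.

```python
def on_view_changed(action, current):
    # 'changed' is emitted on the first radio action; `current` is the action
    # that just became active.
    print('active value:', current.get_current_value())

entries = [
    # (name, stock_id, label, accelerator, tooltip, value)
    ('view-list', None, '_List view', '<Control>1', 'Show items as a list', 0),
    ('view-grid', None, '_Grid view', '<Control>2', 'Show items as a grid', 1),
]

group = ActionGroup('view-actions')
group.add_radio_actions(entries, value=0, on_change=on_view_changed)
```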
def PositionBox(position, *args, **kwargs):
" Delegate the boxing. "
obj = position.target
return getattr(position.target, 'box_class', Box)(obj, *args, **kwargs) | Delegate the boxing. | Below is the the instruction that describes the task:
### Input:
Delegate the boxing.
### Response:
def PositionBox(position, *args, **kwargs):
" Delegate the boxing. "
obj = position.target
return getattr(position.target, 'box_class', Box)(obj, *args, **kwargs) |
def find_executable(filename, environ=None):
"""Find an executable by searching the user's $PATH."""
if environ is None:
environ = os.environ
path = environ.get("PATH", "/usr/local/bin:/usr/bin:/bin").split(":")
for dirpath in path:
dirpath = os.path.abspath(dirpath.strip())
filepath = os.path.normpath(os.path.join(dirpath, filename))
if os.path.exists(filepath):
return filepath
return None | Find an executable by searching the user's $PATH. | Below is the the instruction that describes the task:
### Input:
Find an executable by searching the user's $PATH.
### Response:
def find_executable(filename, environ=None):
"""Find an executable by searching the user's $PATH."""
if environ is None:
environ = os.environ
path = environ.get("PATH", "/usr/local/bin:/usr/bin:/bin").split(":")
for dirpath in path:
dirpath = os.path.abspath(dirpath.strip())
filepath = os.path.normpath(os.path.join(dirpath, filename))
if os.path.exists(filepath):
return filepath
return None |
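A quick usage check; the first result depends on the caller's PATH, and the injected environment in the second call exercises the None return.

```python
ls_path = find_executable('ls')
if ls_path is not None:
    print('found ls at', ls_path)
else:
    print('ls is not on PATH')

# A custom environment can be injected, e.g. for tests:
assert find_executable('no-such-binary', environ={'PATH': '/nonexistent'}) is None
```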
def AddFile(self, fd):
"""Adds a file to the hash file store.
We take a file in the client space:
aff4:/C.123123123/fs/os/usr/local/blah
Hash it, update the hash in the original file if its different to the
one calculated on the client, and copy the original AFF4 object to
aff4:/files/hash/generic/sha256/123123123 (canonical reference)
We then create symlinks for all other hash types:
aff4:/files/hash/generic/sha1/345345345
aff4:/files/hash/generic/md5/456456456
aff4:/files/hash/pecoff/md5/aaaaaaaa (only for PEs)
aff4:/files/hash/pecoff/sha1/bbbbbbbb (only for PEs)
When present in PE files, the signing data (revision, cert_type,
certificate) is added to the original object.
This can't be done simply in the FileStore.Write() method with fixed hash
buffer sizes because the authenticode hashes need to track hashing of
different-sized regions based on the signature information.
Args:
fd: File open for reading.
Raises:
IOError: If there was an error writing the file.
"""
hashes = self._HashFile(fd)
# The empty file is very common, we don't keep the back references for it
# in the DB since it just takes up too much space.
empty_hash = ("e3b0c44298fc1c149afbf4c8996fb924"
"27ae41e4649b934ca495991b7852b855")
if hashes.sha256 == empty_hash:
return
# Update the hashes field now that we have calculated them all.
fd.Set(fd.Schema.HASH, hashes)
fd.Flush()
# sha256 is the canonical location.
canonical_urn = self.PATH.Add("generic/sha256").Add(str(hashes.sha256))
if not list(aff4.FACTORY.Stat([canonical_urn])):
aff4.FACTORY.Copy(fd.urn, canonical_urn)
# Remove the STAT entry, it makes no sense to copy it between clients.
with aff4.FACTORY.Open(
canonical_urn, mode="rw", token=self.token) as new_fd:
new_fd.Set(new_fd.Schema.STAT(None))
self._AddToIndex(canonical_urn, fd.urn)
for hash_type, hash_digest in hashes.ListSetFields():
# Determine fingerprint type.
hash_type = hash_type.name
# No need to create a symlink for sha256, it's the canonical location.
if hash_type == "sha256":
continue
hash_digest = str(hash_digest)
fingerprint_type = "generic"
if hash_type.startswith("pecoff_"):
fingerprint_type = "pecoff"
hash_type = hash_type[len("pecoff_"):]
if hash_type not in self.HASH_TYPES[fingerprint_type]:
continue
file_store_urn = self.PATH.Add(fingerprint_type).Add(hash_type).Add(
hash_digest)
with aff4.FACTORY.Create(
file_store_urn, aff4.AFF4Symlink, token=self.token) as symlink:
symlink.Set(symlink.Schema.SYMLINK_TARGET, canonical_urn)
# We do not want to be externally written here.
return None | Adds a file to the hash file store.
We take a file in the client space:
aff4:/C.123123123/fs/os/usr/local/blah
Hash it, update the hash in the original file if its different to the
one calculated on the client, and copy the original AFF4 object to
aff4:/files/hash/generic/sha256/123123123 (canonical reference)
We then create symlinks for all other hash types:
aff4:/files/hash/generic/sha1/345345345
aff4:/files/hash/generic/md5/456456456
aff4:/files/hash/pecoff/md5/aaaaaaaa (only for PEs)
aff4:/files/hash/pecoff/sha1/bbbbbbbb (only for PEs)
When present in PE files, the signing data (revision, cert_type,
certificate) is added to the original object.
This can't be done simply in the FileStore.Write() method with fixed hash
buffer sizes because the authenticode hashes need to track hashing of
different-sized regions based on the signature information.
Args:
fd: File open for reading.
Raises:
IOError: If there was an error writing the file. | Below is the the instruction that describes the task:
### Input:
Adds a file to the hash file store.
We take a file in the client space:
aff4:/C.123123123/fs/os/usr/local/blah
Hash it, update the hash in the original file if its different to the
one calculated on the client, and copy the original AFF4 object to
aff4:/files/hash/generic/sha256/123123123 (canonical reference)
We then create symlinks for all other hash types:
aff4:/files/hash/generic/sha1/345345345
aff4:/files/hash/generic/md5/456456456
aff4:/files/hash/pecoff/md5/aaaaaaaa (only for PEs)
aff4:/files/hash/pecoff/sha1/bbbbbbbb (only for PEs)
When present in PE files, the signing data (revision, cert_type,
certificate) is added to the original object.
This can't be done simply in the FileStore.Write() method with fixed hash
buffer sizes because the authenticode hashes need to track hashing of
different-sized regions based on the signature information.
Args:
fd: File open for reading.
Raises:
IOError: If there was an error writing the file.
### Response:
def AddFile(self, fd):
"""Adds a file to the hash file store.
We take a file in the client space:
aff4:/C.123123123/fs/os/usr/local/blah
Hash it, update the hash in the original file if its different to the
one calculated on the client, and copy the original AFF4 object to
aff4:/files/hash/generic/sha256/123123123 (canonical reference)
We then create symlinks for all other hash types:
aff4:/files/hash/generic/sha1/345345345
aff4:/files/hash/generic/md5/456456456
aff4:/files/hash/pecoff/md5/aaaaaaaa (only for PEs)
aff4:/files/hash/pecoff/sha1/bbbbbbbb (only for PEs)
When present in PE files, the signing data (revision, cert_type,
certificate) is added to the original object.
This can't be done simply in the FileStore.Write() method with fixed hash
buffer sizes because the authenticode hashes need to track hashing of
different-sized regions based on the signature information.
Args:
fd: File open for reading.
Raises:
IOError: If there was an error writing the file.
"""
hashes = self._HashFile(fd)
# The empty file is very common, we don't keep the back references for it
# in the DB since it just takes up too much space.
empty_hash = ("e3b0c44298fc1c149afbf4c8996fb924"
"27ae41e4649b934ca495991b7852b855")
if hashes.sha256 == empty_hash:
return
# Update the hashes field now that we have calculated them all.
fd.Set(fd.Schema.HASH, hashes)
fd.Flush()
# sha256 is the canonical location.
canonical_urn = self.PATH.Add("generic/sha256").Add(str(hashes.sha256))
if not list(aff4.FACTORY.Stat([canonical_urn])):
aff4.FACTORY.Copy(fd.urn, canonical_urn)
# Remove the STAT entry, it makes no sense to copy it between clients.
with aff4.FACTORY.Open(
canonical_urn, mode="rw", token=self.token) as new_fd:
new_fd.Set(new_fd.Schema.STAT(None))
self._AddToIndex(canonical_urn, fd.urn)
for hash_type, hash_digest in hashes.ListSetFields():
# Determine fingerprint type.
hash_type = hash_type.name
# No need to create a symlink for sha256, it's the canonical location.
if hash_type == "sha256":
continue
hash_digest = str(hash_digest)
fingerprint_type = "generic"
if hash_type.startswith("pecoff_"):
fingerprint_type = "pecoff"
hash_type = hash_type[len("pecoff_"):]
if hash_type not in self.HASH_TYPES[fingerprint_type]:
continue
file_store_urn = self.PATH.Add(fingerprint_type).Add(hash_type).Add(
hash_digest)
with aff4.FACTORY.Create(
file_store_urn, aff4.AFF4Symlink, token=self.token) as symlink:
symlink.Set(symlink.Schema.SYMLINK_TARGET, canonical_urn)
# We do not want to be externally written here.
return None |
def serialize(self):
"""Returns serialized chunk data in dictionary."""
return {
'word': self.word,
'pos': self.pos,
'label': self.label,
'dependency': self.dependency,
'has_cjk': self.has_cjk(),
} | Returns serialized chunk data in dictionary. | Below is the the instruction that describes the task:
### Input:
Returns serialized chunk data in dictionary.
### Response:
def serialize(self):
"""Returns serialized chunk data in dictionary."""
return {
'word': self.word,
'pos': self.pos,
'label': self.label,
'dependency': self.dependency,
'has_cjk': self.has_cjk(),
} |
def _netstat_bsd():
'''
Return netstat information for BSD flavors
'''
ret = []
if __grains__['kernel'] == 'NetBSD':
for addr_family in ('inet', 'inet6'):
cmd = 'netstat -f {0} -an | tail -n+3'.format(addr_family)
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
entry = {
'proto': comps[0],
'recv-q': comps[1],
'send-q': comps[2],
'local-address': comps[3],
'remote-address': comps[4]
}
if entry['proto'].startswith('tcp'):
entry['state'] = comps[5]
ret.append(entry)
else:
# Lookup TCP connections
cmd = 'netstat -p tcp -an | tail -n+3'
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
ret.append({
'proto': comps[0],
'recv-q': comps[1],
'send-q': comps[2],
'local-address': comps[3],
'remote-address': comps[4],
'state': comps[5]})
# Lookup UDP connections
cmd = 'netstat -p udp -an | tail -n+3'
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
ret.append({
'proto': comps[0],
'recv-q': comps[1],
'send-q': comps[2],
'local-address': comps[3],
'remote-address': comps[4]})
# Add in user and program info
ppid = _ppid()
if __grains__['kernel'] == 'OpenBSD':
netinfo = _netinfo_openbsd()
elif __grains__['kernel'] in ('FreeBSD', 'NetBSD'):
netinfo = _netinfo_freebsd_netbsd()
for idx in range(len(ret)):
local = ret[idx]['local-address']
remote = ret[idx]['remote-address']
proto = ret[idx]['proto']
try:
# Make a pointer to the info for this connection for easier
# reference below
ptr = netinfo[local][remote][proto]
except KeyError:
continue
# Get the pid-to-ppid mappings for this connection
conn_ppid = dict((x, y) for x, y in six.iteritems(ppid) if x in ptr)
try:
# Master pid for this connection will be the pid whose ppid isn't
# in the subset dict we created above
master_pid = next(iter(
x for x, y in six.iteritems(conn_ppid) if y not in ptr
))
except StopIteration:
continue
ret[idx]['user'] = ptr[master_pid]['user']
ret[idx]['program'] = '/'.join((master_pid, ptr[master_pid]['cmd']))
return ret | Return netstat information for BSD flavors | Below is the the instruction that describes the task:
### Input:
Return netstat information for BSD flavors
### Response:
def _netstat_bsd():
'''
Return netstat information for BSD flavors
'''
ret = []
if __grains__['kernel'] == 'NetBSD':
for addr_family in ('inet', 'inet6'):
cmd = 'netstat -f {0} -an | tail -n+3'.format(addr_family)
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
entry = {
'proto': comps[0],
'recv-q': comps[1],
'send-q': comps[2],
'local-address': comps[3],
'remote-address': comps[4]
}
if entry['proto'].startswith('tcp'):
entry['state'] = comps[5]
ret.append(entry)
else:
# Lookup TCP connections
cmd = 'netstat -p tcp -an | tail -n+3'
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
ret.append({
'proto': comps[0],
'recv-q': comps[1],
'send-q': comps[2],
'local-address': comps[3],
'remote-address': comps[4],
'state': comps[5]})
# Lookup UDP connections
cmd = 'netstat -p udp -an | tail -n+3'
out = __salt__['cmd.run'](cmd, python_shell=True)
for line in out.splitlines():
comps = line.split()
ret.append({
'proto': comps[0],
'recv-q': comps[1],
'send-q': comps[2],
'local-address': comps[3],
'remote-address': comps[4]})
# Add in user and program info
ppid = _ppid()
if __grains__['kernel'] == 'OpenBSD':
netinfo = _netinfo_openbsd()
elif __grains__['kernel'] in ('FreeBSD', 'NetBSD'):
netinfo = _netinfo_freebsd_netbsd()
for idx in range(len(ret)):
local = ret[idx]['local-address']
remote = ret[idx]['remote-address']
proto = ret[idx]['proto']
try:
# Make a pointer to the info for this connection for easier
# reference below
ptr = netinfo[local][remote][proto]
except KeyError:
continue
# Get the pid-to-ppid mappings for this connection
conn_ppid = dict((x, y) for x, y in six.iteritems(ppid) if x in ptr)
try:
# Master pid for this connection will be the pid whose ppid isn't
# in the subset dict we created above
master_pid = next(iter(
x for x, y in six.iteritems(conn_ppid) if y not in ptr
))
except StopIteration:
continue
ret[idx]['user'] = ptr[master_pid]['user']
ret[idx]['program'] = '/'.join((master_pid, ptr[master_pid]['cmd']))
return ret |
def _is_string(thing):
"""Python character arrays are a mess.
If Python2, check if **thing** is an :obj:`unicode` or a :obj:`str`.
If Python3, check if **thing** is a :obj:`str`.
:param thing: The thing to check.
:returns: ``True`` if **thing** is a string according to whichever version
of Python we're running in.
"""
if _util._py3k: return isinstance(thing, str)
else: return isinstance(thing, basestring) | Python character arrays are a mess.
If Python2, check if **thing** is an :obj:`unicode` or a :obj:`str`.
If Python3, check if **thing** is a :obj:`str`.
:param thing: The thing to check.
:returns: ``True`` if **thing** is a string according to whichever version
of Python we're running in. | Below is the the instruction that describes the task:
### Input:
Python character arrays are a mess.
If Python2, check if **thing** is an :obj:`unicode` or a :obj:`str`.
If Python3, check if **thing** is a :obj:`str`.
:param thing: The thing to check.
:returns: ``True`` if **thing** is a string according to whichever version
of Python we're running in.
### Response:
def _is_string(thing):
"""Python character arrays are a mess.
If Python2, check if **thing** is an :obj:`unicode` or a :obj:`str`.
If Python3, check if **thing** is a :obj:`str`.
:param thing: The thing to check.
:returns: ``True`` if **thing** is a string according to whichever version
of Python we're running in.
"""
if _util._py3k: return isinstance(thing, str)
else: return isinstance(thing, basestring) |
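Behaviour sketch, assuming the surrounding module's _util._py3k flag reports Python 3; under that assumption only str instances pass the check.

```python
assert _is_string('abc')          # str counts
assert not _is_string(b'abc')     # bytes do not count on Python 3
assert not _is_string(123)        # non-strings are rejected
```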
def get_sequence_rules_for_assessment_part(self, assessment_part_id):
"""Gets a ``SequenceRuleList`` for the given source assessment part.
arg: assessment_part_id (osid.id.Id): an assessment part
``Id``
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: NullArgument - ``assessment_part_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.learning.ActivityLookupSession.get_activities_for_objective_template
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment_authoring',
collection='SequenceRule',
runtime=self._runtime)
result = collection.find(
dict({'assessmentPartId': str(assessment_part_id)},
**self._view_filter()))
return objects.SequenceRuleList(result, runtime=self._runtime) | Gets a ``SequenceRuleList`` for the given source assessment part.
arg: assessment_part_id (osid.id.Id): an assessment part
``Id``
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: NullArgument - ``assessment_part_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets a ``SequenceRuleList`` for the given source assessment part.
arg: assessment_part_id (osid.id.Id): an assessment part
``Id``
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: NullArgument - ``assessment_part_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_sequence_rules_for_assessment_part(self, assessment_part_id):
"""Gets a ``SequenceRuleList`` for the given source assessment part.
arg: assessment_part_id (osid.id.Id): an assessment part
``Id``
return: (osid.assessment.authoring.SequenceRuleList) - the
returned ``SequenceRule`` list
raise: NullArgument - ``assessment_part_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.learning.ActivityLookupSession.get_activities_for_objective_template
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment_authoring',
collection='SequenceRule',
runtime=self._runtime)
result = collection.find(
dict({'assessmentPartId': str(assessment_part_id)},
**self._view_filter()))
return objects.SequenceRuleList(result, runtime=self._runtime) |
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1] | Draw the table
- the table is returned as a whole string | Below is the the instruction that describes the task:
### Input:
Draw the table
- the table is returned as a whole string
### Response:
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1] |
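The private helpers (_compute_cols_width, _hline_header, _draw_line) match the texttable package, so if this is Texttable.draw a typical call looks like the sketch below; the column and row data are made up.

```python
from texttable import Texttable

table = Texttable()
table.header(['name', 'qty'])
table.add_rows([['apples', 3], ['pears', 12]], header=False)
print(table.draw())   # prints the bordered ASCII table
```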
def get_path_uid(path):
# type: (str) -> int
"""
Return path's uid.
Does not follow symlinks:
https://github.com/pypa/pip/pull/935#discussion_r5307003
Placed this function in compat due to differences on AIX and
Jython, that should eventually go away.
:raises OSError: When path is a symlink or can't be read.
"""
if hasattr(os, 'O_NOFOLLOW'):
fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
file_uid = os.fstat(fd).st_uid
os.close(fd)
else: # AIX and Jython
# WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW
if not os.path.islink(path):
# older versions of Jython don't have `os.fstat`
file_uid = os.stat(path).st_uid
else:
# raise OSError for parity with os.O_NOFOLLOW above
raise OSError(
"%s is a symlink; Will not return uid for symlinks" % path
)
return file_uid | Return path's uid.
Does not follow symlinks:
https://github.com/pypa/pip/pull/935#discussion_r5307003
Placed this function in compat due to differences on AIX and
Jython, that should eventually go away.
:raises OSError: When path is a symlink or can't be read. | Below is the the instruction that describes the task:
### Input:
Return path's uid.
Does not follow symlinks:
https://github.com/pypa/pip/pull/935#discussion_r5307003
Placed this function in compat due to differences on AIX and
Jython, that should eventually go away.
:raises OSError: When path is a symlink or can't be read.
### Response:
def get_path_uid(path):
# type: (str) -> int
"""
Return path's uid.
Does not follow symlinks:
https://github.com/pypa/pip/pull/935#discussion_r5307003
Placed this function in compat due to differences on AIX and
Jython, that should eventually go away.
:raises OSError: When path is a symlink or can't be read.
"""
if hasattr(os, 'O_NOFOLLOW'):
fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
file_uid = os.fstat(fd).st_uid
os.close(fd)
else: # AIX and Jython
# WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW
if not os.path.islink(path):
# older versions of Jython don't have `os.fstat`
file_uid = os.stat(path).st_uid
else:
# raise OSError for parity with os.O_NOFOLLOW above
raise OSError(
"%s is a symlink; Will not return uid for symlinks" % path
)
return file_uid |
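An illustrative call on a POSIX system; the temporary file exists only for the check, and its uid naturally matches the current user.

```python
import os
import tempfile

with tempfile.NamedTemporaryFile() as tmp:
    uid = get_path_uid(tmp.name)
    assert uid == os.getuid()   # the file we just created belongs to us (POSIX only)

# A symlink raises OSError instead of returning the target's uid.
```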
def ampliconfile(self, sample, contig, amplicon_range, forward_primer, reverse_primer):
"""
Extracts amplicon sequence from contig file
:param sample: sample metadata object
:param contig: name of the contig hit by primers
:param amplicon_range: range of the amplicon within the contig
:param forward_primer: name of the forward primer
:param reverse_primer: name of the reverse primer
"""
# Open the file
with open(sample[self.analysistype].ampliconfile, 'a') as ampliconfile:
try:
# Load the records from the assembly into the dictionary
for record in SeqIO.parse(sample[self.analysistype].assemblyfile, 'fasta'):
if record.id == contig:
try:
# Extract the name of the gene from the primer name
genename = forward_primer[0].split('-')[0]
try:
# Sort the range calculated above
start = amplicon_range[0]
end = amplicon_range[1]
# Slice the gene sequence from the sequence record - remember to subtract one to
# allow for zero-based indexing
genesequence = str(record.seq)[int(start) - 1:int(end)]
# Set the record.id to be the sample name, the contig name,
# the range, and the primers
record.id = '{sn}_{contig}_{range}_{primers}' \
.format(sn=sample.name,
contig=contig,
range='_'.join(str(x) for x in sorted(sample[self.analysistype]
.range[record.id][genename])),
primers='_'.join(['_'.join(forward_primer), '_'.join(reverse_primer)]))
# Clear the record.description
record.description = ''
# Create a seq record from the sliced genome sequence
record.seq = Seq.Seq(genesequence)
# Write the amplicon to file
SeqIO.write(record, ampliconfile, 'fasta')
except IndexError:
pass
except AttributeError:
pass
except FileNotFoundError:
pass | Extracts amplicon sequence from contig file
:param sample: sample metadata object
:param contig: name of the contig hit by primers
:param amplicon_range: range of the amplicon within the contig
:param forward_primer: name of the forward primer
:param reverse_primer: name of the reverse primer | Below is the the instruction that describes the task:
### Input:
Extracts amplicon sequence from contig file
:param sample: sample metadata object
:param contig: name of the contig hit by primers
:param amplicon_range: range of the amplicon within the contig
:param forward_primer: name of the forward primer
:param reverse_primer: name of the reverse primer
### Response:
def ampliconfile(self, sample, contig, amplicon_range, forward_primer, reverse_primer):
"""
Extracts amplicon sequence from contig file
:param sample: sample metadata object
:param contig: name of the contig hit by primers
:param amplicon_range: range of the amplicon within the contig
:param forward_primer: name of the forward primer
:param reverse_primer: name of the reverse primer
"""
# Open the file
with open(sample[self.analysistype].ampliconfile, 'a') as ampliconfile:
try:
# Load the records from the assembly into the dictionary
for record in SeqIO.parse(sample[self.analysistype].assemblyfile, 'fasta'):
if record.id == contig:
try:
# Extract the name of the gene from the primer name
genename = forward_primer[0].split('-')[0]
try:
# Sort the range calculated above
start = amplicon_range[0]
end = amplicon_range[1]
# Slice the gene sequence from the sequence record - remember to subtract one to
# allow for zero-based indexing
genesequence = str(record.seq)[int(start) - 1:int(end)]
# Set the record.id to be the sample name, the contig name,
# the range, and the primers
record.id = '{sn}_{contig}_{range}_{primers}' \
.format(sn=sample.name,
contig=contig,
range='_'.join(str(x) for x in sorted(sample[self.analysistype]
.range[record.id][genename])),
primers='_'.join(['_'.join(forward_primer), '_'.join(reverse_primer)]))
# Clear the record.description
record.description = ''
# Create a seq record from the sliced genome sequence
record.seq = Seq.Seq(genesequence)
# Write the amplicon to file
SeqIO.write(record, ampliconfile, 'fasta')
except IndexError:
pass
except AttributeError:
pass
except FileNotFoundError:
pass |
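The slice-and-rewrite step at the heart of the method can be shown with Biopython directly; the file names and coordinates below are placeholders, and the primer-based naming is omitted.

```python
from Bio import SeqIO

# A 1-based amplicon range of 100..250 becomes the 0-based slice [99:250].
for record in SeqIO.parse('assembly.fasta', 'fasta'):
    amplicon = record[99:250]            # SeqRecord slicing returns a new record
    amplicon.id = '{}_{}_{}'.format(record.id, 100, 250)
    amplicon.description = ''
    with open('amplicons.fasta', 'a') as handle:
        SeqIO.write(amplicon, handle, 'fasta')
```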
def quote5(self, market=47, symbol="IF1709"):
'''
Query five-level market depth quotes
:return: pd.dataFrame or None
'''
with self.client.connect(*self.bestip):
data = self.client.get_instrument_quote(market, symbol)
return self.client.to_df(data) | Query five-level market depth quotes
:return: pd.dataFrame or None | Below is the the instruction that describes the task:
### Input:
Query five-level market depth quotes
:return: pd.dataFrame or None
### Response:
def quote5(self, market=47, symbol="IF1709"):
'''
Query five-level market depth quotes
:return: pd.dataFrame or None
'''
with self.client.connect(*self.bestip):
data = self.client.get_instrument_quote(market, symbol)
return self.client.to_df(data) |
def _replace_zeros(arr, default_min_value):
"""Substitute 0s in the list with a near-zero value.
Parameters
-----------
arr : numpy.array(float)
default_min_value : float
If the smallest non-zero element in `arr` is greater than the default,
use the default instead.
Returns
-----------
numpy.array(float)
"""
min_nonzero_value = min(default_min_value, np.min(arr[arr > 0]))
closest_to_zero = np.nextafter(min_nonzero_value, min_nonzero_value - 1)
arr[arr == 0] = closest_to_zero
return arr | Substitute 0s in the list with a near-zero value.
Parameters
-----------
arr : numpy.array(float)
default_min_value : float
If the smallest non-zero element in `arr` is greater than the default,
use the default instead.
Returns
-----------
numpy.array(float) | Below is the the instruction that describes the task:
### Input:
Substitute 0s in the list with a near-zero value.
Parameters
-----------
arr : numpy.array(float)
default_min_value : float
If the smallest non-zero element in `arr` is greater than the default,
use the default instead.
Returns
-----------
numpy.array(float)
### Response:
def _replace_zeros(arr, default_min_value):
"""Substitute 0s in the list with a near-zero value.
Parameters
-----------
arr : numpy.array(float)
default_min_value : float
If the smallest non-zero element in `arr` is greater than the default,
use the default instead.
Returns
-----------
numpy.array(float)
"""
min_nonzero_value = min(default_min_value, np.min(arr[arr > 0]))
closest_to_zero = np.nextafter(min_nonzero_value, min_nonzero_value - 1)
arr[arr == 0] = closest_to_zero
return arr |
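A worked example: the replacement value is the float immediately below the smaller of the supplied default and the array's smallest non-zero entry. Note the function mutates its argument, so a copy is passed here.

```python
import numpy as np

arr = np.array([0.0, 0.5, 2.0])
out = _replace_zeros(arr.copy(), default_min_value=1e-3)
# min(1e-3, 0.5) == 1e-3, so the zero becomes np.nextafter(1e-3, 1e-3 - 1),
# i.e. the float just below 0.001.
print(out)   # -> [0.000999..., 0.5, 2.0]
```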
def add_why(voevent, importance=None, expires=None, inferences=None):
"""Add Inferences, or set importance / expires attributes of the Why section.
.. note::
``importance`` / ``expires`` are 'Why' attributes, therefore setting them
will overwrite previous values.
``inferences``, on the other hand, are appended to the list.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
importance(float): Value from 0.0 to 1.0
expires(datetime.datetime): Expiration date given inferred reason
(See voevent spec).
inferences(:class:`voeventparse.misc.Inference`): Inference or list of
inferences, denoting probable identifications or associations, etc.
"""
if not voevent.xpath('Why'):
etree.SubElement(voevent, 'Why')
if importance is not None:
voevent.Why.attrib['importance'] = str(importance)
if expires is not None:
voevent.Why.attrib['expires'] = expires.replace(
microsecond=0).isoformat()
if inferences is not None:
voevent.Why.extend(_listify(inferences)) | Add Inferences, or set importance / expires attributes of the Why section.
.. note::
``importance`` / ``expires`` are 'Why' attributes, therefore setting them
will overwrite previous values.
``inferences``, on the other hand, are appended to the list.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
importance(float): Value from 0.0 to 1.0
expires(datetime.datetime): Expiration date given inferred reason
(See voevent spec).
inferences(:class:`voeventparse.misc.Inference`): Inference or list of
inferences, denoting probable identifications or associations, etc. | Below is the the instruction that describes the task:
### Input:
Add Inferences, or set importance / expires attributes of the Why section.
.. note::
``importance`` / ``expires`` are 'Why' attributes, therefore setting them
will overwrite previous values.
``inferences``, on the other hand, are appended to the list.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
importance(float): Value from 0.0 to 1.0
expires(datetime.datetime): Expiration date given inferred reason
(See voevent spec).
inferences(:class:`voeventparse.misc.Inference`): Inference or list of
inferences, denoting probable identifications or associations, etc.
### Response:
def add_why(voevent, importance=None, expires=None, inferences=None):
"""Add Inferences, or set importance / expires attributes of the Why section.
.. note::
``importance`` / ``expires`` are 'Why' attributes, therefore setting them
will overwrite previous values.
``inferences``, on the other hand, are appended to the list.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
importance(float): Value from 0.0 to 1.0
expires(datetime.datetime): Expiration date given inferred reason
(See voevent spec).
inferences(:class:`voeventparse.misc.Inference`): Inference or list of
inferences, denoting probable identifications or associations, etc.
"""
if not voevent.xpath('Why'):
etree.SubElement(voevent, 'Why')
if importance is not None:
voevent.Why.attrib['importance'] = str(importance)
if expires is not None:
voevent.Why.attrib['expires'] = expires.replace(
microsecond=0).isoformat()
if inferences is not None:
voevent.Why.extend(_listify(inferences)) |
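A hedged usage sketch; the stream name, dates, and inference values are placeholders, and the Voevent/Inference constructors and role constants are assumed from the voeventparse package.

```python
import datetime
import voeventparse as vp

v = vp.Voevent(stream='example.org/streams/demo', stream_id=1,
               role=vp.definitions.roles.test)
inference = vp.Inference(probability=0.9, name='GRB 120403A',
                         concept='process.variation.burst;em.gamma')
vp.add_why(v, importance=0.7,
           expires=datetime.datetime(2030, 1, 1),
           inferences=[inference])
```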
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None, origclass = None):
"""Returns a RelaxNG definition for this element (as an XML element (lxml.etree) rather than a string)"""
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace",'a':"http://relaxng.org/ns/annotation/0.9" })
if origclass: cls = origclass
preamble = []
try:
if cls.__doc__:
E2 = ElementMaker(namespace="http://relaxng.org/ns/annotation/0.9", nsmap={'a':'http://relaxng.org/ns/annotation/0.9'} )
preamble.append(E2.documentation(cls.__doc__))
except AttributeError:
pass
if cls.REQUIRED_ATTRIBS is None: cls.REQUIRED_ATTRIBS = () #bit hacky
if cls.OPTIONAL_ATTRIBS is None: cls.OPTIONAL_ATTRIBS = () #bit hacky
attribs = [ ]
if cls.REQUIRED_ATTRIBS and Attrib.ID in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute(E.data(type='ID',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='id', ns="http://www.w3.org/XML/1998/namespace") )
elif Attrib.ID in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(E.data(type='ID',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='id', ns="http://www.w3.org/XML/1998/namespace") ) )
if Attrib.CLASS in cls.REQUIRED_ATTRIBS:
#Set is a tough one, we can't require it as it may be defined in the declaration: we make it optional and need schematron to resolve this later
attribs.append( E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='class') )
attribs.append( E.optional( E.attribute( E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='set' ) ) )
elif Attrib.CLASS in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='class') ) )
attribs.append( E.optional( E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='set' ) ) )
if Attrib.ANNOTATOR in cls.REQUIRED_ATTRIBS or Attrib.ANNOTATOR in cls.OPTIONAL_ATTRIBS:
#Similarly tough
attribs.append( E.optional( E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='annotator') ) )
attribs.append( E.optional( E.attribute(name='annotatortype') ) )
if Attrib.CONFIDENCE in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute(E.data(type='double',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='confidence') )
elif Attrib.CONFIDENCE in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(E.data(type='double',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='confidence') ) )
if Attrib.N in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute( E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='n') )
elif Attrib.N in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute( E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='n') ) )
if Attrib.DATETIME in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute(E.data(type='dateTime',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='datetime') )
elif Attrib.DATETIME in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute( E.data(type='dateTime',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='datetime') ) )
if Attrib.BEGINTIME in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(name='begintime') )
elif Attrib.BEGINTIME in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(name='begintime') ) )
if Attrib.ENDTIME in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(name='endtime') )
elif Attrib.ENDTIME in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(name='endtime') ) )
if Attrib.SRC in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(E.data(type='anyURI',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='src') )
elif Attrib.SRC in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(E.data(type='anyURI',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='src') ) )
if Attrib.SPEAKER in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='speaker') )
elif Attrib.SPEAKER in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='speaker') ) )
if Attrib.TEXTCLASS in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(name='textclass') )
elif Attrib.TEXTCLASS in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(name='textclass') ) )
if Attrib.METADATA in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(name='metadata') )
elif Attrib.METADATA in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(name='metadata') ) )
if cls.XLINK:
attribs += [ #loose interpretation of specs, not checking whether xlink combinations are valid
E.optional(E.attribute(name='href',ns="http://www.w3.org/1999/xlink"),E.attribute(name='type',ns="http://www.w3.org/1999/xlink") ),
E.optional(E.attribute(name='role',ns="http://www.w3.org/1999/xlink")),
E.optional(E.attribute(name='title',ns="http://www.w3.org/1999/xlink")),
E.optional(E.attribute(name='label',ns="http://www.w3.org/1999/xlink")),
E.optional(E.attribute(name='show',ns="http://www.w3.org/1999/xlink")),
]
attribs.append( E.optional( E.attribute( name='auth' ) ) )
if extraattribs:
for e in extraattribs:
attribs.append(e) #s
attribs.append( E.ref(name="allow_foreign_attributes") )
elements = [] #(including attributes)
if cls.TEXTCONTAINER or cls.PHONCONTAINER:
elements.append( E.text())
#We actually want to require non-empty text (E.text() is not sufficient)
#but this is not solved yet, see https://github.com/proycon/folia/issues/19
#elements.append( E.data(E.param(r".+",name="pattern"),type='string'))
#elements.append( E.data(E.param(r"(.|\n|\r)*\S+(.|\n|\r)*",name="pattern"),type='string'))
done = {}
if includechildren and cls.ACCEPTED_DATA: #pylint: disable=too-many-nested-blocks
for c in cls.ACCEPTED_DATA:
if c.__name__[:8] == 'Abstract' and inspect.isclass(c):
for c2 in globals().values():
try:
if inspect.isclass(c2) and issubclass(c2, c):
try:
if c2.XMLTAG and c2.XMLTAG not in done:
if c2.OCCURRENCES == 1:
elements.append( E.optional( E.ref(name=c2.XMLTAG) ) )
else:
elements.append( E.zeroOrMore( E.ref(name=c2.XMLTAG) ) )
if c2.XMLTAG == 'item': #nasty hack for backward compatibility with deprecated listitem element
elements.append( E.zeroOrMore( E.ref(name='listitem') ) )
done[c2.XMLTAG] = True
except AttributeError:
continue
except TypeError:
pass
elif issubclass(c, Feature) and c.SUBSET:
attribs.append( E.optional( E.attribute(name=c.SUBSET))) #features as attributes
else:
try:
if c.XMLTAG and c.XMLTAG not in done:
if cls.REQUIRED_DATA and c in cls.REQUIRED_DATA:
if c.OCCURRENCES == 1:
elements.append( E.ref(name=c.XMLTAG) )
else:
elements.append( E.oneOrMore( E.ref(name=c.XMLTAG) ) )
elif c.OCCURRENCES == 1:
elements.append( E.optional( E.ref(name=c.XMLTAG) ) )
else:
elements.append( E.zeroOrMore( E.ref(name=c.XMLTAG) ) )
if c.XMLTAG == 'item':
#nasty hack for backward compatibility with deprecated listitem element
elements.append( E.zeroOrMore( E.ref(name='listitem') ) )
done[c.XMLTAG] = True
except AttributeError:
continue
if extraelements:
for e in extraelements:
elements.append( e )
if elements:
if len(elements) > 1:
attribs.append( E.interleave(*elements) )
else:
attribs.append( *elements )
if not attribs:
attribs.append( E.empty() )
if cls.XMLTAG in ('desc','comment'):
return E.define( E.element(E.text(), *(preamble + attribs), **{'name': cls.XMLTAG}), name=cls.XMLTAG, ns=NSFOLIA)
else:
return E.define( E.element(*(preamble + attribs), **{'name': cls.XMLTAG}), name=cls.XMLTAG, ns=NSFOLIA) | Returns a RelaxNG definition for this element (as an XML element (lxml.etree) rather than a string) | Below is the the instruction that describes the task:
### Input:
Returns a RelaxNG definition for this element (as an XML element (lxml.etree) rather than a string)
### Response:
def relaxng(cls, includechildren=True,extraattribs = None, extraelements=None, origclass = None):
"""Returns a RelaxNG definition for this element (as an XML element (lxml.etree) rather than a string)"""
E = ElementMaker(namespace="http://relaxng.org/ns/structure/1.0",nsmap={None:'http://relaxng.org/ns/structure/1.0' , 'folia': "http://ilk.uvt.nl/folia", 'xml' : "http://www.w3.org/XML/1998/namespace",'a':"http://relaxng.org/ns/annotation/0.9" })
if origclass: cls = origclass
preamble = []
try:
if cls.__doc__:
E2 = ElementMaker(namespace="http://relaxng.org/ns/annotation/0.9", nsmap={'a':'http://relaxng.org/ns/annotation/0.9'} )
preamble.append(E2.documentation(cls.__doc__))
except AttributeError:
pass
if cls.REQUIRED_ATTRIBS is None: cls.REQUIRED_ATTRIBS = () #bit hacky
if cls.OPTIONAL_ATTRIBS is None: cls.OPTIONAL_ATTRIBS = () #bit hacky
attribs = [ ]
if cls.REQUIRED_ATTRIBS and Attrib.ID in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute(E.data(type='ID',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='id', ns="http://www.w3.org/XML/1998/namespace") )
elif Attrib.ID in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(E.data(type='ID',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='id', ns="http://www.w3.org/XML/1998/namespace") ) )
if Attrib.CLASS in cls.REQUIRED_ATTRIBS:
#Set is a tough one, we can't require it as it may be defined in the declaration: we make it optional and need schematron to resolve this later
attribs.append( E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='class') )
attribs.append( E.optional( E.attribute( E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='set' ) ) )
elif Attrib.CLASS in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='class') ) )
attribs.append( E.optional( E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='set' ) ) )
if Attrib.ANNOTATOR in cls.REQUIRED_ATTRIBS or Attrib.ANNOTATOR in cls.OPTIONAL_ATTRIBS:
#Similarly tough
attribs.append( E.optional( E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='annotator') ) )
attribs.append( E.optional( E.attribute(name='annotatortype') ) )
if Attrib.CONFIDENCE in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute(E.data(type='double',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='confidence') )
elif Attrib.CONFIDENCE in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(E.data(type='double',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='confidence') ) )
if Attrib.N in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute( E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='n') )
elif Attrib.N in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute( E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='n') ) )
if Attrib.DATETIME in cls.REQUIRED_ATTRIBS:
attribs.append( E.attribute(E.data(type='dateTime',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='datetime') )
elif Attrib.DATETIME in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute( E.data(type='dateTime',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='datetime') ) )
if Attrib.BEGINTIME in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(name='begintime') )
elif Attrib.BEGINTIME in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(name='begintime') ) )
if Attrib.ENDTIME in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(name='endtime') )
elif Attrib.ENDTIME in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(name='endtime') ) )
if Attrib.SRC in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(E.data(type='anyURI',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='src') )
elif Attrib.SRC in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(E.data(type='anyURI',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='src') ) )
if Attrib.SPEAKER in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'), name='speaker') )
elif Attrib.SPEAKER in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(E.data(type='string',datatypeLibrary='http://www.w3.org/2001/XMLSchema-datatypes'),name='speaker') ) )
if Attrib.TEXTCLASS in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(name='textclass') )
elif Attrib.TEXTCLASS in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(name='textclass') ) )
if Attrib.METADATA in cls.REQUIRED_ATTRIBS:
attribs.append(E.attribute(name='metadata') )
elif Attrib.METADATA in cls.OPTIONAL_ATTRIBS:
attribs.append( E.optional( E.attribute(name='metadata') ) )
if cls.XLINK:
attribs += [ #loose interpretation of specs, not checking whether xlink combinations are valid
E.optional(E.attribute(name='href',ns="http://www.w3.org/1999/xlink"),E.attribute(name='type',ns="http://www.w3.org/1999/xlink") ),
E.optional(E.attribute(name='role',ns="http://www.w3.org/1999/xlink")),
E.optional(E.attribute(name='title',ns="http://www.w3.org/1999/xlink")),
E.optional(E.attribute(name='label',ns="http://www.w3.org/1999/xlink")),
E.optional(E.attribute(name='show',ns="http://www.w3.org/1999/xlink")),
]
attribs.append( E.optional( E.attribute( name='auth' ) ) )
if extraattribs:
for e in extraattribs:
attribs.append(e) #s
attribs.append( E.ref(name="allow_foreign_attributes") )
elements = [] #(including attributes)
if cls.TEXTCONTAINER or cls.PHONCONTAINER:
elements.append( E.text())
#We actually want to require non-empty text (E.text() is not sufficient)
#but this is not solved yet, see https://github.com/proycon/folia/issues/19
#elements.append( E.data(E.param(r".+",name="pattern"),type='string'))
#elements.append( E.data(E.param(r"(.|\n|\r)*\S+(.|\n|\r)*",name="pattern"),type='string'))
done = {}
if includechildren and cls.ACCEPTED_DATA: #pylint: disable=too-many-nested-blocks
for c in cls.ACCEPTED_DATA:
if c.__name__[:8] == 'Abstract' and inspect.isclass(c):
for c2 in globals().values():
try:
if inspect.isclass(c2) and issubclass(c2, c):
try:
if c2.XMLTAG and c2.XMLTAG not in done:
if c2.OCCURRENCES == 1:
elements.append( E.optional( E.ref(name=c2.XMLTAG) ) )
else:
elements.append( E.zeroOrMore( E.ref(name=c2.XMLTAG) ) )
if c2.XMLTAG == 'item': #nasty hack for backward compatibility with deprecated listitem element
elements.append( E.zeroOrMore( E.ref(name='listitem') ) )
done[c2.XMLTAG] = True
except AttributeError:
continue
except TypeError:
pass
elif issubclass(c, Feature) and c.SUBSET:
attribs.append( E.optional( E.attribute(name=c.SUBSET))) #features as attributes
else:
try:
if c.XMLTAG and c.XMLTAG not in done:
if cls.REQUIRED_DATA and c in cls.REQUIRED_DATA:
if c.OCCURRENCES == 1:
elements.append( E.ref(name=c.XMLTAG) )
else:
elements.append( E.oneOrMore( E.ref(name=c.XMLTAG) ) )
elif c.OCCURRENCES == 1:
elements.append( E.optional( E.ref(name=c.XMLTAG) ) )
else:
elements.append( E.zeroOrMore( E.ref(name=c.XMLTAG) ) )
if c.XMLTAG == 'item':
#nasty hack for backward compatibility with deprecated listitem element
elements.append( E.zeroOrMore( E.ref(name='listitem') ) )
done[c.XMLTAG] = True
except AttributeError:
continue
if extraelements:
for e in extraelements:
elements.append( e )
if elements:
if len(elements) > 1:
attribs.append( E.interleave(*elements) )
else:
attribs.append( *elements )
if not attribs:
attribs.append( E.empty() )
if cls.XMLTAG in ('desc','comment'):
return E.define( E.element(E.text(), *(preamble + attribs), **{'name': cls.XMLTAG}), name=cls.XMLTAG, ns=NSFOLIA)
else:
return E.define( E.element(*(preamble + attribs), **{'name': cls.XMLTAG}), name=cls.XMLTAG, ns=NSFOLIA) |
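A quick illustration of the ElementMaker pattern the relaxng() method is built on; the element and attribute names below are placeholders, not taken from the FoLiA schema itself (requires lxml).

from lxml.builder import ElementMaker
from lxml import etree

RNG_NS = "http://relaxng.org/ns/structure/1.0"
E = ElementMaker(namespace=RNG_NS, nsmap={None: RNG_NS})

# a <define> for an element with an optional xml:id attribute and zero or more <w> children
definition = E.define(
    E.element(
        E.optional(E.attribute(name='id', ns="http://www.w3.org/XML/1998/namespace")),
        E.zeroOrMore(E.ref(name='w')),
        name='s'),
    name='s')
print(etree.tostring(definition, pretty_print=True).decode())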
def centers(self):
"""The centers for the KMeans model."""
o = self._model_json["output"]
cvals = o["centers"].cell_values
centers = [list(cval[1:]) for cval in cvals]
return centers | The centers for the KMeans model. | Below is the the instruction that describes the task:
### Input:
The centers for the KMeans model.
### Response:
def centers(self):
"""The centers for the KMeans model."""
o = self._model_json["output"]
cvals = o["centers"].cell_values
centers = [list(cval[1:]) for cval in cvals]
return centers |
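The accessor above only drops the first column of each row (the centroid label) from the model's "centers" table. A tiny stand-in shows that slicing; the nested structure is a guess at the shape of the H2O model JSON, not the real object.

from types import SimpleNamespace

fake_table = SimpleNamespace(cell_values=[("centroid1", 0.1, 2.3), ("centroid2", 4.5, 6.7)])
model_json = {"output": {"centers": fake_table}}
centers = [list(cval[1:]) for cval in model_json["output"]["centers"].cell_values]
print(centers)  # [[0.1, 2.3], [4.5, 6.7]]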
def to_end_tag(self, tag_func):
"""
Creates a tag that parses until it finds the corresponding end
tag, eg: for a tag named ``mytag`` it will parse until
``endmytag``. The decorated func's return value is used to
render the parsed content and takes three arguments - the
parsed content between the start and end tags, the template
context and the tag token.
"""
@wraps(tag_func)
def tag_wrapper(parser, token):
class ToEndTagNode(template.Node):
def __init__(self):
end_name = "end%s" % tag_func.__name__
self.nodelist = parser.parse((end_name,))
parser.delete_first_token()
def render(self, context):
args = (self.nodelist.render(context), context, token)
return tag_func(*args[:tag_func.__code__.co_argcount])
return ToEndTagNode()
return self.tag(tag_wrapper) | Creates a tag that parses until it finds the corresponding end
tag, eg: for a tag named ``mytag`` it will parse until
``endmytag``. The decorated func's return value is used to
render the parsed content and takes three arguments - the
parsed content between the start and end tags, the template
context and the tag token. | Below is the the instruction that describes the task:
### Input:
Creates a tag that parses until it finds the corresponding end
tag, eg: for a tag named ``mytag`` it will parse until
``endmytag``. The decorated func's return value is used to
render the parsed content and takes three arguments - the
parsed content between the start and end tags, the template
context and the tag token.
### Response:
def to_end_tag(self, tag_func):
"""
Creates a tag that parses until it finds the corresponding end
tag, eg: for a tag named ``mytag`` it will parse until
``endmytag``. The decorated func's return value is used to
render the parsed content and takes three arguments - the
parsed content between the start and end tags, the template
context and the tag token.
"""
@wraps(tag_func)
def tag_wrapper(parser, token):
class ToEndTagNode(template.Node):
def __init__(self):
end_name = "end%s" % tag_func.__name__
self.nodelist = parser.parse((end_name,))
parser.delete_first_token()
def render(self, context):
args = (self.nodelist.render(context), context, token)
return tag_func(*args[:tag_func.__code__.co_argcount])
return ToEndTagNode()
return self.tag(tag_wrapper) |
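The notable detail in to_end_tag is the call tag_func(*args[:tag_func.__code__.co_argcount]), which passes only as many of (parsed content, context, token) as the decorated function actually declares. A self-contained sketch of that dispatch trick, using made-up functions:

def render_one(parsed):
    return parsed.upper()

def render_three(parsed, context, token):
    return "%s (context=%r, token=%r)" % (parsed, context, token)

args = ("hello", {"user": "anna"}, "<token>")
for func in (render_one, render_three):
    # each function receives only the arguments it asks for
    print(func(*args[:func.__code__.co_argcount]))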
def set_messenger_theme(self, theme="default", location="default",
max_messages="default"):
""" Sets a theme for posting messages.
Themes: ["flat", "future", "block", "air", "ice"]
Locations: ["top_left", "top_center", "top_right",
"bottom_left", "bottom_center", "bottom_right"]
max_messages is the limit of concurrent messages to display. """
if not theme:
theme = "default" # "future"
if not location:
location = "default" # "bottom_right"
if not max_messages:
max_messages = "default" # "8"
js_utils.set_messenger_theme(
self.driver, theme=theme,
location=location, max_messages=max_messages) | Sets a theme for posting messages.
Themes: ["flat", "future", "block", "air", "ice"]
Locations: ["top_left", "top_center", "top_right",
"bottom_left", "bottom_center", "bottom_right"]
max_messages is the limit of concurrent messages to display. | Below is the the instruction that describes the task:
### Input:
Sets a theme for posting messages.
Themes: ["flat", "future", "block", "air", "ice"]
Locations: ["top_left", "top_center", "top_right",
"bottom_left", "bottom_center", "bottom_right"]
max_messages is the limit of concurrent messages to display.
### Response:
def set_messenger_theme(self, theme="default", location="default",
max_messages="default"):
""" Sets a theme for posting messages.
Themes: ["flat", "future", "block", "air", "ice"]
Locations: ["top_left", "top_center", "top_right",
"bottom_left", "bottom_center", "bottom_right"]
max_messages is the limit of concurrent messages to display. """
if not theme:
theme = "default" # "future"
if not location:
location = "default" # "bottom_right"
if not max_messages:
max_messages = "default" # "8"
js_utils.set_messenger_theme(
self.driver, theme=theme,
location=location, max_messages=max_messages) |
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
        raise ClientException("page_size must be >= 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))} | GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]} | Below is the the instruction that describes the task:
### Input:
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
### Response:
def get_paged(self, endpoint, params=None, page_size=50, merge=False):
"""
GET with paging (for large payloads).
:param page_size: how many objects per page
:param endpoint: DHIS2 API endpoint
:param params: HTTP parameters (dict), defaults to None
:param merge: If true, return a list containing all pages instead of one page. Defaults to False.
:return: generator OR a normal DHIS2 response dict, e.g. {"organisationUnits": [...]}
"""
try:
if not isinstance(page_size, (string_types, int)) or int(page_size) < 1:
raise ValueError
except ValueError:
        raise ClientException("page_size must be >= 1")
params = {} if not params else params
if 'paging' in params:
raise ClientException("Can't set paging manually in `params` when using `get_paged`")
params['pageSize'] = page_size
params['page'] = 1
params['totalPages'] = True
collection = endpoint.split('/')[0] # only use e.g. events when submitting events/query as endpoint
def page_generator():
"""Yield pages"""
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
page_count = page['pager']['pageCount']
yield page
while page['pager']['page'] < page_count:
params['page'] += 1
page = self.get(endpoint=endpoint, file_type='json', params=params).json()
yield page
if not merge:
return page_generator()
else:
data = []
for p in page_generator():
data.append(p[collection])
return {collection: list(chain.from_iterable(data))} |
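A self-contained sketch of the paging pattern used above: a generator yields one page at a time, and the merge path flattens all pages with itertools.chain. The fake pages stand in for real DHIS2 responses.

from itertools import chain

pages = [
    {"pager": {"page": 1, "pageCount": 2}, "organisationUnits": [{"id": "a"}, {"id": "b"}]},
    {"pager": {"page": 2, "pageCount": 2}, "organisationUnits": [{"id": "c"}]},
]

def page_generator():
    for page in pages:
        yield page

# merge=False style: consume one page at a time
for page in page_generator():
    print(page["pager"]["page"], len(page["organisationUnits"]))

# merge=True style: a single flat payload
merged = {"organisationUnits": list(chain.from_iterable(p["organisationUnits"] for p in page_generator()))}
print(merged)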
def save(self, filename, garbage=0, clean=0, deflate=0, incremental=0, ascii=0, expand=0, linear=0, pretty=0, decrypt=1):
"""save(self, filename, garbage=0, clean=0, deflate=0, incremental=0, ascii=0, expand=0, linear=0, pretty=0, decrypt=1) -> PyObject *"""
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
if type(filename) == str:
pass
elif type(filename) == unicode:
filename = filename.encode('utf8')
else:
raise TypeError("filename must be a string")
if filename == self.name and not incremental:
raise ValueError("save to original must be incremental")
if self.pageCount < 1:
raise ValueError("cannot save with zero pages")
if incremental:
if self.name != filename or self.stream:
raise ValueError("incremental needs original file")
return _fitz.Document_save(self, filename, garbage, clean, deflate, incremental, ascii, expand, linear, pretty, decrypt) | save(self, filename, garbage=0, clean=0, deflate=0, incremental=0, ascii=0, expand=0, linear=0, pretty=0, decrypt=1) -> PyObject * | Below is the the instruction that describes the task:
### Input:
save(self, filename, garbage=0, clean=0, deflate=0, incremental=0, ascii=0, expand=0, linear=0, pretty=0, decrypt=1) -> PyObject *
### Response:
def save(self, filename, garbage=0, clean=0, deflate=0, incremental=0, ascii=0, expand=0, linear=0, pretty=0, decrypt=1):
"""save(self, filename, garbage=0, clean=0, deflate=0, incremental=0, ascii=0, expand=0, linear=0, pretty=0, decrypt=1) -> PyObject *"""
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
if type(filename) == str:
pass
elif type(filename) == unicode:
filename = filename.encode('utf8')
else:
raise TypeError("filename must be a string")
if filename == self.name and not incremental:
raise ValueError("save to original must be incremental")
if self.pageCount < 1:
raise ValueError("cannot save with zero pages")
if incremental:
if self.name != filename or self.stream:
raise ValueError("incremental needs original file")
return _fitz.Document_save(self, filename, garbage, clean, deflate, incremental, ascii, expand, linear, pretty, decrypt) |
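A hedged usage sketch matching the signature shown above (requires PyMuPDF, imported as fitz; the file names are placeholders). Saving to the original path must be incremental, so the two cases are shown separately.

import fitz  # PyMuPDF

doc = fitz.open("input.pdf")
# write a garbage-collected, compressed copy under a new name
doc.save("output.pdf", garbage=4, clean=1, deflate=1)
# or append changes to the original file in place
doc.save("input.pdf", incremental=1)
doc.close()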
def search(self, q, labels, state='open,closed', **kwargs):
"""Search for issues in Github.
:param q: query string to search
:param state: state of the issue
:returns: list of issue objects
:rtype: list
"""
search_result = self.github_request.search(q=q, state=state, **kwargs)
if search_result['total_count'] > 0:
return list(
map(lambda issue_dict: GithubIssue(
github_request=self.github_request, **issue_dict),
search_result['items'])
) | Search for issues in Github.
:param q: query string to search
:param state: state of the issue
:returns: list of issue objects
:rtype: list | Below is the the instruction that describes the task:
### Input:
Search for issues in Github.
:param q: query string to search
:param state: state of the issue
:returns: list of issue objects
:rtype: list
### Response:
def search(self, q, labels, state='open,closed', **kwargs):
"""Search for issues in Github.
:param q: query string to search
:param state: state of the issue
:returns: list of issue objects
:rtype: list
"""
search_result = self.github_request.search(q=q, state=state, **kwargs)
if search_result['total_count'] > 0:
return list(
map(lambda issue_dict: GithubIssue(
github_request=self.github_request, **issue_dict),
search_result['items'])
) |
def _update_local_conf(config, service_id, client_secret):
"""
Update local.conf with service id and client secrets
:param config: Location of config files
:param service_id: Service ID
:param client_secret: Client Secret
"""
lines = _get_existing_conf(config)
lines.append('\nservice_id = "{}"\n'.format(service_id))
if client_secret:
lines.append('client_secret = "{}"\n'.format(client_secret))
with open(os.path.join(config, 'local.conf'), 'w') as f:
f.writelines(lines) | Update local.conf with service id and client secrets
:param config: Location of config files
:param service_id: Service ID
:param client_secret: Client Secret | Below is the the instruction that describes the task:
### Input:
Update local.conf with service id and client secrets
:param config: Location of config files
:param service_id: Service ID
:param client_secret: Client Secret
### Response:
def _update_local_conf(config, service_id, client_secret):
"""
Update local.conf with service id and client secrets
:param config: Location of config files
:param service_id: Service ID
:param client_secret: Client Secret
"""
lines = _get_existing_conf(config)
lines.append('\nservice_id = "{}"\n'.format(service_id))
if client_secret:
lines.append('client_secret = "{}"\n'.format(client_secret))
with open(os.path.join(config, 'local.conf'), 'w') as f:
f.writelines(lines) |
def Clear(self):
"""Clears all data from the emulator instance.
Returns:
True if the data was successfully cleared, False otherwise.
"""
headers = {'Content-length': '0'}
response, _ = self._http.request('%s/reset' % self._host, method='POST',
headers=headers)
if response.status == 200:
return True
else:
logging.warning('failed to clear emulator; response was: %s', response) | Clears all data from the emulator instance.
Returns:
True if the data was successfully cleared, False otherwise. | Below is the the instruction that describes the task:
### Input:
Clears all data from the emulator instance.
Returns:
True if the data was successfully cleared, False otherwise.
### Response:
def Clear(self):
"""Clears all data from the emulator instance.
Returns:
True if the data was successfully cleared, False otherwise.
"""
headers = {'Content-length': '0'}
response, _ = self._http.request('%s/reset' % self._host, method='POST',
headers=headers)
if response.status == 200:
return True
else:
logging.warning('failed to clear emulator; response was: %s', response) |
def fetch_pi_file(filename):
"""This will download a segment of pi from super-computing.org
if the file is not already present.
"""
import os, urllib
ftpdir="ftp://pi.super-computing.org/.2/pi200m/"
if os.path.exists(filename):
# we already have it
return
else:
# download it
urllib.urlretrieve(ftpdir+filename,filename) | This will download a segment of pi from super-computing.org
if the file is not already present. | Below is the the instruction that describes the task:
### Input:
This will download a segment of pi from super-computing.org
if the file is not already present.
### Response:
def fetch_pi_file(filename):
"""This will download a segment of pi from super-computing.org
if the file is not already present.
"""
import os, urllib
ftpdir="ftp://pi.super-computing.org/.2/pi200m/"
if os.path.exists(filename):
# we already have it
return
else:
# download it
urllib.urlretrieve(ftpdir+filename,filename) |
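The helper above uses the Python 2 urllib API; under Python 3 the same call lives in urllib.request. A hedged Python 3 variant of the same download-if-missing logic (the FTP directory is the one hard-coded above):

import os
from urllib.request import urlretrieve

def fetch_pi_file_py3(filename):
    ftpdir = "ftp://pi.super-computing.org/.2/pi200m/"
    if not os.path.exists(filename):
        urlretrieve(ftpdir + filename, filename)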
def field(self, name_index):
"""Get a VDField instance representing a field of the vdata.
Args::
name_index name or index number of the field
Returns::
VDfield instance representing the field
C library equivalent : no equivalent
"""
# Transform a name to an index number
if isinstance(name_index, str):
status, index = _C.VSfindex(self._id, name_index)
_checkErr('field', status, "illegal field name: %s" % name_index)
else:
n = _C.VFnfields(self._id)
_checkErr('field', n, 'cannot execute')
index = name_index
if index >= n:
raise HDF4Error("field: illegal index number")
return VDField(self, index) | Get a VDField instance representing a field of the vdata.
Args::
name_index name or index number of the field
Returns::
VDfield instance representing the field
C library equivalent : no equivalent | Below is the the instruction that describes the task:
### Input:
Get a VDField instance representing a field of the vdata.
Args::
name_index name or index number of the field
Returns::
VDfield instance representing the field
C library equivalent : no equivalent
### Response:
def field(self, name_index):
"""Get a VDField instance representing a field of the vdata.
Args::
name_index name or index number of the field
Returns::
VDfield instance representing the field
C library equivalent : no equivalent
"""
# Transform a name to an index number
if isinstance(name_index, str):
status, index = _C.VSfindex(self._id, name_index)
_checkErr('field', status, "illegal field name: %s" % name_index)
else:
n = _C.VFnfields(self._id)
_checkErr('field', n, 'cannot execute')
index = name_index
if index >= n:
raise HDF4Error("field: illegal index number")
return VDField(self, index) |
def static(self, uri, file_or_directory, *args, **kwargs):
"""Create a websocket route from a decorated function
:param uri: endpoint at which the socket endpoint will be accessible.
:type uri: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The exception function to use as the decorator
:rtype: fn
"""
kwargs.setdefault('pattern', r'/?.+')
kwargs.setdefault('use_modified_since', True)
kwargs.setdefault('use_content_range', False)
kwargs.setdefault('stream_large_files', False)
kwargs.setdefault('name', 'static')
kwargs.setdefault('host', None)
kwargs.setdefault('strict_slashes', None)
self._static.append(FutureStatic(uri, file_or_directory, args, kwargs)) | Create a websocket route from a decorated function
:param uri: endpoint at which the socket endpoint will be accessible.
:type uri: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The exception function to use as the decorator
:rtype: fn | Below is the the instruction that describes the task:
### Input:
Create a websocket route from a decorated function
:param uri: endpoint at which the socket endpoint will be accessible.
:type uri: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The exception function to use as the decorator
:rtype: fn
### Response:
def static(self, uri, file_or_directory, *args, **kwargs):
"""Create a websocket route from a decorated function
:param uri: endpoint at which the socket endpoint will be accessible.
:type uri: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The exception function to use as the decorator
:rtype: fn
"""
kwargs.setdefault('pattern', r'/?.+')
kwargs.setdefault('use_modified_since', True)
kwargs.setdefault('use_content_range', False)
kwargs.setdefault('stream_large_files', False)
kwargs.setdefault('name', 'static')
kwargs.setdefault('host', None)
kwargs.setdefault('strict_slashes', None)
self._static.append(FutureStatic(uri, file_or_directory, args, kwargs)) |
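A hedged usage sketch of the deferred registration above (assumes Sanic is installed; paths and names are placeholders). The recorded FutureStatic entries are only applied when the blueprint is attached to an app.

from sanic import Sanic, Blueprint

bp = Blueprint("assets")
bp.static("/static", "./static")                                    # serve a whole directory
bp.static("/favicon.ico", "./static/favicon.ico", name="favicon")   # serve a single file

app = Sanic("example")
app.blueprint(bp)  # the FutureStatic records are registered here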
def login(self, username, password=None, email=None, registry=None, reauth=False,
dockercfg_path=None):
"""
:param username: The registry username
:param password: The plaintext password
:param email: The email for the registry account
:param registry: URL to the registry, example:
- https://index.docker.io/v1/
:param reauth: Whether or not to refresh existing authentication on the Docker server.
:param dockercfg_path: Use a custom path for the Docker config file
:return: None
"""
self.d.login(username, password, email, registry, reauth, dockercfg_path)
logger.info("Login to %s succeed", registry) | :param username: The registry username
:param password: The plaintext password
:param email: The email for the registry account
:param registry: URL to the registry, example:
- https://index.docker.io/v1/
:param reauth: Whether or not to refresh existing authentication on the Docker server.
:param dockercfg_path: Use a custom path for the Docker config file
:return: None | Below is the the instruction that describes the task:
### Input:
:param username: The registry username
:param password: The plaintext password
:param email: The email for the registry account
:param registry: URL to the registry, example:
- https://index.docker.io/v1/
:param reauth: Whether or not to refresh existing authentication on the Docker server.
:param dockercfg_path: Use a custom path for the Docker config file
:return: None
### Response:
def login(self, username, password=None, email=None, registry=None, reauth=False,
dockercfg_path=None):
"""
:param username: The registry username
:param password: The plaintext password
:param email: The email for the registry account
:param registry: URL to the registry, example:
- https://index.docker.io/v1/
:param reauth: Whether or not to refresh existing authentication on the Docker server.
:param dockercfg_path: Use a custom path for the Docker config file
:return: None
"""
self.d.login(username, password, email, registry, reauth, dockercfg_path)
logger.info("Login to %s succeed", registry) |
def save(self, path: str):
"""
Save lexicon in Numpy array format. Lexicon will be specific to Sockeye model.
:param path: Path to Numpy array output file.
"""
with open(path, 'wb') as out:
np.save(out, self.lex)
logger.info("Saved top-k lexicon to \"%s\"", path) | Save lexicon in Numpy array format. Lexicon will be specific to Sockeye model.
:param path: Path to Numpy array output file. | Below is the the instruction that describes the task:
### Input:
Save lexicon in Numpy array format. Lexicon will be specific to Sockeye model.
:param path: Path to Numpy array output file.
### Response:
def save(self, path: str):
"""
Save lexicon in Numpy array format. Lexicon will be specific to Sockeye model.
:param path: Path to Numpy array output file.
"""
with open(path, 'wb') as out:
np.save(out, self.lex)
logger.info("Saved top-k lexicon to \"%s\"", path) |
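The method is a thin wrapper around numpy's binary format, so a round trip shows exactly what lands on disk; the array below is a stand-in for the top-k lexicon.

import numpy as np

lex = np.zeros((4, 3), dtype=np.int32)  # stand-in for self.lex
with open("lexicon.npy", "wb") as out:
    np.save(out, lex)
assert (np.load("lexicon.npy") == lex).all()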
def keys_with_suffix(self, suffix):
"""
:return: list of keys ending with given :suffix:.
"""
return [k.rstrip(suffix) for k in self.keys() if k.endswith(suffix)] | :return: list of keys ending with given :suffix:. | Below is the the instruction that describes the task:
### Input:
:return: list of keys ending with given :suffix:.
### Response:
def keys_with_suffix(self, suffix):
"""
:return: list of keys ending with given :suffix:.
"""
return [k.rstrip(suffix) for k in self.keys() if k.endswith(suffix)] |
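One caveat worth illustrating: str.rstrip treats its argument as a set of characters rather than a literal suffix, so the comprehension above can strip more than intended. A small demonstration, with slicing as the stricter way to drop a known suffix:

key, suffix = "tempo_total", "_total"
print(key.rstrip(suffix))   # 'temp'  -- the trailing 'o' is also removed
print(key[:-len(suffix)])   # 'tempo' -- removes exactly the suffix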
def read_bin_particle_density(self):
"""Read the bin particle density
:returns: float
"""
config = []
# Send the command byte and sleep for 10 ms
self.cnxn.xfer([0x33])
sleep(10e-3)
    # Read the 4 bytes of the value by sending 4 empty bytes
for i in range(4):
resp = self.cnxn.xfer([0x00])[0]
config.append(resp)
bpd = self._calculate_float(config)
return bpd | Read the bin particle density
:returns: float | Below is the the instruction that describes the task:
### Input:
Read the bin particle density
:returns: float
### Response:
def read_bin_particle_density(self):
"""Read the bin particle density
:returns: float
"""
config = []
# Send the command byte and sleep for 10 ms
self.cnxn.xfer([0x33])
sleep(10e-3)
    # Read the 4 bytes of the value by sending 4 empty bytes
for i in range(4):
resp = self.cnxn.xfer([0x00])[0]
config.append(resp)
bpd = self._calculate_float(config)
return bpd |
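The four bytes collected over SPI are turned into a float by _calculate_float, which is not shown in this entry. As an illustration only, a plausible stand-in using the standard struct module (an assumption about the helper, not its actual code):

import struct

def calculate_float_guess(four_bytes):
    # interpret the bytes as a little-endian IEEE-754 32-bit float
    return struct.unpack('<f', bytes(four_bytes))[0]

print(calculate_float_guess([0x00, 0x00, 0x80, 0x3F]))  # 1.0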
def get_attachment_model():
"""
Returns the Attachment model that is active in this project.
"""
try:
from .models import AbstractAttachment
klass = apps.get_model(config["attachment_model"])
if not issubclass(klass, AbstractAttachment):
raise ImproperlyConfigured(
"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that is not "
"inherited from 'django_summernote.models.AbstractAttachment'" % config["attachment_model"]
)
return klass
except ValueError:
raise ImproperlyConfigured("SUMMERNOTE_CONFIG['attachment_model'] must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that has not been installed" % config["attachment_model"]
) | Returns the Attachment model that is active in this project. | Below is the the instruction that describes the task:
### Input:
Returns the Attachment model that is active in this project.
### Response:
def get_attachment_model():
"""
Returns the Attachment model that is active in this project.
"""
try:
from .models import AbstractAttachment
klass = apps.get_model(config["attachment_model"])
if not issubclass(klass, AbstractAttachment):
raise ImproperlyConfigured(
"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that is not "
"inherited from 'django_summernote.models.AbstractAttachment'" % config["attachment_model"]
)
return klass
except ValueError:
raise ImproperlyConfigured("SUMMERNOTE_CONFIG['attachment_model'] must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"SUMMERNOTE_CONFIG['attachment_model'] refers to model '%s' that has not been installed" % config["attachment_model"]
) |
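A hedged sketch of the settings side this helper expects: SUMMERNOTE_CONFIG pointing at a custom attachment model in "app_label.model_name" form. The app and model names are placeholders, and this only works inside a configured Django project.

# settings.py
SUMMERNOTE_CONFIG = {
    "attachment_model": "myapp.MyAttachment",
}

# myapp/models.py
from django_summernote.models import AbstractAttachment

class MyAttachment(AbstractAttachment):
    pass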
def export_image(self, image_id, region_name):
'''
a method to add a copy of an image to another AWS region
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html
REQUIRED: iam credentials must have valid access to both regions
:param image_id: string of AWS id of image to be copied
:param region_name: string of AWS region to copy image to
:return: string with AWS id of new image
'''
title = '%s.export_image' % self.__class__.__name__
# validate inputs
input_fields = {
'image_id': image_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
input_fields = {
'region_name': region_name
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.iam.fields.validate(value, '.%s' % key, object_title)
if region_name == self.iam.region_name:
raise ValueError('%s cannot export an image to the same region.' % title)
# construct ec2 client connection for source region
client_kwargs = {
'service_name': 'ec2',
'region_name': region_name,
'aws_access_key_id': self.iam.access_id,
'aws_secret_access_key': self.iam.secret_key
}
destination_connection = boto3.client(**client_kwargs)
# check state of image to be copied
self.check_image_state(image_id)
# discover tags and name associated with image to be copied
image_details = self.read_image(image_id)
tag_list = image_details['tags']
image_name = image_details['name']
# copy image over to current region
self.iam.printer('Copying image %s to region %s.' % (image_id, region_name))
try:
response = destination_connection.copy_image(
SourceRegion=self.iam.region_name,
SourceImageId=image_id,
Name=image_name
)
except:
raise AWSConnectionError(title)
new_id = response['ImageId']
# check into state of new image
try:
response = destination_connection.describe_images(
ImageIds=[ new_id ]
)
except:
raise AWSConnectionError(title)
if not 'State' in response['Images'][0].keys():
from time import sleep
from timeit import default_timer as timer
self.iam.printer('Checking into the status of image %s in AWS region %s' % (new_id, region_name), flush=True)
state_timeout = 0
while not 'State' in response['Images'][0].keys():
self.iam.printer('.', flush=True)
sleep(3)
state_timeout += 1
try:
response = destination_connection.describe_images(
ImageIds=[ new_id ]
)
except:
raise AWSConnectionError(title)
if state_timeout > 3:
raise Exception('Failure to determine status of image %s in AWS region %s.' % (new_id, region_name))
self.iam.printer(' done.')
image_state = response['Images'][0]['State']
# wait while image is pending
if image_state == 'pending':
from time import sleep
from timeit import default_timer as timer
self.iam.printer('Image %s in AWS region %s is %s' % (new_id, region_name, image_state), flush=True)
delay = 3
state_timeout = 0
while image_state != 'available':
self.iam.printer('.', flush=True)
sleep(delay)
t3 = timer()
try:
response = destination_connection.describe_images(
ImageIds=[ new_id ]
)
except:
raise AWSConnectionError(title)
t4 = timer()
state_timeout += 1
response_time = t4 - t3
if 3 - response_time > 0:
delay = 3 - response_time
else:
delay = 0
if state_timeout > 300:
raise Exception('Timeout. Failure initializing image %s in region %s in less than 15min' % (new_id, region_name))
image_state = response['Images'][0]['State']
self.iam.printer(' done.')
# add tags from image to image copy
try:
destination_connection.create_tags(
Resources=[ new_id ],
Tags=tag_list
)
except:
raise AWSConnectionError(title)
self.iam.printer('Tags from image %s have been added to image %s.' % (image_id, new_id))
return new_id | a method to add a copy of an image to another AWS region
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html
REQUIRED: iam credentials must have valid access to both regions
:param image_id: string of AWS id of image to be copied
:param region_name: string of AWS region to copy image to
:return: string with AWS id of new image | Below is the the instruction that describes the task:
### Input:
a method to add a copy of an image to another AWS region
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html
REQUIRED: iam credentials must have valid access to both regions
:param image_id: string of AWS id of image to be copied
:param region_name: string of AWS region to copy image to
:return: string with AWS id of new image
### Response:
def export_image(self, image_id, region_name):
'''
a method to add a copy of an image to another AWS region
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html
REQUIRED: iam credentials must have valid access to both regions
:param image_id: string of AWS id of image to be copied
:param region_name: string of AWS region to copy image to
:return: string with AWS id of new image
'''
title = '%s.export_image' % self.__class__.__name__
# validate inputs
input_fields = {
'image_id': image_id
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
input_fields = {
'region_name': region_name
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.iam.fields.validate(value, '.%s' % key, object_title)
if region_name == self.iam.region_name:
raise ValueError('%s cannot export an image to the same region.' % title)
# construct ec2 client connection for source region
client_kwargs = {
'service_name': 'ec2',
'region_name': region_name,
'aws_access_key_id': self.iam.access_id,
'aws_secret_access_key': self.iam.secret_key
}
destination_connection = boto3.client(**client_kwargs)
# check state of image to be copied
self.check_image_state(image_id)
# discover tags and name associated with image to be copied
image_details = self.read_image(image_id)
tag_list = image_details['tags']
image_name = image_details['name']
# copy image over to current region
self.iam.printer('Copying image %s to region %s.' % (image_id, region_name))
try:
response = destination_connection.copy_image(
SourceRegion=self.iam.region_name,
SourceImageId=image_id,
Name=image_name
)
except:
raise AWSConnectionError(title)
new_id = response['ImageId']
# check into state of new image
try:
response = destination_connection.describe_images(
ImageIds=[ new_id ]
)
except:
raise AWSConnectionError(title)
if not 'State' in response['Images'][0].keys():
from time import sleep
from timeit import default_timer as timer
self.iam.printer('Checking into the status of image %s in AWS region %s' % (new_id, region_name), flush=True)
state_timeout = 0
while not 'State' in response['Images'][0].keys():
self.iam.printer('.', flush=True)
sleep(3)
state_timeout += 1
try:
response = destination_connection.describe_images(
ImageIds=[ new_id ]
)
except:
raise AWSConnectionError(title)
if state_timeout > 3:
raise Exception('Failure to determine status of image %s in AWS region %s.' % (new_id, region_name))
self.iam.printer(' done.')
image_state = response['Images'][0]['State']
# wait while image is pending
if image_state == 'pending':
from time import sleep
from timeit import default_timer as timer
self.iam.printer('Image %s in AWS region %s is %s' % (new_id, region_name, image_state), flush=True)
delay = 3
state_timeout = 0
while image_state != 'available':
self.iam.printer('.', flush=True)
sleep(delay)
t3 = timer()
try:
response = destination_connection.describe_images(
ImageIds=[ new_id ]
)
except:
raise AWSConnectionError(title)
t4 = timer()
state_timeout += 1
response_time = t4 - t3
if 3 - response_time > 0:
delay = 3 - response_time
else:
delay = 0
if state_timeout > 300:
raise Exception('Timeout. Failure initializing image %s in region %s in less than 15min' % (new_id, region_name))
image_state = response['Images'][0]['State']
self.iam.printer(' done.')
# add tags from image to image copy
try:
destination_connection.create_tags(
Resources=[ new_id ],
Tags=tag_list
)
except:
raise AWSConnectionError(title)
self.iam.printer('Tags from image %s have been added to image %s.' % (image_id, new_id))
return new_id |
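For comparison, boto3 ships a waiter that covers most of the manual polling above; a hedged sketch of the same copy-and-wait flow against the client API directly (region names and the image id are placeholders, and credentials are assumed to be configured).

import boto3

source_region, target_region = "us-east-1", "us-west-2"
ec2 = boto3.client("ec2", region_name=target_region)
response = ec2.copy_image(
    SourceRegion=source_region,
    SourceImageId="ami-0123456789abcdef0",
    Name="my-image-copy",
)
new_id = response["ImageId"]
# blocks until the copied AMI reaches the 'available' state
ec2.get_waiter("image_available").wait(ImageIds=[new_id])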
def add_annotations(self,annotations,**kwargs):
"""
Add an annotation to the QuantFigure.
Parameters:
annotations : dict or list(dict,)
            Annotations can be in the form of
{'date' : 'text'}
and the text will automatically be placed at the
right level on the chart
or
A Plotly fully defined annotation
kwargs :
fontcolor : str
Text color for annotations
fontsize : int
Text size for annotations
textangle : int
            Text angle
See https://plot.ly/python/reference/#layout-annotations
for a complete list of valid parameters.
"""
ann_kwargs=utils.check_kwargs(kwargs,get_annotation_kwargs(),{},clean_origin=True)
if type(annotations)==list:
self.layout['annotations']['values'].extend(annotations)
else:
self.layout['annotations']['values'].append(annotations)
if ann_kwargs:
self.layout['annotations']['params'].update(**ann_kwargs) | Add an annotation to the QuantFigure.
Parameters:
annotations : dict or list(dict,)
        Annotations can be in the form of
{'date' : 'text'}
and the text will automatically be placed at the
right level on the chart
or
A Plotly fully defined annotation
kwargs :
fontcolor : str
Text color for annotations
fontsize : int
Text size for annotations
textangle : int
            Text angle
See https://plot.ly/python/reference/#layout-annotations
for a complete list of valid parameters. | Below is the the instruction that describes the task:
### Input:
Add an annotation to the QuantFigure.
Parameters:
annotations : dict or list(dict,)
        Annotations can be in the form of
{'date' : 'text'}
and the text will automatically be placed at the
right level on the chart
or
A Plotly fully defined annotation
kwargs :
fontcolor : str
Text color for annotations
fontsize : int
Text size for annotations
textangle : int
            Text angle
See https://plot.ly/python/reference/#layout-annotations
for a complete list of valid parameters.
### Response:
def add_annotations(self,annotations,**kwargs):
"""
Add an annotation to the QuantFigure.
Parameters:
annotations : dict or list(dict,)
            Annotations can be in the form of
{'date' : 'text'}
and the text will automatically be placed at the
right level on the chart
or
A Plotly fully defined annotation
kwargs :
fontcolor : str
Text color for annotations
fontsize : int
Text size for annotations
textangle : int
                Text angle
See https://plot.ly/python/reference/#layout-annotations
for a complete list of valid parameters.
"""
ann_kwargs=utils.check_kwargs(kwargs,get_annotation_kwargs(),{},clean_origin=True)
if type(annotations)==list:
self.layout['annotations']['values'].extend(annotations)
else:
self.layout['annotations']['values'].append(annotations)
if ann_kwargs:
self.layout['annotations']['params'].update(**ann_kwargs) |
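A rough usage sketch (assumes cufflinks is installed and that QuantFig accepts a plain OHLC DataFrame like the one built here; column expectations may differ between versions).

import pandas as pd
import cufflinks as cf

idx = pd.date_range("2020-03-01", periods=5, freq="D")
df = pd.DataFrame({"open": [10, 11, 12, 11, 13], "high": [11, 12, 13, 12, 14],
                   "low": [9, 10, 11, 10, 12], "close": [11, 12, 11, 12, 13]}, index=idx)
qf = cf.QuantFig(df)

# date-keyed shorthand: the text is positioned automatically
qf.add_annotations({"2020-03-03": "earnings call"}, fontcolor="grey", fontsize=11)
# or a list of fully specified Plotly annotations
qf.add_annotations([{"x": "2020-03-04", "y": 12, "text": "split", "showarrow": True}])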
def _get_module_methods(module):
'''
Returns a methods list corresponding to the equations in the given
module. Each entry is a dictionary with keys 'output', 'args', and
'func' corresponding to the output, arguments, and function of the
method. The entries may optionally include 'assumptions' and
'overridden_by_assumptions' as keys, stating which assumptions are
required to use the method, and which assumptions mean the method
should not be used because it is overridden.
'''
# Set up the methods dict we will eventually return
methods = []
funcs = []
for item in inspect.getmembers(equations):
if (item[0][0] != '_' and '_from_' in item[0]):
func = item[1]
output = item[0][:item[0].find('_from_')]
# avoid returning duplicates
if func in funcs:
continue
else:
funcs.append(func)
args = tuple(getfullargspec(func).args)
try:
assumptions = tuple(func.assumptions)
except AttributeError:
raise NotImplementedError('function {0} in equations module has no'
' assumption '
'definition'.format(func.__name__))
try:
overridden_by_assumptions = func.overridden_by_assumptions
except AttributeError:
overridden_by_assumptions = ()
methods.append({
'func': func,
'args': args,
'output': output,
'assumptions': assumptions,
'overridden_by_assumptions': overridden_by_assumptions,
})
return methods | Returns a methods list corresponding to the equations in the given
module. Each entry is a dictionary with keys 'output', 'args', and
'func' corresponding to the output, arguments, and function of the
method. The entries may optionally include 'assumptions' and
'overridden_by_assumptions' as keys, stating which assumptions are
required to use the method, and which assumptions mean the method
should not be used because it is overridden. | Below is the the instruction that describes the task:
### Input:
Returns a methods list corresponding to the equations in the given
module. Each entry is a dictionary with keys 'output', 'args', and
'func' corresponding to the output, arguments, and function of the
method. The entries may optionally include 'assumptions' and
'overridden_by_assumptions' as keys, stating which assumptions are
required to use the method, and which assumptions mean the method
should not be used because it is overridden.
### Response:
def _get_module_methods(module):
'''
Returns a methods list corresponding to the equations in the given
module. Each entry is a dictionary with keys 'output', 'args', and
'func' corresponding to the output, arguments, and function of the
method. The entries may optionally include 'assumptions' and
'overridden_by_assumptions' as keys, stating which assumptions are
required to use the method, and which assumptions mean the method
should not be used because it is overridden.
'''
# Set up the methods dict we will eventually return
methods = []
funcs = []
for item in inspect.getmembers(equations):
if (item[0][0] != '_' and '_from_' in item[0]):
func = item[1]
output = item[0][:item[0].find('_from_')]
# avoid returning duplicates
if func in funcs:
continue
else:
funcs.append(func)
args = tuple(getfullargspec(func).args)
try:
assumptions = tuple(func.assumptions)
except AttributeError:
raise NotImplementedError('function {0} in equations module has no'
' assumption '
'definition'.format(func.__name__))
try:
overridden_by_assumptions = func.overridden_by_assumptions
except AttributeError:
overridden_by_assumptions = ()
methods.append({
'func': func,
'args': args,
'output': output,
'assumptions': assumptions,
'overridden_by_assumptions': overridden_by_assumptions,
})
return methods |
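The discovery above leans on two standard-library pieces: inspect.getmembers to walk a module and getfullargspec to recover argument names. A tiny self-contained version of that pattern; the dummy module and function stand in for the real equations module.

import inspect
import types
from inspect import getfullargspec

equations_demo = types.ModuleType("equations_demo")

def T_from_theta_p(theta, p):
    """dummy diagnostic used only for this illustration"""
    return theta * (p / 1e5) ** 0.2854

equations_demo.T_from_theta_p = T_from_theta_p

methods = []
for name, func in inspect.getmembers(equations_demo, inspect.isfunction):
    if not name.startswith('_') and '_from_' in name:
        methods.append({
            'func': func,
            'args': tuple(getfullargspec(func).args),
            'output': name[:name.find('_from_')],
        })
print(methods)  # one entry with output 'T' and args ('theta', 'p')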
def distance_of_time_in_words(from_time, accuracy=1, to_time=None):
"""
Represents distance of time in words
@param from_time: source time (in seconds from epoch)
@type from_time: C{int}, C{float} or C{datetime.datetime}
@param accuracy: level of accuracy (1..3), default=1
@type accuracy: C{int}
@param to_time: target time (in seconds from epoch),
default=None translates to current time
@type to_time: C{int}, C{float} or C{datetime.datetime}
@return: distance of time in words
@rtype: unicode
@raise ValueError: accuracy is lesser or equal zero
"""
current = False
if to_time is None:
current = True
to_time = datetime.datetime.now()
check_positive(accuracy, strict=True)
if not isinstance(from_time, datetime.datetime):
from_time = datetime.datetime.fromtimestamp(from_time)
if not isinstance(to_time, datetime.datetime):
to_time = datetime.datetime.fromtimestamp(to_time)
if from_time.tzinfo and not to_time.tzinfo:
to_time = to_time.replace(tzinfo=from_time.tzinfo)
dt_delta = to_time - from_time
difference = dt_delta.days*86400 + dt_delta.seconds
minutes_orig = int(abs(difference)/60.0)
hours_orig = int(abs(difference)/3600.0)
days_orig = int(abs(difference)/86400.0)
in_future = from_time > to_time
words = []
values = []
alternatives = []
days = days_orig
hours = hours_orig - days_orig*24
words.append(u"%d %s" % (days, numeral.choose_plural(days, DAY_VARIANTS)))
values.append(days)
words.append(u"%d %s" %
(hours, numeral.choose_plural(hours, HOUR_VARIANTS)))
values.append(hours)
days == 0 and hours == 1 and current and alternatives.append(u"час")
minutes = minutes_orig - hours_orig*60
words.append(u"%d %s" % (minutes,
numeral.choose_plural(minutes, MINUTE_VARIANTS)))
values.append(minutes)
days == 0 and hours == 0 and minutes == 1 and current and \
alternatives.append(u"минуту")
    # drop trailing zero components from values and words
while values and not values[-1]:
values.pop()
words.pop()
    # drop leading zero components from values and words
while values and not values[0]:
values.pop(0)
words.pop(0)
limit = min(accuracy, len(words))
real_words = words[:limit]
real_values = values[:limit]
    # drop trailing zeros once more
while real_values and not real_values[-1]:
real_values.pop()
real_words.pop()
limit -= 1
real_str = u" ".join(real_words)
    # alternative wordings are needed only when real_words holds a single value
    # and, in addition, when the current time is being used
alter_str = limit == 1 and current and alternatives and \
alternatives[0]
_result_str = alter_str or real_str
result_str = in_future and u"%s %s" % (PREFIX_IN, _result_str) \
or u"%s %s" % (_result_str, SUFFIX_AGO)
    # if less than a minute has passed, real_words is empty, so
    # alternatives[0] must be used instead of result_str
zero_str = minutes == 0 and not real_words and \
(in_future and u"менее чем через минуту"
or u"менее минуты назад")
    # use yesterday/day before yesterday/tomorrow/day after tomorrow
    # when days is 1..2 and real_words holds a single value
day_alternatives = DAY_ALTERNATIVES.get(days, False)
alternate_day = day_alternatives and current and limit == 1 and \
((in_future and day_alternatives[1])
or day_alternatives[0])
final_str = not real_words and zero_str or alternate_day or result_str
return final_str | Represents distance of time in words
@param from_time: source time (in seconds from epoch)
@type from_time: C{int}, C{float} or C{datetime.datetime}
@param accuracy: level of accuracy (1..3), default=1
@type accuracy: C{int}
@param to_time: target time (in seconds from epoch),
default=None translates to current time
@type to_time: C{int}, C{float} or C{datetime.datetime}
@return: distance of time in words
@rtype: unicode
@raise ValueError: accuracy is lesser or equal zero | Below is the the instruction that describes the task:
### Input:
Represents distance of time in words
@param from_time: source time (in seconds from epoch)
@type from_time: C{int}, C{float} or C{datetime.datetime}
@param accuracy: level of accuracy (1..3), default=1
@type accuracy: C{int}
@param to_time: target time (in seconds from epoch),
default=None translates to current time
@type to_time: C{int}, C{float} or C{datetime.datetime}
@return: distance of time in words
@rtype: unicode
@raise ValueError: accuracy is lesser or equal zero
### Response:
def distance_of_time_in_words(from_time, accuracy=1, to_time=None):
"""
Represents distance of time in words
@param from_time: source time (in seconds from epoch)
@type from_time: C{int}, C{float} or C{datetime.datetime}
@param accuracy: level of accuracy (1..3), default=1
@type accuracy: C{int}
@param to_time: target time (in seconds from epoch),
default=None translates to current time
@type to_time: C{int}, C{float} or C{datetime.datetime}
@return: distance of time in words
@rtype: unicode
@raise ValueError: accuracy is lesser or equal zero
"""
current = False
if to_time is None:
current = True
to_time = datetime.datetime.now()
check_positive(accuracy, strict=True)
if not isinstance(from_time, datetime.datetime):
from_time = datetime.datetime.fromtimestamp(from_time)
if not isinstance(to_time, datetime.datetime):
to_time = datetime.datetime.fromtimestamp(to_time)
if from_time.tzinfo and not to_time.tzinfo:
to_time = to_time.replace(tzinfo=from_time.tzinfo)
dt_delta = to_time - from_time
difference = dt_delta.days*86400 + dt_delta.seconds
minutes_orig = int(abs(difference)/60.0)
hours_orig = int(abs(difference)/3600.0)
days_orig = int(abs(difference)/86400.0)
in_future = from_time > to_time
words = []
values = []
alternatives = []
days = days_orig
hours = hours_orig - days_orig*24
words.append(u"%d %s" % (days, numeral.choose_plural(days, DAY_VARIANTS)))
values.append(days)
words.append(u"%d %s" %
(hours, numeral.choose_plural(hours, HOUR_VARIANTS)))
values.append(hours)
days == 0 and hours == 1 and current and alternatives.append(u"час")
minutes = minutes_orig - hours_orig*60
words.append(u"%d %s" % (minutes,
numeral.choose_plural(minutes, MINUTE_VARIANTS)))
values.append(minutes)
days == 0 and hours == 0 and minutes == 1 and current and \
alternatives.append(u"минуту")
# strip trailing zeros from values and words
while values and not values[-1]:
values.pop()
words.pop()
# strip leading zeros from values and words
while values and not values[0]:
values.pop(0)
words.pop(0)
limit = min(accuracy, len(words))
real_words = words[:limit]
real_values = values[:limit]
# strip trailing zeros again
while real_values and not real_values[-1]:
real_values.pop()
real_words.pop()
limit -= 1
real_str = u" ".join(real_words)
# alternative wordings are needed only when real_words holds a single value
# and, in addition, when the current time is used
alter_str = limit == 1 and current and alternatives and \
alternatives[0]
_result_str = alter_str or real_str
result_str = in_future and u"%s %s" % (PREFIX_IN, _result_str) \
or u"%s %s" % (_result_str, SUFFIX_AGO)
# if less than a minute has passed, real_words is empty, so
# alternatives[0] must be used instead of result_str
zero_str = minutes == 0 and not real_words and \
(in_future and u"менее чем через минуту"
or u"менее минуты назад")
# use yesterday/the day before yesterday/tomorrow/the day after tomorrow
# when days is 1..2 and real_words holds a single value
day_alternatives = DAY_ALTERNATIVES.get(days, False)
alternate_day = day_alternatives and current and limit == 1 and \
((in_future and day_alternatives[1])
or day_alternatives[0])
final_str = not real_words and zero_str or alternate_day or result_str
return final_str |
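A minimal usage sketch, assuming the function above is exposed as pytils.dt.distance_of_time_in_words; the exact output strings depend on the elapsed interval and the locale data shipped with the library:

import datetime
from pytils import dt

ninety_min_ago = datetime.datetime.now() - datetime.timedelta(minutes=90)
print(dt.distance_of_time_in_words(ninety_min_ago))              # e.g. u"час назад"
print(dt.distance_of_time_in_words(ninety_min_ago, accuracy=2))  # e.g. u"1 час 30 минут назад"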
def _xdr_read_address(unpacker):
"""Reads a stellar address and returns the string representing the address
This method assumes the encoded address is a public address (starting with G)
"""
# First 4 bytes are the address type
address_type = unpacker.unpack_uint()
if address_type != 0:
raise ValueError("Unsupported address type")
return address_from_public_key(unpacker.unpack_fopaque(32)) | Reads a stellar address and returns the string representing the address
This method assumes the encoded address is a public address (starting with G) | Below is the the instruction that describes the task:
### Input:
Reads a stellar address and returns the string representing the address
This method assumes the encoded address is a public address (starting with G)
### Response:
def _xdr_read_address(unpacker):
"""Reads a stellar address and returns the string representing the address
This method assumes the encoded address is a public address (starting with G)
"""
# First 4 bytes are the address type
address_type = unpacker.unpack_uint()
if address_type != 0:
raise ValueError("Unsupported address type")
return address_from_public_key(unpacker.unpack_fopaque(32)) |
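A self-contained round-trip sketch of the expected wire layout using the standard-library xdrlib module; address_from_public_key is the library's own encoder and is replaced by a hypothetical stand-in here:

import xdrlib

def address_from_public_key(raw_key):  # hypothetical stand-in for the real encoder
    return "G" + raw_key.hex().upper()

packer = xdrlib.Packer()
packer.pack_uint(0)                    # address type 0: a public key ("G..." address)
packer.pack_fopaque(32, b"\x01" * 32)  # the 32-byte raw public key

unpacker = xdrlib.Unpacker(packer.get_buffer())
assert unpacker.unpack_uint() == 0
print(address_from_public_key(unpacker.unpack_fopaque(32)))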
def extend(validator_cls):
"""
Extend the given :class:`jsonschema.IValidator` with the Seep layer.
"""
Validator = jsonschema.validators.extend(
validator_cls, {
"properties": _properties_with_defaults(validator_cls),
}
)
class Blueprinter(Validator):
def instantiate(self, data):
self.validate(data)
return data
return Blueprinter | Extend the given :class:`jsonschema.IValidator` with the Seep layer. | Below is the the instruction that describes the task:
### Input:
Extend the given :class:`jsonschema.IValidator` with the Seep layer.
### Response:
def extend(validator_cls):
"""
Extend the given :class:`jsonschema.IValidator` with the Seep layer.
"""
Validator = jsonschema.validators.extend(
validator_cls, {
"properties": _properties_with_defaults(validator_cls),
}
)
class Blueprinter(Validator):
def instantiate(self, data):
self.validate(data)
return data
return Blueprinter |
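A hedged usage sketch; it assumes the surrounding seep module supplies _properties_with_defaults, and Draft4Validator is only one possible base validator:

import jsonschema

Blueprinter = extend(jsonschema.Draft4Validator)
blueprint = Blueprinter({
    "type": "object",
    "properties": {"name": {"type": "string"}},
})
print(blueprint.instantiate({"name": "seep"}))  # validates first, then returns the data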
def instagram_config(self, id, secret, scope=None, **_):
""" Get config dictionary for instagram oauth """
scope = scope if scope else 'basic'
token_params = dict(scope=scope)
config = dict(
# request_token_url=None,
access_token_url='/oauth/access_token/',
authorize_url='/oauth/authorize/',
base_url='https://api.instagram.com/',
consumer_key=id,
consumer_secret=secret,
request_token_params=token_params
)
return config | Get config dictionary for instagram oauth | Below is the the instruction that describes the task:
### Input:
Get config dictionary for instagram oauth
### Response:
def instagram_config(self, id, secret, scope=None, **_):
""" Get config dictionary for instagram oauth """
scope = scope if scope else 'basic'
token_params = dict(scope=scope)
config = dict(
# request_token_url=None,
access_token_url='/oauth/access_token/',
authorize_url='/oauth/authorize/',
base_url='https://api.instagram.com/',
consumer_key=id,
consumer_secret=secret,
request_token_params=token_params
)
return config |
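A hedged sketch of how the returned dict is typically handed to flask-oauthlib's client; `services` is a hypothetical instance of the class this method belongs to, and the credentials are placeholders:

from flask_oauthlib.client import OAuth

oauth = OAuth()
config = services.instagram_config(id="CLIENT_ID", secret="CLIENT_SECRET",
                                   scope="basic public_content")
instagram = oauth.remote_app("instagram", **config)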
def old_projection(model, data, theta=None, chain=None, n=100, extents=None,
uncertainties=True, title=None, fig=None, figsize=None):
"""
Project the maximum likelihood values and sampled posterior points as
spectra.
:param model:
The model employed.
:type model:
:class:`sick.models.Model`
:param data:
The observed spectra.
:type data:
iterable of :class:`sick.specutils.Spectrum1D` objects
:param theta: [optional]
The optimised model parameters given the data. Either theta
or chain should be given.
:type theta:
dict
:param chain: [optional]
The chain of sampled parameters.
:type chain:
:class:`numpy.ndarray`
:param extents: [optional]
The wavelength extents to plot for each channel in the form of
[(min_chan_1, max_chan_1), ..., (min_chan_N, max_chan_N)]
:type extents:
tuple or None
:param uncertainties: [optional]
Show uncertainty of the data points.
:type uncertainties:
bool
:param title: [optional]
Title to set for the top axes.
:type title:
str
:param fig: [optional]
Overplot onto the provided figure object.
:type fig:
:class:`matplotlib.Figure` or None
:param figsize: [optional]
The figure size (x-dimension, y-dimension) in inches.
:type figsize:
tuple or None
:raises ValueError:
If a ``fig`` is provided with the incorrect number of axes.
:raise TypeError:
If the ``data`` are not provided in the correct type.
:returns:
The projection figure.
:rtype:
:class:`matplotlib.Figure`
"""
if not isinstance(data, (tuple, list)) or \
any([not isinstance(each, specutils.Spectrum1D) for each in data]):
raise TypeError("Data must be a list-type of Spectrum1D objects.")
K = len(data)
factor = 2.0
lbdim = 0.5 * factor
trdim = 0.2 * factor
whspace = 0.10
width = np.max([len(each.disp) for each in data])/500.
height = factor*K + factor * (K - 1.) * whspace
dimy = lbdim + height + trdim
dimx = lbdim + width + trdim
if figsize is None:
figsize = (dimx, dimy)
if fig is None:
fig, axes = plt.subplots(K, 1, figsize=figsize)
else:
try:
axes = np.array(fig.axes).reshape((K, 1))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"parameters K={1}".format(len(fig.axes), K))
if chain is not None:
flat_chain = chain.reshape(-1, len(model.parameters))
map_theta = np.mean(flat_chain, axis=0)
try:
map_fluxes = model(data=data, **model._dictify_theta(map_theta))
except:
logger.warn("Could not draw MAP fluxes from posterior")
if n > 0:
# Draw samples from sampler.chain and compute spectra for them
sampled_fluxes = []
n_samples = len(flat_chain)
for i in range(n):
sampled_theta = dict(zip(model.parameters,
flat_chain[np.random.randint(0, n_samples)]))
try:
sampler_flux = model(data=data, **sampled_theta)
except:
logger.warn("Could not draw sample flux from posterior")
continue
else:
sampled_fluxes.append(sampler_flux)
elif theta is not None:
sampled_fluxes = []
map_fluxes = model(data=data, **model._dictify_theta(theta))
else:
raise ValueError("either theta or chain should be given")
if len(data) == 1:
axes = [axes]
for k, (map_flux, observed_spectrum) in enumerate(zip(map_fluxes, data)):
ax = axes[k]
# Draw the random samples from the chain
if n > 0:
for sampled_flux in sampled_fluxes:
ax.plot(observed_spectrum.disp, sampled_flux[k], color="r",
alpha=0.5, zorder=90)
# Draw the ML spectra
ax.plot(observed_spectrum.disp, map_flux, color="r", lw=2, zorder=100)
# Plot the data
if uncertainties:
ax.fill_between(observed_spectrum.disp,
observed_spectrum.flux - observed_spectrum.variance**0.5,
observed_spectrum.flux + observed_spectrum.variance**0.5,
facecolor="#cccccc", edgecolor="#666666", zorder=-1)
ax.plot(observed_spectrum.disp, observed_spectrum.flux, color="k",
zorder=10)
# By default only show common overlap between the model and data
if extents is None:
finite_data = np.isfinite(observed_spectrum.flux)
finite_model = np.isfinite(map_flux)
finite_points = (finite_model, finite_data)
x_extent = [
np.max([observed_spectrum.disp[s][0] for s in finite_points]),
np.min([observed_spectrum.disp[s][-1] for s in finite_points]),
]
indices = observed_spectrum.disp.searchsorted(x_extent)
finite_flux = observed_spectrum.flux[indices[0]:indices[1]]
if len(finite_flux) > 0:
#y_extent = [
# 0.9 * np.min(finite_flux[np.isfinite(finite_flux)]),
# 1.1 * np.max(finite_flux[np.isfinite(finite_flux)])
#]
ax.set_ylim([0.9, 1.1] * np.percentile(finite_flux[np.isfinite(finite_flux)], [0.5, 99.5]))
ax.set_xlim(x_extent)
else:
ax.set_xlim(extents[k][0])
ax.set_ylim(extents[k][1])
# Labels and ticks
if not (k < K - 1):
ax.set_xlabel("Wavelength, $\lambda$ ($\AA$)")
ax.set_ylabel("Flux, $F_\lambda$")
ax.yaxis.set_label_coords(-0.05, 0.5)
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
[l.set_rotation(45) for l in ax.get_yticklabels()]
if title is not None and isinstance(title, (str, unicode)):
axes[0].set_title(title)
return fig | Project the maximum likelihood values and sampled posterior points as
spectra.
:param model:
The model employed.
:type model:
:class:`sick.models.Model`
:param data:
The observed spectra.
:type data:
iterable of :class:`sick.specutils.Spectrum1D` objects
:param theta: [optional]
The optimised model parameters given the data. Either theta
or chain should be given.
:type theta:
dict
:param chain: [optional]
The chain of sampled parameters.
:type chain:
:class:`numpy.ndarray`
:param extents: [optional]
The wavelength extents to plot for each channel in the form of
[(min_chan_1, max_chan_1), ..., (min_chan_N, max_chan_N)]
:type extents:
tuple or None
:param uncertainties: [optional]
Show uncertainty of the data points.
:type uncertainties:
bool
:param title: [optional]
Title to set for the top axes.
:type title:
str
:param fig: [optional]
Overplot onto the provided figure object.
:type fig:
:class:`matplotlib.Figure` or None
:param figsize: [optional]
The figure size (x-dimension, y-dimension) in inches.
:type figsize:
tuple or None
:raises ValueError:
If a ``fig`` is provided with the incorrect number of axes.
:raise TypeError:
If the ``data`` are not provided in the correct type.
:returns:
The projection figure.
:rtype:
:class:`matplotlib.Figure` | Below is the the instruction that describes the task:
### Input:
Project the maximum likelihood values and sampled posterior points as
spectra.
:param model:
The model employed.
:type model:
:class:`sick.models.Model`
:param data:
The observed spectra.
:type data:
iterable of :class:`sick.specutils.Spectrum1D` objects
:param theta: [optional]
The optimised model parameters given the data. Either theta
or chain should be given.
:type theta:
dict
:param chain: [optional]
The chain of sampled parameters.
:type chain:
:class:`numpy.ndarray`
:param extents: [optional]
The wavelength extents to plot for each channel in the form of
[(min_chan_1, max_chan_1), ..., (min_chan_N, max_chan_N)]
:type extents:
tuple or None
:param uncertainties: [optional]
Show uncertainty of the data points.
:type uncertainties:
bool
:param title: [optional]
Title to set for the top axes.
:type title:
str
:param fig: [optional]
Overplot onto the provided figure object.
:type fig:
:class:`matplotlib.Figure` or None
:param figsize: [optional]
The figure size (x-dimension, y-dimension) in inches.
:type figsize:
tuple or None
:raises ValueError:
If a ``fig`` is provided with the incorrect number of axes.
:raise TypeError:
If the ``data`` are not provided in the correct type.
:returns:
The projection figure.
:rtype:
:class:`maplotlib.Figure`
### Response:
def old_projection(model, data, theta=None, chain=None, n=100, extents=None,
uncertainties=True, title=None, fig=None, figsize=None):
"""
Project the maximum likelihood values and sampled posterior points as
spectra.
:param model:
The model employed.
:type model:
:class:`sick.models.Model`
:param data:
The observed spectra.
:type data:
iterable of :class:`sick.specutils.Spectrum1D` objects
:param theta: [optional]
The optimised model parameters given the data. Either theta
or chain should be given.
:type theta:
dict
:param chain: [optional]
The chain of sampled parameters.
:type chain:
:class:`numpy.ndarray`
:param extents: [optional]
The wavelength extents to plot for each channel in the form of
[(min_chan_1, max_chan_1), ..., (min_chan_N, max_chan_N)]
:type extents:
tuple or None
:param uncertainties: [optional]
Show uncertainty of the data points.
:type uncertainties:
bool
:param title: [optional]
Title to set for the top axes.
:type title:
str
:param fig: [optional]
Overplot onto the provided figure object.
:type fig:
:class:`matplotlib.Figure` or None
:param figsize: [optional]
The figure size (x-dimension, y-dimension) in inches.
:type figsize:
tuple or None
:raises ValueError:
If a ``fig`` is provided with the incorrect number of axes.
:raise TypeError:
If the ``data`` are not provided in the correct type.
:returns:
The projection figure.
:rtype:
:class:`matplotlib.Figure`
"""
if not isinstance(data, (tuple, list)) or \
any([not isinstance(each, specutils.Spectrum1D) for each in data]):
raise TypeError("Data must be a list-type of Spectrum1D objects.")
K = len(data)
factor = 2.0
lbdim = 0.5 * factor
trdim = 0.2 * factor
whspace = 0.10
width = np.max([len(each.disp) for each in data])/500.
height = factor*K + factor * (K - 1.) * whspace
dimy = lbdim + height + trdim
dimx = lbdim + width + trdim
if figsize is None:
figsize = (dimx, dimy)
if fig is None:
fig, axes = plt.subplots(K, 1, figsize=figsize)
else:
try:
axes = np.array(fig.axes).reshape((K, 1))
except:
raise ValueError("Provided figure has {0} axes, but data has "
"parameters K={1}".format(len(fig.axes), K))
if chain is not None:
flat_chain = chain.reshape(-1, len(model.parameters))
map_theta = np.mean(flat_chain, axis=0)
try:
map_fluxes = model(data=data, **model._dictify_theta(map_theta))
except:
logger.warn("Could not draw MAP fluxes from posterior")
if n > 0:
# Draw samples from sampler.chain and compute spectra for them
sampled_fluxes = []
n_samples = len(flat_chain)
for i in range(n):
sampled_theta = dict(zip(model.parameters,
flat_chain[np.random.randint(0, n_samples)]))
try:
sampler_flux = model(data=data, **sampled_theta)
except:
logger.warn("Could not draw sample flux from posterior")
continue
else:
sampled_fluxes.append(sampler_flux)
elif theta is not None:
sampled_fluxes = []
map_fluxes = model(data=data, **model._dictify_theta(theta))
else:
raise ValueError("either theta or chain should be given")
if len(data) == 1:
axes = [axes]
for k, (map_flux, observed_spectrum) in enumerate(zip(map_fluxes, data)):
ax = axes[k]
# Draw the random samples from the chain
if n > 0:
for sampled_flux in sampled_fluxes:
ax.plot(observed_spectrum.disp, sampled_flux[k], color="r",
alpha=0.5, zorder=90)
# Draw the ML spectra
ax.plot(observed_spectrum.disp, map_flux, color="r", lw=2, zorder=100)
# Plot the data
if uncertainties:
ax.fill_between(observed_spectrum.disp,
observed_spectrum.flux - observed_spectrum.variance**0.5,
observed_spectrum.flux + observed_spectrum.variance**0.5,
facecolor="#cccccc", edgecolor="#666666", zorder=-1)
ax.plot(observed_spectrum.disp, observed_spectrum.flux, color="k",
zorder=10)
# By default only show common overlap between the model and data
if extents is None:
finite_data = np.isfinite(observed_spectrum.flux)
finite_model = np.isfinite(map_flux)
finite_points = (finite_model, finite_data)
x_extent = [
np.max([observed_spectrum.disp[s][0] for s in finite_points]),
np.min([observed_spectrum.disp[s][-1] for s in finite_points]),
]
indices = observed_spectrum.disp.searchsorted(x_extent)
finite_flux = observed_spectrum.flux[indices[0]:indices[1]]
if len(finite_flux) > 0:
#y_extent = [
# 0.9 * np.min(finite_flux[np.isfinite(finite_flux)]),
# 1.1 * np.max(finite_flux[np.isfinite(finite_flux)])
#]
ax.set_ylim([0.9, 1.1] * np.percentile(finite_flux[np.isfinite(finite_flux)], [0.5, 99.5]))
ax.set_xlim(x_extent)
else:
ax.set_xlim(extents[k][0])
ax.set_ylim(extents[k][1])
# Labels and ticks
if not (k < K - 1):
ax.set_xlabel("Wavelength, $\lambda$ ($\AA$)")
ax.set_ylabel("Flux, $F_\lambda$")
ax.yaxis.set_label_coords(-0.05, 0.5)
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.yaxis.set_major_locator(MaxNLocator(5))
[l.set_rotation(45) for l in ax.get_yticklabels()]
if title is not None and isinstance(title, (str, unicode)):
axes[0].set_title(title)
return fig |
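The default axes limits are the subtle part of the plotting loop above; here is a self-contained sketch of the same "common overlap" computation, using hypothetical dispersion and flux arrays:

import numpy as np

disp = np.linspace(4000, 5000, 11)
data_flux = np.where(disp > 4100, 1.0, np.nan)   # data missing at the blue end
model_flux = np.where(disp < 4900, 1.0, np.nan)  # model missing at the red end

finite_points = (np.isfinite(model_flux), np.isfinite(data_flux))
x_extent = [
    np.max([disp[s][0] for s in finite_points]),
    np.min([disp[s][-1] for s in finite_points]),
]
print(x_extent)  # [4200.0, 4800.0] -- only the overlapping wavelength range is shown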
def default(self):
"""Returns the static value that this defaults to."""
if self.MUTABLE:
return copy.deepcopy(self._default)
else:
return self._default | Returns the static value that this defaults to. | Below is the the instruction that describes the task:
### Input:
Returns the static value that this defaults to.
### Response:
def default(self):
"""Returns the static value that this defaults to."""
if self.MUTABLE:
return copy.deepcopy(self._default)
else:
return self._default |
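A minimal self-contained sketch of why MUTABLE defaults are deep-copied; ListField is a hypothetical field class that follows the same convention:

import copy

class ListField:
    MUTABLE = True
    _default = []

    def default(self):
        if self.MUTABLE:
            return copy.deepcopy(self._default)
        return self._default

a, b = ListField().default(), ListField().default()
a.append("x")
print(b)  # [] -- the shared class-level default is not mutated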
def nFreeParams(self):
"""Count the number of free parameters in the active model."""
nF = 0
pars = self.params()
for par in pars:
if par.isFree():
nF += 1
return nF | Count the number of free parameters in the active model. | Below is the the instruction that describes the task:
### Input:
Count the number of free parameters in the active model.
### Response:
def nFreeParams(self):
"""Count the number of free parameters in the active model."""
nF = 0
pars = self.params()
for par in pars:
if par.isFree():
nF += 1
return nF |
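An equivalent one-line idiom, shown with tiny stand-in parameter objects rather than the real active-model API:

class Par:  # hypothetical stand-in for a model parameter
    def __init__(self, free):
        self._free = free

    def isFree(self):
        return self._free

pars = [Par(True), Par(False), Par(True)]
print(sum(1 for par in pars if par.isFree()))  # 2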
def assemble_flash_code(self, asm):
"""
assemble the given code and program the Flash
"""
stream = StringIO(asm)
worker = assembler.Assembler(self.processor, stream)
try:
result = worker.assemble()
except BaseException as e:
return e, None
self.flash.program(result)
return None, result | assemble the given code and program the Flash | Below is the the instruction that describes the task:
### Input:
assemble the given code and program the Flash
### Response:
def assemble_flash_code(self, asm):
"""
assemble the given code and program the Flash
"""
stream = StringIO(asm)
worker = assembler.Assembler(self.processor, stream)
try:
result = worker.assemble()
except BaseException as e:
return e, None
self.flash.program(result)
return None, result |
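A self-contained sketch of the same "return the exception instead of raising" convention, without the assembler or flash dependencies:

from io import StringIO

def parse_numbers(text):
    try:
        result = [int(line) for line in StringIO(text)]
    except BaseException as e:
        return e, None
    return None, result

print(parse_numbers("1\n2\n3\n"))   # (None, [1, 2, 3])
error, result = parse_numbers("1\nx\n")
print(error, result)                # invalid literal for int() ... None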
def to_vobject(self, filename=None, uid=None):
"""Return the vCard corresponding to the uid
filename -- unused, for API compatibility only
uid -- the UID to get (required)
"""
self._update()
return self._to_vcard(self._book[uid.split('@')[0]]) | Return the vCard corresponding to the uid
filename -- unused, for API compatibility only
uid -- the UID to get (required) | Below is the the instruction that describes the task:
### Input:
Return the vCard corresponding to the uid
filename -- unused, for API compatibility only
uid -- the UID to get (required)
### Response:
def to_vobject(self, filename=None, uid=None):
"""Return the vCard corresponding to the uid
filename -- unused, for API compatibility only
uid -- the UID to get (required)
"""
self._update()
return self._to_vcard(self._book[uid.split('@')[0]]) |
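The uid is expected to carry the book key before an '@' suffix; a one-line sketch of the lookup key that is actually used (the uid value is hypothetical):

uid = "a1b2c3@abook.example.org"
print(uid.split('@')[0])  # 'a1b2c3' -- the key used to index self._book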
def get_attr(obj, string_rep, default=_get_attr_raise_on_attribute_error, separator="."):
""" getattr via a chain of attributes like so:
>>> import datetime
>>> some_date = datetime.date.today()
>>> get_attr(some_date, "month.numerator.__doc__")
'int(x[, base]) -> integer\n\nConvert a string or number to an integer, ...
"""
attribute_chain = string_rep.split(separator)
current_obj = obj
for attr in attribute_chain:
try:
current_obj = getattr(current_obj, attr)
except AttributeError:
if default is _get_attr_raise_on_attribute_error:
raise AttributeError(
"Bad attribute \"{}\" in chain: \"{}\"".format(attr, string_rep)
)
return default
return current_obj | getattr via a chain of attributes like so:
>>> import datetime
>>> some_date = datetime.date.today()
>>> get_attr(some_date, "month.numerator.__doc__")
'int(x[, base]) -> integer\n\nConvert a string or number to an integer, ... | Below is the the instruction that describes the task:
### Input:
getattr via a chain of attributes like so:
>>> import datetime
>>> some_date = datetime.date.today()
>>> get_attr(some_date, "month.numerator.__doc__")
'int(x[, base]) -> integer\n\nConvert a string or number to an integer, ...
### Response:
def get_attr(obj, string_rep, default=_get_attr_raise_on_attribute_error, separator="."):
""" getattr via a chain of attributes like so:
>>> import datetime
>>> some_date = datetime.date.today()
>>> get_attr(some_date, "month.numerator.__doc__")
'int(x[, base]) -> integer\n\nConvert a string or number to an integer, ...
"""
attribute_chain = string_rep.split(separator)
current_obj = obj
for attr in attribute_chain:
try:
current_obj = getattr(current_obj, attr)
except AttributeError:
if default is _get_attr_raise_on_attribute_error:
raise AttributeError(
"Bad attribute \"{}\" in chain: \"{}\"".format(attr, string_rep)
)
return default
return current_obj |
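A runnable usage sketch of the chained lookup defined above, including the default fallback:

import datetime

some_date = datetime.date(2020, 1, 15)
print(get_attr(some_date, "month.numerator"))           # 1
print(get_attr(some_date, "month.no_such", default=0))  # 0 instead of an AttributeError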
def layer(command=None, *args):
'hints the start of a new layer'
if not command:
return eval([['hint', 'layer']]) # fall back to builtin layer macro
else:
lst = [['layer']]
for arg in args:
lst.append([command, arg])
lst.append(['layer'])
return eval(lst) | hints the start of a new layer | Below is the the instruction that describes the task:
### Input:
hints the start of a new layer
### Response:
def layer(command=None, *args):
'hints the start of a new layer'
if not command:
return eval([['hint', 'layer']]) # fall back to builtin layer macro
else:
lst = [['layer']]
for arg in args:
lst.append([command, arg])
lst.append(['layer'])
return eval(lst) |
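A self-contained sketch of the command list the macro expands to; build_layer_list mirrors the logic above without calling the DSL's eval:

def build_layer_list(command=None, *args):
    if not command:
        return [['hint', 'layer']]
    lst = [['layer']]
    for arg in args:
        lst.append([command, arg])
    lst.append(['layer'])
    return lst

print(build_layer_list())                          # [['hint', 'layer']]
print(build_layer_list('include', 'a.ps', 'b.ps'))
# [['layer'], ['include', 'a.ps'], ['include', 'b.ps'], ['layer']]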
def get_item_sh(self, item, roles=None, date_field=None):
"""
Add sorting hat enrichment fields for different roles
If there are no roles, just add the author fields.
"""
eitem_sh = {} # Item enriched
author_field = self.get_field_author()
if not roles:
roles = [author_field]
if not date_field:
item_date = str_to_datetime(item[self.get_field_date()])
else:
item_date = str_to_datetime(item[date_field])
users_data = self.get_users_data(item)
for rol in roles:
if rol in users_data:
identity = self.get_sh_identity(item, rol)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol))
if not eitem_sh[rol + '_org_name']:
eitem_sh[rol + '_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_name']:
eitem_sh[rol + '_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_user_name']:
eitem_sh[rol + '_user_name'] = SH_UNKNOWN_VALUE
# Add the author field common in all data sources
rol_author = 'author'
if author_field in users_data and author_field != rol_author:
identity = self.get_sh_identity(item, author_field)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol_author))
if not eitem_sh['author_org_name']:
eitem_sh['author_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_name']:
eitem_sh['author_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_user_name']:
eitem_sh['author_user_name'] = SH_UNKNOWN_VALUE
return eitem_sh | Add sorting hat enrichment fields for different roles
If there are no roles, just add the author fields. | Below is the the instruction that describes the task:
### Input:
Add sorting hat enrichment fields for different roles
If there are no roles, just add the author fields.
### Response:
def get_item_sh(self, item, roles=None, date_field=None):
"""
Add sorting hat enrichment fields for different roles
If there are no roles, just add the author fields.
"""
eitem_sh = {} # Item enriched
author_field = self.get_field_author()
if not roles:
roles = [author_field]
if not date_field:
item_date = str_to_datetime(item[self.get_field_date()])
else:
item_date = str_to_datetime(item[date_field])
users_data = self.get_users_data(item)
for rol in roles:
if rol in users_data:
identity = self.get_sh_identity(item, rol)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol))
if not eitem_sh[rol + '_org_name']:
eitem_sh[rol + '_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_name']:
eitem_sh[rol + '_name'] = SH_UNKNOWN_VALUE
if not eitem_sh[rol + '_user_name']:
eitem_sh[rol + '_user_name'] = SH_UNKNOWN_VALUE
# Add the author field common in all data sources
rol_author = 'author'
if author_field in users_data and author_field != rol_author:
identity = self.get_sh_identity(item, author_field)
eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol_author))
if not eitem_sh['author_org_name']:
eitem_sh['author_org_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_name']:
eitem_sh['author_name'] = SH_UNKNOWN_VALUE
if not eitem_sh['author_user_name']:
eitem_sh['author_user_name'] = SH_UNKNOWN_VALUE
return eitem_sh |